blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40b3fb764eacd4c5c449999356ba8136fbaeaba5 | 3b27795a631ec2ce69b3ef4c49a67526d73934b5 | /pi/DiscoveryBotApi/build/lib.linux-armv7l-2.7/discovery_bot/light.py | 62726005e9ea5a9f395cef5e9297609dd0fd86c0 | [] | no_license | pi-bot/tiddlybotv1.1 | e226f4fba23c0ab4b866b68aceba3e063e2a89f8 | c2bc54a034659d723ef50ecd028645dddc85c1de | refs/heads/master | 2021-01-11T14:45:43.771643 | 2017-02-28T16:30:41 | 2017-02-28T16:30:41 | 80,209,999 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | import time
import RPi.GPIO as io
import pins
class Light(object):
    """Drive a single LED attached to a GPIO pin (BCM numbering).

    ``colour`` is the BCM pin number the LED is wired to; the pin is
    configured as an output and driven low (off) on construction.
    """

    def __init__(self, colour):
        self.colour = colour  # BCM pin number for this LED
        io.setmode(io.BCM)
        io.setup(self.colour, io.OUT)
        # Start with the LED switched off.
        io.output(self.colour, False)

    def cleanup(self):
        """Force the LED off (safe state before releasing the pin)."""
        io.output(self.colour, False)

    def on(self, time_int=None):
        """Switch the LED on.

        If ``time_int`` (seconds) is given, keep it lit for that long and
        then switch it off again; otherwise leave it on.
        """
        io.output(self.colour, True)
        # Identity test (`is not None`) instead of `!= None`, so a
        # time_int of 0 still takes the timed branch.
        if time_int is not None:
            time.sleep(time_int)
            io.output(self.colour, False)

    def off(self):
        """Switch the LED off."""
        io.output(self.colour, False)
#red = Light(pins.LED_RED)
#blue = Light(pins.LED_BLUE)
#green = Light(pins.LED_GREEN)
#while True:
# red.on()
# blue.on()
# green.on()
# time.sleep(1)
# red.off()
# blue.off()
# green.off()
# time.sleep(1)
| [
"harry@pibot.org"
] | harry@pibot.org |
f19ffd2d0507b157f08f52ba3d59cf3a0d343ef3 | ca59d18e503ef22fbc920c6de48ffc8eac5a1443 | /tools/pytorch-quantization/pytorch_quantization/nn/modules/quant_conv.py | 9aaf19a265cb987212f13e71af3c256ce3cfe589 | [
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"ISC",
"MIT"
] | permissive | boh-inspur/TensorRT | 9fc0ae0ad4e31da040d10728b63d9dc284852b67 | e4d2f7f4406f1c8f4632cc67de33728cef90ca29 | refs/heads/master | 2023-04-13T21:24:13.912673 | 2021-04-23T09:55:18 | 2021-04-23T09:55:18 | 265,431,588 | 0 | 0 | Apache-2.0 | 2021-04-23T09:55:19 | 2020-05-20T02:49:58 | null | UTF-8 | Python | false | false | 16,356 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Quantized convolution
Base code is from nn.Conv, details of Module and original argument can be found there.
Module names are intentionally kept same as unquantized version so that they can be dropped into preexisting model
easily, and load pretrained weight. Aliases with Quant prefix are defined and are encouraged to be used explicitly
when start scratch.
"""
import torch
import torch.nn
import torch.nn.functional as F
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.modules.conv import _ConvTransposeNd
from pytorch_quantization import tensor_quant
from . import _utils
__all__ = [
"Conv2d", "QuantConv2d", "Conv3d", "QuantConv3d", "Conv1d", "QuantConv1d", "ConvTranspose1d", "ConvTranspose2d",
"ConvTranspose3d", "QuantConvTranspose1d", "QuantConvTranspose2d", "QuantConvTranspose3d"
]
class _QuantConvNd(torch.nn.modules.conv._ConvNd, _utils.QuantMixin):
    """Common base for the quantized Conv modules, built on ``_ConvNd``.

    The convolution arguments themselves are documented in
    ``torch.nn.modules.conv``.

    Arguments:
        quant_desc_input: An instance of :class:`QuantDescriptor
            <pytorch_quantization.tensor_quant.QuantDescriptor>` describing
            how the input is quantized.
        quant_desc_weight: An instance of :class:`QuantDescriptor
            <pytorch_quantization.tensor_quant.QuantDescriptor>` describing
            how the weight is quantized.

    Raises:
        ValueError: If unsupported arguments are passed in.

    Readonly properties:
        - input_quantizer:
        - weight_quantizer:

    Static methods:
        - set_default_quant_desc_input: Set default_quant_desc_input
        - set_default_quant_desc_weight: Set default_quant_desc_weight
    """
    default_quant_desc_input = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
    default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding,
                 groups, bias, padding_mode, quant_desc_input, quant_desc_weight):
        super(_QuantConvNd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation,
                                           transposed, output_padding, groups, bias, padding_mode)
        # Attach the input/weight TensorQuantizer instances (QuantMixin helper).
        self.init_quantizer(quant_desc_input, quant_desc_weight)

    def _quant(self, input):
        """Quantize the incoming activation and this layer's weight.

        Called by the subclasses right before they run the actual
        convolution in ``forward``.

        Arguments:
            input: in_features to quantize

        Returns:
            A tuple ``(quant_in_feature, quant_weight)``.
        """
        return self._input_quantizer(input), self._weight_quantizer(self.weight)
class QuantConv2d(_QuantConvNd):
    """2D convolution with fake-quantized input and weight."""
    default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV2D_WEIGHT_PER_CHANNEL

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 **kwargs):
        # Normalize scalar geometry arguments to 2-tuples.
        kernel_size, stride, padding, dilation = (
            _pair(kernel_size), _pair(stride), _pair(padding), _pair(dilation))
        quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
        super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
                                          _pair(0), groups, bias, padding_mode,
                                          quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)

    def forward(self, input):
        # Quantization itself happens in the base-class helper.
        quant_input, quant_weight = self._quant(input)
        if self.padding_mode != 'circular':
            return F.conv2d(quant_input, quant_weight, self.bias, self.stride, self.padding, self.dilation,
                            self.groups)
        # Circular mode: pad explicitly, then convolve with zero padding.
        pad_h, pad_w = self.padding[0], self.padding[1]
        expanded_padding = ((pad_w + 1) // 2, pad_w // 2,
                            (pad_h + 1) // 2, pad_h // 2)
        padded = F.pad(quant_input, expanded_padding, mode='circular')
        return F.conv2d(padded, quant_weight, self.bias, self.stride,
                        _pair(0), self.dilation, self.groups)
class QuantConv3d(_QuantConvNd):
    """3D convolution with fake-quantized input and weight."""
    default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV3D_WEIGHT_PER_CHANNEL

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 **kwargs):
        # Normalize scalar geometry arguments to 3-tuples.
        kernel_size, stride, padding, dilation = (
            _triple(kernel_size), _triple(stride), _triple(padding), _triple(dilation))
        quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
        super(QuantConv3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
                                          _triple(0), groups, bias, padding_mode,
                                          quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)

    def forward(self, input):
        # Quantization itself happens in the base-class helper.
        quant_input, quant_weight = self._quant(input)
        if self.padding_mode != 'circular':
            return F.conv3d(quant_input, quant_weight, self.bias, self.stride, self.padding, self.dilation,
                            self.groups)
        # Circular mode: pad explicitly (last dim first), then convolve with
        # zero padding.
        pad_d, pad_h, pad_w = self.padding[0], self.padding[1], self.padding[2]
        expanded_padding = ((pad_w + 1) // 2, pad_w // 2,
                            (pad_h + 1) // 2, pad_h // 2,
                            (pad_d + 1) // 2, pad_d // 2)
        padded = F.pad(quant_input, expanded_padding, mode='circular')
        return F.conv3d(padded, quant_weight, self.bias, self.stride,
                        _triple(0), self.dilation, self.groups)
class QuantConv1d(_QuantConvNd):
    """1D convolution with fake-quantized input and weight."""
    default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONV1D_WEIGHT_PER_CHANNEL

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 **kwargs):
        # Normalize scalar geometry arguments to 1-tuples.
        kernel_size, stride, padding, dilation = (
            _single(kernel_size), _single(stride), _single(padding), _single(dilation))
        quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
        super(QuantConv1d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False,
                                          _single(0), groups, bias, padding_mode,
                                          quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)

    def forward(self, input):
        # Quantization itself happens in the base-class helper.
        quant_input, quant_weight = self._quant(input)
        if self.padding_mode != 'circular':
            return F.conv1d(quant_input, quant_weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        # Circular mode: pad explicitly, then convolve with zero padding.
        pad_w = self.padding[0]
        expanded_padding = ((pad_w + 1) // 2, pad_w // 2)
        padded = F.pad(quant_input, expanded_padding, mode='circular')
        return F.conv1d(padded, quant_weight, self.bias, self.stride,
                        _single(0), self.dilation, self.groups)
class _QuantConvTransposeNd(torch.nn.modules.conv._ConvTransposeNd, _utils.QuantMixin):
    """Common base for the quantized transposed-Conv modules.

    The convolution arguments themselves are documented in
    ``torch.nn.modules.conv``.

    Arguments:
        quant_desc_input: An instance of :class:`QuantDescriptor
            <pytorch_quantization.tensor_quant.QuantDescriptor>` describing
            how the input is quantized.
        quant_desc_weight: An instance of :class:`QuantDescriptor
            <pytorch_quantization.tensor_quant.QuantDescriptor>` describing
            how the weight is quantized.

    Raises:
        ValueError: If unsupported arguments are passed in.

    Readonly properties:
        - input_quantizer:
        - weight_quantizer:

    Static methods:
        - set_default_quant_desc_input: Set default_quant_desc_input
        - set_default_quant_desc_weight: Set default_quant_desc_weight
    """
    default_quant_desc_input = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR
    default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_PER_TENSOR

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding,
                 groups, bias, padding_mode, quant_desc_input, quant_desc_weight):
        super(_QuantConvTransposeNd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation,
                                                    transposed, output_padding, groups, bias, padding_mode)
        # Attach the input/weight TensorQuantizer instances (QuantMixin helper).
        self.init_quantizer(quant_desc_input, quant_desc_weight)

    def _quant(self, input):
        """Quantize the incoming activation and this layer's weight.

        Called by the subclasses right before they run the actual
        transposed convolution in ``forward``.

        Arguments:
            input: in_features to quantize

        Returns:
            A tuple ``(quant_in_feature, quant_weight)``.
        """
        return self._input_quantizer(input), self._weight_quantizer(self.weight)
class QuantConvTranspose1d(_QuantConvTransposeNd):
    """Transposed 1D convolution with fake-quantized input and weight."""
    default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE1D_WEIGHT_PER_CHANNEL

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 groups=1,
                 bias=True,
                 dilation=1,
                 padding_mode='zeros',
                 **kwargs):
        # Normalize scalar geometry arguments to 1-tuples.
        kernel_size, stride, padding, dilation, output_padding = (
            _single(kernel_size), _single(stride), _single(padding),
            _single(dilation), _single(output_padding))
        quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
        super(QuantConvTranspose1d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode,
            quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)

    def forward(self, input, output_size=None):
        if self.padding_mode != 'zeros':
            raise ValueError('Only `zeros` padding mode is supported for QuantConvTranspose1d')
        # Resolve the effective output padding (depends on output_size).
        output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
        quant_input, quant_weight = self._quant(input)
        return F.conv_transpose1d(quant_input, quant_weight, self.bias, self.stride, self.padding, output_padding,
                                  self.groups, self.dilation)
class QuantConvTranspose2d(_QuantConvTransposeNd):
    """Transposed 2D convolution with fake-quantized input and weight."""
    default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE2D_WEIGHT_PER_CHANNEL

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 groups=1,
                 bias=True,
                 dilation=1,
                 padding_mode='zeros',
                 **kwargs):
        # Normalize scalar geometry arguments to 2-tuples.
        kernel_size, stride, padding, dilation, output_padding = (
            _pair(kernel_size), _pair(stride), _pair(padding),
            _pair(dilation), _pair(output_padding))
        quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
        super(QuantConvTranspose2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode,
            quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)

    def forward(self, input, output_size=None):
        if self.padding_mode != 'zeros':
            raise ValueError('Only `zeros` padding mode is supported for QuantConvTranspose2d')
        # Resolve the effective output padding (depends on output_size).
        output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
        quant_input, quant_weight = self._quant(input)
        return F.conv_transpose2d(quant_input, quant_weight, self.bias, self.stride, self.padding, output_padding,
                                  self.groups, self.dilation)
class QuantConvTranspose3d(_QuantConvTransposeNd):
    """Transposed 3D convolution with fake-quantized input and weight."""
    default_quant_desc_weight = tensor_quant.QUANT_DESC_8BIT_CONVTRANSPOSE3D_WEIGHT_PER_CHANNEL

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 groups=1,
                 bias=True,
                 dilation=1,
                 padding_mode='zeros',
                 **kwargs):
        # Normalize scalar geometry arguments to 3-tuples.
        kernel_size, stride, padding, dilation, output_padding = (
            _triple(kernel_size), _triple(stride), _triple(padding),
            _triple(dilation), _triple(output_padding))
        quant_desc_input, quant_desc_weight = _utils.pop_quant_desc_in_kwargs(self.__class__, **kwargs)
        super(QuantConvTranspose3d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode,
            quant_desc_input=quant_desc_input, quant_desc_weight=quant_desc_weight)

    def forward(self, input, output_size=None):
        if self.padding_mode != 'zeros':
            raise ValueError('Only `zeros` padding mode is supported for QuantConvTranspose3d')
        # Resolve the effective output padding (depends on output_size).
        output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size)
        quant_input, quant_weight = self._quant(input)
        return F.conv_transpose3d(quant_input, quant_weight, self.bias, self.stride, self.padding, output_padding,
                                  self.groups, self.dilation)
# Define alias with Quant prefix
# Backwards-compatible aliases: code importing this module can use the plain
# nn.Conv* names while transparently getting the quantized implementations
# (see the module docstring about drop-in replacement of preexisting models).
_ConvNd = _QuantConvNd
Conv1d = QuantConv1d
Conv2d = QuantConv2d
Conv3d = QuantConv3d
ConvTranspose1d = QuantConvTranspose1d
ConvTranspose2d = QuantConvTranspose2d
ConvTranspose3d = QuantConvTranspose3d
| [
"rajeevsrao@users.noreply.github.com"
] | rajeevsrao@users.noreply.github.com |
d6c42962c8c27b0253171b232edbef46fb681496 | b1182238bf0d26451d567e3100cea940be771ff1 | /hd-thrift-idl/hd-thrift-idl-social/src/main/python/SocialAdminService/ISocialAdminServiceDeleteAdminPost.py | 2ea38cd7fffadcfa9fbbd5dc18012fea42a09f4d | [] | no_license | ybg555/vue-tvBox | af6df0e07848efc1c2ac80ee8b7c16c65b790a40 | 57e3849e7f8272794e5a38d5e49bb68f7a44f286 | refs/heads/master | 2021-01-15T15:42:23.728423 | 2016-10-02T09:36:08 | 2016-10-02T09:36:08 | 55,936,790 | 1 | 0 | null | 2016-04-12T01:07:09 | 2016-04-11T02:52:05 | Python | UTF-8 | Python | false | true | 6,833 | py | #coding=utf-8
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
    # Service interface stub generated by the Thrift compiler; the concrete
    # implementation is supplied by the handler object given to Processor.
    def deleteAdminPost(self, post):
        """
        Delete a post.
        @param post
        @return
        @author zhijian.li

        Parameters:
         - post
        """
        pass
class Client(Iface):
    # Thrift-generated RPC client: serializes the call over the output
    # protocol and decodes the reply from the input protocol.
    def __init__(self, iprot, oprot=None):
        # When no separate output protocol is given, the input protocol is
        # used for both directions.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def deleteAdminPost(self, post):
        """
        Delete a post.
        @param post
        @return
        @author zhijian.li

        Parameters:
         - post
        """
        self.send_deleteAdminPost(post)
        return self.recv_deleteAdminPost()

    def send_deleteAdminPost(self, post):
        # Write the CALL message header, the argument struct, then flush.
        self._oprot.writeMessageBegin('deleteAdminPost', TMessageType.CALL, self._seqid)
        args = deleteAdminPost_args()
        args.post = post
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_deleteAdminPost(self):
        # Read the reply; server-side failures arrive as EXCEPTION messages
        # and are re-raised locally.
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = deleteAdminPost_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteAdminPost failed: unknown result");
class Processor(Iface, TProcessor):
    # Thrift-generated server-side dispatcher: maps incoming method names to
    # calls on the user-supplied handler.
    def __init__(self, handler):
        self._handler = handler
        self._processMap = {}
        self._processMap["deleteAdminPost"] = Processor.process_deleteAdminPost

    def process(self, iprot, oprot):
        # NOTE: generated code shadows the builtin `type` here.
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            # Unknown method: discard the payload and answer with an
            # UNKNOWN_METHOD application exception.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_deleteAdminPost(self, seqid, iprot, oprot):
        # Decode the arguments, invoke the handler, serialize the REPLY.
        args = deleteAdminPost_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = deleteAdminPost_result()
        result.success = self._handler.deleteAdminPost(args.post)
        oprot.writeMessageBegin("deleteAdminPost", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class deleteAdminPost_args:
    """
    Attributes:
     - post
    """

    # (field id, wire type, name, type args, default) tuples driving the
    # (de)serialization below; slot 0 is unused by convention.
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'post', (TAdminPost.ttypes.TAdminPost, TAdminPost.ttypes.TAdminPost.thrift_spec), None, ),  # 1
    )

    def __init__(self, post=None,):
        self.post = post

    def read(self, iprot):
        # Fast path: decode in one shot via the C-accelerated binary codec.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk the fields manually until STOP.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.post = TAdminPost.ttypes.TAdminPost()
                    self.post.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: encode via the C-accelerated binary codec.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('deleteAdminPost_args')
        if self.post is not None:
            oprot.writeFieldBegin('post', TType.STRUCT, 1)
            self.post.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.post)
        return value

    def __repr__(self):
        # NOTE: Python 2 API (dict.iteritems) — this generated file predates
        # Python 3 support.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class deleteAdminPost_result:
    """
    Attributes:
     - success
    """

    # (field id, wire type, name, type args, default) tuples driving the
    # (de)serialization below; the return value uses the reserved id 0.
    thrift_spec = (
        (0, TType.BOOL, 'success', None, None, ),  # 0
    )

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        # Fast path: decode in one shot via the C-accelerated binary codec.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: walk the fields manually until STOP.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: encode via the C-accelerated binary codec.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('deleteAdminPost_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.success)
        return value

    def __repr__(self):
        # NOTE: Python 2 API (dict.iteritems) — this generated file predates
        # Python 3 support.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
| [
"93745726@qq.com"
] | 93745726@qq.com |
4ea78129b575c0fa392a02973b2e72fc68d1979c | c37414be07a423968c897887b0844830e349741f | /fastestimator/backend/to_number.py | 766952ad187cb9f583dbacd315fbeb6d65a050c5 | [
"Apache-2.0"
] | permissive | vbvg2008/fastestimator-future | 5175047a1acac42f7172f8b9bb326486ed25a5a0 | dbf7d597d1f97140f837345f6b06f1773d4fa299 | refs/heads/master | 2022-03-30T22:48:59.349348 | 2020-01-06T08:35:04 | 2020-01-06T08:35:04 | 227,687,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | import tensorflow as tf
import torch
def to_number(data):
    """Convert a TensorFlow or PyTorch tensor to a plain numpy value.

    Anything that is not a recognized tensor type is returned unchanged.
    TensorFlow tensors are detected by module name so the function keeps
    working when only one of the two frameworks is importable.
    """
    if isinstance(data, torch.Tensor):
        # detach() drops any autograd graph and cpu() moves device tensors
        # to host memory; both are no-ops for plain CPU tensors, so this is
        # strictly more robust than the old `data.data.numpy()`.
        data = data.detach().cpu().numpy()
    elif type(data).__module__.startswith("tensorflow"):
        # Eager TensorFlow tensors expose .numpy() directly.
        data = data.numpy()
    return data
| [
"shawnmengdong@gmail.com"
] | shawnmengdong@gmail.com |
bbbd325743c38a316ba45a9043c3c7381ec663d9 | 22101a0f9ffc8a41f24bf09da03b798f7a321021 | /B.py | 723b8bf95fca5e6e17eb09cb69c502037f2d826e | [] | no_license | RyusukeNakamura/educational_dp | 4b0022abdd0ed408787b2ee36e6830864877971d | 02adec5916e2f6ab0b074dad4ce0e0f8add53ab5 | refs/heads/main | 2023-03-05T10:13:57.379491 | 2021-02-14T03:34:29 | 2021-02-14T03:34:29 | 322,280,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | n, k = map(int, input().split())
# Stone heights; n and k (stone count, max jump width) are read just above.
h = list(map(int, input().split()))
# dp[i] = minimum total cost to reach stone i; 10**9 is the "unreached" sentinel.
dp = [10 ** 9 for _ in range(n)]
dp[0] = 0
for i in range(1, n):
    # Stones reachable in one jump lie in the window [max(0, i - k), i).
    j = max(0, i - k)
    hi = h[i]
    # A jump from stone j' costs |h[i] - h[j']|; take the cheapest predecessor.
    dp[i] = min(dpj + abs(hi - hj) for dpj, hj in zip(dp[j:i], h[j:i]))
print(dp[-1])
"leiden3535@gmail.com"
] | leiden3535@gmail.com |
d4b6091ec2ac16976849edd40296bf767472a246 | 4c14f3f73d4bf5d903a8108b22ab04523ec4d259 | /signbank/dictionary/update.py | d4ec23aafd3361c4da4b65e57af9c4f33c302ce3 | [
"BSD-3-Clause"
] | permissive | mbencherif/FinSL-signbank | acb8c8494a6fd644773abc06bea280dcea9be8d5 | a9b3dca3f01ac4672b81de7524af1371f603f604 | refs/heads/master | 2020-03-29T22:34:57.117754 | 2018-08-01T08:25:13 | 2018-08-01T08:25:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,013 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import csv
import codecs
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, HttpResponseBadRequest, Http404, \
HttpResponseNotAllowed, HttpResponseServerError
from django.shortcuts import render, get_object_or_404, redirect, reverse
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import permission_required, login_required
from django.db.models.fields import NullBooleanField
from django.utils.translation import ugettext as _
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from tagging.models import TaggedItem, Tag
from guardian.shortcuts import get_perms, get_objects_for_user
from .models import Gloss, Dataset, Translation, Keyword, Language, Dialect, GlossURL, \
GlossRelation, GlossTranslations, FieldChoice, MorphologyDefinition, RelationToForeignSign, Relation
from .models import build_choice_list
from .forms import TagsAddForm, TagUpdateForm, TagDeleteForm, GlossRelationForm, RelationForm, \
RelationToForeignSignForm, MorphologyForm, CSVUploadForm
from ..video.models import GlossVideo
@permission_required('dictionary.change_gloss')
def update_gloss(request, glossid):
    """View to update a gloss model from the jeditable jquery form
    We are sent one field and value at a time, return the new value once we've updated it."""
    # Get the gloss object or raise a Http404 exception if the object does not exist.
    gloss = get_object_or_404(Gloss, id=glossid)
    # Make sure that the user has rights to edit this datasets glosses.
    if 'view_dataset' not in get_perms(request.user, gloss.dataset):
        return HttpResponseForbidden(_("You do not have permissions to edit Glosses of this dataset/lexicon."))
    if request.method == "POST":
        # Update the user on Gloss.updated_by from request.user
        gloss.updated_by = request.user
        old_idgloss = str(gloss)
        # jeditable sends the edited field's name as 'id' and its new value
        # as 'value'.
        field = request.POST.get('id', '')
        value = request.POST.get('value', '')
        if len(value) == 0:
            value = ' '
        elif value[0] == '_':
            value = value[1:]
        # in case we need multiple values
        values = request.POST.getlist('value[]')
        # Dispatch on the field-name prefix to the specialised updaters.
        if field.startswith('keywords_'):
            language_code_2char = field.split('_')[1]
            return update_keywords(gloss, field, value, language_code_2char=language_code_2char)
        elif field.startswith('relationforeign'):
            return update_relationtoforeignsign(gloss, field, value)
        # Had to add field != 'relation_between_articulators' because I changed its field name, and it conflicted here.
        elif field.startswith('relation') and field != 'relation_between_articulators':
            return update_relation(gloss, field, value)
        elif field.startswith('morphology-definition'):
            return update_morphology_definition(gloss, field, value)
        elif field == 'dialect':
            # expecting possibly multiple values
            try:
                gloss.dialect.clear()
                for value in values:
                    lang = Dialect.objects.get(name=value)
                    gloss.dialect.add(lang)
                gloss.save()
                newvalue = ", ".join([str(g.name)
                                      for g in gloss.dialect.all()])
            # NOTE(review): bare except swallows every error (not just
            # Dialect.DoesNotExist) and reports it as "Unknown Dialect".
            except:
                # Translators: HttpResponseBadRequest
                # NOTE(review): '"%s %s" % _("Unknown Dialect")' is missing
                # the argument tuple — this line raises TypeError when hit;
                # '"%s %s" % (_("Unknown Dialect"), values)' was intended.
                return HttpResponseBadRequest("%s %s" % _("Unknown Dialect"), values, content_type='text/plain')
        elif field.startswith('video_title'):
            # If editing video title, update the GlossVideo's title
            if request.user.has_perm('video.change_glossvideo'):
                # Get pk after string "video_title"
                video_pk = field.split('video_title')[1]
                newvalue = value
                try:
                    video = GlossVideo.objects.get(pk=video_pk)
                    video.title = value
                    video.save()
                except GlossVideo.DoesNotExist:
                    return HttpResponseBadRequest('{error} {values}'.format(error=_('GlossVideo does not exist'), values=values),
                                                  content_type='text/plain')
            else:
                return HttpResponseForbidden('Missing permission: video.change_glossvideo')
        elif field.startswith('glossurl-'):
            if field == 'glossurl-create':
                GlossURL.objects.create(url=value, gloss_id=glossid)
                return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}))
            else:
                if request.user.has_perm('dictionary.change_gloss'):
                    glossurl_pk = field.split('glossurl-')[1]
                    newvalue = value
                    try:
                        glossurl = GlossURL.objects.get(pk=glossurl_pk)
                        glossurl.url = value
                        glossurl.save()
                    except GlossURL.DoesNotExist:
                        pass
        else:
            # Generic path: the field maps directly onto a Gloss model field.
            # Find if field is not in Gloss classes fields.
            if field not in [f.name for f in Gloss._meta.get_fields()]:
                # Translators: HttpResponseBadRequest
                return HttpResponseBadRequest(_("Unknown field"), content_type='text/plain')
            # Translate the value if a boolean
            if isinstance(Gloss._meta.get_field(field), NullBooleanField):
                newvalue = value
                value = (value == 'Yes')
            # NOTE(review): this condition is a tautology — it is True for
            # every value ('and' was probably intended), so the branch
            # always runs.
            if value != ' ' or value != '':
                # See if the field is a ForeignKey
                if gloss._meta.get_field(field).get_internal_type() == "ForeignKey":
                    gloss.__setattr__(field, FieldChoice.objects.get(machine_value=value))
                else:
                    gloss.__setattr__(field, value)
                gloss.save()
            # If the value is not a Boolean, return the new value
            if not isinstance(value, bool):
                f = Gloss._meta.get_field(field)
                # for choice fields we want to return the 'display' version of the value
                # Try to use get_choices to get correct choice names for FieldChoices
                # If it doesn't work, go to exception and get flatchoices
                try:
                    # valdict = dict(f.get_choices(include_blank=False))
                    valdict = dict(build_choice_list(field))
                except:
                    valdict = dict(f.flatchoices)
                # Some fields take ints
                # if valdict.keys() != [] and type(valdict.keys()[0]) == int:
                try:
                    newvalue = valdict.get(int(value), value)
                # else:
                except:
                    # either it's not an int or there's no flatchoices
                    # so here we use get with a default of the value itself
                    newvalue = valdict.get(value, value)
                # If field is idgloss and if the value has changed
                # Then change the filename on system and in glossvideo.videofile
                if field == 'idgloss' and newvalue != old_idgloss:
                    try:
                        GlossVideo.rename_glosses_videos(gloss)
                    except (OSError, IOError):
                        # Catch error, but don't do anything for now.
                        return HttpResponseServerError(_("Error: Unable to change videofiles names."))
        return HttpResponse(newvalue, content_type='text/plain')
    else:
        return HttpResponseNotAllowed(['POST'])
def update_keywords(gloss, field, value, language_code_2char):
    """Store the keyword/translation string of ``gloss`` for one language.

    Looks up the Language by its two-character code, writes ``value`` into
    the matching GlossTranslations row (creating it when absent), and
    returns the stored value as a plain-text response.
    """
    try:
        language = Language.objects.get(language_code_2char=language_code_2char)
    except Language.DoesNotExist:
        # No such language code: reject the edit.
        return HttpResponseBadRequest(
            _('A Language does not exist with language_code: ') + language_code_2char,
            content_type='text/plain')
    except Language.MultipleObjectsReturned:
        # Ambiguous language code: refuse rather than guess which one to edit.
        return HttpResponseBadRequest(
            _('Multiple Languages with the same language_code exist, cannot edit because it '
              'is unclear which languages translations to edit.'),
            content_type='text/plain')

    translations_obj, _created = GlossTranslations.objects.get_or_create(gloss=gloss, language=language)
    translations_obj.translations = value
    translations_obj.save()
    # Touch the gloss so its updated_by/updated_at bookkeeping is refreshed.
    gloss.save()
    return HttpResponse(value, content_type='text/plain')
def update_relation(gloss, field, value):
    """Update one of the relations for this gloss.

    ``field`` encodes the action and the Relation primary key as
    ``<what>_<relid>``. Supported actions: ``relationdelete``,
    ``relationrole`` (value is a FieldChoice machine value) and
    ``relationtarget`` (value is a "NAME (id)" gloss identifier).
    Returns a plain-text response with the new value, a redirect after a
    deletion, or a 400 response on bad input.
    """
    (what, relid) = field.split('_')
    what = what.replace('-', '_')
    try:
        rel = Relation.objects.get(id=relid)
    except Relation.DoesNotExist:
        # BUG FIX: the original wrote `"%s '%s'" % _("…"), relid`, which
        # applies % to a single argument (TypeError at runtime) and passed
        # relid as a stray positional argument. Format both into one string.
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest("%s '%s'" % (_("Bad Relation ID"), relid), content_type='text/plain')
    if not rel.source == gloss:
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest(_("Relation doesn't match gloss"), content_type='text/plain')
    if what == 'relationdelete':
        print(("DELETE: ", rel))
        rel.delete()
        return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}))
    elif what == 'relationrole':
        # Prefer the FieldChoice object; fall back to the raw value.
        try:
            rel.role = FieldChoice.objects.get(machine_value=value)
        except FieldChoice.DoesNotExist:
            rel.role = value
        rel.save()
        newvalue = rel.role
    elif what == 'relationtarget':
        target = gloss_from_identifier(value)
        if target:
            rel.target = target
            rel.save()
            newvalue = str(target)
        else:
            # BUG FIX: same %-formatting repair as above.
            # Translators: HttpResponseBadRequest
            return HttpResponseBadRequest("%s '%s'" % (_("Badly formed gloss identifier"), value),
                                          content_type='text/plain')
    else:
        # BUG FIX: same %-formatting repair as above.
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest("%s '%s'" % (_("Unknown form field"), field), content_type='text/plain')
    return HttpResponse(newvalue, content_type='text/plain')
def update_relationtoforeignsign(gloss, field, value):
    """Update one RelationToForeignSign of this gloss.

    ``field`` is ``<what>_<relid>``; supported actions are deletion and
    editing the ``loan``, ``other_lang`` and ``other_lang_gloss`` fields.
    Returns the new value as plain text, a redirect after deletion, or a
    400 response on bad input.
    """
    (what, relid) = field.split('_')
    what = what.replace('-', '_')
    try:
        rel = RelationToForeignSign.objects.get(id=relid)
    except RelationToForeignSign.DoesNotExist:
        # BUG FIX: format relid into the message; the original passed it as a
        # stray positional argument after an incomplete % expression, which
        # raised TypeError at runtime.
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest("%s '%s'" % (_("Bad RelationToForeignSign ID"), relid),
                                      content_type='text/plain')
    if not rel.gloss == gloss:
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest(_("Relation doesn't match gloss"), content_type='text/plain')
    if what == 'relationforeigndelete':
        print(("DELETE: ", rel))
        rel.delete()
        return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}))
    elif what == 'relationforeign_loan':
        # Checkbox-style field: anything but the literal 'Yes' means False.
        rel.loan = value == 'Yes'
        rel.save()
    elif what == 'relationforeign_other_lang':
        rel.other_lang = value
        rel.save()
    elif what == 'relationforeign_other_lang_gloss':
        rel.other_lang_gloss = value
        rel.save()
    else:
        # BUG FIX: same %-formatting repair as above.
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest("%s '%s'" % (_("Unknown form field"), field), content_type='text/plain')
    return HttpResponse(value, content_type='text/plain')
def gloss_from_identifier(value):
    """Given an identifier of the form ``IDGLOSS (pk)`` return the matching
    Gloss, or None if the identifier does not parse or no such Gloss exists.
    """
    # NOTE: adding a relation through one free-text field is inherently
    # ambiguous (name only vs. raw id). Per the original TODO discussion,
    # only the "NAME (id)" form is accepted here, because users search by
    # gloss name rather than by memorised ids.
    # Match e.g. "CAMEL (10)" -> group(1)="CAMEL", group(2)="10".
    # FIX: use a raw string for the regex so "\(" is a regex escape and not
    # an invalid string escape (DeprecationWarning on Python 3.6+).
    match = re.match(r'(.*) \((\d+)\)', value)
    if match:
        pk = match.group(2)
        # Resolve the target by primary key; a missing Gloss yields None.
        try:
            target = Gloss.objects.get(pk=int(pk))
        except ObjectDoesNotExist:
            target = None
        return target
    # The identifier did not have the expected "NAME (id)" shape.
    return None
def add_relation(request):
    """Create a Relation between two glosses from a POSTed RelationForm."""
    if request.method != "POST":
        # Non-POST requests simply bounce back to the site root.
        return HttpResponseRedirect('/')
    form = RelationForm(request.POST)
    if not form.is_valid():
        print(form)
        # fallback to redirecting to the requesting page
        return HttpResponseRedirect('/')
    role = form.cleaned_data['role']
    sourceid = form.cleaned_data['sourceid']
    targetid = form.cleaned_data['targetid']
    # The source is addressed by raw primary key ...
    try:
        source = Gloss.objects.get(pk=int(sourceid))
    except Gloss.DoesNotExist:
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest(_("Source gloss not found."), content_type='text/plain')
    # ... while the target uses the "NAME (id)" identifier form.
    target = gloss_from_identifier(targetid)
    if not target:
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest(_("Target gloss not found."), content_type='text/plain')
    rel = Relation(source=source, target=target, role=role)
    rel.save()
    return HttpResponseRedirect(
        reverse('dictionary:admin_gloss_view', kwargs={'pk': source.id}) + '?editrel')
def add_relationtoforeignsign(request):
    """Create a RelationToForeignSign for a gloss from POSTed form data."""
    if request.method != "POST":
        # fallback to redirecting to the requesting page
        return HttpResponseRedirect('/')
    form = RelationToForeignSignForm(request.POST)
    if not form.is_valid():
        print(form)
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest(_("Form not valid"), content_type='text/plain')
    cleaned = form.cleaned_data
    try:
        gloss = Gloss.objects.get(pk=int(cleaned['sourceid']))
    except Gloss.DoesNotExist:
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest(_("Source gloss not found."), content_type='text/plain')
    rel = RelationToForeignSign(gloss=gloss, loan=cleaned['loan'], other_lang=cleaned['other_lang'],
                                other_lang_gloss=cleaned['other_lang_gloss'])
    rel.save()
    return HttpResponseRedirect(
        reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}) + '?editrelforeign')
def add_morphology_definition(request):
    """Create a MorphologyDefinition linking a morpheme gloss to a parent gloss.

    Expects a POSTed MorphologyForm; redirects back to the parent gloss's
    admin view on success, otherwise raises Http404.
    """
    if request.method == "POST":
        form = MorphologyForm(request.POST)
        if form.is_valid():
            parent_gloss = form.cleaned_data['parent_gloss_id']
            role = form.cleaned_data['role']
            morpheme_id = form.cleaned_data['morpheme_id']
            # Resolve the morpheme from a "NAME (id)" identifier; this can be
            # None when the identifier does not parse. NOTE(review): a None
            # morpheme is saved as-is below — confirm that is intended.
            morpheme = gloss_from_identifier(morpheme_id)
            thisgloss = get_object_or_404(Gloss, pk=parent_gloss)
            # create definition, default to not published
            morphdef = MorphologyDefinition(
                parent_gloss=thisgloss, role=role, morpheme=morpheme)
            morphdef.save()
            return HttpResponseRedirect(
                reverse('dictionary:admin_gloss_view', kwargs={'pk': thisgloss.id}) + '?editmorphdef')
    # Non-POST requests and invalid forms all end up here.
    # Translators: Htt404
    raise Http404(_('Incorrect request'))
def update_morphology_definition(gloss, field, value):
    """Update one MorphologyDefinition of this gloss.

    ``field`` is ``<what>_<morph_def_id>``; supported actions are deletion,
    changing the role (value is a FieldChoice machine value) and changing
    the morpheme (value is a "NAME (id)" gloss identifier). Returns the new
    value as plain text, a redirect after deletion, or 400 on bad input.
    """
    (what, morph_def_id) = field.split('_')
    what = what.replace('-', '_')
    try:
        morph_def = MorphologyDefinition.objects.get(id=morph_def_id)
    except MorphologyDefinition.DoesNotExist:
        # BUG FIX: format the id into the message; the original applied % to
        # a single argument and passed the id as a stray positional argument,
        # which raised TypeError at runtime.
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest("%s '%s'" % (_("Bad Morphology Definition ID"), morph_def_id),
                                      content_type='text/plain')
    if not morph_def.parent_gloss == gloss:
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest(_("Morphology Definition doesn't match gloss"), content_type='text/plain')
    if what == 'morphology_definition_delete':
        print(("DELETE: ", morph_def))
        morph_def.delete()
        return HttpResponseRedirect(reverse('dictionary:admin_gloss_view', kwargs={'pk': gloss.id}))
    elif what == 'morphology_definition_role':
        morph_def.role = FieldChoice.objects.get(machine_value=value)
        morph_def.save()
        newvalue = morph_def.role.english_name
    elif what == 'morphology_definition_morpheme':
        morpheme = gloss_from_identifier(value)
        if morpheme:
            morph_def.morpheme = morpheme
            morph_def.save()
            newvalue = str(morpheme)
        else:
            # BUG FIX: same %-formatting repair as above.
            # Translators: HttpResponseBadRequest
            return HttpResponseBadRequest("%s '%s'" % (_("Badly formed gloss identifier"), value),
                                          content_type='text/plain')
    else:
        # BUG FIX: same %-formatting repair as above.
        # Translators: HttpResponseBadRequest
        return HttpResponseBadRequest("%s '%s'" % (_("Unknown form field"), field), content_type='text/plain')
    return HttpResponse(newvalue, content_type='text/plain')
@permission_required('dictionary.change_gloss')
def add_tag(request, glossid):
    """View to add (or delete) a tag on a gloss.

    The POST data is tried against three forms in order: TagDeleteForm
    (remove one tag), TagUpdateForm (add one tag) and TagsAddForm (add
    several tags). Returns refreshed tag-list HTML, 'deleted', or the
    default 'invalid' plain-text response.
    """
    # default response
    response = HttpResponse('invalid', content_type='text/plain')
    if request.method == "POST":
        gloss = get_object_or_404(Gloss, id=glossid)
        if 'view_dataset' not in get_perms(request.user, gloss.dataset):
            # If user has no permissions to dataset, raise PermissionDenied to show 403 template.
            msg = _("You do not have permissions to add tags to glosses of this lexicon.")
            messages.error(request, msg)
            raise PermissionDenied(msg)
        # First interpretation: a request to delete an existing tag.
        form = TagDeleteForm(request.POST)
        if form.is_valid():
            if form.cleaned_data['delete']:
                tag = form.cleaned_data['tag']
                # get the relevant TaggedItem
                ti = get_object_or_404(
                    TaggedItem, object_id=gloss.id, tag__name=tag,
                    content_type=ContentType.objects.get_for_model(Gloss))
                ti.delete()
                response = HttpResponse(
                    'deleted', content_type='text/plain')
            # Returns here even when 'delete' was not set, leaving the
            # default 'invalid' response in place.
            return response
        # Second interpretation: add a single tag.
        form = TagUpdateForm(request.POST)
        if form.is_valid():
            tag = form.cleaned_data['tag']
            # we need to wrap the tag name in quotes since it might contain spaces
            Tag.objects.add_tag(gloss, '"%s"' % tag)
            # response is new HTML for the tag list and form
            response = render(request, 'dictionary/glosstags.html',
                              {'gloss': gloss, 'tagsaddform': TagsAddForm()})
        else:
            # If we are adding (multiple) tags, this form should validate.
            form = TagsAddForm(request.POST)
            if form.is_valid():
                tags = form.cleaned_data['tags']
                [Tag.objects.add_tag(gloss, str(x)) for x in tags]
                response = render(request, 'dictionary/glosstags.html',
                                  {'gloss': gloss, 'tagsaddform': TagsAddForm()})
    return response
@login_required
@permission_required('dictionary.import_csv')
def import_gloss_csv(request):
    """
    Check which objects exist and which not. Then show the user a list of glosses that will be added if user confirms.
    Store the glosses to be added into sessions.
    """
    glosses_new = []
    glosses_exists = []
    # Make sure that the session variables are flushed before using this view.
    if 'dataset_id' in request.session: del request.session['dataset_id']
    if 'glosses_new' in request.session: del request.session['glosses_new']
    if request.method == 'POST':
        form = CSVUploadForm(request.POST, request.FILES)
        if form.is_valid():
            dataset = form.cleaned_data['dataset']
            if 'view_dataset' not in get_perms(request.user, dataset):
                # If user has no permissions to dataset, raise PermissionDenied to show 403 template.
                msg = _("You do not have permissions to import glosses to this lexicon.")
                messages.error(request, msg)
                raise PermissionDenied(msg)
            try:
                # Decode the uploaded file lazily as UTF-8 and parse it as CSV.
                glossreader = csv.reader(codecs.iterdecode(form.cleaned_data['file'], 'utf-8'), delimiter=',', quotechar='"')
            except csv.Error as e:
                # Can't open file, remove session variables
                if 'dataset_id' in request.session: del request.session['dataset_id']
                if 'glosses_new' in request.session: del request.session['glosses_new']
                # Set a message to be shown so that the user knows what is going on.
                messages.add_message(request, messages.ERROR, _('Cannot open the file:' + str(e)))
                return render(request, "dictionary/import_gloss_csv.html", {'import_csv_form': CSVUploadForm()}, )
            else:
                try:
                    for row in glossreader:
                        if glossreader.line_num == 1:
                            # Skip first line of CSV file.
                            continue
                        try:
                            # Find out if the gloss already exists, if it does add to list of glosses not to be added.
                            gloss = Gloss.objects.get(dataset=dataset, idgloss=row[0])
                            glosses_exists.append(gloss)
                        except Gloss.DoesNotExist:
                            # If gloss is not already in list, add glossdata to list of glosses to be added as a tuple.
                            # NOTE(review): 'row[0] in s' tests equality against
                            # every field of each queued row tuple, not just the
                            # idgloss column — confirm that is intended.
                            if not any(row[0] in s for s in glosses_new):
                                glosses_new.append(tuple(row))
                        except IndexError:
                            # If row[0] does not exist, continue to next iteration of loop.
                            continue
                except UnicodeDecodeError as e:
                    # File is not UTF-8 encoded.
                    messages.add_message(request, messages.ERROR, _('File must be UTF-8 encoded!'))
                    return render(request, "dictionary/import_gloss_csv.html", {'import_csv_form': CSVUploadForm()}, )
            # Store dataset's id and the list of glosses to be added in session.
            request.session['dataset_id'] = dataset.id
            request.session['glosses_new'] = glosses_new
            return render(request, "dictionary/import_gloss_csv_confirmation.html",
                          {#'import_csv_form': CSVUploadForm(),
                           'glosses_new': glosses_new,
                           'glosses_exists': glosses_exists,
                           'dataset': dataset,})
        else:
            # If form is not valid, set a error message and return to the original form.
            messages.add_message(request, messages.ERROR, _('The provided CSV-file does not meet the requirements '
                                                            'or there is some other problem.'))
            return render(request, "dictionary/import_gloss_csv.html", {'import_csv_form': form}, )
    else:
        # If request type is not POST, return to the original form.
        csv_form = CSVUploadForm()
        allowed_datasets = get_objects_for_user(request.user, 'dictionary.view_dataset')
        # Make sure we only list datasets the user has permissions to.
        csv_form.fields["dataset"].queryset = csv_form.fields["dataset"].queryset.filter(
            id__in=[x.id for x in allowed_datasets])
        return render(request, "dictionary/import_gloss_csv.html",
                      {'import_csv_form': csv_form}, )
@login_required
@permission_required('dictionary.import_csv')
def confirm_import_gloss_csv(request):
    """Add the glosses staged in the session to the database once the user
    confirms the action (or flush the session data when the user cancels).
    """
    if request.method != 'POST':
        # If request method is not POST, redirect to the import form
        return HttpResponseRedirect(reverse('dictionary:import_gloss_csv'))
    if 'cancel' in request.POST:
        # If user cancels adding data, flush session variables
        if 'dataset_id' in request.session: del request.session['dataset_id']
        if 'glosses_new' in request.session: del request.session['glosses_new']
        # Set a message to be shown so that the user knows what is going on.
        messages.add_message(request, messages.WARNING, _('Cancelled adding CSV data.'))
        return HttpResponseRedirect(reverse('dictionary:import_gloss_csv'))
    if 'confirm' not in request.POST:
        # POST without cancel/confirm: go back to the import form.
        return HttpResponseRedirect(reverse('dictionary:import_gloss_csv'))
    # BUG FIX: the original tested "'glosses_new' and 'dataset_id' in
    # request.session", which only checks 'dataset_id' because the string
    # literal 'glosses_new' is always truthy. It then crashed later on
    # dataset.name (dataset was still None) whenever the session data was
    # missing. Check both keys and bail out cleanly instead.
    if 'glosses_new' not in request.session or 'dataset_id' not in request.session:
        return HttpResponseRedirect(reverse('dictionary:import_gloss_csv'))
    glosses_added = []
    dataset = Dataset.objects.get(id=request.session['dataset_id'])
    for gloss in request.session['glosses_new']:
        # If the Gloss does not already exist, continue adding.
        if not Gloss.objects.filter(dataset=dataset, idgloss=gloss[0]).exists():
            try:
                new_gloss = Gloss(dataset=dataset, idgloss=gloss[0], idgloss_en=gloss[1],
                                  created_by=request.user, updated_by=request.user)
            except IndexError:
                # If we get IndexError, idgloss_en was probably not provided
                new_gloss = Gloss(dataset=dataset, idgloss=gloss[0],
                                  created_by=request.user, updated_by=request.user)
            new_gloss.save()
            glosses_added.append((new_gloss.idgloss, new_gloss.idgloss_en))
    # Flush request.session['glosses_new'] and request.session['dataset_id']
    del request.session['glosses_new']
    del request.session['dataset_id']
    # Set a message to be shown so that the user knows what is going on.
    messages.add_message(request, messages.SUCCESS, _('Glosses were added succesfully.'))
    return render(request, "dictionary/import_gloss_csv_confirmation.html", {'glosses_added': glosses_added,
                                                                             'dataset': dataset.name})
def gloss_relation(request):
    """Create or delete GlossRelation objects from a POSTed GlossRelationForm.

    A "delete" key in the raw form data deletes that relation (and its
    TaggedItems); otherwise a validated form creates a new source->target
    relation, optionally tagged. Redirects back to the referer when known.
    """
    if request.method == "POST":
        form = GlossRelationForm(request.POST)
        # Deletion is signalled by a "delete" key carrying the relation id.
        if "delete" in form.data:
            glossrelation = get_object_or_404(GlossRelation, id=int(form.data["delete"]))
            if 'view_dataset' not in get_perms(request.user, glossrelation.source.dataset):
                # If user has no permissions to dataset, raise PermissionDenied to show 403 template.
                msg = _("You do not have permissions to delete relations from glosses of this lexicon.")
                messages.error(request, msg)
                raise PermissionDenied(msg)
            ct = ContentType.objects.get_for_model(GlossRelation)
            # Delete TaggedItems and the GlossRelation
            TaggedItem.objects.filter(object_id=glossrelation.id, content_type=ct).delete()
            glossrelation.delete()
            # Send the user back where they came from, if known.
            if "HTTP_REFERER" in request.META:
                return redirect(request.META["HTTP_REFERER"])
            return redirect("/")
        if form.is_valid():
            source = get_object_or_404(Gloss, id=form.cleaned_data["source"])
            if 'view_dataset' not in get_perms(request.user, source.dataset):
                # If user has no permissions to dataset, raise PermissionDenied to show 403 template.
                msg = _("You do not have permissions to add relations to glosses of this lexicon.")
                messages.error(request, msg)
                raise PermissionDenied(msg)
            target = get_object_or_404(Gloss, id=form.cleaned_data["target"])
            glossrelation = GlossRelation.objects.create(source=source, target=target)
            # Optionally tag the freshly created relation.
            if form.cleaned_data["tag"]:
                Tag.objects.add_tag(glossrelation, form.cleaned_data["tag"].name)
            if "HTTP_REFERER" in request.META:
                return redirect(request.META["HTTP_REFERER"])
            return redirect("/")
        return HttpResponseBadRequest("Bad request.")
    return HttpResponseForbidden()
| [
"henri.nieminen@gmail.com"
] | henri.nieminen@gmail.com |
bed9cb10f6453c7018cc7f08aefc7153fb29f8cd | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/ETFMM_K/YW_ETFMM_SZSJ_408_K.py | 1bed5c3ce3bd01583d66e471f4a5c2ac987176e2 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ETFMM_SZSJ_408_K(xtp_test_case):
    # YW_ETFMM_SZSJ_408_K
    def test_YW_ETFMM_SZSJ_408_K(self):
        """Shenzhen A-share best-5-or-cancel sell order with a wrong business
        type; the order is expected to be rejected as invalid (error 11000370)."""
        title = '深圳A股股票交易日五档即成转撤销委托卖-错误的业务类型'
        # Expected values for this test case.
        # Expected status is one of: initial, untraded, partially traded, fully
        # traded, partial-cancel reported, partially cancelled, cancel reported,
        # cancelled, invalid (rejected), cancel-invalidated, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11000370,
            'errorMSG': queryOrderErrorMsg(11000370),
            '是否生成报单': '否',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Order parameter definition ------------------------------------------
        # Params: ticker, market, security type, security status, trading
        # status, side (B = buy / S = sell), expected status, Api.
        stkparm = QueryStkPriceQty('999999', '2', '14', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            # Deliberately wrong business type (IPOS) for this sell order.
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_IPOS'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                'price': stkparm['随机中间价'],
                'quantity': 200,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            # Run the order through the service layer and check the outcome.
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 0
# Allow running this test case directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
185cfb45dc8a5e5efeac3dda4eae303eb6ef80e2 | c275d4dff2777093de4680a462176765abc750a5 | /CODEFORCES/contest1094/B.py | 87aec63469662c8801500807fc3bd146c57878ef | [] | no_license | alex-grandson/tramvaichik | 185bf99ef6d00ac0f1dbea457b466ef36f9158fc | 5473b610c1f81f9adbecda14e8bd28807a54c2be | refs/heads/master | 2023-05-31T06:22:54.194898 | 2021-06-24T01:17:13 | 2021-06-24T01:17:13 | 301,530,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | def notContainNums(s):
for i in range(len(s)):
if s[i]>='0' and s[i]<='9':
return True
else:
return False
# Decompression loop: repeatedly expand the innermost "(<count><substring>)"
# group by in-place string surgery until the string contains no digits.
s = input()
i = 0
# notContainNums() is (despite its name) True while s still contains a digit,
# i.e. while at least one "(count substring)" group remains to expand.
while (notContainNums(s)):
    # Scan right to the first ')' ...
    while (s[i] != ')'):
        i+=1
    # ... then back left to the matching '(' of this innermost group.
    while (s[i] != '('):
        i-=1
    # Drop the '(' itself; s[i] is now the first character after it.
    s = s[0:i] + s[i+1:]
    num = ""
    sub = ""
    # Consume the digits of the repeat count, deleting them from s.
    while s[i]>='0' and s[i]<='9':
        num += s[i]
        s = s[0:i] + s[i+1:]
    # Consume the substring up to the closing ')', deleting it from s.
    while s[i] != ')':
        sub += s[i]
        s = s[0:i] + s[i+1:]
    # Replace the ')' with the substring repeated num times.
    s = s[0:i] + int(num)*sub + s[i+1:]
print(s)
| [
"70079537+alex-grandson@users.noreply.github.com"
] | 70079537+alex-grandson@users.noreply.github.com |
8c0f7766a2aeefb01138ea3f11ef51a3f0df4ec5 | 4d76813a0d3053e9125b83dff5a3087d78abff37 | /illiadTextGeneration/IlliadTextReformatted/illiadTest.py | ae08180c5a354b9b3991b2ad7382a1e863e260f4 | [] | no_license | EISCH08/MachineLearningFinalProject | 579df9be8663ef9b32e41e10f6dc4c6ffa471b31 | acdd46fbd46a69cbdb0f8a604f88c53f4cd0ea5a | refs/heads/master | 2020-05-14T18:50:08.486091 | 2019-05-02T16:17:56 | 2019-05-02T16:17:56 | 181,916,118 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | import sys
import numpy
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
# Character-level LSTM text generation over the Iliad, following the classic
# Keras "text generation" recipe: build 100-character windows, load trained
# weights, then greedily sample 1000 characters from a random seed.
#load ascii text and covert to lowercase
filename = "illiad.txt"
raw_text = open(filename).read()
raw_text = raw_text.lower()
# Strip space characters and collapse triple blank lines.
# NOTE(review): the two replace calls look identical here — presumably one
# originally targeted double spaces; verify against the original file.
raw_text = raw_text.replace(' ','')
raw_text = raw_text.replace(' ','')
raw_text = raw_text.replace('\n\n\n', '\n')
# create mapping of unique chars to integers, and a reverse mapping
chars = sorted(list(set(raw_text)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
# summarize the loaded data
n_chars = len(raw_text)
n_vocab = len(chars)
print ("Total Characters: ", n_chars)
print ("Total Vocab: ", n_vocab)
# prepare the dataset of input to output pairs encoded as integers
seq_length = 100
dataX = []
dataY = []
# Sliding window: each 100-character sequence predicts the following char.
for i in range(0, n_chars - seq_length, 1):
    seq_in = raw_text[i:i + seq_length]
    seq_out = raw_text[i + seq_length]
    dataX.append([char_to_int[char] for char in seq_in])
    dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print ("Total Patterns: ", n_patterns)
# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# define the LSTM model
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
# load the network weights
filename = "weights-improvement-14-1.8881.hdf5"
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
# pick a random seed
start = numpy.random.randint(0, len(dataX)-1)
pattern = dataX[start]
print ("Seed:")
print ("\"", ''.join([int_to_char[value] for value in pattern]), "\"")
# generate characters
for i in range(1000):
    # Shape the current window as (1, seq_length, 1) and rescale like X.
    x = numpy.reshape(pattern, (1, len(pattern), 1))
    x = x / float(n_vocab)
    prediction = model.predict(x, verbose=0)
    # Greedy decoding: always take the most probable next character.
    index = numpy.argmax(prediction)
    result = int_to_char[index]
    seq_in = [int_to_char[value] for value in pattern]
    sys.stdout.write(result)
    # Slide the window forward by one predicted character.
    pattern.append(index)
    pattern = pattern[1:len(pattern)]
print ("\nDone.")
| [
"parkereischen@gmail.com"
] | parkereischen@gmail.com |
ed3275f5cabb94e045549dc01a19953f8b9f9240 | c0cfff0edf2ce2aa11462ef6e2033b716c872055 | /request.py | 39acffbbbff3ab4d6367c38814a9298a75e96e45 | [] | no_license | zoujunqi/cmput404_lab1 | 566764c0c26088f172b9c2f4cc02a0d66596a6cd | 375af29ce7bf9be0ab17f52f53f9fcaf0cb7a8be | refs/heads/main | 2023-07-14T23:48:36.225677 | 2021-09-10T15:59:49 | 2021-09-10T15:59:49 | 405,043,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | import requests
# Fetch Google's homepage and show the Response object (its status repr).
response = requests.get("http://www.google.com/")
print(response)
# Fetch this script's own GitHub page and print the raw HTML body.
my_url = 'https://github.com/zoujunqi/cmput404_lab1/blob/main/request.py'
content = requests.get(my_url)
print(content.text)
| [
"noreply@github.com"
] | zoujunqi.noreply@github.com |
600c9d08eb357a7891be01e4e360772022e0f549 | 032872dcb12d499e1a465bc28eb089d8d417b69e | /drone/Video.py | 95243f054384918a3867478b985ea8c0eceadcfc | [] | no_license | kmangame0/robot-drone-project | 4dca3482567043fe8fddfc2e02843c7b36a415ca | 690173a75eecbe9c0fff1885e4e04927de142914 | refs/heads/master | 2021-01-11T07:38:07.124198 | 2017-02-01T23:28:13 | 2017-02-01T23:28:13 | 69,899,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,601 | py | import time, sys
import ps_drone
from PIL import Image
class Video:
def __init__(self, *args, **kwargs):
self.drone = ps_drone.Drone()
drone = self.drone
drone.startup()
drone.reset()
#time.sleep(20)
while (drone.getBattery()[0] == -1): time.sleep(0.1)
print "Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1])
drone.useDemoMode(False)
drone.setConfigAllID()
drone.setMConfig("video:video_channel","1")
drone.setMConfig("video:video_codec","128")
drone.setMConfig("video:bitrate",5000)
drone.setMConfig("video:codec_fps",5)
CDC = drone.ConfigDataCount
while CDC == drone.ConfigDataCount: time.sleep(0.0001)
drone.startVideo()
drone.showVideo()
self.IMC = drone.VideoImageCount
self.stop = False
self.ground = True
self.takeOff = False
self.processingImage = False
self.mostGreen = 1
self.previousGreen = 0
self.previousTally = 0
self.currentTally = 0
self.videoProcessing()
self.startProcessing = False
self.lostGreen = False
self.red = 0
def videoProcessing(self):
self.previousGreen = 0
self.red= 0
drone = self.drone
drone.setSpeed(0.1)
## drone.getNDpackage(["demo"])
## firstYaw = drone.NavData["demo"][2][2]
xtime = time.localtime()
xSeconds = xtime[5]
while not self.stop:
self.IMC = drone.VideoImageCount
if self.processingImage == False:
self.processingImage = True
try:
self.mostGreen = 0
self.previousTally = 0
self.currentTally = 0
img = Image.open('img.png')
if self.takeOff == False:
self.takeOff = True
time.sleep(10)
print "Taking off"
drone.takeoff()
time.sleep(10)
print "Hovering"
drone.hover()
time.sleep(1)
self.processingImage = False
print "Done Calibrating"
## drone.getNDpackage(["demo"])
## secondYaw = drone.NavData["demo"][2][2]
## firstYaw = firstYaw + secondYaw
self.processingImage = False
else:
## if abs(secondYaw-firstYaw) > 3.5:
## drone.getNDpackage(["demo"])
## secondYaw = drone.NavData["demo"][2][2]
## if secondYaw > firstYaw:
## drone.turnAngle(-2,0.25)
## else:
## drone.turnAngle(2,0.25)
## self.processingImage = False
#Top Left
for x in range (0,53):
for y in range (0,30):
r, g, b = img.getpixel((x,y))
if r <155 and g < 30 and b < 70:
self.red = self.red + 1
self.currentTally = self.currentTally + 1
if self.currentTally > self.previousTally:
self.mostGreen = 1
self.previousTally = self.currentTally
#Top Center
self.currentTally = 0
for x in range (54,106):
for y in range (0,30):
r, g, b= img.getpixel((x,y))
if r <155 and g < 30 and b < 70:
self.red = self.red + 1
self.currentTally = self.currentTally + 1
if self.currentTally > self.previousTally:
self.mostGreen = 2
self.previousTally = self.currentTally
#Top Right
self.currentTally = 0
for x in range (107,159):
for y in range (0,30):
r, g, b= img.getpixel((x,y))
if r <155 and g < 30 and b < 70:
self.red = self.red + 1
self.currentTally = self.currentTally + 1
if self.currentTally > self.previousTally:
self.mostGreen = 3
self.previousTally = self.currentTally
#Left
self.currentTally = 0
for x in range (0,53):
for y in range (30,60):
r, g, b= img.getpixel((x,y))
if r <155 and g < 30 and b < 70:
self.red = self.red + 1
self.currentTally = self.currentTally + 1
if self.currentTally > self.previousTally:
self.mostGreen = 4
self.previousTally = self.currentTally
#Center
self.currentTally = 0
for x in range (54,106):
for y in range (30,60):
r, g, b= img.getpixel((x,y))
if r <155 and g < 30 and b < 70:
self.red = self.red + 1
self.currentTally = self.currentTally + 1
if self.currentTally > self.previousTally:
self.mostGreen = 5
self.previousTally = self.currentTally
#Right
self.currentTally = 0
for x in range (107,159):
for y in range (30,60):
r, g, b= img.getpixel((x,y))
if r <155 and g < 30 and b < 70:
self.red = self.red + 1
self.currentTally = self.currentTally + 1
if self.currentTally > self.previousTally:
self.mostGreen = 6
self.previousTally = self.currentTally
#Bottom Left
self.currentTally = 0
for x in range (0,53):
for y in range (60,90):
r, g, b= img.getpixel((x,y))
if r <155 and g < 30 and b < 70:
self.red = self.red + 1
self.currentTally = self.currentTally + 1
if self.currentTally > self.previousTally:
self.mostGreen = 7
self.previousTally = self.currentTally
#Bottom Center
self.currentTally = 0
for x in range (54,106):
for y in range (60,90):
r, g, b= img.getpixel((x,y))
if r <155 and g < 30 and b < 70:
self.red = self.red + 1
self.currentTally = self.currentTally + 1
if self.currentTally > self.previousTally:
self.mostGreen = 8
self.previousTally = self.currentTally
#Bottom Right
self.currentTally = 0
for x in range (107,159):
for y in range (60,90):
r, g, b= img.getpixel((x,y))
if r <155 and g < 30 and b < 70:
self.red = self.red + 1
self.currentTally = self.currentTally + 1
if self.currentTally > self.previousTally:
self.mostGreen = 9
self.previousTally = self.currentTally
print "Most Red Quadrant: " + str(self.mostGreen)
if self.red > 500:
drone.land()
self.stop = True
exit(9)
if self.mostGreen == 0:
print "Lost Green"
self.mostGreen = self.previousGreen
if self.previousGreen == 1:
self.mostGreen = 9
elif self.previousGreen == 2:
self.mostGreen = 8
elif self.previousGreen == 3:
self.mostGreen = 7
elif self.previousGreen == 4:
self.mostGreen = 6
elif self.previousGreen == 5:
self.mostGreen = 5
elif self.previousGreen == 6:
self.mostGreen = 4
elif self.previousGreen == 7:
self.mostGreen = 3
elif self.previousGreen == 8:
self.mostGreen = 2
elif self.previousGreen == 9:
self.mostGreen = 1
if self.mostGreen == 1:
print "Forward, Left"
drone.moveForward(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
drone.moveLeft(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
if self.mostGreen == 2:
print "Forward"
drone.moveForward(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
if self.mostGreen == 3:
print "Forward, Right"
drone.moveForward(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
drone.moveRight(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
if self.mostGreen == 4:
print "Left"
drone.moveLeft(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
if self.mostGreen == 5:
#if alt < 90:
drone.land()
#time.sleep(1)
self.processingImage = True
self.stop = True
if self.mostGreen == 6:
print "Right"
drone.moveRight(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
if self.mostGreen == 7:
print "Back, Left"
drone.moveBackward(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
drone.moveLeft()
time.sleep(0.1)
drone.stop()
#time.sleep(1)
if self.mostGreen == 8:
print "Back"
drone.moveBackward(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
if self.mostGreen == 9:
print "Back, Right"
drone.moveBackward(0.1)
time.sleep(0.1)
drone.stop()
#time.sleep(1)
drone.moveRight()
time.sleep(0.1)
drone.stop()
#time.sleep(1)
self.processingImage = False
self.red = 0
if self.mostGreen != 0:
self.previousGreen = self.mostGreen
except:
#print "Error"
self.processingImage = False
continue
Video()
| [
"kmangame0@gmail.com"
] | kmangame0@gmail.com |
86adaa2fb11a63616355c40d88ee9dc6fb6f5168 | 90dd10908aa90fcbc01163f7b49a82f87d6348fb | /singleton.py | f832e9772246de346e2f3e76e5f2fee50a1247ae | [] | no_license | joobih/design_module | 4de179120b8f65d3e987e6f0d29140f248547638 | ae293cf70e1362d45dbf55b7764e3a1c7dae18ea | refs/heads/master | 2021-01-10T09:59:00.547843 | 2016-03-25T04:35:39 | 2016-03-25T04:35:39 | 54,694,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | #encoding=utf-8
from functools import wraps
def singleton(cls):
instance = {}
def _singleton(*args, **kw):
if cls not in instance:
instance[cls] = cls(*args, **kw)
return instance
return _singleton
| [
"1006536507@qq.com"
] | 1006536507@qq.com |
353a00ea245bd48d5edb282337afca376150a842 | 3e5d58d6bf6316aaf44bba6ee2f047fcdc6a2e17 | /leetcode/candies/a.py | 0e5ffe8de6a11ec0e4451979b06afab10ff5b728 | [] | no_license | shunwuyu/lesson_shuidi | 87fa9c36d4aa6005dd7243d4c52e2e06bc9b7e9d | abf6c10d70725c616d9dcb230b4a88d54f29e38e | refs/heads/master | 2023-01-10T08:41:08.611479 | 2020-09-26T01:26:05 | 2020-09-26T01:26:05 | 216,329,814 | 2 | 0 | null | 2023-01-05T09:09:27 | 2019-10-20T08:22:54 | JavaScript | UTF-8 | Python | false | false | 234 | py | from typing import List
class Solution:
def distributeCandies(self, candies: List[int]) -> int:
return min(len(set(candies)), len(candies) >> 1)
x = Solution()
print("最大的种类数为", x.distributeCandies([1,1,2,2,3,3])) | [
"shunwu2001@163.com"
] | shunwu2001@163.com |
9c1513fc38a50fa093602c41444c8be32727345d | f47fe8a7d8cd87b3bfa2e172b4a9fc93e3a4abc2 | /2015/AST1/vezbovni/David/habl.py | 1b759c2b4f14d8433cbff56e9e18ac1feecb3585 | [] | no_license | ispastlibrary/Titan | a4a7e4bb56544d28b884a336db488488e81402e0 | f60e5c6dc43876415b36ad76ab0322a1f709b14d | refs/heads/master | 2021-01-17T19:23:32.839966 | 2016-06-03T13:47:44 | 2016-06-03T13:47:44 | 60,350,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | import numpy as np
#import matplot.pyplot as plt
d, V, wi, deltav = np.loadtxt(habl.txt, unpac=True)
sum_wi = np.sum(wi)
sum_wy = np.sum()
sum_wx = np.sum()
sum_wxy = np.sum(wi*d*V)
sum_wx2 = no.sum(wi*d*d)
b = (sum_wxy * sum_wi -sum_wy * sum_wx) / (sum_wi * sum_wx2 - (sum_wx)**2)
print(b)
| [
"ispast.library@gmail.com"
] | ispast.library@gmail.com |
f64eb836bd3aef01d84eee7433095c5a87b3fe6a | 93ba8a5eb99b7c59a2a6c324671156f62545bbd9 | /lib/forward/models/telnet/bandwidthConfig.py | f8ecbd940508ad24fe966c2d5c1afb1766496262 | [] | no_license | YuanDdQiao/forward | c0b724b23ac7b8d249eff5bc4e9110b19a216247 | 2e701879e48aaa1b7801fa9507dc2a9292edb5b9 | refs/heads/master | 2021-06-04T23:53:03.634663 | 2016-10-10T08:15:15 | 2016-10-10T08:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,891 | py | #!/usr/bin/evn python
#coding:utf-8
""" It applies only to models of network equipment mx960
See the detailed comments C6506.py
"""
import sys,re,os
from forward.models_utils.forwardError import ForwardError
from forward.models_utils.deviceListSplit import DEVICELIST
class Bandwidth(object):
def __init__(self,shell = '', ip = '',bandwidth = ''):
device = DEVICELIST(['112.33.0.0-112.33.10.255','221.176.53.0-221.176.54.255']) # mobile cloud IP
self.ipRange = device.getIpList()
self.targetIP = ip # bind ip
self.shell = shell # device shell
self.bandwidth = bandwidth # bind bandwidth
self.resolvTermAndIP()
self.getTerm()
self.checkBandwidthCommand="""show configuration firewall family inet prefix-action Action-Name-{bandwidth}M""".format(bandwidth=self.bandwidth) # wheather check bandwidth exist
self.checkIPAndBandwidthCommand1="""show configuration firewall filter Policer-For-Each-Address term Filter-Term-Name-{ip} then prefix-action""".format(ip=self.ip)
def getTerm(self):
# Get IP terminal number
for term in self.termAndIP.keys():
if self.ip in self.termAndIP[term]: # find it
self.termNumber=term
break
def resolvTermAndIP(self):
term0=DEVICELIST(["221.176.53.0-221.176.53.255"])
term1=DEVICELIST(["221.176.54.0-221.176.54.255"])
term3=DEVICELIST(["112.33.0.0-112.33.0.255"])
term4=DEVICELIST(["112.33.1.0-112.33.1.255"])
term5=DEVICELIST(["112.33.2.0-112.33.2.255"])
term6=DEVICELIST(["112.33.3.0-112.33.3.255"])
term7=DEVICELIST(["112.33.4.0-112.33.4.255"])
term8=DEVICELIST(["112.33.5.0-112.33.5.255"])
term9=DEVICELIST(["112.33.6.0-112.33.6.255"])
term10=DEVICELIST(["112.33.7.0-112.33.7.255"])
term11=DEVICELIST(["112.33.8.0-112.33.8.255"])
term12=DEVICELIST(["112.33.9.0-112.33.9.255"])
term13=DEVICELIST(["112.33.10.0-112.33.10.255"])
self.termAndIP = {
"0":term0.getIpList(),
"1":term1.getIpList(),
"3":term3.getIpList(),
"4":term4.getIpList(),
"5":term5.getIpList(),
"6":term6.getIpList(),
"7":term7.getIpList(),
"8":term8.getIpList(),
"9":term9.getIpList(),
"10":term10.getIpList(),
"11":term11.getIpList(),
"12":term12.getIpList(),
"13":term13.getIpList(),
}
def ipStatus(self):
# To judge whether the IP is legal
njInfo={
"content":"",
"errLog":"The specify ip is not valid",
"status":False
}
if self.targetIP in self.ipRange:
self.getTerm() # self.termNumber
njInfo["status"]=True
return njInfo
def createBandwidth(self):
modeData = self.shell._configMode()
if modeData["status"]:
# switch to config mode success
self.shell.execute("set firewall family inet prefix-action Action-Name-{bandwidth}M policer Policer-CIDC-T-BW-11100000-{bandwidth}M".format(bandwidth=self.bandwidth))
self.shell.execute("set firewall family inet prefix-action Action-Name-{bandwidth}M filter-specific")
self.shell.execute("set firewall family inet prefix-action Action-Name-{bandwidth}M subnet-prefix-length 24")
self.shell.execute("set firewall family inet prefix-action Action-Name-{bandwidth}M source-prefix-length 32")
self.shell.execute("set firewall policer Policer-CIDC-T-BW-11100000-{bandwidth}M if-exceeding bandwidth-limit {bandwidth}m".format(bandwidth=self.bandwidth))
self.shell.execute("set firewall policer Policer-CIDC-T-BW-11100000-{bandwidth}M if-exceeding burst-size-limit 512k".format(bandwidth=self.bandwidth))
self.shell.execute("set firewall policer Policer-CIDC-T-BW-11100000-{bandwidth}M then discard".format(bandwidth=self.bandwidth))
# commit
commitData = self._commit()
if commitData["status"]:
# commit success
data = self._exitConfigMode() # exit config mode
if not data["status"]:
# exit failed
raise ForwardError(data["errLog"])
# Check whether bandwidth to create success
data = self.bandwidthConfigExist()
if not data["status"]:
# create bandwidth ,but failed
data["errLog"] = "create bandwidth {bandwidth}M is failed".format(bandwidth=self.bandwidth)
else:
pass
# create bandwid is successed.
else:
# commit failed
data = commitData
else:
# failed
data = modeData
return data
def bandwidthConfigExist(self):
# wheather check bandwidth config exist
njInfo = {
"content":"",
"errLog":"The specify bandwidth config is not exist.",
"status":False
}
result = self.shell.execute(self.checkBandwidthCommand)
if result["status"]:
content = result["content"]
if re.search("""policer Policer\-CIDC\-T\-BW\-[0-9]{3,}\-%sM;"""% (self.bandwidth) ,content):
njInfo["status"] = True
# bandwidth config is exist
else:
# bandwidth config is not exist
njInfo["status"] = False
else:
raise ForwardError(result["errLog"])
return njInfo
def ipAndBandwidthExist(self):
njInfo = {
"content":"",
"errLog":"",
"status":False
}
try:
data = self.shell.execute(self.checkIPAndBandwidthCommand) # check it
if data["status"]:
if re.search("""prefix\-action Action\-Name\-%sM""" % self.bandwidth,data["content"]):
# exist bind
# get term num
# check again
data = self.shell.execute("""show configuration firewall filter Policer-For-Each-Address term {term} | match {ip}/32""".format(term=self.termNumber,ip=self.ip))
if data["status"]:
if re.search("%s/32 except" % (self.ip),data["content"] ):
# success
njInfo["status"] = True
else:
raise ForwardError("not exist bind")
else:
raise ForwardError(data["errLog"])
njInfo["status"] = True
else:
raise ForwardError("not exist bind")
# not exist bind
else:
raise FowardError(data["errLog"])
except Exception,e:
njInfo["status"] = False
njInfo["errLog"] = str(e)
return njInfo
def bindIPAndBandwidth(self):
njInfo={
"content":"",
"errLog":"",
"status":False
}
try:
data = self._configMode()
if data["status"]:
# switch to config mode successed
self.shell.execute("""set firewall filter Policer-For-Each-Address term Filter-Term-Name-{ip} from address {ip}/32""".format(ip=self.ip))
self.shell.execute("""set firewall filter Policer-For-Each-Address term Filter-Term-Name-{ip} then forwarding-class queue_2""".format(ip=slef.ip))
self.shell.execute("""set firewall filter Policer-For-Each-Address term Filter-Term-Name-{ip} then accept""".format(ip=self.ip))
self.shell.execute("""set firewall filter Policer-For-Each-Address term Filter-Term-Name-{ip} then prefix-action Action-Name-{bandwidth}M""".format(bandwidth=self.bandwidth,ip=self.ip))
self.shell.execute("""set firewall filter Policer-For-Each-Address term {term} from address {ip}/32 except""".format(ip=self.ip,term=self.termNumber))
self.shell.execute("""insert firewall filter Policer-For-Each-Address term Filter-Term-Name-{ip} before term {term}""".format(term=self.termNumber,ip=self.ip))
# commit
data = self._commit()
if data["status"]:
# commit success
data = self._exitConfigMode()
if data["status"]:
data = self.ipAndBandwidthExist() # check it
if data["status"]:
njInfo["status"] = True
# bind successed
else:
raise ForwardError("bind ip and bandwidth failed,Error:",data["errLog"])
else:
raise ForwardError(data["errLog"])
else:
raise ForwardError(data["errLog"])
else:
raise ForwardError(data["errLog"])
except Exception,e:
njInfo["status"] = False
njInfo["errLog"] = str(e)
return njInfo
def modifyIPAndBandwidth(self):
njInfo={
"content":"",
"errLog":"",
"status":False
}
try:
data=self.shell._configMode()
if data["status"]:
# switch to config mode successed
self.shell.execute("""set firewall filter Policer-For-Each-Address term Filter-Term-Name-{ip} then prefix-action Action-Name-{bandwidth}M""".format(bandwidth=self.bandwidth,ip=self.ip))
data = self._commit() # commit
if data["status"]:
data = self._exitConfigMode()
if not data["status"]:
# exit failed
raise ForwardError(data["errLog"])
else:
# Check whether the binding is successful
data = self.ipAndBandwidthExist()
if data["status"]:
# modify successed
njInfo["status"] = True
else:
raise ForwardError("Modify the binding failed")
else:
# commit failed
raise ForwardError(data["errLog"])
else:
raise ForwardError(data["errLog"])
except Exception,e:
njInfo["status"] = False
njInfo["errLog"] = str(e)
return njInfo
def bindBandwidth(self):
njInfo={
"content":"",
"errLog":"",
"status":False
}
try:
ipIsTrue=self.ipStatus()
if ipIsTrue["status"]:
# ip is invalid
if self.bandwidthConfigExist()["status"]:
# bandwidth config is exist
pass
else:
# bandwidth config is not exist,then create it
data = self.createBandwidth()
if data["status"]:
# create bandwidth is successed
# check ip and bandwidth is not bind
data = self.ipAndBandwidthExist()
if data["status"]:
# Have binding, should modify it
data = self.modifyIPAndBandwidth()
if data["status"]:
# successed
njInfo["status"] = True
else:
raise ForwardError(data["errLog"])
else:
# Have not binding, chould create it
data = self.bindIPAndBandwidth()
if data["status"]:
# successed
njInfo["status"] = True
else:
raise ForwardError(data["errLog"])
else:
# failed
raise ForwardError(data["errLog"])
else:
# ip is not valid
raise ForwardError(ipIsTrue["errLog"])
except Exception,e:
njInfo["status"] = False
njInfo["errLog"] = str(e)
return njInfo
def deleteBindIPAndBandwidth(self):
njInfo={
"content":"",
"errLog":"",
"status":False
}
try:
ipIsTrue = self.ipStatus()
if ipIsTrue["status"]:
data = self._configMode()
if data["status"]:
self.shell.execute("""delete firewall filter Policer-For-Each-Address term Filter-Term-Name-{ip}""".format(ip=self.ip))
self.shell.execute("""delete firewall filter Policer-For-Each-Address term {term} from address {ip}/32""".format(ip=self.ip,term=self.termNumber))
data = self._commit() # commit
if data["status"]:
# commit succcessed
data = self._exitConfigMode()
if data["status"]:
njInfo["status"] = True
else:
raise ForwardError(data["errLog"]) # failed.
else:
raise ForwardError(data["errLog"]) # failed.
else:
raise ForwardError(data["errLog"]) # switch to config mode failed.
else:
raise ForwardError("The specify ip is not valid.")
except Exception,e:
njInfo["status"] = False
njInfo["errLog"] = str(e)
return njInfo
| [
"leannmak@139.com"
] | leannmak@139.com |
f31dc60f9d3b228389bf28bd150e6776ddfe7cc1 | c2be187155aabf59a4c0d3f5065bc26239c0b827 | /special_crawler/extract_statelinetack_data.py | 309d78b9da4250c853051dd1e955893f8a217962 | [] | no_license | dankCodeNugs/tmtext | 1d6c54f79399bfa5e6f3905c0f72ba0be59d8d0d | 8e2d834775f440def7f57294674b8109b46ee191 | refs/heads/master | 2023-03-16T08:49:38.456929 | 2016-12-20T19:45:54 | 2016-12-20T19:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,954 | py | #!/usr/bin/python
import urllib
import re
import sys
import json
from lxml import html, etree
import time
import requests
from extract_data import Scraper
class StateLineTackScraper(Scraper):
'''
NOTES :
no_image examples:
http://www.statelinetack.com/item/horseware-pony-liner-200g/E012435/
'''
##########################################
############### PREP
##########################################
# holds a data from an external request for loading
bazaar = None
INVALID_URL_MESSAGE = "Expected URL format is http://www.statelinetack.com/item/<product-name>/<product-id>/"
def check_url_format(self):
"""Checks product URL format for this scraper instance is valid.
Returns:
True if valid, False otherwise
"""
#m = re.match("^http://www.amazon.com/dp/[a-zA-Z0-9]+$", self.product_page_url)
m = re.match(r"^http://www.statelinetack.com/.*?$", self.product_page_url)
return not not m
##########################################
############### CONTAINER : NONE
##########################################
def _url(self):
return self.product_page_url
def _event(self):
return None
def _product_id(self):
product_id = self.tree_html.xpath('//input[@id="ctl00_ctl00_CenterContentArea_MainContent_HidBaseNo"]/@value')[0]
return product_id
def _site_id(self):
return None
def _status(self):
return "success"
##########################################
############### CONTAINER : PRODUCT_INFO
##########################################
def _product_name(self):
a = self.tree_html.xpath('//*[@itemprop="name"]/text()')[0]
if a is not None and len(a)>3:
return a
return self._product_title()
def _product_title(self):
return self.tree_html.xpath("//meta[@property='og:title']/@content")[0]
def _title_seo(self):
return self.tree_html.xpath("//title//text()")[0].strip()
def _model(self):
return None
def _upc(self):
return None
def _features(self):
desc, feat = self._feature_helper()
return feat
def _feature_count(self):
desc, feat = self._feature_helper()
return len(feat)
def _feature_helper(self):
tree = self.tree_html
tree = str(etree.tostring(tree))
print re.findall(r'\s*<strong>\s*(.*)\s*</strong>\s*', tree)# take care of some crazy spacing issues
tree = re.sub(r'\s*<strong>\s*(.*)\s*</strong>\s*', r'\1', tree)
tree = re.sub(r'\n', '', tree)
tree = html.fromstring(tree)
full_description = [x.strip() for x in tree.xpath('//div[@id="ItemPageProductSummaryBoxMain"]//div[@class="GreyBoxMiddle"]//text()') if len(x.strip())>0]
full_description = [x for x in full_description if len(x)>3]
feat_index = [i for i in range(len(full_description)) if re.findall(r'^.{0,10}(F|f)eatures.{0,4}$', full_description[i])]
spec_index = [i for i in range(len(full_description)) if re.findall(r'^.{0,10}(S|s)pecifications.{0,4}$', full_description[i])]
if len(feat_index)>0:
feat_index = feat_index[0]
else:
feat_index = 0
if len(spec_index)>0:
spec_index = spec_index[0]
else:
spec_index = None
if spec_index>0:
feat = full_description[feat_index+1:spec_index]
else:
feat = full_description[feat_index+1:]
if feat_index>0:
desc = full_description[0:feat_index]
else:
desc = full_description[0]
if isinstance(desc, str) or isinstance(desc, unicode):
temp = []
temp.append(desc)
desc = temp
return desc, feat
def _model_meta(self):
return None
def _description(self):
# description = ([x.strip() for x in self.tree_html.xpath('//div[@id="ItemPageProductSummaryBoxMain"]//div[@class="GreyBoxMiddle"]//text()') if len(x.strip())>0])
# for row in range(0,6):
# if len(description[row]) > 3:#to avoid the heading "product description"
# return description[row]
# return None
desc, feat = self._feature_helper()
return ' '.join(desc)
def _long_description(self):
return None
##########################################
############### CONTAINER : PAGE_ATTRIBUTES
##########################################
def _no_image(self):
return None
def _mobile_image_same(self):
return None
def _image_urls(self):
#metaimg comes from meta tag
#metaimg = self.tree_html.xpath('//meta[@property="og:image"]/@content')
#imgurl comes from the carousel
imageurl = self.tree_html.xpath('//img[@class="swatch"]/@src')
if(len(imageurl) == 0):
imageurl = self.tree_html.xpath('//meta[@property="og:image"]/@content')
return imageurl
def _image_count(self):
imgurls = self._image_urls()
return len(imgurls)
def _video_urls(self):
#"url":"http://ecx.images-amazon.com/images/I/B1d2rrt0oJS.mp4"
video_url = self.tree_html.xpath('//script[@type="text/javascript"]')
temp = []
for v in video_url:
r = re.findall("[\'\"]url[\'\"]:[\'\"](http://.+?\.mp4)[\'\"]", str(v.xpath('.//text()')))
if r:
temp.extend(r)
return temp
def _video_count(self):
return len(self._video_urls())
def _pdf_urls(self):
moreinfo = self.tree_html.xpath('//div[@class="ItemPageDownloadableResources"]//div//a/@href')
pdfurl = []
print '\n\n'
for a in moreinfo:
p = re.findall(r'(.*\.pdf)', a)
pdfurl.extend(p)
baseurl = 'http://www.statelinetack.com/'
pdfurl = [baseurl + x[1:] for x in pdfurl]
return pdfurl
def _pdf_count(self):
return len(self._pdf_urls())
def _webcollage(self):
return None
def _htags_from_tree(self):
htags_dict = {}
# add h1 tags text to the list corresponding to the "h1" key in the dict
htags_dict["h1"] = map(lambda t: self._clean_text(t), self.tree_html.xpath("//h1//text()[normalize-space()!='']"))
# add h2 tags text to the list corresponding to the "h2" key in the dict
htags_dict["h2"] = map(lambda t: self._clean_text(t), self.tree_html.xpath("//h2//text()[normalize-space()!='']"))
return htags_dict
def _keywords(self):
return None
##########################################
############### CONTAINER : REVIEWS
##########################################
#bazaar for ratings
def get_bazaar(self):
if self.bazaar != None:
return self.bazaar
else:
url = 'http://tabcomstatelinetack.ugc.bazaarvoice.com/3421-en_us/%s/reviews.djs?format=embeddedhtml'
url = url % (self._product_id())
contents = urllib.urlopen(url).read()
# tree = re.findall(r'var materials=(\{.*?\}.*\})', contents)[0]
# tree = re.sub(r'\\(.)', r'\1', tree)
# tree = re.findall(r'(\<.*\>)', tree)[0]
# tree = html.fromstring(contents)
return contents
#extract average review, and total reviews
def _average_review(self):
bazaar = self.get_bazaar()
# avg = bazaar.xpath('//*[contains(@class, "BVRRRatingNumber")]//text()')
# avg = re.findall(r'<span class=\\"BVRRNumber BVRRRatingRangeNumber\\">(.*?)<\\/span>', bazaar)
avg = re.findall(r'<span class=\\"BVRRNumber BVRRRatingNumber\\">([0-9.]*?)<\\/span>', bazaar)
return avg[0]
def _review_count(self):
bazaar = self.get_bazaar()
# num = bazaar.xpath('//*[contains(@class, "BVRRRatingRangeNumber")]//text()')
num = re.findall(r'\<span class\=\\"BVRRNumber\\"\>([0-9]*?)\<\\/span\> review', bazaar)
return num[0]
def _max_review(self):
return None
def _min_review(self):
return None
##########################################
############### CONTAINER : SELLERS
##########################################
def _price(self):
price = self.tree_html.xpath("//span[@id='lowPrice']//text()")
if price:
return price[0].strip()
return None
def _in_stores_only(self):
return None
def _in_stores(self):
return None
def _owned(self):
return 1
def _owned_out_of_stock(self):
return None
def _marketplace(self):
return 0
def _marketplace_sellers(self):
return None
def _marketplace_lowest_price(self):
return None
##########################################
############### CONTAINER : SELLERS
##########################################
def _category_name(self):
all = self._categories()
all = map(lambda t: self._clean_text(t), all)
return all[-1]
def _categories(self):
all = self.tree_html.xpath('//div[@id="ItemPageBreadCrumb"]//a/text()')
return all
def _brand(self):
return None
#########################################
################ HELPER FUNCTIONS
##########################################
# clean text inside html tags - remove html entities, trim spaces
def _clean_text(self, text):
return re.sub(" ", " ", text).strip()
##########################################
################ RETURN TYPES
##########################################
# dictionaries mapping type of info to be extracted to the method that does it
# also used to define types of data that can be requested to the REST service
DATA_TYPES = { \
# CONTAINER : NONE
"url" : _url, \
"event" : _event, \
"product_id" : _product_id, \
"site_id" : _site_id, \
"status" : _status, \
# CONTAINER : PRODUCT_INFO
"product_name" : _product_name, \
"product_title" : _product_title, \
"title_seo" : _title_seo, \
"model" : _model, \
"upc" : _upc,\
"features" : _features, \
"feature_count" : _feature_count, \
"model_meta" : _model_meta, \
"description" : _description, \
"long_description" : _long_description, \
# CONTAINER : PAGE_ATTRIBUTES
"image_urls" : _image_urls, \
"image_count" : _image_count, \
"video_urls" : _video_urls, \
"video_count" : _video_count, \
"pdf_urls" : _pdf_urls, \
"pdf_count" : _pdf_count, \
"webcollage" : _webcollage, \
"htags" : _htags_from_tree, \
"keywords" : _keywords, \
# CONTAINER : REVIEWS
"average_review" : _average_review, \
"review_count" : _review_count, \
"max_review" : _max_review, \
"min_review" : _min_review, \
# CONTAINER : SELLERS
"price" : _price, \
"in_stores_only" : _in_stores_only, \
"in_stores" : _in_stores, \
"owned" : _owned, \
"owned_out_of_stock" : _owned_out_of_stock, \
"marketplace": _marketplace, \
"marketplace_sellers" : _marketplace_sellers, \
"marketplace_lowest_price" : _marketplace_lowest_price, \
# CONTAINER : CLASSIFICATION
"categories" : _categories, \
"category_name" : _category_name, \
"brand" : _brand, \
"loaded_in_seconds": None \
}
# special data that can't be extracted from the product page
# associated methods return already built dictionary containing the data
DATA_TYPES_SPECIAL = { \
"mobile_image_same" : _mobile_image_same, \
"no_image" : _no_image,\
}
# def _anchors_from_tree(self):
# description_node = self.tree_html.xpath('//div[contains(@class, "GreyBoxMiddle")]/div/span/span/span/div[3]')[0]
# links = description_node.xpath(".//a")
# nr_links = len(links)
# links_dicts = []
# for link in links:
# links_dicts.append({"href" : link.xpath("@href")[0], "text" : link.xpath("text()")[0]})
# ret = {"quantity" : nr_links, "links" : links_dicts}
# return ret
# def _seller_meta_from_tree(self):
# return self.tree_html.xpath("//meta[@itemprop='brand']/@content")[0]
# def _meta_description(self):
# return self.tree_html.xpath("//meta[@name='Description']/@content")[0]
# def _meta_keywords(self):
# return self.tree_html.xpath("//meta[@name='Keywords']/@content")[0]
# def main(args):
# # check if there is an argument
# if len(args) <= 1:
# sys.stderr.write("ERROR: No product URL provided.\nUsage:\n\tpython crawler_service.py <amazon_product_url>\n")
# sys.exit(1)
# product_page_url = args[1]
# # check format of page url
# if not check_url_format(product_page_url):
# sys.stderr.write(INVALID_URL_MESSAGE)
# sys.exit(1)
# return json.dumps(product_info(sys.argv[1], ["name", "short_desc", "keywords", "price", "load_time", "anchors", "long_desc"]))
| [
"life.long.learner127@outlook.com"
] | life.long.learner127@outlook.com |
71b438fcd454a5cd1822d284cd3c0f0883860aa6 | c35d3146adde1203c0a8c65aeeffc9c62f718be6 | /day1/day1_inputs.py | 0bb4b62f87eda94074a35f35d5d5fa182b5c539c | [] | no_license | wkozlowski-tyro/advent-of-code-2019 | 84b44c4e0a3e450d7e1ddf79330a60414fcd29ea | 9cf556dbe80df496bd0dc7c0aae484590e0b3085 | refs/heads/master | 2020-09-26T08:39:59.588902 | 2019-12-06T02:14:21 | 2019-12-06T02:14:21 | 226,219,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | import array as arr
input_1_1 = [
141589,
93261,
104320,
81961,
99212,
80661,
78734,
76783,
148694,
114382,
141508,
114659,
107687,
83845,
79690,
59366,
133984,
121431,
144033,
60628,
112095,
78560,
142103,
128943,
109209,
108999,
144208,
113134,
76591,
57098,
127233,
143194,
85736,
128733,
132275,
128871,
115164,
50617,
138648,
73023,
98822,
63572,
102841,
54817,
123579,
113025,
90063,
112330,
117131,
87661,
147299,
146812,
102343,
58763,
59569,
135997,
146057,
108574,
70215,
74304,
93988,
128150,
76391,
110718,
135513,
62057,
72921,
76889,
67794,
79041,
71987,
148584,
145472,
131139,
78569,
62584,
85610,
106800,
128550,
81694,
105892,
91250,
69465,
115222,
73511,
75887,
74891,
127555,
131553,
140892,
69685,
108927,
105759,
105884,
112178,
109708,
116894,
63459,
133853,
111303]
input_1_2 = [
141589,
93261,
104320,
81961,
99212,
80661,
78734,
76783,
148694,
114382,
141508,
114659,
107687,
83845,
79690,
59366,
133984,
121431,
144033,
60628,
112095,
78560,
142103,
128943,
109209,
108999,
144208,
113134,
76591,
57098,
127233,
143194,
85736,
128733,
132275,
128871,
115164,
50617,
138648,
73023,
98822,
63572,
102841,
54817,
123579,
113025,
90063,
112330,
117131,
87661,
147299,
146812,
102343,
58763,
59569,
135997,
146057,
108574,
70215,
74304,
93988,
128150,
76391,
110718,
135513,
62057,
72921,
76889,
67794,
79041,
71987,
148584,
145472,
131139,
78569,
62584,
85610,
106800,
128550,
81694,
105892,
91250,
69465,
115222,
73511,
75887,
74891,
127555,
131553,
140892,
69685,
108927,
105759,
105884,
112178,
109708,
116894,
63459,
133853,
111303,
]
| [
"wkozlowski@tyro.com"
] | wkozlowski@tyro.com |
ee7a8a7db2f0651090d1c74b6fcceff173cc4622 | fafa4f8c0b0418d0c57aca81fe87787d92ebe3ae | /bbt/mailldr1off.py | f5ee6d07a68ee6fabff6196968344d2502c921ee | [] | no_license | cahyoyunan/AndroidToPi-ServerSide | 5eb75b8eed97a824b87086bae63e3efb69a43aec | ab9909414ac661ad1ca5ac3747039ff77e1d4596 | refs/heads/main | 2023-04-02T01:54:44.357337 | 2021-03-22T16:22:26 | 2021-03-22T16:22:26 | 350,403,964 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import smtplib
fromaddr = 'raspiandrotopi@gmail.com'
toaddrs = 'cahyoadianto10@gmail.com'
msg = 'Lampu 2 telah mati!'
# Credentials (if needed)
username = 'raspiandrotopi'
password = 'Skyeats4airplane'
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username,password)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
| [
"noreply@github.com"
] | cahyoyunan.noreply@github.com |
6994ac553be2ac40dc26effd7c6d5fdc2ce1a567 | d053395f300a59955d3c5c640f6e1e9ae41a102b | /tutorial/spiders/cnet.py | 415e9de3a845ff18411864aad4c768eb76e86f76 | [] | no_license | fubuki/Scrapy-News | 240cb42cee08b66794e278e320009d75d52dcd0c | 1101740d92aea5a5672db71fe5328af560327292 | refs/heads/master | 2016-09-06T12:02:12.504487 | 2014-04-08T14:56:57 | 2014-04-08T14:56:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | # coding: utf-8
from datetime import datetime
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from tutorial.items import NewsItem
class CNetSpider(CrawlSpider):
name = 'cnet'
allowed_domains = ['news.cnet.com']
start_urls = [
'http://news.cnet.com/8324-12_3-0.html',
]
rules = [
# 正規表現 'begin=201312' にマッチするリンクを辿る
Rule(SgmlLinkExtractor(allow=(r'begin=201312', ), restrict_xpaths=('/html', ))),
# 正規表現 '/[\d_-]+/[^/]+/$' にマッチするリンクをparse_newsメソッドでパースする
Rule(SgmlLinkExtractor(allow=(r'/[\d_-]+/[^/]+/$', ), restrict_xpaths=('/html', )),
callback='parse_news'),
]
def parse_news(self, response):
item = NewsItem()
sel = Selector(response)
item['title'] = sel.xpath('//h1/text()').extract()[0]
item['body'] = u'\n'.join(
u''.join(p.xpath('.//text()').extract()) for p in sel.css('#contentBody .postBody p'))
item['time'] = datetime.strptime(
sel.xpath('//time[@class="datestamp"]/text()').extract()[0].strip()[:-4],
u'%B %d, %Y %I:%M %p')
yield item | [
"fubuki@users.noreply.github.com"
] | fubuki@users.noreply.github.com |
af6cb40a0ba749adff2529d4f3d19a168d1267ff | 216fb9641dfe46c2e8c0628423cc001fb06af387 | /get_QApairs/lang_divide/src/bayes_model.py | 688a36b249de71dbfe62ca99f4d9ff55b889b4dc | [] | no_license | CBVon/corpus-processing | 8989f46d2fdb3d21b2a82db789ad4857f3803db5 | c787ef63f4fd1e9ff6a8c5c26daab923fcba91a8 | refs/heads/master | 2021-09-23T14:49:57.824164 | 2018-09-24T21:08:49 | 2018-09-24T21:08:49 | 131,257,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,344 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This script implements the naive-Bayes language-identification model
# Author yannnli, Mail wangzehui@sogou-inc.com
import random
from collections import defaultdict
from basicFunc import get_dicts_count,_letter_unis,createOrderedWordModel
from loadFunc import normalizeProb,lang_probs,grammaps
from conf.config import bayes_settings
from conf.config import settings
# Module-level shortcuts pulled out of the shared settings dict.
verbose = settings['VERBOSE']  # when true, dump intermediate scores to stdout
UNKNOWN = settings['UNKNOWN']  # sentinel returned when no language can be decided
#This func is the whole bayes model
def language_check_bayes():
#1. check the probability of the login word in the sentence
dict_freq = get_dicts_count(text)
if dict_freq < bayes_settings['DICT_FREQ_LOW']:
return UNKNOWN
#2. letter unis get and combineds with the former unis
letter_unis = _letter_unis(text)
if verbose:
print text.encode('utf-8')+"'s letter unis is"
print letter_unis
print text.encode('utf-8')+"'s word unis is"
print seq_unis
print
unis = list(set(unis).intersection(set(letter_unis)))
#3. original result get
original_freq = word_sequence_check(text,unis)
if verbose:
print text.encode('utf-8')+"'s original freq is"
print original_freq
#4. sequence unis get
seq_unis = _word_unis(text)
if len(seq_unis)==0:
return original_freq
#5. mix sequence result and original result
total_dict = {}
word_unis_freq = word_sequence_check(text,seq_unis)
if isinstance(original_freq,list):
for i in range(0,len(original_freq)):
total_dict[original_freq[i][0]]=original_freq[i][1]*(1-bayes_settings['WORD_UNIS_FREQ'])
if word_unis_freq==UNKNOWN and original_freq==UNKNOWN:
return UNKNOWN
for word_pair in word_unis_freq:
word_lang = word_pair[0]
word_freq = word_pair[1]
if total_dict.has_key(word_lang):
total_dict[word_lang]+=WORD_UNIS_FREQ*word_freq
else:
total_dict[word_lang] = WORD_UNIS_FREQ*word_freq
if verbose:
print text.encode('utf-8')+"'s new freq is"
print word_unis_freq
return sorted(total_dict.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)
#This func is used to go through all the text items and return
def word_sequence_check(text,unis):
#1. get the ngram model from the text
alpha = bayes_settings["ALPHA_DEFAULT"]
model = createOrderedWordModel(text)
if verbose:
print text.encode('utf-8') + "'s model ngrams are:"
for model_r in model:
print model_r.encode('utf-8'),
#2. init the language prob
langprob = defaultdict(float)
for lang_name in unis:
langprob[lang_name]=0.0
uni_count = 0
for uni in unis:
if lang_probs.has_key(uni):
uni_count += 1
for uni in unis:
if lang_probs.has_key(uni):
# langprob[uni] = 1.0/float(uni_count) /average pooling
langprob[uni] = lang_probs.get(uni)
langprob,max_prob_t = normalizeProb(langprob)
if verbose:
print "inital prob is "
print langprob
#3. init the variants
alpha = alpha + random.normalvariate(0,1)*bayes_settings['ALPHA_WIDTH']
count = 0
hit_count = 0
last_champion = ''
#4. calculate the prob
for model_r in model:
if not grammaps.has_key(model_r):
continue
if model_r == ' ':
continue
langprob = update_prob(langprob,model_r,alpha)
count += 1
if count % 5 == 1:
langprob,maxprob = normalizeProb(langprob)
if maxprob > bayes_settings['CONV_THRESHOLD'] or count >= ['ITERATION_LIMIT']:
if count >= bayes_settings['ITERATION_LIMIT']:
break
for lang_pos in langprob.keys():
if langprob.get(lang_pos)==maxprob:
champion = lang_pos
break
if champion == last_champion:
hit_count += 1
if hit_count == 2:
break
else:
last_champion = champion
hit_count = 0
langprob,maxprob = normalizeProb(langprob)
#5. output the result
if maxprob<0.1:
return UNKNOWN
langs = sorted(langprob.keys(), key=lambda k: (-langprob[k], k))
if verbose:
print text.encode('utf-8') +"'s possible languages are: "
for lang_pos in langs:
if langprob.get(lang_pos)<0.1:
break
print lang_pos+"======"+str(langprob.get(lang_pos))
max_prob = langprob.get(langs[0])
return_list = {}
for lang_pos in langs:
lang_prob = langprob.get(lang_pos)
if lang_prob/max_prob > bayes_settings['OUTPUT_THRESHOLD']:
return_list[lang_pos] = lang_prob
return_list = sorted(return_list.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)
return return_list
#this func is used to random choose word from the word items,
#until prob has been calculated certain times or the result comes up to some criteria
def word_check(text,unis):
    """Monte-Carlo language identification over randomly sampled ngrams.

    Runs bayes_settings['n_trail'] randomized passes; each pass starts from a
    uniform distribution over the candidate languages, repeatedly samples a
    random ngram of the text and Bayes-updates the per-language scores until
    convergence or the iteration limit.  The per-trial distributions are
    averaged and the single most probable language name is returned, or
    UNKNOWN when nothing reaches probability 0.1.

    Relies on module globals: bayes_settings, lang_probs, grammaps, verbose,
    UNKNOWN, createOrderedWordModel, normalizeProb, update_prob, random.
    """
    #1. get the word model from the text
    # NOTE(review): uses bayes_settings["ALPHA"] while word_sequence_check
    # uses "ALPHA_DEFAULT" -- confirm both keys exist and are intentional.
    alpha = bayes_settings["ALPHA"]
    model = createOrderedWordModel(text)
    #2. init the lang probability
    langprob = defaultdict(float)
    for lang_name in unis:
        langprob[lang_name]=0.0
    #3. calculate the result
    for index in range(0,bayes_settings['n_trail']):
        # prob_tmp = init_prob(unis)
        prob_tmp = {}
        uni_count = 0
        for uni in unis:
            if lang_probs.has_key(uni):
                uni_count += 1
        for uni in unis:
            if lang_probs.has_key(uni):
                prob_tmp[uni] = 1.0/float(uni_count)
        # NOTE(review): alpha accumulates Gaussian noise across trials (it is
        # updated in place every iteration) -- confirm each trial was not
        # meant to jitter the base ALPHA instead.
        alpha = alpha + random.normalvariate(0,1)*bayes_settings['ALPHA_WIDTH']
        count = 0
        hit_count = 0
        last_champion = ''
        # NOTE(review): if the model is empty, randint(0, -1) raises; and if
        # no ngram of the model appears in grammaps this loop never breaks
        # (count is only advanced on known ngrams) -- consider guarding.
        while True:
            r = random.randint(0,len(model)-1)
            if grammaps.has_key(model[r]):
                if model[r]==' ':
                    continue
                prob_tmp = update_prob(prob_tmp,model[r],alpha)
                count = count + 1
                # re-normalize and test convergence every 5 updates
                if count%5==1:
                    prob_tmp,maxprob = normalizeProb(prob_tmp)
                    if maxprob>bayes_settings['CONV_THRESHOLD'] or count >=bayes_settings['ITERATION_LIMIT']:
                        if count >= bayes_settings['ITERATION_LIMIT']:
                            break
                        for lang_tmp in prob_tmp.keys():
                            if prob_tmp.get(lang_tmp)==maxprob:
                                champion = lang_tmp
                                break
                        # stop once the same language leads twice in a row
                        if champion == last_champion:
                            hit_count += 1
                            if hit_count == 2:
                                break
                        else:
                            last_champion = champion
                            hit_count = 0
        # average this trial's distribution into the running total
        for uni in langprob.keys():
            if not prob_tmp.has_key(uni):
                continue
            langprob[uni]+=prob_tmp[uni]/bayes_settings['n_trail']
    langprob,maxprob = normalizeProb(langprob)
    # output the result
    if max(langprob.values())<0.1:
        return UNKNOWN
    langs = sorted(langprob.keys(), key=lambda k: (-langprob[k], k))
    if verbose:
        print text.encode('utf-8') +"'s possible languages are: "
        for lang_pos in langs:
            if langprob.get(lang_pos)<0.1:
                break
            print lang_pos+"======"+str(langprob.get(lang_pos))
    return langs[0]
#this func is used to update the probability
def update_prob(prob,gram,alpha):
    """Bayes-style update of per-language scores for one observed ngram.

    Multiplies every language's current score by
    ``alpha/BASE_FREQ + freq(gram, language)``; languages in which the gram
    was never observed only receive the smoothing weight.  Mutates and
    returns ``prob``.

    Relies on module globals ``bayes_settings`` and ``grammaps``
    (gram -> {language: frequency}).
    """
    base_weight = alpha / bayes_settings['BASE_FREQ']
    gram_lang_freqs = grammaps[gram]
    # dict.get replaces the old Python-2-only has_key branch: a missing
    # language contributes 0.0, i.e. only the smoothing weight survives.
    for lang in prob.keys():
        prob[lang] *= base_weight + gram_lang_freqs.get(lang, 0.0)
    return prob
#this func is used to init the prob
def init_prob(unis):
    """Build a normalized prior over the candidate languages.

    Keeps only languages present in the global ``lang_probs`` table and
    rescales their prior weights to sum to 1.  Returns an empty dict when
    none of the candidates is known (the division is never executed then).
    """
    initprob = {}
    total_prob = 0.0
    for uni in unis:
        # `in` replaces the Python-2-only has_key; unknown languages are skipped
        if uni in lang_probs:
            initprob[uni] = lang_probs[uni]
            total_prob += initprob[uni]
    # normalize in place (values only are rewritten, so iteration is safe)
    for uni in initprob:
        initprob[uni] = initprob[uni] / total_prob
    return initprob
| [
"798487179@qq.com"
] | 798487179@qq.com |
5f128fb68243663e883d88fa1d5b7a5eb8f2793b | c66cdcf6564febc2af61560c9ad09cc63ab6b1c0 | /Imaging/FaceDetection.py | 8efbb61d42a2ac2c33744ad43de32bd573b71ac8 | [] | no_license | FarooqueAhmeed/WebScraping_Projects | a3a3bcd54232b60967080f1bc5c844e31409ddfd | 7b80da96bf8fbb1de76a65fba0b9d125d91fe189 | refs/heads/master | 2022-11-24T09:08:38.251062 | 2020-06-06T14:08:52 | 2020-06-06T14:08:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import numpy as np
import cv2
# Pre-trained Haar cascade models; the XML files are expected next to the
# script (working directory) -- TODO confirm deployment layout.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# Load the target image and convert to grayscale (the cascades run on gray).
img = cv2.imread('boris.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# positional args are presumably scaleFactor=1.3, minNeighbors=5 -- confirm
# against the cv2.CascadeClassifier.detectMultiScale signature
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
    # blue rectangle around each detected face
    img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
    # restrict the eye search to the face region (views into gray/img)
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for (ex,ey,ew,eh) in eyes:
        # green rectangle around each eye, drawn through the color ROI view
        cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
# display the annotated image until any key is pressed
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
"zizo11@gmail.com"
] | zizo11@gmail.com |
e94ae2714701ae992267301df804111eefdcb1b2 | 9439f5955cd6cc23fe8768b04420955b10fe218d | /Code/Regression.py | 35c3778c5fafb17fbe8d0b3862f2f2768c05175c | [] | no_license | kundanks/VC_Fintech_Research | 57fd968b3b289f06057e7ebda1cf3184b8ab8061 | ec65e660a9e51af426bc627b8bb09f018c5f8dba | refs/heads/master | 2022-10-13T12:29:08.549038 | 2020-06-14T00:26:25 | 2020-06-14T00:26:25 | 266,226,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | import pandas as pd
df = pd.read_csv('output.csv')
# drop the exported index column
df1 = df.drop(columns=['Unnamed: 0'])
# keep fintech deals only
df1 = df1.loc[df1['Fintech'] == True]
# restrict to the ten metro areas of interest
df1 = df1[df1['Company MSA'].isin(['San Francisco, CA','San Jose, CA','Boston, MA-NH','New York, NY','Chicago, IL','San Diego, CA','Los Angeles-Long Beach, CA','Washington, DC-MD-VA-WV','Seattle-Bellevue-Everett, WA','Dallas, TX'])]
# NOTE(review): 'Year' is only derived two statements below (from 'Round
# Date'); run top-to-bottom this groupby raises KeyError unless the CSV
# already contains a 'Year' column -- looks like notebook cells pasted out
# of execution order; confirm.
df2 = df1.groupby(['Company MSA','Year'])['CPI Adjusted Round Amount'].sum()
df1.shape
df1['Round Date'] = pd.to_datetime(df1['Round Date'])
df1['Year'] = df1['Round Date'].dt.year
# NOTE(review): df1 is rebuilt from the unfiltered df here, discarding the
# fintech/MSA filters applied above -- confirm this is intentional.
df1 = df.drop(columns=['Unnamed: 0', 'First', 'Common'])
df1.loc[(df1['Fintech'] == True), 'Industry'] = 'Fintech'
df2.to_csv("regression.csv")
"noreply@github.com"
] | kundanks.noreply@github.com |
66a44724d5179391df3bc13557b79879dea668bb | 0cb968140d83adfd6dffc8532f1e69d317acde50 | /src/rabbitmq/consumer/VerifyDocuementConsumer.py | c8a4cfc64948d38ca01f157ea95d566e2cf07acd | [] | no_license | SkaaRJik/neat-data-preprocessing | 61573e7b1917b4f0436bc04572b88b5774defa3c | b571d2b6d2ff2f8ad9d57968369f01952d5a2baf | refs/heads/master | 2023-06-19T23:54:37.529928 | 2021-07-14T03:27:47 | 2021-07-14T03:27:47 | 350,046,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,172 | py | import json
import logging.config
import os
from pandas import DataFrame
from pika.exchange_type import ExchangeType
from src.config.RabbitMQConfig import RabbitMQConfig
from src.processing.verification.dataset_verification import DatasetVerification
from src.processing.verification.dataset_verification_pandas import DatasetVerificationPandas
from src.rabbitmq.consumer.Consumer import Consumer
from src.samba.SambaWorker import SambaWorker
# Logging is configured from a path relative to the working directory --
# NOTE(review): this breaks if the process is launched from elsewhere.
logging.config.fileConfig('../resources/logging.conf')
# module-level logger shared by this consumer
LOGGER = logging.getLogger('exampleApp')
class VerifyDocumentConsumer(Consumer):
    """RabbitMQ consumer that verifies uploaded dataset files.

    For each incoming message it downloads the referenced file from a Samba
    share, runs the pandas-based dataset verification, uploads the verified
    CSV back to the share, and publishes a verification protocol (success or
    error) to the configured output exchange.
    """
    def __init__(self, rabbit_mq_config: RabbitMQConfig, queue: str, routing_key: str,
                 samba_worker: SambaWorker,
                 exchange: str = "", exchange_type: ExchangeType = ExchangeType.direct):
        """Wire up the verifier and Samba client, then delegate queue/exchange
        setup to the Consumer base class.

        :param rabbit_mq_config: broker configuration (also read later as
            self._rabbit_mq_config -- presumably stored by Consumer; confirm)
        :param queue: queue to consume from
        :param routing_key: binding key for the queue
        :param samba_worker: client used to download/upload dataset files
        :param exchange: exchange name ("" = default exchange)
        :param exchange_type: pika exchange type (direct by default)
        """
        self._dataset_verification: DatasetVerification = DatasetVerificationPandas()
        self._samba_worker = samba_worker
        super().__init__(rabbit_mq_config, queue, routing_key, exchange, exchange_type)
    def on_message(self, _unused_channel, basic_deliver, properties, body):
        """Invoked by pika when a message is delivered from RabbitMQ. The
        channel is passed for your convenience. The basic_deliver object that
        is passed in carries the exchange, routing key, delivery tag and
        a redelivered flag for the message. The properties passed in is an
        instance of BasicProperties with the message properties and the body
        is the message that was sent.
        :param pika.channel.Channel _unused_channel: The channel object
        :param pika.Spec.Basic.Deliver: basic_deliver method
        :param pika.Spec.BasicProperties: properties
        :param bytes body: The message body
        """
        LOGGER.info('Received message # %s from %s: %s',
                    basic_deliver.delivery_tag, properties.app_id, body)
        # body is expected to be a JSON object with projectId/projectFolder/
        # filePath/username keys (all optional via .get)
        decoded_body: dict = json.loads(body)
        project_id = decoded_body.get("projectId")
        project_folder = decoded_body.get("projectFolder")
        file_path: str = decoded_body.get("filePath")
        username: str = decoded_body.get("username")
        # eject_filename is not defined in this class -- presumably inherited
        # from Consumer; TODO confirm
        only_filename_without_extension = self.eject_filename(file_path)
        try:
            dataframe_to_save: DataFrame = None
            # download the raw file from the share into a local temp file
            file = self._samba_worker.download(file_path, 'ver-{0}-{1}'.format(username, only_filename_without_extension))
            legend_error_protocol, legend_info_protocol, legend_inc, legend_values, headers_error_protocol, legend_header, \
            data_headers, values_error_protocol, values_info_protocol, rows, dataframe_to_save = self._dataset_verification.verify_excel(
                file)
            file.close()
            os.remove(file.name)
            # write the verified dataframe to /tmp, push it back to the share,
            # then clean up the local copy
            temp_filename = '/tmp/{0}.csv'.format(project_id)
            path_to_save = f'{project_folder}/ver-{only_filename_without_extension}.csv'
            dataframe_to_save.to_csv(temp_filename, sep=';', index=False)
            self._samba_worker.upload(path_to_save=path_to_save, file=temp_filename)
            os.remove(temp_filename)
            errors = self.pack_error_protocols(legend_error_protocol=legend_error_protocol,
                                               headers_error_protocol=headers_error_protocol,
                                               values_error_protocol=values_error_protocol)
            verification_protocol = {
                "projectId": project_id,
                "errors": errors,
                "info": self.pack_info_protocols(legend_info_protocol=legend_info_protocol,
                                                 values_info_protocol=values_info_protocol),
                "verifiedFile": path_to_save,
                "legend": {
                    "header": legend_header,
                    "data": legend_values,
                    "increment": legend_inc
                },
                "rows": rows,
                # log transform is only allowed when every value is strictly
                # positive; left None when there are verification errors
                "logIsAllowed": None if errors is not None else (True if dataframe_to_save.values.min() > 0 else False),
                "headers": data_headers,
                "status": 'VERIFIED' if errors is None else 'VERIFICATION_ERROR'
            }
        except BaseException as ex:
            # NOTE(review): BaseException also catches KeyboardInterrupt /
            # SystemExit -- confirm Exception would not be sufficient here
            LOGGER.error('verification: username - {0}, filename - {1}'.format(username, file_path))
            LOGGER.exception(ex)
            verification_protocol = {
                "projectId": project_id,
                "errors": None,
                "info": None,
                "verifiedFile": None,
                "legend": None,
                "logIsAllowed": False,
                "headers": None,
                "rows": None,
                "status": 'VERIFICATION_SERVICE_ERROR'
            }
        # np_encoder / _rabbit_mq_config / _rabbit_mq_writer are presumably
        # provided by the Consumer base class -- TODO confirm
        encoded_body = json.dumps(verification_protocol, default=self.np_encoder)
        queue_config = self._rabbit_mq_config.OUTPUT_VERIFICATION_RESULT_CONFIG
        self._rabbit_mq_writer.writeMessage(exchange=queue_config.get("exchange"),
                                            routing_key=queue_config.get("routingKey"),
                                            message=encoded_body)
        self.acknowledge_message(basic_deliver.delivery_tag)
    @staticmethod
    def pack_error_protocols(legend_error_protocol, headers_error_protocol, values_error_protocol) -> dict:
        """Bundle the three error protocols into one dict.

        Returns None when all three are empty, so callers can use the result
        as a simple "has errors" flag.
        """
        is_all_protocols_empty = (len(legend_error_protocol) == 0) and (len(headers_error_protocol) == 0) and len(
            values_error_protocol) == 0
        if is_all_protocols_empty:
            return None
        else:
            return {
                "legend_errors": legend_error_protocol,
                "header_errors": headers_error_protocol,
                "values_errors": values_error_protocol,
            }
    @staticmethod
    def pack_info_protocols(legend_info_protocol, values_info_protocol) -> dict:
        """Bundle the two info protocols into one dict, or None when both are empty."""
        is_all_protocols_empty = (len(legend_info_protocol) == 0) and (len(values_info_protocol) == 0)
        if is_all_protocols_empty:
            return None
        else:
            return {
                "legend_info": legend_info_protocol,
                "values_info": values_info_protocol,
            }
| [
"fva@sibdigital.net"
] | fva@sibdigital.net |
d1b458086f30749db526c9bcb5ec38a67ced3e2a | 743c066a0ae6741b6b43ed00fc20ff9a45c5b296 | /website/smart_outlet_site/manage.py | dc0e4c7212adb650360c14a9c9530fa67b4fb1c6 | [] | no_license | anwarsgithub/smartoutlet | 291d3fe4150981910f4ce144a8c7d530a63ce32b | a111899603732138852be35afde3acbf3546be0c | refs/heads/master | 2020-04-22T12:21:33.623420 | 2019-02-12T22:52:26 | 2019-02-12T22:52:26 | 170,368,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point: point Django at this project's
    # settings module before any management command is loaded.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'smart_outlet_site.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # re-raise with a friendlier hint; the original ImportError is kept
        # as the cause via `from exc`
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"lguti068@fiu.edu"
] | lguti068@fiu.edu |
c4df7c63423c3a49db936b9f39c74a73fd7c1a08 | eb9b4a6b439699c8a46a6c6b88fd7472f002cbfc | /euler44.py | 043ae0b9fe64dcf191f03659c64c935021834b44 | [] | no_license | klq/euler_project | f1a8fdbb905321dcc295d8a3d2b31dd635825b7b | 1a33427765e19728357bec8a3f2fa1ef88668a14 | refs/heads/master | 2020-12-24T14:53:53.118922 | 2015-01-09T08:18:07 | 2015-01-09T08:18:07 | 18,789,886 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | def euler44():
"""https://projecteuler.net/problem=44
Pentagon numbers
"""
# get all pentagon numbers
all_pentagon_nums = []
for n in range(1,5000):
all_pentagon_nums.append( n * (3*n-1) / 2)
diff = []
for i in range(0,2500):
for j in range(j+1,2500):
a = all_pentagon_nums[i]
b = all_pentagon_nums[j]
if b-a in all_pentagon_nums and b+a in all_pentagon_nums:
diff.append(b-a)
print min(diff)
# Answer is:
# 5482660
def generate_pentagonals(x):
    """Yield the first x pentagonal numbers P_n = n(3n-1)/2, for n = 1..x.

    Uses range and integer floor division instead of the Python-2-only
    xrange and true-division `/`, so the generator yields the same ints on
    Python 2 and Python 3 (n(3n-1) is always even, so // is exact).
    """
    for i in range(1, x + 1):
        yield i * (3 * i - 1) // 2
def euler44_better_solution():
    """Set-comprehension variant of Euler 44: prints the minimal difference
    x - y over pentagonal x, y whose sum and difference are both pentagonal.

    NOTE(review): still O(n^2) over 2500^2 ordered pairs, so it takes a few
    seconds; membership tests are O(1) thanks to the set.
    """
    pentagonals =set(generate_pentagonals(2500))
    # Python 2 print statement (the module also uses py2 prints elsewhere)
    print min({x-y for x in pentagonals for y in pentagonals if x + y in pentagonals and x - y in pentagonals})
def main():
    """Entry point: run the brute-force euler44 solver."""
    euler44()
if __name__ == '__main__':
main() | [
"konglq@gmail.com"
] | konglq@gmail.com |
d92d6cad775474a83071c020f2ff3f72b2737c34 | a1777e8950fb347aae9c59fb5f73ce5534bb21ef | /env/bin/flask | fe28bca3d1c2496dde8b9cc76d7641a92f69229b | [] | no_license | cjmsousa/fourier-drawing | 7d54fb4275d9d0d55b34dbef633b728878eedccc | f2639878bff55b6cd81b330e27e4370bfbd17ded | refs/heads/master | 2023-03-13T03:08:22.528789 | 2021-02-28T19:28:43 | 2021-02-28T19:28:43 | 211,674,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/home/cjmsousa/Projects/fourier-drawing/env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the `flask` command: normalizes
# argv[0] and hands control to flask.cli.main.
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # strip the Windows launcher suffixes (-script.pyw / .exe) from argv[0]
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"cjmsousa@gmail.com"
] | cjmsousa@gmail.com | |
71f095e6308f3f4c5975cdf5b89e717e20981ad7 | 542e87f06d259e3f17941ba6d18575b86c1dd417 | /test/functional/p2p_unrequested_blocks.py | b8d383d19c1aa82655e38b543e5e09ac31eefa06 | [
"MIT"
] | permissive | bitcoinmoneydigital/source | 04d08b8a53a6244edc368d50b31f73b5ac410595 | 41417256cb00f34df6ccfa708a61866443b6c1d0 | refs/heads/master | 2020-07-04T21:01:45.756061 | 2019-08-14T19:48:40 | 2019-08-14T19:48:40 | 202,415,926 | 3 | 4 | MIT | 2020-03-11T17:36:44 | 2019-08-14T19:47:42 | C++ | UTF-8 | Python | false | false | 14,693 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
    """Functional test for unrequested-block processing.

    See the module docstring for the full scenario walkthrough (steps 1-9).
    node0 accepts unrequested blocks normally; node1 runs with
    -minimumchainwork=0x10 and must skip low-work unrequested blocks.
    """
    def add_options(self, parser):
        """Allow overriding the daemon binary under test via --testbinary."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOINMONEYD", "bitcoinmoneyd"),
                          help="bitcoinmoneyd binary to test")
    def set_test_params(self):
        """Two fresh nodes; node1 gets a raised minimum-chain-work threshold."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ["-minimumchainwork=0x10"]]
    def setup_network(self):
        """Start the nodes WITHOUT connecting them to each other."""
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        # Node2 will be used for non-whitelisted peers to test the interaction
        # with nMinimumChainWork.
        self.setup_nodes()
    def run_test(self):
        """Execute steps 1-9 described in the module docstring."""
        # Setup the p2p connections and start up the network thread.
        # test_node connects to node0 (not whitelisted)
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (whitelisted)
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
        network_thread_start()
        # Test logic begins here
        test_node.wait_for_verack()
        min_work_node.wait_for_verack()
        # 1. Have nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
        # 2. Send one block that builds on each tip.
        # This should be accepted by node0
        blocks_h2 = [] # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].nVersion = 0x20000000
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        min_work_node.send_message(msg_block(blocks_h2[1]))
        for x in [test_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_h1f.nVersion = 0x20000000
        block_time += 1
        block_h1f.solve()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two block that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_h2f.nVersion = 0x20000000
        block_time += 1
        block_h2f.solve()
        test_node.send_message(msg_block(block_h2f))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        # But this block should be accepted by node since it has equal work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")
        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.nVersion = 0x20000000
        block_h3.solve()
        test_node.send_message(msg_block(block_h3))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        self.nodes[0].getblock(block_h3.hash)
        # But this block should be accepted by node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")
        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as its not missing any headers)
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.nVersion = 0x20000000
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block
        # Now send the block at height 5 and check that it wasn't accepted (missing header)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        self.nodes[0].getblock(all_blocks[1].hash)
        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()
        network_thread_join()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        network_thread_start()
        test_node.wait_for_verack()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")
        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]
        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")
        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.nVersion = 0x20000000
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.nVersion = 0x20000000
        block_290f.solve()
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
        block_291.nVersion = 0x20000000
        # block_291 spends a coinbase below maturity!
        block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
        block_291.hashMerkleRoot = block_291.calc_merkle_root()
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.nVersion = 0x20000000
        block_292.solve()
        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_message(headers_message)
        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert(tip_entry_found)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
        test_node.send_message(msg_block(block_289f))
        test_node.send_message(msg_block(block_290f))
        test_node.sync_with_ping()
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)
        test_node.send_message(msg_block(block_291))
        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not
        try:
            # Only wait a short while so the test doesn't take forever if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()
            self.nodes[0].disconnect_p2ps()
            test_node = self.nodes[0].add_p2p_connection(P2PInterface())
            network_thread_start()
            test_node.wait_for_verack()
        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.nVersion = 0x20000000
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()
        # 9. Connect node1 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], 1)
        sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
| [
"bitcoinmoneydigital@gmail.com"
] | bitcoinmoneydigital@gmail.com |
b991c00150762c1c24dd25c04370d5218658ef18 | e3982f8d03e731bf0aeaf5e3afa3471a9fa4f469 | /levelorder.py | b36b14b82a676e93c939142c6d850af04e01df06 | [] | no_license | poojamalviya/ds-algo | 428e5ca5a48411ed31bc577d5f72a873f91b571d | 180897f40f569a3c624a576d76daf21a8cd0cd53 | refs/heads/main | 2023-02-19T01:34:49.310420 | 2021-01-20T18:59:18 | 2021-01-20T18:59:18 | 331,399,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | class Node:
    def __init__(self, data):
        """Create a leaf binary-tree node holding `data`; children start empty."""
        self.data = data
        self.left = None   # left child Node, or None
        self.right = None  # right child Node, or None
def levelOder(root):
    """Breadth-first traversal that prints the tree's values grouped by depth.

    Prints a list of per-level lists, e.g. [[1], [2, 3], [4, 5, 6, 7]],
    exactly as the original sentinel-based version did, and additionally
    returns that list so callers can consume the result programmatically
    (the original returned None).  An empty tree returns [] and, matching
    the original early-return, prints nothing.

    The print uses the single-argument parenthesized form, which produces
    identical output on Python 2 and Python 3 (the original `print res`
    statement was Python-2-only).
    """
    if root is None:
        return []
    res = []
    level = [root]
    while level:
        # record this depth's values, then gather the next depth's nodes
        res.append([node.data for node in level])
        next_level = []
        for node in level:
            if node.left:
                next_level.append(node.left)
            if node.right:
                next_level.append(node.right)
        level = next_level
    print(res)
    return res
# Build the sample tree used for the demo run:
# 1
# 2 3
# 4 5 6 7
myTree = Node(1)
myTree.left= Node(2)
myTree.right= Node(3)
myTree.left.left= Node(4)
myTree.left.right = Node(5)
myTree.right.left= Node(6)
myTree.right.right = Node(7)
# print the tree's values level by level
levelOder(myTree)
# def levelOder(root):
# if root == None:
# return
# else:
# queue = []
# queue.append(root)
# while len(queue)>0:
# curr = queue.pop(0)
# print (curr.data)
# if curr.left:
# queue.append(curr.left)
# if curr.right:
# queue.append(curr.right) | [
"pooja.malviya@happay.in"
] | pooja.malviya@happay.in |
616e98b6f2f3eb3a4e3c93b1edbbc1edbf65ef3e | 075cdb245b59df047017690fe1ab4057020a1998 | /scripts/CNN.2.py | 8a961aac9cb985dd184772e4b2a973b003f6c731 | [] | no_license | iallabs/Data_Preprocessing_ML | ff0ac70a345ac4c28f7e69b6eba3d4bf54c74c59 | f76413541b9129bff177a882da03723076ad479e | refs/heads/master | 2020-03-12T23:28:18.628676 | 2018-05-27T23:41:05 | 2018-05-27T23:41:05 | 130,866,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,055 | py | from image_pro.tfRecord.decode import tfrec_data_input_fn, tfrec_data_catvdog
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from image_pro.parameters import *
# Hyperparameters
num_classes = 2  # number of output classes of the final FC layer
num_step = 1500  # NOTE(review): not referenced in this chunk -- confirm use
num_epochs = 125  # epochs passed to the tfrecord input pipeline below
learning_rate = 0.009  # Adam optimizer learning rate
beta = 0.0004  # L2 regularization coefficient (applied to W1..W5 below)
def create_placeholders(n_H0, n_W0, n_C0, n_y):
    """
    Creates the placeholders for the tensorflow session.

    Arguments:
    n_H0 -- scalar, height of an input image
    n_W0 -- scalar, width of an input image
    n_C0 -- scalar, number of channels of the input
    n_y -- scalar, number of classes

    Returns:
    X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype float32
         (the leading None leaves the batch size flexible)
    Y -- placeholder for the input labels, of shape [None, n_y] and dtype float32
    """
    X = tf.placeholder(tf.float32,shape=[None,n_H0, n_W0, n_C0])
    Y = tf.placeholder(tf.float32,shape=[None, n_y])
    return X, Y
def initialize_parameters():
    """
    Initializes weight parameters (Xavier init) for the five conv layers:
                        W1 : [5, 5, 1, 4]
                        W2 : [3, 3, 4, 8]
                        W3 : [5, 5, 8, 12]
                        W4 : [3, 3, 12, 16]
                        W5 : [5, 5, 16, 24]

    Returns:
    parameters -- a dictionary of tensors containing W1, W2, W3, W4, W5
    """
    tf.set_random_seed(1)                              # fixed graph seed for reproducible initialization
    W1 = tf.get_variable("W1", [5,5,1,4], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W2 = tf.get_variable("W2", [3,3,4,8], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W3 = tf.get_variable("W3", [5,5,8,12], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W4 = tf.get_variable("W4", [3,3,12,16], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W5 = tf.get_variable("W5", [5,5,16,24], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    parameters = {"W1": W1,
                  "W2": W2,
                  "W3": W3,
                  "W4": W4,
                  "W5": W5
                  }
    return parameters
def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model:
    (CONV2D -> RELU -> MAXPOOL) x 5 -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X -- input dataset placeholder, of shape (batch, height, width, channels)
    parameters -- python dictionary containing the parameters "W1".."W5",
                  the shapes are given in initialize_parameters

    Returns:
    Z6 -- the output of the last LINEAR unit (logits, num_classes wide)
    """
    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    W2 = parameters['W2']
    W3 = parameters['W3']
    W4 = parameters['W4']
    W5 = parameters['W5']
    # CONV2D: filters W1, stride of 1, padding 'SAME'
    Z1 = tf.nn.conv2d(X,W1, strides = [1,1,1,1], padding = 'SAME')
    # RELU
    A1 = tf.nn.relu(Z1)
    # MAXPOOL: window 4x4, stride 1, padding 'SAME' (stride 1 keeps the spatial size)
    P1 = tf.nn.max_pool(A1, ksize = [1,4,4,1], strides = [1,1,1,1], padding = 'SAME')
    # CONV2D: filters W2, stride 1, padding 'SAME'
    Z2 = tf.nn.conv2d(P1, W2, strides=[1,1,1,1], padding='SAME')
    # RELU
    A2 = tf.nn.relu(Z2)
    # MAXPOOL: window 4x4, stride 4, padding 'SAME'
    P2 = tf.nn.max_pool(A2, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME')
    # CONV2D: filters W3, stride 1, padding 'SAME'
    Z3 = tf.nn.conv2d(P2, W3, strides=[1,1,1,1], padding='SAME')
    # RELU
    A3 = tf.nn.relu(Z3)
    # MAXPOOL: window 4x4, stride 4, padding 'SAME'
    P3 = tf.nn.max_pool(A3, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME')
    # CONV2D: filters W4, stride 1, padding 'SAME'
    Z4 = tf.nn.conv2d(P3, W4, strides=[1,1,1,1], padding='SAME')
    # RELU
    A4 = tf.nn.relu(Z4)
    # MAXPOOL: window 4x4, stride 4, padding 'SAME'
    P4 = tf.nn.max_pool(A4, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME')
    # CONV2D: filters W5, stride 1, padding 'SAME'
    Z5 = tf.nn.conv2d(P4, W5, strides=[1,1,1,1], padding='SAME')
    # RELU
    A5 = tf.nn.relu(Z5)
    # MAXPOOL: window 4x4, stride 4, padding 'SAME'
    P5 = tf.nn.max_pool(A5, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME')
    # FLATTEN
    P5 = tf.contrib.layers.flatten(P5)
    # FULLY-CONNECTED without non-linear activation function (do not call softmax).
    # num_classes neurons in the output layer; hence "activation_fn=None".
    Z6 = tf.contrib.layers.fully_connected(P5, num_classes, activation_fn=None)
    return Z6
def compute_cost(Z3, Y):
    """Return the mean softmax cross-entropy cost.

    Arguments:
    Z3 -- output of forward propagation (last LINEAR unit), shape (6, number of examples)
    Y -- placeholder of "true" labels, same shape as Z3

    Returns:
    Scalar tensor holding the cost.
    """
    per_example_loss = tf.nn.softmax_cross_entropy_with_logits(logits=Z3, labels=Y)
    return tf.reduce_mean(per_example_loss)
iterator = tfrec_data_input_fn("tftrain.tfrecords", num_epochs=num_epochs)
alp = iterator()
"""
Implements a three-layer ConvNet in Tensorflow:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X_train -- training set, of shape (None, 256, 256, 1)
Y_train -- test set, of shape (None, n_y = 2)
X_test -- training set, of shape (None, 256, 256, 1)
Y_test -- test set, of shape (None, n_y = 2)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
train_accuracy -- real number, accuracy on the train set (X_train)
test_accuracy -- real number, testing accuracy on the test set (X_test)
parameters -- parameters learnt by the model. They can then be used to predict.
"""
# Create Placeholders of the correct shape
X, Y = create_placeholders(T_HEIGHT, T_WIDTH, T_Channnels, num_classes)
# Initialize parameters
parameters = initialize_parameters()
# Forward propagation: Build the forward propagation in the tensorflow graph
Z3 = forward_propagation(X, parameters)
# Cost function: Add cost function to tensorflow graph
cost = compute_cost(Z3, Y)
regularizers = tf.nn.l2_loss(parameters["W1"]) + tf.nn.l2_loss(parameters["W2"]) + \
tf.nn.l2_loss(parameters["W3"]) + tf.nn.l2_loss(parameters["W4"]) + \
tf.nn.l2_loss(parameters["W5"])
cost = tf.reduce_mean(cost+beta*regularizers)
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Calculate the correct predictions
predict_op = tf.argmax(Z3, 1)
correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Initialize all the variables globally
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
costs = []
acc_train = []
# Run the initialization
sess.run(init)
sess.run(alp.initializer)
images, label = alp.get_next()
# Do the training loop
for epoch in range(num_epochs):
train_accuracy = 0.
minibatch_cost = 0.
# Select a minibatch
for i in range(num_step):
image, label_train = sess.run([images['image'], label])
img = image.reshape(16, T_HEIGHT,T_WIDTH,T_Channnels)
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the optimizer and the cost, the feedict should contain a minibatch for (X,Y).
_ , temp_cost = sess.run([optimizer, cost], feed_dict={X: img, Y: label_train})
temp_acc = accuracy.eval({X:img, Y: label_train})
train_accuracy += temp_acc/num_step
minibatch_cost += temp_cost / num_step
# Print the cost every epoch
if epoch % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
if epoch % 1 == 0:
costs.append(minibatch_cost)
acc_train.append(train_accuracy)
print ("train after epoch %i: %f" % (epoch, train_accuracy))
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
iterator_test = tfrec_data_input_fn("tftest.tfrecords", batch_size = 1000)
test = iterator_test()
sess.run(test.initializer)
X_test, Y_test = test.get_next()
X_test, Y_test = sess.run([X_test['image'], Y_test])
img_test = X_test.reshape(1000, T_HEIGHT,T_WIDTH,T_Channnels)
test_accuracy = accuracy.eval({X: img_test, Y: Y_test})
print("Test Accuracy:", test_accuracy)
| [
"noreply@github.com"
] | iallabs.noreply@github.com |
df67a8677869abc9fb80e384053f911c340d731c | ce8220d07034eebaaec61d7702faa5be1d7b33c3 | /test.py | f11d228a2017080b5cc4ce3133e7c36477945c9a | [] | no_license | linickx/docker-python-alpine-easysnmp | 1d68f90082e7d1d71c083f9b653050a4d9eafb65 | ff7dc92f559c07354360d0b9de04a840827cc9f3 | refs/heads/master | 2020-08-02T11:30:35.669101 | 2019-09-27T15:05:06 | 2019-09-27T15:05:06 | 211,336,111 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | #!/usr/bin/env python
# coding=utf-8
"""
easysnmp smoke test: verify the module imports and can answer an SNMP GET.
"""
import sys
import logging

logging.basicConfig(format='[%(levelname)s] %(asctime)s %(message)s', level=logging.DEBUG)
logger = logging.getLogger("test")

try:
    from easysnmp import snmp_get
    logger.info("easysnmp Works!")
except ImportError:
    # Only the import can fail here; the previous bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit and masked unrelated errors.
    logger.critical("easysnmp Not installed")
    logger.debug("Exception: %s", sys.exc_info()[1])
    sys.exit(1)

# Grab a single piece of information using an SNMP GET
result = snmp_get('sysName.0', hostname='localhost', community='public', version=2)
print(result.value)
| [
"linickx@gmail.com"
] | linickx@gmail.com |
dc2ee57aa68a3c836eaaba2909622793a0c5c959 | 82e0c0ef19b69320a7ba3dc8d76ad6cee816edb6 | /StairClimbingRobot.py | cd00d5a94d5cd6bc4d191e206cf694e499db6398 | [] | no_license | NicPi/Code | 60bb300fc4cc8ffae124a93800ef0186b06708e1 | 247fd282ece63dfb2102cd73af4da618d08065f1 | refs/heads/master | 2021-07-04T05:47:51.598910 | 2017-09-28T10:13:39 | 2017-09-28T10:13:39 | 105,134,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | # Stair climbing robot to reach the top of the stairs
# Fibonacci number
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative implementation: O(n) time instead of the exponential
    cost of the naive double recursion.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a


# Find number of ways to reach the top
def climb(s):
    """Number of distinct ways to climb s stairs taking 1 or 2 steps."""
    return fib(s + 1)


# Variables
s = 5
print(climb(s))
| [
"32358584+NicPi@users.noreply.github.com"
] | 32358584+NicPi@users.noreply.github.com |
68c29eb891292fa04ab772cb3285ba82fe3d42f1 | 098ff62eaa4af51ef18a0eab2646e5f082257e83 | /src/python/tornado-poc/src/hello_world/controllers/hello_world_controller.py | 83309691ae14e9df6fc328014546867c4003ae67 | [] | no_license | ext-yordan-georgiev-at-posti-com/tornado-poc | 63ad72043128d7d5fe08799c987e0a82c0850d58 | 0f45e153f7a113535b71a66aec42a97e9cc40e84 | refs/heads/master | 2023-05-30T16:36:21.525937 | 2021-06-18T07:17:00 | 2021-06-18T07:21:41 | 378,064,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | from tornado.web import RequestHandler
class HelloWorldController(RequestHandler):
    """Minimal Tornado request handler for the hello-world proof of concept."""

    def get(self):
        # Respond to HTTP GET with a plain-text greeting (padded with blank lines).
        self.write("\n\nHello world!\n\n")
| [
"me@org.com"
] | me@org.com |
b91fb546cbb42329ea80e0be279d6f298648f0d1 | 848cf2c39afe417272ce96d738266995cb0c9ca1 | /jirani/tests.py | 4c8d790627ab1c565880f05b726771f4c571271d | [
"MIT"
] | permissive | cliffnyendwe/neighbourhood | 462c2d13d966745de6c63675e799e57cf412eca8 | 77e1a1a082a94fb5a883012a66bf2a4504e6d33b | refs/heads/master | 2020-04-14T05:41:39.902621 | 2019-01-14T08:10:40 | 2019-01-14T08:10:40 | 163,369,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py |
from django.test import TestCase
# Create your tests here.
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Neighborhood , Profile , Business
class TestUser(TestCase):
    """Unit tests for saving User instances (and a related profile user)."""

    def setUp(self):
        # Fresh, unsaved user for every test.
        self.testuser = User(username="user", email="test@mail.com")

    def test_instance(self):
        self.assertIsInstance(self.testuser, User)

    def test_save_user(self):
        # Not persisted yet ...
        self.assertNotIn(self.testuser, User.objects.all())
        self.testuser.save()
        # ... persisted after save().
        self.assertIn(self.testuser, User.objects.all())

    def test_save_profile(self):
        self.fuser = User(username="fuser", email="fuser@mail.com")
        self.fuser.save()
| [
"cliffnyendwe"
] | cliffnyendwe |
ce4b53f95377627bbc034371852ed6502257d475 | 4fa1f74e833957f7e0afb3056adc0e928054fcbe | /PerceptronCode/Controller/main.py | afc4eac2ba7e160704395560d1d9f34475467684 | [] | no_license | FaceandControl/Neural-networks | 24fabbc3709fb38c6406690b6829a2610ad6fd06 | 74e00bca412ab690050492d44a567818484a686f | refs/heads/main | 2023-02-02T06:29:03.477578 | 2020-12-20T23:42:47 | 2020-12-20T23:42:47 | 312,286,551 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | import Model.ParseInput as prs
from View.View import View
from Model.JSONEncoder import Write, Read
from Model.Model import dlnet
Yh = []


def Debug():
    """Train the network on the training set and persist its weights."""
    x, y, path = prs.Parse("training")
    nn = dlnet(x, y)
    nn.gd(x, y, iter=10000)
    Write(nn)


def Release():
    """Load stored weights and run inference on the check set."""
    x, y, path = prs.Parse("check")
    nn = dlnet(x, y)
    Read(nn)
    # Local Yh (shadows the module-level list), scaled to percentages.
    Yh = nn.forward()
    Yh *= 100
    view = View(Yh, path)


info = input("Debug - 0 or Release - 1:")
if info == "0":
    Debug()
elif info == "1":
    Release()
| [
"promaksik01@gmail.com"
] | promaksik01@gmail.com |
37add1fa52d9eda89645fd81c309f30c127ee2a2 | 2f0fe59b2309845604a8adefc3a26aab64839245 | /autoscrollbarexample.py | ff24213ba9cd5c635fa30162f6938e36833b3f6a | [] | no_license | theFilipko/TheBayesianTrapClassifier | 72b48436a5e18f2756094c9a9a46fa48599a4c4d | 69f01a6dc158d0db6b23b497d9128f39f2075ef2 | refs/heads/master | 2020-04-02T04:45:01.289954 | 2018-11-01T11:16:23 | 2018-11-01T11:16:23 | 154,032,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | from tkinter import *
class AutoScrollbar(Scrollbar):
    """A scrollbar that hides itself when the entire content is visible.

    Only works with the grid geometry manager; pack/place are rejected.
    """

    def set(self, lo, hi):
        fully_visible = float(lo) <= 0.0 and float(hi) >= 1.0
        if fully_visible:
            # grid_remove is currently missing from Tkinter!
            self.tk.call("grid", "remove", self)
        else:
            self.grid()
        Scrollbar.set(self, lo, hi)

    def pack(self, **kw):
        raise TclError("cannot use pack with this widget")

    def place(self, **kw):
        raise TclError("cannot use place with this widget")
#
# create a canvas with auto-hiding scrollbars
root = Tk()

v_scroll = AutoScrollbar(root)
v_scroll.grid(row=0, column=1, sticky=N + S)
h_scroll = AutoScrollbar(root, orient=HORIZONTAL)
h_scroll.grid(row=1, column=0, sticky=E + W)

canvas = Canvas(root,
                yscrollcommand=v_scroll.set,
                xscrollcommand=h_scroll.set)
canvas.grid(row=0, column=0, sticky=N + S + E + W)
v_scroll.config(command=canvas.yview)
h_scroll.config(command=canvas.xview)

# let the canvas cell grow with the window
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)

# fill the canvas with a grid of buttons
frame = Frame(canvas)
frame.rowconfigure(1, weight=1)
frame.columnconfigure(1, weight=1)

rows = 5
for row_idx in range(1, rows):
    for col_idx in range(1, 10):
        cell = Button(frame, padx=7, pady=7, text="[%d,%d]" % (row_idx, col_idx))
        cell.grid(row=row_idx, column=col_idx, sticky='news')

canvas.create_window(0, 0, anchor=NW, window=frame)
frame.update_idletasks()
canvas.config(scrollregion=canvas.bbox("all"))

root.mainloop()
"Drift2405"
] | Drift2405 |
deb53b6de3868baf6963350d894260bc4ddf78af | 982b81098d3f2c70bfc5f51ba139d99de7d870b1 | /pycrawl3/wsgi.py | 1629bbe505e5b68ae54f1a0cdc3327584c064a8a | [] | no_license | ivanwakeup/pycrawl3 | f41b782f9470fb046ae918b8e1d335d115c402bf | ce36c7d2b00617ff5bf0c852209366d735f729c6 | refs/heads/master | 2022-12-21T15:58:03.692722 | 2018-10-16T15:13:05 | 2018-10-16T15:13:05 | 143,886,834 | 0 | 1 | null | 2018-08-07T17:36:05 | 2018-08-07T14:41:19 | Python | UTF-8 | Python | false | false | 397 | py | """
WSGI config for pycrawl3 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the dev settings; an externally supplied value takes priority.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pycrawl3.settings.dev')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| [
"ivan9miller@gmail.com"
] | ivan9miller@gmail.com |
f1227a170810af8da6b47b6b3cdb9058730b6c00 | 2d055d8d62c8fdc33cda8c0b154e2b1e81814c46 | /python/python_filemoniter/test.py | 6d457ac662a50cf232bf4c0441f111a4db455716 | [
"MIT"
] | permissive | harkhuang/harkcode | d9ff7d61c3f55ceeeac4124a2a6ba8a006cff8c9 | faab86571ad0fea04c873569a806d2d7bada2e61 | refs/heads/master | 2022-05-15T07:49:23.875775 | 2022-05-13T17:21:42 | 2022-05-13T17:21:53 | 20,355,721 | 3 | 2 | MIT | 2019-05-22T10:09:50 | 2014-05-31T12:56:19 | C | UTF-8 | Python | false | false | 106 | py | import random
# Pick three distinct web ids from the candidate range.
b_list = range(100001, 100200)
blist_webId = random.sample(b_list, 3)
# Python-3 print() replaces the old Python-2-only print statement.
print(blist_webId)
| [
"shiyanhk@gmail.com"
] | shiyanhk@gmail.com |
d916f89a96b6ad60738977481948517958c5bc0e | 2845fd57e7cf726477f23387116ab2c4b94e88e7 | /manage.py | 533fc66a70c2630fad460ad6e1e2813fd5471b10 | [] | no_license | Rainbowsprkl/Ontrack | ff3c1a22083f8f2ad5f75df912f7b3dc782ea608 | 142198665670980e8c3e1581aee17ca68f3d985a | refs/heads/master | 2021-05-30T20:15:38.114114 | 2016-04-12T17:48:34 | 2016-04-12T17:48:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ontrack.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"Bowleyjw@plu.edu"
] | Bowleyjw@plu.edu |
0d0eaf102c7843efdb4840b7a71b3e35df31aa02 | 97a52cb78a09b113e9cd1d7ccd31b75b423f1d5b | /Tut11_Template_Matching.py | 1c3fc48c9f5dab2b6673ce039ec27a4b995df8f0 | [] | no_license | Gomit/OpenCV_mini_projects | c3d6fed795372903cc63f52eefede25ee24b717e | 937f4bced786c2760489f78612ad6ddfc3c72000 | refs/heads/master | 2021-01-15T13:34:32.150707 | 2017-08-08T10:14:48 | 2017-08-08T10:14:48 | 99,678,095 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | import cv2
import numpy as np
# Load the scene in colour plus a grayscale copy used for matching.
img_rgb = cv2.imread('images/opencv-template-matching-python-tutorial.jpg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)

# Template to search for, loaded directly as grayscale.
template = cv2.imread('images/opencv-template-for-matching.jpg', 0)
w, h = template.shape[::-1]

# Normalised correlation map: one score per candidate position.
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
cv2.imwrite('res.png', res)
print(res[0].size)

# Draw a box at every location whose score clears the threshold.
threshold = 0.8
loc = np.where(res >= threshold)
for top_left in zip(*loc[::-1]):
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img_rgb, top_left, bottom_right, (0, 255, 255), 2)

cv2.imshow('Detected', img_rgb)
cv2.imshow('res', res)
cv2.imshow('template', template)
cv2.imwrite('img_gray.png', img_gray)
print(loc[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"meron.g@live.se"
] | meron.g@live.se |
8c0262433baad3904e59bda273a37328054a31a5 | 148f5fb80cb7640dbd4419617f1f002cd6b641bf | /MP7-HBase/TablePartC.py | 9b098a231fe155383114e09437b42f7ed07f948a | [] | no_license | nhtrinh2/Cloud-Computing-and-Big-Data | 7e6ec7811f42188ed181bb72b3be7768f7546480 | c51e48e96660d7ed67f9812017124d30453a6f0a | refs/heads/master | 2023-03-17T02:57:58.801203 | 2020-04-19T19:21:57 | 2020-04-19T19:21:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | import happybase as hb
import csv
# Connect to the local HBase Thrift server and select the target table.
connection = hb.Connection('localhost')
connection.open()
table = connection.table('powers')

# Each CSV row becomes one HBase row keyed by its first column.
with open('input.csv') as csvfile:
    for record in csv.reader(csvfile, delimiter=','):
        table.put(record[0], {
            'personal:hero': record[1],
            'personal:power': record[2],
            'professional:name': record[3],
            'professional:xp': record[4],
            'custom:color': record[5],
        })
| [
"hongfeili365@gmail.com"
] | hongfeili365@gmail.com |
022fef7f993959e9795ef0ae1fee032d527a65ef | 1ae03694e6826c2c3224647024f66debcebd62dc | /algorithms/python/knmf/hdf5/__init__.py | 602275033b55a0483937b09a25904e30334569ba | [
"Apache-2.0"
] | permissive | Joaggi/Robust-kernels-for-robust-location-estimation | 5ad7f8f3be9a08e5d4283e03e017e5e3b9b186b8 | 9db62273de90547c982d819dc45e66ac86bfcb58 | refs/heads/master | 2023-04-17T22:41:01.652426 | 2022-08-02T23:43:31 | 2022-08-02T23:43:31 | 27,465,913 | 3 | 1 | null | 2022-08-02T23:39:44 | 2014-12-03T02:49:24 | MATLAB | UTF-8 | Python | false | false | 30 | py |
from h5OnlineReader import *
| [
"joaggi@gmail.com"
] | joaggi@gmail.com |
b86ab461b8cde04302e2d49cac9ee8e6f3ed6581 | 5ef242e73196c12ec2c5587ae6a0225c212c1319 | /simplepublisher/src/scripts/publisher.py | 90d871f94ce606323bbd729469432b1030289a55 | [] | no_license | usluenes/ROS | f756a51472a263c604507abc6bb6e1708e2215a4 | 773c675a3a3e2aa3716d9367bc7bed5045824c99 | refs/heads/master | 2021-01-01T02:04:22.152163 | 2020-02-08T15:48:23 | 2020-02-08T15:48:23 | 239,132,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/env python
# Simple String Publisher
import rospy
from std_msgs.msg import String
def simplePublisher():
    """Publish a fixed string on 'publishedTopic' at 30 Hz until ROS shuts down."""
    pub = rospy.Publisher('publishedTopic', String, queue_size=10)
    rospy.init_node('simplePublisherNode', anonymous=False)
    loop_rate = rospy.Rate(30)
    message = 'Suppp Bitchezzz !!'
    while not rospy.is_shutdown():
        pub.publish(message)
        loop_rate.sleep()


if __name__ == '__main__':
    try:
        simplePublisher()
    except rospy.ROSInterruptException:
        # Normal shutdown path: ROS interrupted the rate sleep.
        pass
| [
"uslu.enes1@gmail.com"
] | uslu.enes1@gmail.com |
e70f718b0081ee8c8810ed55397e1910334d55c5 | cfd2e1f12208dad79bc4b899e81ce1f7de84e80c | /Brian2_scripts/sim_brian_scratch/sim_brian_Jv/sim_brian_Jv_v1_BO.py | b318d3f325f9ca58e4ad173ccd6f21b9406e0375 | [] | no_license | zhouyanasd/DL-NC | 334adafdea1dd8c4c08c7efef3abc3b623344f0d | 396521096f65b27aa24efb1deda7b215876166b2 | refs/heads/master | 2023-03-22T04:57:19.790975 | 2023-03-14T08:57:01 | 2023-03-14T08:57:01 | 64,385,964 | 41 | 9 | null | 2023-02-15T17:52:34 | 2016-07-28T10:22:45 | Python | UTF-8 | Python | false | false | 17,260 | py | # ----------------------------------------
# LSM without STDP for Jv test
# add neurons to readout layer for multi-classification(one-versus-the-rest)
# using softmax(logistic regression)
# input layer is changed to 781*1 with encoding method
# change the LSM structure according to Maass paper
# new calculate flow as Maass_ST
# simplify the coding method with only extend the rank
# for the BO in parallel run
# ----------------------------------------
from brian2 import *
from brian2tools import *
import scipy as sp
from scipy import stats
import struct
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
import pickle
from bqplot import *
import ipywidgets as widgets
import warnings
import os
from multiprocessing import Pool
import cma
import bayes_opt
from functools import partial
warnings.filterwarnings("ignore")
prefs.codegen.target = "numpy"
start_scope()
np.random.seed(100)
data_path = '../../../Data/Jv/'
# ------define general function------------
class Function():
    """Small collection of numerical helper functions."""

    def __init__(self):
        pass

    def logistic(self, f):
        """Element-wise logistic sigmoid of f."""
        return 1.0 / (np.exp(-f) + 1)

    def softmax(self, z):
        """Row-wise softmax: each row of z is normalised to sum to one."""
        rows = []
        for row in z:
            exps = np.exp(row)
            rows.append(exps / np.sum(exps))
        return np.array(rows)

    def gamma(self, a, size):
        """Draw `size` samples from a gamma distribution with shape parameter a."""
        return stats.gamma.rvs(a, size=size)
class Base():
    """Shared utilities: state bookkeeping, normalisation, metrics,
    3-D neuron placement and small numpy/pandas helpers."""

    def update_states(self, type='pandas', *args, **kwargs):
        """Append each positional state in *args to the matching keyword state.

        type -- 'pandas' concatenates as DataFrame rows, 'numpy' extends
        along axis 1. Returns the updated kwargs mapping.
        """
        for seq, state in enumerate(kwargs):
            if type == 'pandas':
                # DataFrame.append was removed in pandas 2.0; concat is the
                # drop-in replacement with identical results.
                kwargs[state] = pd.concat([kwargs[state], pd.DataFrame(args[seq])])
            elif type == 'numpy':
                kwargs[state] = self.np_extend(kwargs[state], args[seq], 1)
        return kwargs

    def normalization_min_max(self, arr):
        """Return arr linearly rescaled to [0, 1].

        Bug fix: the old loop wrote normalised values back into the input
        array while still reading np.min/np.max from it, so every element
        after the first was scaled against a partially-normalised array
        (and integer inputs were silently truncated). The bounds are now
        taken once, from the untouched input.
        """
        arr = np.asarray(arr, dtype=float)
        lo, hi = np.min(arr), np.max(arr)
        return (arr - lo) / (hi - lo)

    def mse(self, y_test, y):
        """Root-mean-square error between y_test and y.

        Uses numpy directly: the scipy top-level aliases (sp.sqrt/sp.mean)
        were deprecated and removed from modern SciPy releases.
        """
        return np.sqrt(np.mean((y_test - y) ** 2))

    def classification(self, thea, data):
        """Threshold the normalised data at thea.

        Returns (binary class array, normalised data).
        """
        data_n = self.normalization_min_max(data)
        data_class = np.where(data_n >= thea, 1, 0)
        return np.asarray(data_class), data_n

    def allocate(self, G, X, Y, Z):
        """Assign every neuron in the groups G a unique, randomly chosen
        (x, y, z) coordinate on an X*Y*Z grid (groups are mutated in place)."""
        V = np.zeros((X, Y, Z), [('x', float), ('y', float), ('z', float)])
        # NOTE(review): the second linspace uses X where Y looks intended;
        # left unchanged because every visible caller passes X == Y.
        V['x'], V['y'], V['z'] = np.meshgrid(np.linspace(0, X - 1, X), np.linspace(0, X - 1, X),
                                             np.linspace(0, Z - 1, Z))
        V = V.reshape(X * Y * Z)
        np.random.shuffle(V)
        n = 0
        for g in G:
            for i in range(g.N):
                g.x[i], g.y[i], g.z[i] = V[n][0], V[n][1], V[n][2]
                n += 1
        return G

    def w_norm2(self, n_post, Synapsis):
        """L2-normalise the incoming weights of each postsynaptic neuron."""
        for i in range(n_post):
            idx = np.where(Synapsis._synaptic_post == i)[0]
            Synapsis.w[idx] = Synapsis.w[idx] / np.linalg.norm(Synapsis.w[idx])

    def np_extend(self, a, b, axis=0):
        """Append b to a along axis; a may be None (treated as empty)."""
        if a is None:
            shape = list(b.shape)
            shape[axis] = 0
            a = np.array([]).reshape(tuple(shape))
        return np.append(a, b, axis)

    def np_append(self, a, b):
        """Stack b onto a as one new leading row; a may be None."""
        shape = list(b.shape)
        shape.insert(0, -1)
        if a is None:
            a = np.array([]).reshape(tuple(shape))
        return np.append(a, b.reshape(tuple(shape)), axis=0)

    def parameters_GS(self, *args, **kwargs):
        """Flat structured array describing a full grid search.

        args -- one (min, max) tuple per parameter, in kwargs order.
        kwargs -- parameter name -> number of grid points along that axis.
        """
        parameters = np.zeros(tuple(kwargs.values()), [(x, float) for x in kwargs.keys()])
        grids = np.meshgrid(*[np.linspace(min_max[0], min_max[1], scale)
                              for min_max, scale in zip(args, kwargs.values())], indexing='ij')
        for index, parameter in enumerate(kwargs.keys()):
            parameters[parameter] = grids[index]
        return parameters.reshape(-1)
class Readout():
    """Linear readout trained with scikit-learn logistic regression."""

    def readout_sk(self, X_train, X_test, y_train, y_test, **kwargs):
        """Fit logistic regression on the train states and score both splits.

        States are column-major (features x samples), hence the transposes.
        Returns (train_accuracy, test_accuracy).
        """
        from sklearn.linear_model import LogisticRegression
        model = LogisticRegression(**kwargs)
        model.fit(X_train.T, y_train.T)
        pred_train = model.predict(X_train.T)
        pred_test = model.predict(X_test.T)
        return (accuracy_score(pred_train, y_train.T),
                accuracy_score(pred_test, y_test.T))
class Result():
    """Persistence helpers plus an interactive bqplot animation viewer."""

    def __init__(self):
        pass

    def result_save(self, path, *arg, **kwarg):
        """Pickle the keyword arguments to path, replacing any existing file."""
        if os.path.exists(path):
            os.remove(path)
        with open(path, 'wb') as sink:
            pickle.dump(kwarg, sink)

    def result_pick(self, path):
        """Load and return the dict previously stored with result_save."""
        with open(path, 'rb') as source:
            return pickle.load(source)

    def animation(self, t, v, interval, duration, a_step=10, a_interval=100, a_duration=10):
        """Build play/slider widgets plus a figure scrolling a window through v over t."""
        x_scale = LinearScale()
        y_scale = LinearScale()
        line = Lines(x=t[:interval], y=v[:, :interval], scales={'x': x_scale, 'y': y_scale})
        x_axis = Axis(scale=x_scale, label='x', grid_lines='solid')
        y_axis = Axis(scale=y_scale, orientation='vertical', tick_format='0.2f', label='y',
                      grid_lines='solid')
        fig = Figure(marks=[line], axes=[x_axis, y_axis], animation_duration=a_duration)

        def on_value_change(change):
            # Shift the visible window so it starts at the new slider value.
            line.x = t[change['new']:interval + change['new']]
            line.y = v[:, change['new']:interval + change['new']]

        play = widgets.Play(
            interval=a_interval,
            value=0,
            min=0,
            max=duration,
            step=a_step,
            description="Press play",
            disabled=False
        )
        slider = widgets.IntSlider(min=0, max=duration)
        widgets.jslink((play, 'value'), (slider, 'value'))
        slider.observe(on_value_change, names='value')
        return play, slider, fig
class Jv_classification():
    """Loader and spike encoder for the Japanese-vowels (Jv) dataset."""

    def __init__(self, coding_duration):
        # coding_duration: amplitude resolution A used by the cosine encoders.
        self.coding_duration = coding_duration

    def load_Data_Jv(self, t, path_value, path_label, is_norm=True):
        """Read one split of the Jv data.

        t -- 'train' or 'test'; selects which row of the size file holds the
        per-class utterance counts.
        path_value -- text file of frames, utterances separated by blank lines.
        path_label -- file whose rows hold per-class counts (row 0: test, row 1: train).
        is_norm -- min-max scale the feature columns to [0, 1].
        Returns a DataFrame with columns 'value' (list of frame arrays) and 'label'.
        """
        if t == "train":
            label = np.loadtxt(path_label, delimiter=None).astype(int)[1]
        elif t == "test":
            label = np.loadtxt(path_label, delimiter=None).astype(int)[0]
        else:
            raise TypeError("t must be 'train' or 'test'")
        data = np.loadtxt(path_value, delimiter=None)
        if is_norm:
            data = MinMaxScaler().fit_transform(data)
        # Scan the raw file for blank lines: they delimit utterances, and the
        # running line index (blank lines excluded) marks each utterance's end.
        s = open(path_value, 'r')
        i = -1
        size = []
        while True:
            lines = s.readline()
            i += 1
            if not lines:
                break
            if lines == '\n':  # "\n" needed to be added at the end of the file
                i -= 1
                size.append(i)
                continue
        size_d = np.concatenate(([0], (np.asarray(size) + 1)))
        data_list = [data[size_d[i]:size_d[i + 1]] for i in range(len(size_d) - 1)]
        label_list = []
        j = 0
        for n in label:
            # Class j contributed n utterances, in file order.
            label_list.extend([j] * n)
            j += 1
        data_frame = pd.DataFrame({'value': pd.Series(data_list), 'label': pd.Series(label_list)})
        return data_frame

    def load_Data_Jv_all(self, path, is_norm=True):
        # Load both splits; the size file is shared between them.
        self.train = self.load_Data_Jv('train', path + 'train.txt',
                                       path + 'size.txt', is_norm)
        self.test = self.load_Data_Jv('test', path + 'test.txt',
                                      path + 'size.txt', is_norm)

    def select_data(self, fraction, data_frame, is_order=True, **kwargs):
        """Sample a fraction of the rows whose label is in kwargs['selected']
        (default: classes 0-8); keep the original order when is_order is True."""
        try:
            selected = kwargs['selected']
        except KeyError:
            selected = np.arange(9)
        if is_order:
            data_frame_selected = data_frame[data_frame['label'].isin(selected)].sample(
                frac=fraction).sort_index().reset_index(drop=True)
        else:
            data_frame_selected = data_frame[data_frame['label'].isin(selected)].sample(frac=fraction).reset_index(
                drop=True)
        return data_frame_selected

    def _encoding_cos(self, x, n, A):
        # Population coding: n phase-shifted cosines map each analog value to
        # one active level out of A; the n phases are stacked along the rows.
        encoding = []
        for i in range(int(n)):
            trans_cos = np.around(0.5 * A * (np.cos(x + np.pi * (i / n)) + 1)).clip(0, A - 1)
            coding = [([0] * trans_cos.shape[1]) for i in range(A * trans_cos.shape[0])]
            for index_0, p in enumerate(trans_cos):
                for index_1, q in enumerate(p):
                    coding[int(q) + A * index_0][index_1] = 1
            encoding.extend(coding)
        return np.asarray(encoding)

    def _encoding_cos_rank(self, x, n, A):
        # Like _encoding_cos, but the raster has shape (rows*A, n*cols):
        # the n phases are interleaved along axis 1 (index_1 * n + i)
        # instead of being stacked along axis 0.
        encoding = np.zeros((x.shape[0] * A, n * x.shape[1]), dtype='<i1')
        for i in range(int(n)):
            trans_cos = np.around(0.5 * A * (np.cos(x + np.pi * (i / n)) + 1)).clip(0, A - 1)
            for index_0, p in enumerate(trans_cos):
                for index_1, q in enumerate(p):
                    encoding[int(q) + A * index_0, index_1 * n + i] = 1
        return np.asarray(encoding)

    def encoding_latency_Jv(self, coding_f, analog_data, coding_n, min=0, max=np.pi):
        # Rescale every series to [min, max] radians, then spike-encode it
        # with coding_f (note: `min`/`max` shadow the builtins here).
        f = lambda x: (max - min) * (x - np.min(x)) / (np.max(x) - np.min(x))
        value = analog_data['value'].apply(f).apply(coding_f, n=coding_n, A=int(self.coding_duration))
        return pd.DataFrame({'value': pd.Series(value), 'label': pd.Series(analog_data['label'])})

    def get_series_data_list(self, data_frame, is_group=False):
        """Concatenate (is_group=False) or group (True) the encoded rasters
        and return them together with their labels."""
        data_frame_s = []
        if not is_group:
            for value in data_frame['value']:
                data_frame_s.extend(value)
        else:
            for value in data_frame['value']:
                data_frame_s.append(value)
        label = data_frame['label']
        return np.asarray(data_frame_s), label
###################################
# -----simulation parameter setting-------
coding_n = 3            # phase-shifted cosine encoders per input channel
dim = 12                # input dimensionality of one Jv frame
coding_duration = 10    # amplitude resolution / time steps per frame
F_train = 1             # fraction of the training split to use
F_test = 1              # fraction of the test split to use
Dt = defaultclock.dt = 1 * ms  # simulation time step

#-------class initialization----------------------
function = Function()
base = Base()
readout = Readout()
result = Result()
Jv = Jv_classification(coding_duration)

# -------data initialization----------------------
Jv.load_Data_Jv_all(data_path)
df_train = Jv.select_data(F_train, Jv.train, False)
df_test = Jv.select_data(F_test, Jv.test, False)
df_en_train = Jv.encoding_latency_Jv(Jv._encoding_cos_rank, df_train, coding_n)
df_en_test = Jv.encoding_latency_Jv(Jv._encoding_cos_rank, df_test, coding_n)
data_train_s, label_train = Jv.get_series_data_list(df_en_train, is_group=True)
data_test_s, label_test = Jv.get_series_data_list(df_en_test, is_group=True)

#-------get numpy random state------------
# Captured once so every run_net call can restart from the same RNG state.
np_state = np.random.get_state()

############################################
# ---- define network run function----
def run_net(inputs, **parameter):
    """Build and simulate the liquid-state machine once for a single sample.

    inputs -- (raster, label): raster is a 0/1 array fed as the stimulus
    (axis 0 indexed by time step); label is passed through untouched.
    parameter -- search dict with 'R' (connection radius), 'f' (synaptic
    weight scale) and 'tau' (membrane time-constant scale).
    Returns (readout membrane potentials at the end of the run, label).
    """
    #---- set numpy random state for each run----
    # Same RNG state every call -> identical network topology across samples.
    np.random.set_state(np_state)

    # -----parameter setting-------
    n_ex = 400
    n_inh = int(n_ex/4)
    n_input = dim * coding_n
    n_read = n_ex+n_inh

    R = parameter['R']
    f = parameter['f']

    A_EE = 30*f
    A_EI = 60*f
    A_IE = 19*f
    A_II = 19*f
    A_inE = 18*f
    A_inI = 9*f

    tau_ex = parameter['tau']*coding_duration
    tau_inh = parameter['tau']*coding_duration
    tau_read= 30

    p_inE = 0.1
    p_inI = 0.1

    #------definition of equation-------------
    # The input group reads the TimedArray `stimulus` created further down.
    neuron_in = '''
    I = stimulus(t,i) : 1
    '''

    neuron = '''
    tau : 1
    dv/dt = (I-v) / (tau*ms) : 1 (unless refractory)
    dg/dt = (-g)/(3*ms) : 1
    dh/dt = (-h)/(6*ms) : 1
    I = (g+h)+13.5: 1
    x : 1
    y : 1
    z : 1
    '''

    neuron_read = '''
    tau : 1
    dv/dt = (I-v) / (tau*ms) : 1
    dg/dt = (-g)/(3*ms) : 1
    dh/dt = (-h)/(6*ms) : 1
    I = (g+h): 1
    '''

    synapse = '''
    w : 1
    '''

    on_pre_ex = '''
    g+=w
    '''

    on_pre_inh = '''
    h-=w
    '''

    # -----Neurons and Synapses setting-------
    Input = NeuronGroup(n_input, neuron_in, threshold='I > 0', method='euler', refractory=0 * ms,
                        name = 'neurongroup_input')
    G_ex = NeuronGroup(n_ex, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=3 * ms,
                       name ='neurongroup_ex')
    G_inh = NeuronGroup(n_inh, neuron, threshold='v > 15', reset='v = 13.5', method='euler', refractory=2 * ms,
                        name ='neurongroup_in')
    G_readout = NeuronGroup(n_read, neuron_read, method='euler', name='neurongroup_read')
    S_inE = Synapses(Input, G_ex, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_inE')
    S_inI = Synapses(Input, G_inh, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_inI')
    S_EE = Synapses(G_ex, G_ex, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_EE')
    S_EI = Synapses(G_ex, G_inh, synapse, on_pre = on_pre_ex ,method='euler', name='synapses_EI')
    S_IE = Synapses(G_inh, G_ex, synapse, on_pre = on_pre_inh ,method='euler', name='synapses_IE')
    S_II = Synapses(G_inh, G_inh, synapse, on_pre = on_pre_inh ,method='euler', name='synapses_I')
    S_E_readout = Synapses(G_ex, G_readout, 'w = 1 : 1', on_pre=on_pre_ex, method='euler')
    S_I_readout = Synapses(G_inh, G_readout, 'w = 1 : 1', on_pre=on_pre_inh, method='euler')

    #-------initialization of neuron parameters----------
    G_ex.v = '13.5+1.5*rand()'
    G_inh.v = '13.5+1.5*rand()'
    G_readout.v = '0'
    G_ex.g = '0'
    G_inh.g = '0'
    G_readout.g = '0'
    G_ex.h = '0'
    G_inh.h = '0'
    G_readout.h = '0'
    G_ex.tau = tau_ex
    G_inh.tau = tau_inh
    G_readout.tau = tau_read

    # NOTE(review): the left-hand name G_in is never used again (G_inh was
    # probably intended); harmless since allocate mutates the groups in place.
    [G_ex,G_in] = base.allocate([G_ex,G_inh],5,5,20)

    # -------initialization of network topology and synapses parameters----------
    # Connection probability decays with squared 3-D distance over radius R.
    S_inE.connect(condition='j<0.3*N_post', p = p_inE)
    S_inI.connect(condition='j<0.3*N_post', p = p_inI)
    S_EE.connect(condition='i != j', p='0.3*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')
    S_EI.connect(p='0.2*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')
    S_IE.connect(p='0.4*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')
    S_II.connect(condition='i != j', p='0.1*exp(-((x_pre-x_post)**2+(y_pre-y_post)**2+(z_pre-z_post)**2)/R**2)')
    S_E_readout.connect(j='i')
    S_I_readout.connect(j='i+n_ex')

    # Gamma-distributed weights scaled by the per-pathway amplitudes.
    S_inE.w = function.gamma(A_inE, S_inE.w.shape)
    S_inI.w = function.gamma(A_inI, S_inI.w.shape)
    S_EE.w = function.gamma(A_EE, S_EE.w.shape)
    S_IE.w = function.gamma(A_IE, S_IE.w.shape)
    S_EI.w = function.gamma(A_EI, S_EI.w.shape)
    S_II.w = function.gamma(A_II, S_II.w.shape)

    S_EE.pre.delay = '1.5*ms'
    S_EI.pre.delay = '0.8*ms'
    S_IE.pre.delay = '0.8*ms'
    S_II.pre.delay = '0.8*ms'

    # ------create network-------------
    net = Network(collect())
    net.store('init')

    # ------run network-------------
    # One TimedArray row per time step; run for as many steps as the raster has.
    stimulus = TimedArray(inputs[0], dt=Dt)
    duration = inputs[0].shape[0]
    net.run(duration * Dt)
    states = net.get_states()['neurongroup_read']['v']
    # Roll the network back so simulated state does not carry over.
    net.restore('init')
    return (states, inputs[1])
def parameters_search(**parameter):
    """Objective for the Bayesian optimiser.

    Simulates the reservoir over the whole train/test sets with the given
    parameters (using the module-level `pool` and data/label globals),
    fits the logistic readout, and returns the test accuracy to maximise.
    """
    # ------parallel run for train-------
    # NOTE(review): [(x) for x in zip(...)] is just list(zip(...)).
    states_train_list = pool.map(partial(run_net, **parameter), [(x) for x in zip(data_train_s, label_train)])
    # ----parallel run for test--------
    states_test_list = pool.map(partial(run_net, **parameter), [(x) for x in zip(data_test_s, label_test)])
    # ------Readout---------------
    # Unzip the (states, label) pairs returned by run_net.
    states_train, states_test, _label_train, _label_test = [], [], [], []
    for train in states_train_list :
        states_train.append(train[0])
        _label_train.append(train[1])
    for test in states_test_list:
        states_test.append(test[0])
        _label_test.append(test[1])
    # Min-max scale per feature, then transpose to (features, samples).
    states_train = (MinMaxScaler().fit_transform(np.asarray(states_train))).T
    states_test = (MinMaxScaler().fit_transform(np.asarray(states_test))).T
    score_train, score_test = readout.readout_sk(states_train, states_test,
                                                 np.asarray(_label_train), np.asarray(_label_test),
                                                 solver="lbfgs", multi_class="multinomial")
    # ----------show results-----------
    print('parameters %s' % parameter)
    print('Train score: ', score_train)
    print('Test score: ', score_test)
    return score_test
##########################################
# -------BO parameters search---------------
if __name__ == '__main__':
    # Worker pool shared by every parameters_search call.
    core = 10
    pool = Pool(core)
    optimizer = bayes_opt.BayesianOptimization(
        f=parameters_search,
        pbounds={'R': (0.01, 2), 'f': (0.01, 2), 'tau':(0.01, 2)},
        verbose=2,
        random_state=np.random.RandomState(),
    )
    # from bayes_opt.util import load_logs
    # load_logs(optimizer, logs=["./BO_res_Jv.json"])
    # Log every optimisation step to JSON so a run can be resumed later.
    logger = bayes_opt.observer.JSONLogger(path="./BO_res_Jv.json")
    # ('OPTMIZATION_STEP' is the constant name spelled by the bayes_opt API.)
    optimizer.subscribe(bayes_opt.event.Events.OPTMIZATION_STEP, logger)
    optimizer.maximize(
        init_points=10,
        n_iter=200,
        acq='ucb',
        kappa=2.576,
        xi=0.0,
    )
"zhouyanasd@gmail.com"
] | zhouyanasd@gmail.com |
1dae397b6c8ed684d73f25e7b4d4d398d0b99762 | 296ee7f58031d7c22fb92790b2cda5e881b5301a | /1st year/1st semester/FPLab/Test.19.Dec/repository/repository.py | a5f41e7e42922fed8ea4f9a08a9b7b3d8fa18b71 | [] | no_license | arazi47/university-projects | 6cf7adb1fade977580e51d3842162eb033769088 | 4ebd70c857fac2565b44f6e40904cba209a138a1 | refs/heads/master | 2023-01-08T10:28:23.846702 | 2020-05-05T08:54:37 | 2020-05-05T08:54:37 | 105,974,765 | 0 | 2 | null | 2022-12-27T14:41:06 | 2017-10-06T06:43:06 | HTML | UTF-8 | Python | false | false | 317 | py | class Repository:
def __init__(self):
self.__adn_list = []
def append(self, adn_string):
self.__adn_list.append(adn_string)
def get_data(self):
return self.__adn_list
def length(self):
return len(self.__adn_list)
def clear(self):
self.__adn_list.clear() | [
"razialexis43@gmail.com"
] | razialexis43@gmail.com |
c93ce125a7a5d1cd37cdc72e569c24ac3d9e661d | ef03c81d6ff3a5f1eaffa71aa55f0c2e2ec07233 | /opensql/dbhelper.py | 66046de2fda31b5eb63273537187cbb50182a7c2 | [] | no_license | Alanaktion/opensql-pro | b9eab904685e293e723920f6f37ff5c46a5fe99d | 6fb59c2dc0923946be5d84c9faa1a0a743f1c1f1 | refs/heads/master | 2023-03-17T15:44:48.571429 | 2018-05-25T16:44:57 | 2018-05-25T16:44:57 | 349,199,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,175 | py | """
Helper module for working with PyMySQL data sets
Handles converting PyMySQL objects to Gtk-compatible structures and types
"""
import datetime
import pymysql
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
def int_to_type(type_id):
"""Convert PyMySQL field type to GType"""
# TODO: Verify how NULL type actually appears and handle it better
type_map = {
pymysql.constants.FIELD_TYPE.TINY: int,
pymysql.constants.FIELD_TYPE.SHORT: int,
pymysql.constants.FIELD_TYPE.LONG: int,
pymysql.constants.FIELD_TYPE.INT24: int,
pymysql.constants.FIELD_TYPE.YEAR: int,
pymysql.constants.FIELD_TYPE.DECIMAL: float,
pymysql.constants.FIELD_TYPE.FLOAT: float,
pymysql.constants.FIELD_TYPE.DOUBLE: float,
pymysql.constants.FIELD_TYPE.LONGLONG: float,
pymysql.constants.FIELD_TYPE.NEWDECIMAL: float,
pymysql.constants.FIELD_TYPE.NULL: None,
pymysql.constants.FIELD_TYPE.VARCHAR: str,
pymysql.constants.FIELD_TYPE.BIT: str,
pymysql.constants.FIELD_TYPE.JSON: str,
pymysql.constants.FIELD_TYPE.ENUM: str,
pymysql.constants.FIELD_TYPE.SET: str,
pymysql.constants.FIELD_TYPE.TINY_BLOB: str,
pymysql.constants.FIELD_TYPE.MEDIUM_BLOB: str,
pymysql.constants.FIELD_TYPE.LONG_BLOB: str,
pymysql.constants.FIELD_TYPE.BLOB: str,
pymysql.constants.FIELD_TYPE.VAR_STRING: str,
pymysql.constants.FIELD_TYPE.STRING: str,
pymysql.constants.FIELD_TYPE.GEOMETRY: str,
pymysql.constants.FIELD_TYPE.TIMESTAMP: str,
pymysql.constants.FIELD_TYPE.DATE: str,
pymysql.constants.FIELD_TYPE.TIME: str,
pymysql.constants.FIELD_TYPE.DATETIME: str,
pymysql.constants.FIELD_TYPE.NEWDATE: str,
}
return type_map.get(type_id, str)
def value_to_renderable(val):
"""Convert a PyMySQL result value to a renderable format"""
if isinstance(val, datetime.datetime):
return val.strftime('%Y-%m-%d %H:%M:%S')
if isinstance(val, datetime.date):
return val.strftime('%Y-%m-%d')
if isinstance(val, bytes):
return '<BINARY>'
return val
def result_to_liststore(result, description, treeview=None, editable=False):
"""Convert PyMySQL result to GtkListStore"""
cols = []
for i, col in enumerate(description):
cols = cols + [int_to_type(col[1])]
if treeview:
control = Gtk.CellRendererText(editable=editable, ellipsize='end',
single_paragraph_mode=True)
label = col[0].replace('_', '__')
column = Gtk.TreeViewColumn(label, control, text=i)
column.set_resizable(True)
treeview.append_column(column)
result_list = Gtk.ListStore(*cols)
for row in result:
rowfinal = []
for val in row.values():
displayval = value_to_renderable(val)
rowfinal.append(displayval)
result_list.append(rowfinal)
return result_list
def escape_identifier(identifier):
"""Escape MySQL identifier"""
return "`%s`" % identifier.replace('`', '``')
| [
"alanaktion@gmail.com"
] | alanaktion@gmail.com |
1726f839984b2d0e702d58d1a70897100343f60d | 36f217ec710217b479d43243ba135452a3b7e953 | /30. File Handling/reading_csv_files.py | 065d3e8e0b4bab587934b50a1e4aae6f93ee83d9 | [] | no_license | shrirangmhalgi/Python-Bootcamp | 571a34eda516c031ec5f447f2a26ad68e01ee576 | 4c17f7db1b7a8870ef13aecb3154e0d32db44fcc | refs/heads/master | 2020-08-05T03:19:50.081426 | 2019-12-16T08:11:56 | 2019-12-16T08:11:56 | 212,373,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # Normal file handling to read a file but it returns a giant string of data
# with open("file.csv") as file:
# file.read()
# from csv import reader
# with open("file.csv") as file:
# reader_csv = reader(file, delimiter="|")
# next(reader_csv)
# for fighter in reader_csv:
# # each row is a list
# # Header is included here so we can iterate on reader_csv object
# print(f"{fighter[0]} is from {fighter[1]}")
from csv import DictReader
with open("file.csv") as file:
reader_csv = DictReader(file, delimiter=",")
for fighter in reader_csv:
print(fighter["Name"]) | [
"shrirangmhalgi@gmail.com"
] | shrirangmhalgi@gmail.com |
02de053f7a35ad14f7c9469e279ff827159d5414 | 904bf81488ce47c93453a8a841403e831f03ebe0 | /tx_lobbying/search_indexes.py | edb9f8e6255ec97382048159f312a5a1398c6c77 | [
"Apache-2.0"
] | permissive | texastribune/tx_lobbying | b7b26ed8acb6059f46bf1e4285af69398795b074 | 81dd911667e5368b874a56d5fba8e1613f7027ee | refs/heads/master | 2020-04-01T09:25:11.457807 | 2015-05-19T03:34:53 | 2015-05-19T03:34:53 | 7,674,962 | 1 | 3 | null | 2015-05-28T03:08:54 | 2013-01-17T21:47:06 | Python | UTF-8 | Python | false | false | 920 | py | """
Haystack search indicies.
I denormalize thing here to try and make things easier on the database later.
"""
from haystack import indexes
from .models import Lobbyist, Interest
class LobbyistIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='name', document=True)
content_auto = indexes.EdgeNgramField(model_attr='name')
url = indexes.CharField()
def get_model(self):
return Lobbyist
def get_updated_field(self):
return 'updated_at'
def prepare_url(self, obj):
return obj.get_absolute_url()
class InterestIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='name', document=True)
content_auto = indexes.EdgeNgramField(model_attr='name')
url = indexes.CharField()
def get_model(self):
return Interest
def prepare_url(self, obj):
return obj.get_absolute_url()
| [
"c@crccheck.com"
] | c@crccheck.com |
25d68b3df827070a671cceccc646c451bcd00f3f | a21b6dca72957be702d39eb60a45af49b2c50a99 | /deta.py | 20fe4b2d740644302ca39586b026500a3369a05e | [] | no_license | kenpos/PythonMachineLearning | f1775d663fda9bced3f6e9340a18db7782e209c5 | b5f8d74b36121c17fe9382fae70808a26129fc4b | refs/heads/master | 2021-01-01T04:40:43.133196 | 2016-05-04T09:22:59 | 2016-05-04T09:22:59 | 58,038,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # -*- coding: utf-8 -*-
"""
センサーデータ確認用.ひたすらprintするだけ.
"""
import serial
def test():
devPath = "/dev/cu.usbmodem1421"
leadtime = 9600
ser = serial.Serial(devPath, leadtime)
ser.open
while True:
data = ser.readline().strip().rsplit()
print(data)
if __name__ == "__main__":
test()
| [
"kenpos13@gmail.com"
] | kenpos13@gmail.com |
a283425bbb4f90949b0edfcd5f68564d3833b84f | 050d5f569497b8e04272b2b6955ac6f844e094e7 | /hail/python/hail/ir/register_functions.py | 814b1dbcbd0074c87fe410eb5acaeab94fbd9ce5 | [
"MIT"
] | permissive | steveherrin/hail | a68460870aa8207de628ee2054a7af889ef8e07c | edd724faf9443d37cca6a22d4c0a2af939130427 | refs/heads/master | 2020-07-29T14:03:50.642849 | 2019-09-20T16:11:21 | 2019-09-20T16:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,551 | py | import hail as hl
from hail.expr.nat import NatVariable
from .ir import register_function, register_session_function, register_seeded_function
def register_reference_genome_functions(rg):
from hail.expr.types import dtype
register_session_function(f"isValidContig({rg})", (dtype("str"),), dtype("bool"))
register_session_function(f"isValidLocus({rg})", (dtype("str"),dtype("int32"),), dtype("bool"))
register_session_function(f"getReferenceSequenceFromValidLocus({rg})", (dtype("str"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("str"))
register_session_function(f"getReferenceSequence({rg})", (dtype("str"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("str"))
def register_functions():
from hail.expr.types import dtype
register_function("flatten", (dtype("array<array<?T>>"),), dtype("array<?T>"))
register_function("difference", (dtype("set<?T>"),dtype("set<?T>"),), dtype("set<?T>"))
register_function("median", (dtype("set<?T:numeric>"),), dtype("?T"))
register_function("median", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("uniqueMinIndex", (dtype("array<?T>"),), dtype("int32"))
register_function("mean", (dtype("array<?T:numeric>"),), dtype("float64"))
register_function("toFloat32", (dtype("?T:numeric"),), dtype("float32"))
register_function("uniqueMaxIndex", (dtype("array<?T>"),), dtype("int32"))
register_function("toSet", (dtype("array<?T>"),), dtype("set<?T>"))
def array_floating_point_divide(arg_type, ret_type):
register_function("/", (arg_type, hl.tarray(arg_type),), hl.tarray(ret_type))
register_function("/", (hl.tarray(arg_type),arg_type), hl.tarray(ret_type))
register_function("/", (hl.tarray(arg_type),hl.tarray(arg_type)), hl.tarray(ret_type))
array_floating_point_divide(hl.tint32, hl.tfloat32)
array_floating_point_divide(hl.tint64, hl.tfloat32)
array_floating_point_divide(hl.tfloat32, hl.tfloat32)
array_floating_point_divide(hl.tfloat64, hl.tfloat64)
def ndarray_floating_point_divide(arg_type, ret_type):
register_function("/", (arg_type, hl.tndarray(arg_type, NatVariable()),), hl.tndarray(ret_type, NatVariable()))
register_function("/", (hl.tndarray(arg_type, NatVariable()), arg_type), hl.tndarray(ret_type, NatVariable()))
register_function("/", (hl.tndarray(arg_type, NatVariable()),
hl.tndarray(arg_type, NatVariable())), hl.tndarray(ret_type, NatVariable()))
ndarray_floating_point_divide(hl.tint32, hl.tfloat32)
ndarray_floating_point_divide(hl.tint64, hl.tfloat32)
ndarray_floating_point_divide(hl.tfloat32, hl.tfloat32)
ndarray_floating_point_divide(hl.tfloat64, hl.tfloat64)
register_function("values", (dtype("dict<?key, ?value>"),), dtype("array<?value>"))
register_function("[*:]", (dtype("array<?T>"),dtype("int32"),), dtype("array<?T>"))
register_function("[*:]", (dtype("str"),dtype("int32"),), dtype("str"))
register_function("get", (dtype("dict<?key, ?value>"),dtype("?key"),), dtype("?value"))
register_function("get", (dtype("dict<?key, ?value>"),dtype("?key"),dtype("?value"),), dtype("?value"))
register_function("max", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("nanmax", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("max", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("nanmax", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("max_ignore_missing", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("nanmax_ignore_missing", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("product", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("toInt32", (dtype("?T:numeric"),), dtype("int32"))
register_function("extend", (dtype("array<?T>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("argmin", (dtype("array<?T>"),), dtype("int32"))
register_function("toFloat64", (dtype("?T:numeric"),), dtype("float64"))
register_function("sort", (dtype("array<?T>"),), dtype("array<?T>"))
register_function("sort", (dtype("array<?T>"),dtype("bool"),), dtype("array<?T>"))
register_function("isSubset", (dtype("set<?T>"),dtype("set<?T>"),), dtype("bool"))
register_function("[*:*]", (dtype("str"),dtype("int32"),dtype("int32"),), dtype("str"))
register_function("[*:*]", (dtype("array<?T>"),dtype("int32"),dtype("int32"),), dtype("array<?T>"))
register_function("+", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("+", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("+", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("+", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("+", (dtype("ndarray<?T:numeric, ?nat>"), dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("+", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("**", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<float64>"))
register_function("**", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<float64>"))
register_function("**", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<float64>"))
register_function("**", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("**", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("**", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("append", (dtype("array<?T>"),dtype("?T"),), dtype("array<?T>"))
register_function("[:*]", (dtype("str"),dtype("int32"),), dtype("str"))
register_function("[:*]", (dtype("array<?T>"),dtype("int32"),), dtype("array<?T>"))
register_function("remove", (dtype("set<?T>"),dtype("?T"),), dtype("set<?T>"))
register_function("[]", (dtype("str"),dtype("int32"),), dtype("str"))
register_function("indexArray", (dtype("array<?T>"),dtype("int32"),), dtype("?T"))
register_function("[]", (dtype("dict<?key, ?value>"),dtype("?key"),), dtype("?value"))
register_function("dictToArray", (dtype("dict<?key, ?value>"),), dtype("array<tuple(?key, ?value)>"))
register_function("%", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("%", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("%", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("%", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("%", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("%", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("dict", (dtype("array<tuple(?key, ?value)>"),), dtype("dict<?key, ?value>"))
register_function("dict", (dtype("set<tuple(?key, ?value)>"),), dtype("dict<?key, ?value>"))
register_function("keys", (dtype("dict<?key, ?value>"),), dtype("array<?key>"))
register_function("min", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("nanmin", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("min", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("nanmin", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("min_ignore_missing", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("nanmin_ignore_missing", (dtype("?T"),dtype("?T"),), dtype("?T"))
register_function("sum", (dtype("array<?T:numeric>"),), dtype("?T"))
register_function("toInt64", (dtype("?T:numeric"),), dtype("int64"))
register_function("contains", (dtype("dict<?key, ?value>"),dtype("?key"),), dtype("bool"))
register_function("contains", (dtype("array<?T>"),dtype("?T"),), dtype("bool"))
register_function("contains", (dtype("set<?T>"),dtype("?T"),), dtype("bool"))
register_function("-", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("-", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("-", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("-", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("-", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("-", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("addone", (dtype("int32"),), dtype("int32"))
register_function("isEmpty", (dtype("dict<?key, ?value>"),), dtype("bool"))
register_function("isEmpty", (dtype("array<?T>"),), dtype("bool"))
register_function("isEmpty", (dtype("set<?T>"),), dtype("bool"))
register_function("[:]", (dtype("array<?T>"),), dtype("array<?T>"))
register_function("[:]", (dtype("str"),), dtype("str"))
register_function("union", (dtype("set<?T>"),dtype("set<?T>"),), dtype("set<?T>"))
register_function("*", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("*", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("*", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("*", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("*", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("*", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("intersection", (dtype("set<?T>"),dtype("set<?T>"),), dtype("set<?T>"))
register_function("add", (dtype("set<?T>"),dtype("?T"),), dtype("set<?T>"))
register_function("argmax", (dtype("array<?T>"),), dtype("int32"))
register_function("//", (dtype("array<?T:numeric>"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("//", (dtype("array<?T:numeric>"),dtype("?T"),), dtype("array<?T>"))
register_function("//", (dtype("?T:numeric"),dtype("array<?T>"),), dtype("array<?T>"))
register_function("//", (dtype("ndarray<?T:numeric, ?nat>"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("//", (dtype("ndarray<?T:numeric, ?nat>"),dtype("?T"),), dtype("ndarray<?T, ?nat>"))
register_function("//", (dtype("?T:numeric"),dtype("ndarray<?T, ?nat>"),), dtype("ndarray<?T, ?nat>"))
register_function("keySet", (dtype("dict<?key, ?value>"),), dtype("set<?key>"))
register_function("qnorm", (dtype("float64"),), dtype("float64"))
register_function("oneHotAlleles", (dtype("call"),dtype("int32"),), dtype("array<int32>"))
register_function("dpois", (dtype("float64"),dtype("float64"),dtype("bool"),), dtype("float64"))
register_function("dpois", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("ploidy", (dtype("call"),), dtype("int32"))
register_function("||", (dtype("bool"),dtype("bool"),), dtype("bool"))
register_function("ppois", (dtype("float64"),dtype("float64"),dtype("bool"),dtype("bool"),), dtype("float64"))
register_function("ppois", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("log10", (dtype("float64"),), dtype("float64"))
register_function("isHet", (dtype("call"),), dtype("bool"))
register_function("isAutosomalOrPseudoAutosomal", (dtype("?T:locus"),), dtype("bool"))
register_function("testCodeUnification", (dtype("?x:numeric"),dtype("?x:int32"),), dtype("?x"))
register_seeded_function("rand_pois", (dtype("float64"),), dtype("float64"))
register_seeded_function("rand_pois", (dtype("int32"),dtype("float64"),), dtype("array<float64>"))
register_function("toFloat32", (dtype("str"),), dtype("float32"))
register_function("toFloat32", (dtype("bool"),), dtype("float32"))
register_function("isAutosomal", (dtype("?T:locus"),), dtype("bool"))
register_function("isPhased", (dtype("call"),), dtype("bool"))
register_function("isHomVar", (dtype("call"),), dtype("bool"))
register_function("corr", (dtype("array<float64>"),dtype("array<float64>"),), dtype("float64"))
register_function("log", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("log", (dtype("float64"),), dtype("float64"))
register_function("foobar2", (), dtype("int32"))
register_function("approxEqual", (dtype("float64"),dtype("float64"),dtype("float64"),dtype("bool"),dtype("bool"),), dtype("bool"))
register_function("plDosage", (dtype("array<?N:int32>"),), dtype("float64"))
register_function("includesEnd", (dtype("interval<?T>"),), dtype("bool"))
register_function("position", (dtype("?T:locus"),), dtype("int32"))
register_seeded_function("rand_unif", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("str", (dtype("?T"),), dtype("str"))
register_function("valuesSimilar", (dtype("?T"),dtype("?T"),dtype('float64'),dtype('bool'),), dtype("bool"))
register_function("replace", (dtype("str"),dtype("str"),dtype("str"),), dtype("str"))
register_function("exp", (dtype("float64"),), dtype("float64"))
register_function("&&", (dtype("bool"),dtype("bool"),), dtype("bool"))
register_function("compare", (dtype("int32"),dtype("int32"),), dtype("int32"))
register_function("triangle", (dtype("int32"),), dtype("int32"))
register_function("Interval", (dtype("?T"),dtype("?T"),dtype("bool"),dtype("bool"),), dtype("interval<?T>"))
register_function("contig", (dtype("?T:locus"),), dtype("str"))
register_function("Call", (dtype("bool"),), dtype("call"))
register_function("Call", (dtype("str"),), dtype("call"))
register_function("Call", (dtype("int32"),dtype("bool"),), dtype("call"))
register_function("Call", (dtype("int32"),dtype("int32"),dtype("bool"),), dtype("call"))
register_function("Call", (dtype("array<int32>"),dtype("bool"),), dtype("call"))
register_function("qchisqtail", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("binomTest", (dtype("int32"),dtype("int32"),dtype("float64"),dtype("int32"),), dtype("float64"))
register_function("qpois", (dtype("float64"),dtype("float64"),), dtype("int32"))
register_function("qpois", (dtype("float64"),dtype("float64"),dtype("bool"),dtype("bool"),), dtype("int32"))
register_function("is_finite", (dtype("float32"),), dtype("bool"))
register_function("is_finite", (dtype("float64"),), dtype("bool"))
register_function("inYPar", (dtype("?T:locus"),), dtype("bool"))
register_function("contingency_table_test", (dtype("int32"),dtype("int32"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("struct{p_value: float64, odds_ratio: float64}"))
register_function("toInt32", (dtype("bool"),), dtype("int32"))
register_function("toInt32", (dtype("str"),), dtype("int32"))
register_function("foobar1", (), dtype("int32"))
register_function("toFloat64", (dtype("str"),), dtype("float64"))
register_function("toFloat64", (dtype("bool"),), dtype("float64"))
register_function("dbeta", (dtype("float64"),dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("Locus", (dtype("str"),), dtype("?T:locus"))
register_function("Locus", (dtype("str"), dtype("int32"),), dtype("?T:locus"))
register_function("LocusAlleles", (dtype("str"),), dtype("struct{locus: ?T, alleles: array<str>}"))
register_function("LocusInterval", (dtype("str"),dtype("bool"),), dtype("interval<?T:locus>"))
register_function("LocusInterval", (dtype("str"),dtype("int32"),dtype("int32"),dtype("bool"),dtype("bool"),dtype("bool"),), dtype("interval<?T:locus>"))
register_function("globalPosToLocus", (dtype("int64"),), dtype("?T:locus"))
register_function("locusToGlobalPos", (dtype("?T:locus"),), dtype("int64"))
register_function("liftoverLocus", (dtype(f"?T:locus"), dtype('float64'),), dtype(f"struct{{result:?U:locus,is_negative_strand:bool}}"))
register_function("liftoverLocusInterval", (dtype(f"interval<?T:locus>"), dtype('float64'),), dtype(f"struct{{result:interval<?U:locus>,is_negative_strand:bool}}"))
register_function("min_rep", (dtype("?T:locus"),dtype("array<str>"),), dtype("struct{locus: ?T, alleles: array<str>}"))
register_function("locus_windows_per_contig", (dtype("array<array<float64>>"),dtype("float64"),), dtype("tuple(array<int32>, array<int32>)"))
register_function("toBoolean", (dtype("str"),), dtype("bool"))
register_seeded_function("rand_bool", (dtype("float64"),), dtype("bool"))
register_function("pchisqtail", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_seeded_function("rand_cat", (dtype("array<float64>"),), dtype("int32"))
register_function("inYNonPar", (dtype("?T:locus"),), dtype("bool"))
register_function("+", (dtype("str"),dtype("str"),), dtype("str"))
register_function("**", (dtype("float32"),dtype("float32"),), dtype("float64"))
register_function("**", (dtype("int32"),dtype("int32"),), dtype("float64"))
register_function("**", (dtype("int64"),dtype("int64"),), dtype("float64"))
register_function("**", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("length", (dtype("str"),), dtype("int32"))
register_function("slice", (dtype("str"),dtype("int32"),dtype("int32"),), dtype("str"))
register_function("split", (dtype("str"),dtype("str"),dtype("int32"),), dtype("array<str>"))
register_function("split", (dtype("str"),dtype("str"),), dtype("array<str>"))
register_seeded_function("rand_gamma", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("UnphasedDiploidGtIndexCall", (dtype("int32"),), dtype("call"))
register_function("[]", (dtype("call"),dtype("int32"),), dtype("int32"))
register_function("sign", (dtype("int64"),), dtype("int64"))
register_function("sign", (dtype("float64"),), dtype("float64"))
register_function("sign", (dtype("float32"),), dtype("float32"))
register_function("sign", (dtype("int32"),), dtype("int32"))
register_function("unphasedDiploidGtIndex", (dtype("call"),), dtype("int32"))
register_function("gamma", (dtype("float64"),), dtype("float64"))
register_function("%", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("%", (dtype("int64"),dtype("int64"),), dtype("int64"))
register_function("%", (dtype("float32"),dtype("float32"),), dtype("float32"))
register_function("%", (dtype("int32"),dtype("int32"),), dtype("int32"))
register_function("fisher_exact_test", (dtype("int32"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("struct{p_value: float64, odds_ratio: float64, ci_95_lower: float64, ci_95_upper: float64}"))
register_function("floor", (dtype("float64"),), dtype("float64"))
register_function("floor", (dtype("float32"),), dtype("float32"))
register_function("isNonRef", (dtype("call"),), dtype("bool"))
register_function("includesStart", (dtype("interval<?T>"),), dtype("bool"))
register_function("isHetNonRef", (dtype("call"),), dtype("bool"))
register_function("hardy_weinberg_test", (dtype("int32"),dtype("int32"),dtype("int32"),), dtype("struct{het_freq_hwe: float64, p_value: float64}"))
register_function("haplotype_freq_em", (dtype("array<int32>"),), dtype("array<float64>"))
register_function("nNonRefAlleles", (dtype("call"),), dtype("int32"))
register_function("abs", (dtype("float64"),), dtype("float64"))
register_function("abs", (dtype("float32"),), dtype("float32"))
register_function("abs", (dtype("int64"),), dtype("int64"))
register_function("abs", (dtype("int32"),), dtype("int32"))
register_function("endswith", (dtype("str"),dtype("str"),), dtype("bool"))
register_function("sqrt", (dtype("float64"),), dtype("float64"))
register_function("isnan", (dtype("float32"),), dtype("bool"))
register_function("isnan", (dtype("float64"),), dtype("bool"))
register_function("lower", (dtype("str"),), dtype("str"))
register_seeded_function("rand_beta", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_seeded_function("rand_beta", (dtype("float64"),dtype("float64"),dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("toInt64", (dtype("bool"),), dtype("int64"))
register_function("toInt64", (dtype("str"),), dtype("int64"))
register_function("testCodeUnification2", (dtype("?x"),), dtype("?x"))
register_function("contains", (dtype("str"),dtype("str"),), dtype("bool"))
register_function("contains", (dtype("interval<?T>"),dtype("?T"),), dtype("bool"))
register_function("entropy", (dtype("str"),), dtype("float64"))
register_function("filtering_allele_frequency", (dtype("int32"),dtype("int32"),dtype("float64"),), dtype("float64"))
register_function("gqFromPL", (dtype("array<?N:int32>"),), dtype("int32"))
register_function("startswith", (dtype("str"),dtype("str"),), dtype("bool"))
register_function("ceil", (dtype("float32"),), dtype("float32"))
register_function("ceil", (dtype("float64"),), dtype("float64"))
register_function("json", (dtype("?T"),), dtype("str"))
register_function("strip", (dtype("str"),), dtype("str"))
register_function("firstMatchIn", (dtype("str"),dtype("str"),), dtype("array<str>"))
register_function("isEmpty", (dtype("interval<?T>"),), dtype("bool"))
register_function("~", (dtype("str"),dtype("str"),), dtype("bool"))
register_function("mkString", (dtype("set<str>"),dtype("str"),), dtype("str"))
register_function("mkString", (dtype("array<str>"),dtype("str"),), dtype("str"))
register_function("dosage", (dtype("array<?N:float64>"),), dtype("float64"))
register_function("upper", (dtype("str"),), dtype("str"))
register_function("overlaps", (dtype("interval<?T>"),dtype("interval<?T>"),), dtype("bool"))
register_function("downcode", (dtype("call"),dtype("int32"),), dtype("call"))
register_function("inXPar", (dtype("?T:locus"),), dtype("bool"))
register_function("format", (dtype("str"),dtype("?T:tuple"),), dtype("str"))
register_function("pnorm", (dtype("float64"),), dtype("float64"))
register_function("is_infinite", (dtype("float32"),), dtype("bool"))
register_function("is_infinite", (dtype("float64"),), dtype("bool"))
register_function("isHetRef", (dtype("call"),), dtype("bool"))
register_function("isMitochondrial", (dtype("?T:locus"),), dtype("bool"))
register_function("hamming", (dtype("str"),dtype("str"),), dtype("int32"))
register_function("end", (dtype("interval<?T>"),), dtype("?T"))
register_function("start", (dtype("interval<?T>"),), dtype("?T"))
register_function("inXNonPar", (dtype("?T:locus"),), dtype("bool"))
register_function("escapeString", (dtype("str"),), dtype("str"))
register_function("isHomRef", (dtype("call"),), dtype("bool"))
register_seeded_function("rand_norm", (dtype("float64"),dtype("float64"),), dtype("float64"))
register_function("chi_squared_test", (dtype("int32"),dtype("int32"),dtype("int32"),dtype("int32"),), dtype("struct{p_value: float64, odds_ratio: float64}"))
| [
"daniel.zidan.king@gmail.com"
] | daniel.zidan.king@gmail.com |
3dab6d251c6ac13c212ea60b449bf66fc68e4008 | 48d86947d5f3b5896c4a05cfcddcff01582a26ef | /amnesia/task/migrations/0002_auto_20170504_2027.py | 1792df7727f51e01362639cf36de2a20b7de1620 | [] | no_license | pratulyab/amnesia | 181874288c97fbf7e73d10c64e214c2a17574773 | 6b0b3428a27f98e0e2f6bb8aefdc8a4459e7b8cc | refs/heads/master | 2021-01-20T12:49:16.592335 | 2017-05-07T20:38:06 | 2017-05-07T20:38:06 | 90,409,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-04 20:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('task', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='task',
name='every',
field=models.CharField(choices=[('15', '15 minutes'), ('30', '30 minutes'), ('45', '45 minutes'), ('0', '60 minutes')], default='0', help_text='Repeat every', max_length=2),
),
migrations.AlterField(
model_name='task',
name='sleep_cycle',
field=models.CharField(choices=[('4-19', '8pm - 4am'), ('5-20', '9pm - 5am'), ('6-21', '10pm - 6am'), ('7-22', '11pm - 7am'), ('8-23', '12pm - 8am')], default='4-19', help_text='Assuming 8 hours sleep cycle', max_length=5),
),
]
| [
"pratulyabubna@outlook.com"
] | pratulyabubna@outlook.com |
dc7c9a762785e6e916f3fe35491b4ce3eac7898d | b5fc696d225d7310d607e33c3a4ac5aeec0c89fc | /kazmarket/asgi.py | fcf06c4bce04ccc17083e33667cd8eb83baac29a | [] | no_license | adriankokai/kazmarket | 88a081318cdbe60ce1639c00f0545d72fa474462 | f47cc7851d27ce7a714f7bc9c7bad88b68130584 | refs/heads/main | 2023-04-01T13:45:19.021873 | 2021-04-01T14:05:50 | 2021-04-01T14:05:50 | 345,583,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for kazmarket project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application object is
# built (an already-set DJANGO_SETTINGS_MODULE in the environment wins).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kazmarket.settings')
# Module-level ASGI callable picked up by the ASGI server (e.g. uvicorn/daphne).
application = get_asgi_application()
| [
"adriankokai@gmail.com"
] | adriankokai@gmail.com |
c97b3fb4ecb0da287d3bc8bb69898f4820c8586f | dc2c8bb5ed55ed11f89f31db92f07cde3a83acaf | /Day-1/day-1.py | dd69c8324581e431f5c99fb9f6dfdd0f9368e999 | [] | no_license | Gulamwaris/ConsistentCoding | 4364711f89ff7b187966e3141d9397e551630bbc | 23abc845b3323b69c96853358ec30ebf62a67310 | refs/heads/master | 2020-09-28T20:07:14.545153 | 2019-12-11T11:38:52 | 2019-12-11T11:38:52 | 226,853,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from __future__ import print_function
if __name__ == '__main__':
n = int(raw_input())
i=1
while(i<n):
print(i) | [
"gwarsi70@gmail.com"
] | gwarsi70@gmail.com |
2b8b968c3656e1b5670f87141f8df68bd85cc7cb | 60933400231f805c16a755b56016441ecbf2e8b6 | /Part 1 - Data Preprocessing/categorical_data.py | 5cff20896d72a5fd2072f2f5fd9a09a6231d8f7b | [] | no_license | sayedcseku/machine-learning-a-z | 0d7ba3482044d92e8118e0891b83ab038daf1f5e | e279897e538615a5f663de6f7c0c98ee8ca88511 | refs/heads/master | 2022-11-22T14:53:27.969302 | 2020-07-17T13:28:41 | 2020-07-17T13:28:41 | 280,430,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | # Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset.
# Expects Data.csv in the working directory; the last column is the target.
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values  # feature matrix: every column but the last
y = dataset.iloc[:, 3].values    # dependent variable: the 4th column

# Taking care of missing data: replace NaNs in columns 1-2 with the column mean.
# NOTE(review): `Imputer` is the pre-0.20 scikit-learn API (later replaced by
# sklearn.impute.SimpleImputer) — this script targets an old sklearn release.
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])

# Encoding categorical data
# Encoding the independent variable: label-encode column 0, then one-hot it.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
# Encoding the dependent variable (labels only; no one-hot needed).
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)

# Splitting the dataset into training and test sets (80/20, fixed seed).
# NOTE(review): sklearn.cross_validation was removed in 0.20 in favour of
# sklearn.model_selection — consistent with the old-sklearn note above.
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state = 0)

# Feature scaling: fit the scaler on the training set only, then apply to both.
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
"sayed4931@gmail.com"
] | sayed4931@gmail.com |
d0c7559b7165c2f244c313b57474e26053f26554 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/NationalVulnerabilityDatabaseFeed/Integrations/NationalVulnerabilityDatabaseFeed/NationalVulnerabilityDatabaseFeed.py | daa1e14306dcea7f4df316bec273e24e3104e8bc | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 12,285 | py | import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
from typing import Dict, Any, List
from datetime import datetime, timedelta
from time import sleep
import urllib3
# Disable insecure warnings
urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S:000 UTC-00:00" # ISO8601 format with UTC, default in XSOAR
DEFAULT_LIMIT = 100
# =========================================== Helper Functions ===========================================#
def parse_cve_data(item=None) -> dict:
    """Map one NVD CVE item onto XSOAR indicator fields.

    Args:
        item: A single entry from the NVD feed's ``CVE_Items`` list
            (expects ``cve`` and optionally ``impact`` keys). Non-dict or
            falsy input yields an empty mapping.

    Returns:
        dict: Indicator fields — ``cvedescription`` (English description,
        when present), ``publications`` (reference links), ``cvss3`` /
        ``cvssvector`` (CVSS v3 breakdown) and ``cvss`` (raw v2/v3 blobs).
    """
    fields = dict()
    if item and isinstance(item, dict):
        # Populate common fields
        cve: dict = item.get('cve') # type: ignore
        # Description: keep only the English-language entry, if any.
        if "description" in cve:
            description = cve.get('description', {}).get('description_data')
            if "en" in [x.get('lang') for x in description]:
                fields['cvedescription'] = description[([x.get('lang') for x in description]).index('en')].get('value')
        # References: normalized into {link, title, source} records.
        if "references" in cve:
            references: List = cve.get('references', {}).get('reference_data')
            fields['publications'] = [
                {
                    "link": x.get('url'),
                    "title": x.get('name'),
                    "source": x.get('refsource')
                } for x in references]
        # Parse impact data
        impact = item.get('impact')
        if impact:
            # All CVSS data
            all_cvss = []
            # Base Metric V2
            # NOTE(review): v2 data is read from impact['impact']['baseMetricV2']
            # (one level deeper than v3, which sits at impact['baseMetricV3']).
            # Looks intentional for this payload shape — confirm against a real
            # NVD response if v2 scores ever come back empty.
            if "impact" in impact:
                impact_v2 = impact.get('impact')
                if "baseMetricV2" in impact_v2:
                    base_metric_v2 = impact_v2.get('baseMetricV2')
                    if "cvssV2" in base_metric_v2:
                        cvss_v2 = base_metric_v2.get('cvssV2')
                        all_cvss.append({"cvssV2": cvss_v2})
            # Base Metric V3
            if "baseMetricV3" in impact:
                base_metric = impact.get('baseMetricV3')
                if "cvssV3" in base_metric:
                    cvss_v3 = base_metric.get('cvssV3')
                    all_cvss.append({"cvssV3": cvss_v3})
                    # Flatten each camelCase metric name into a readable
                    # "Title Case" label for the grid field.
                    cvss_v3_data = []
                    for k, v in cvss_v3.items():
                        cvss_v3_data.append(
                            {
                                "metric": camel_case_to_underscore(k).replace("_", " ").title(),
                                "values": v
                            }
                        )
                    fields['cvss3'] = cvss_v3_data
                    fields['cvssvector'] = cvss_v3.get('vectorString')
            fields['cvss'] = all_cvss
    return fields
def extract_titles(data_item=None) -> list:
    """Return the title strings of a CPE data item.

    Args:
        data_item: A CPE entry from the NVD API whose ``titles`` key holds
            a list of ``{'title': ...}`` dicts. Defaults to an empty item.

    Returns:
        list: The ``title`` value of each entry, in order ([] when absent).
    """
    # BUGFIX: the original used a mutable default argument ({}) — a shared
    # object across calls — and crashed (TypeError: iterating None) when
    # 'titles' was missing. Both are fixed here.
    data_item = data_item or {}
    return [title.get('title') for title in data_item.get('titles', [])]
def extract_descriptions(data_item=None) -> list:
    """Return the description strings of a CVE data item.

    Args:
        data_item: A CVE entry from the NVD feed; descriptions live under
            ``cve.description.description_data`` as ``{'value': ...}`` dicts.

    Returns:
        list: The ``value`` of each description, in order ([] when absent).
    """
    # BUGFIX: the original used a mutable default argument ({}) and crashed
    # (TypeError: iterating None) when 'description_data' was missing.
    data_item = data_item or {}
    description_data = (
        data_item.get('cve', {}).get('description', {}).get('description_data', [])
    )
    return [description.get('value') for description in description_data]
# ========================================== Generic Query ===============================================#
def test_module(client: BaseClient, params: Dict[str, Any]):
    """Connectivity check: query the NVD CPE endpoint for a known CVE.

    Emits 'ok' on success, an error entry when the API reports one, and
    wraps any transport failure in a DemistoException.
    """
    api_key = params.get('apiKey')
    try:
        query = {"cveId": "CVE-2020-22120"}
        if api_key:
            query['apiKey'] = api_key
        res = client._http_request(
            'GET',
            full_url='https://services.nvd.nist.gov/rest/json/cpes/1.0',
            params=query,
        )
        if "error" in res:
            return_error(res.get('error'))
        elif "resultsPerPage" in res:
            return_results('ok')
    except Exception as err:
        raise DemistoException(err)
def fetch_indicators_command(client, params):
    """Pull CPE or CVE records from the NVD API, then either create feed
    indicators (``fetch-indicators``) or render them to the war room
    (``nvd-get-indicators``).

    Args:
        client: HTTP client for services.nvd.nist.gov.
        params: Integration parameters — ``type`` (CPE/CVE), ``apiKey``,
            ``history`` (days), plus optional CPE/CVSS filters.

    Side effects: creates indicators in batches and stores the current UTC
    time as ``lastRun`` for incremental fetching.
    """
    command = demisto.command()
    api_key = params.get('apiKey')
    get_type = params.get('type')
    cpe_match_string = params.get('cpeMatchString')
    cpe_keyword = params.get('keyword')
    include_deprecated = params.get('deprecated')
    cvss_v2_metrics = params.get('cvssV2Metrics')
    cvss_v2_severity = params.get('cvssV2Severity')
    cvss_v3_metrics = params.get('cvssV3Metrics')
    cvss_v3_severity = params.get('cvssV3Severity')
    history = int(params.get('history'))
    exceeds_span = True
    urls = {
        "CPE": "/rest/json/cpes/1.0/",
        "CVE": "/rest/json/cves/1.0/"
    }
    url = urls[get_type]
    now = datetime.utcnow()
    startIndex = 0
    resultsPerPage = 2000
    data_items = []
    indicators: List[Dict] = []
    last_run_data = demisto.getLastRun()
    run_times: List[datetime] = []
    # Sliding window of request timestamps: at most 9 requests/minute without
    # an API key (raised to 99 below when a key is supplied).
    run_limit = 9
    # If there is no last run date, use the history specified in the params
    if "lastRun" not in last_run_data or command == 'nvd-get-indicators':
        last_run = (now - timedelta(days=history))
    else:
        last_run = dateparser.parse(last_run_data.get('lastRun')) # type: ignore
    modStartDate = last_run
    modEndDate = now
    # API calls can only span 120 days, so we should loop if the history
    # parameter is greater than this
    while exceeds_span and modEndDate and modStartDate:
        delta = (modEndDate - modStartDate).days
        if delta > 120:
            modEndDate = modStartDate + timedelta(days=120)
        else:
            exceeds_span = False
        # Rebuild the query for this 120-day window (startIndex resets to 0).
        params = {
            "modStartDate": modStartDate.strftime(DATE_FORMAT),
            "modEndDate": modEndDate.strftime(DATE_FORMAT),
            "startIndex": startIndex,
            "resultsPerPage": resultsPerPage
        }
        if api_key:
            params['apiKey'] = api_key
            run_limit = 99
        if get_type == "CPE":
            params['addOns'] = "cves"
            if include_deprecated:
                params['includeDeprecated'] = include_deprecated
        if get_type == "CVE":
            if cvss_v2_metrics:
                params['cvssV2Metrics'] = cvss_v2_metrics
            if cvss_v2_severity:
                params['cvssV2Severity'] = cvss_v2_severity
            if cvss_v3_metrics:
                params['cvssV3Metrics'] = cvss_v3_metrics
            if cvss_v3_severity:
                params['cvssV3Severity'] = cvss_v3_severity
        if cpe_match_string:
            params['cpeMatchString'] = cpe_match_string
        if cpe_keyword:
            params['keyword'] = cpe_keyword
        total_results = 1
        collection_count = 0
        # Collect all the indicators together (paged inner loop; totalResults
        # is taken from the first response).
        while collection_count < total_results:
            # Check to ensure no rate limits are hit
            if len(run_times) == run_limit:
                first_time = run_times[0]
                last_time = run_times[(run_limit - 1)]
                if (last_time - first_time).seconds <= 60:
                    demisto.info("Rate limit hit, sleeping for 3 seconds")
                    # We sleep 3 seconds to avoid hitting any rate limits
                    sleep(3)
                del run_times[0]
            run_times.append(datetime.utcnow())
            res = client._http_request('GET', url, params=params, timeout=300)
            # Check to see if there are any errors
            if "error" in res:
                return_error(res.get('error'))
            total_results = res.get('totalResults', 0)
            resultsPerPage = res.get('resultsPerPage', 0)
            result = res.get('result')
            if result:
                if get_type == 'CPE':
                    data_items += result.get('cpes')
                else:
                    data_items += result.get('CVE_Items')
            params['startIndex'] += resultsPerPage
            collection_count += resultsPerPage
        modStartDate = modEndDate
        modEndDate = now
    # If this is nvd-get-indicators command:
    if command == 'nvd-get-indicators':
        # If they are CPEs
        if get_type == 'CPE':
            outputs = [
                {
                    "cpe23Uri": x.get('cpe23Uri'),
                    "titles": ". ".join(extract_titles(data_item=x)),
                    "vulnerabilities": ", ".join(x.get('vulnerabilities'))
                } for x in data_items
            ]
            command_results = CommandResults(
                outputs_prefix='CPE',
                outputs_key_field='cpe23Uri',
                outputs=data_items,
                readable_output=tableToMarkdown("National Vulnerability Database CPEs:", outputs)
            )
        # If they are CVEs
        elif get_type == 'CVE':
            outputs = [
                {
                    "id": x.get('cve').get('CVE_data_meta').get('ID'),
                    "description": ". ".join(extract_descriptions(data_item=x))
                }
                for x in data_items]
            command_results = CommandResults(
                outputs_prefix='CVE',
                outputs_key_field='id',
                outputs=data_items,
                readable_output=tableToMarkdown("National Vulnerability Database CVEs:", outputs)
            )
        return_results(command_results)
    # Else if this is fetch-indicators
    elif command == 'fetch-indicators':
        indicators = []
        # If they are CPEs
        if get_type == 'CPE' and data_items:
            for item in data_items:
                item['type'] = "CPE"
                indicator = {
                    "value": item.get('cpe23Uri'),
                    "rawJSON": item
                }
                # This is reserved for future use: link each CPE to the CVEs
                # it is vulnerable to.
                if "vulnerabilities" in item:
                    relationships = []
                    for vulnerability in item.get('vulnerabilities', []):
                        relationship = EntityRelationship(
                            name=EntityRelationship.Relationships.RELATED_TO,
                            entity_a=item.get('cpe23Uri'),
                            entity_a_family="Indicator",
                            entity_a_type="CPE",
                            entity_b=vulnerability,
                            entity_b_family="Indicator",
                            entity_b_type="CVE"
                        )
                        relationships.append(relationship.to_indicator())
                    indicator['relationships'] = relationships
                indicators.append(indicator)
        # If they are CVEs
        elif get_type == 'CVE' and data_items:
            for item in data_items:
                item['type'] = "CVE"
                fields: Dict = parse_cve_data(item)
                indicators.append({
                    "value": item.get('cve', {}).get('CVE_data_meta', {}).get('ID'),
                    "type": FeedIndicatorType.CVE,
                    "fields": fields,
                    "rawJSON": item
                })
        # Create the indicators in a batch, 2000 at a time
        for b in batch(indicators, batch_size=2000):
            demisto.createIndicators(b)
    # Set new integration context
    demisto.setLastRun({"lastRun": now.isoformat()})
# =========================================== Built-In Queries ===========================================#
''' MAIN FUNCTION '''
# COMMAND CONSTANTS
# Maps the XSOAR command name to its handler; `fetch-indicators` and
# `nvd-get-indicators` share one implementation, which branches on the
# command name internally.
commands = {
    'test-module': test_module,
    'fetch-indicators': fetch_indicators_command,
    'nvd-get-indicators': fetch_indicators_command
}
def main() -> None:
    """Read integration params, build the HTTP client and dispatch the
    current command via the `commands` table. Any failure is logged with
    its traceback and surfaced as a readable error entry."""
    params = demisto.params()
    command = demisto.command()
    demisto.debug(f'Command being called is {command}')
    try:
        client = BaseClient(
            base_url="https://services.nvd.nist.gov",
            verify=not params.get('insecure', False),
            proxy=params.get('proxy', False),
        )
        commands[command](client, params)
    except Exception as e:
        # Log the full traceback, then return a war-room error.
        demisto.error(traceback.format_exc())
        return_error(f'Failed to execute {command} command.\nError: {str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| [
"noreply@github.com"
] | demisto.noreply@github.com |
e57e853474644c44099208c5e7e28f78db7b9b5f | ed33455c6fb61d628a77f7d5fb571129a62bec09 | /app/models/product.py | fb7616b716e7b096760dda998131b1b955019db2 | [] | no_license | ardhi12/fastapi-restapi-mongodb | 58b73928b651a6bade3f6127a5a3b630301fb7d5 | 70537919aa992aab9f665d49c8a51c151dfbfdef | refs/heads/master | 2023-03-06T20:30:47.972412 | 2021-02-20T12:23:11 | 2021-02-20T12:23:11 | 340,064,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from mongoengine import *
# mongoengine uses an ODM (Object Document Mapper) model, similar to an ORM in an RDBMS.
# mongoengine will use this class to build the database schema (collection).
# The model also performs input validation, so a separate pydantic model is not needed.
class Products(Document):
    # Product name: required, at most 50 characters.
    name = StringField(max_length=50, required=True)
| [
"ardhi@jojonomic.com"
] | ardhi@jojonomic.com |
a49dec791677cc3c5ea73509c5321469917a7a17 | f4d7222843cc7a35778b9c45e739f8a616023204 | /testcase/jfqyl_tp/test_jfqyl_6_periods_tp.py | d060ab308325398c02644daf8b9f27b2da4e49e8 | [] | no_license | sjkcdpc/CloudLoan | 03e6748a5a1faac52432d3afb2931c4b95fe3c52 | 80e35396ca0b342868f9d751977cab4bdaa45c72 | refs/heads/master | 2023-02-27T00:28:24.961777 | 2021-02-05T01:59:21 | 2021-02-05T01:59:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,174 | py | # -*- coding: UTF-8 -*-
"""
@auth:卜祥杰
@date:2020-06-23 14:51:00
@describe: 即分期医疗3期
"""
import unittest
import os
import json
import sys
import time
from common.common_func import Common
from log.logger import Logger
from common.open_excel import excel_table_byname
from config.configer import Config
from common.get_sql_data import GetSqlData
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
logger = Logger(logger="test_jfqyl_6_periods_tp").getlog()
class Jfqyl6Tp(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """One-time fixture setup shared by every test in the class:
        environment tag, Redis connection and the Excel case-file path."""
        cls.env = 'qa'
        cls.r = Common.conn_redis(environment=cls.env)
        cls.file = Config().get_item('File', 'jfq_case_file')

    @classmethod
    def tearDownClass(cls):
        """Nothing to clean up at class level."""
        pass
    def test_100_apply(self):
        """Loan application intake (进件): builds the apply request from the
        Excel sheet, seeds per-case keys in Redis, submits the request and
        stores the returned projectId for the following steps."""
        data = excel_table_byname(self.file, 'apply')
        Common.p2p_get_userinfo('jfqyl_6_periods', self.env)
        # Generate and cache the per-case identifiers used by later tests.
        self.r.mset(
            {
                "jfqyl_6_periods_sourceUserId": Common.get_random('userid'),
                "jfqyl_6_periods_transactionId": Common.get_random('transactionId'),
                "jfqyl_6_periods_phone": Common.get_random('phone'),
                "jfqyl_6_periods_sourceProjectId": Common.get_random('sourceProjectId'),
            }
        )
        param = json.loads(data[0]['param'])
        param.update(
            {
                "sourceProjectId": self.r.get('jfqyl_6_periods_sourceProjectId'),
                "sourceUserId": self.r.get('jfqyl_6_periods_sourceUserId'),
                "transactionId": self.r.get('jfqyl_6_periods_transactionId')
            }
        )
        # 6-period application of 33333.33 at 15.3% for the FQ_JK_JFQYL product.
        param['applyInfo'].update(
            {
                "applyTime": Common.get_time("-"),
                "applyAmount": 33333.33,
                "applyTerm": 6,
                "productCode": "FQ_JK_JFQYL"
            }
        )
        param['loanInfo'].update(
            {
                "loanAmount": 33333.33,
                "loanTerm": 6,
                "assetInterestRate": 0.153,
                "userInterestRate": 0.153
            }
        )
        param['personalInfo'].update(
            {
                "cardNum": self.r.get('jfqyl_6_periods_cardNum'),
                "custName": self.r.get('jfqyl_6_periods_custName'),
                "phone": self.r.get('jfqyl_6_periods_phone')
            }
        )
        # NOTE(review): applyTime was already set in the applyInfo update
        # above — this second update is redundant.
        param['applyInfo'].update({"applyTime": Common.get_time("-")})
        if len(data[0]['headers']) == 0:
            headers = None
        else:
            headers = json.loads(data[0]['headers'])
        rep = Common.response(
            faceaddr=data[0]['url'],
            headers=headers,
            data=json.dumps(param, ensure_ascii=False),
            product="cloudloan",
            environment=self.env
        )
        # Persist the created projectId for the downstream test steps.
        self.r.set('jfqyl_6_periods_projectId', rep['content']['projectId'])
        self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
def test_101_sign_credit(self):
"""上传授信协议"""
data = excel_table_byname(self.file, 'contract_sign')
param = Common.get_json_data('data', 'jfq_sign_credit.json')
param.update(
{
"serviceSn": Common.get_random('serviceSn'),
"sourceUserId": self.r.get('jfqyl_6_periods_sourceUserId'),
"contractType": 5,
"sourceContractId": Common.get_random('userid'),
"transactionId": self.r.get('jfqyl_6_periods_transactionId'),
"associationId": self.r.get('jfqyl_6_periods_projectId')
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
def test_102_query_apply_result(self):
"""进件结果查询"""
GetSqlData.change_project_audit_status(
project_id=self.r.get('jfqyl_6_periods_projectId'),
environment=self.env
)
data = excel_table_byname(self.file, 'query_apply_result')
param = json.loads(data[0]['param'])
param.update(
{
"sourceProjectId": self.r.get('jfqyl_6_periods_sourceProjectId'),
"projectId": self.r.get('jfqyl_6_periods_projectId')
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
self.assertEqual(rep['content']['auditStatus'], 2)
# @unittest.skip("-")
def test_103_sign_borrow(self):
"""上传借款协议"""
data = excel_table_byname(self.file, 'contract_sign')
param = Common.get_json_data('data', 'jfq_sign_borrow.json')
param.update(
{
"serviceSn": Common.get_random('serviceSn'),
"sourceUserId": self.r.get('jfqyl_6_periods_sourceUserId'),
"sourceContractId": Common.get_random('userid'),
"transactionId": self.r.get('jfqyl_6_periods_transactionId'),
"associationId": self.r.get('jfqyl_6_periods_projectId')
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.r.set("jfqyl_6_periods_contractId", rep['content']['contractId'])
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
@unittest.skip("-")
def test_105_image_upload(self):
"""上传图片"""
data = excel_table_byname(self.file, 'image_upload')
param = json.loads(data[0]['param'])
param.update({"associationId": self.r.get('jfqyl_6_periods_projectId')})
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
def test_106_contact_query(self):
"""合同结果查询:获取签章后的借款协议"""
data = excel_table_byname(self.file, 'contract_query')
param = json.loads(data[0]['param'])
param.update(
{
"associationId": self.r.get('jfqyl_6_periods_projectId'),
"serviceSn": Common.get_random("serviceSn"),
"requestTime": Common.get_time("-"),
"sourceUserId": self.r.get("jfqyl_6_periods_sourceUserId"),
"contractId": self.r.get("jfqyl_6_periods_contractId")
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
def test_107_calculate(self):
"""还款计划试算(未放款):正常还款"""
data = excel_table_byname(self.file, 'calculate')
param = json.loads(data[0]['param'])
param.update(
{
"sourceUserId": self.r.get("jfqyl_6_periods_sourceUserId"),
"transactionId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"sourceProjectId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"projectId": self.r.get("jfqyl_6_periods_projectId")
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
def test_108_loan_pfa(self):
"""放款申请"""
data = excel_table_byname(self.file, 'loan_pfa')
param = json.loads(data[0]['param'])
self.r.set("jfqyl_6_periods_loan_serviceSn", Common.get_random("serviceSn"))
param.update(
{
"sourceProjectId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"projectId": self.r.get("jfqyl_6_periods_projectId"),
"sourceUserId": self.r.get("jfqyl_6_periods_sourceUserId"),
"serviceSn": self.r.get("jfqyl_6_periods_loan_serviceSn"),
"id": self.r.get('jfqyl_6_periods_cardNum'),
"accountName": self.r.get("jfqyl_6_periods_custName"),
"amount": 33333.33
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
# 修改支付表中的品钛返回code
time.sleep(8)
GetSqlData.change_pay_status(
environment=self.env,
project_id=self.r.get('jfqyl_6_periods_projectId')
)
def test_109_loan_query(self):
"""放款结果查询"""
GetSqlData.loan_set(environment=self.env, project_id=self.r.get('jfqyl_6_periods_projectId'))
data = excel_table_byname(self.file, 'pfa_query')
param = json.loads(data[0]['param'])
param.update({"serviceSn": self.r.get("jfqyl_6_periods_loan_serviceSn")})
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
self.assertEqual(rep['content']['projectLoanStatus'], 3)
def test_110_query_repayment_plan(self):
"""国投云贷还款计划查询"""
data = excel_table_byname(self.file, 'query_repayment_plan')
param = json.loads(data[0]['param'])
param.update(
{
"transactionId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"projectId": self.r.get("jfqyl_6_periods_projectId")
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.r.set("jfqyl_6_periods_repayment_plan", json.dumps(rep['content']['repaymentPlanList']))
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
# @unittest.skipUnless(sys.argv[4] == "early_settlement", "-")
# @unittest.skip("跳过")
def test_111_calculate(self):
"""还款计划试算:提前结清"""
data = excel_table_byname(self.file, 'calculate')
param = json.loads(data[0]['param'])
param.update(
{
"sourceUserId": self.r.get("jfqyl_6_periods_sourceUserId"),
"transactionId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"sourceProjectId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"projectId": self.r.get("jfqyl_6_periods_projectId"),
"businessType": 2
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.r.set(
"jfqyl_6_periods_early_settlement_repayment_plan",
json.dumps(rep['content']['repaymentPlanList'])
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
# @unittest.skipUnless(sys.argv[4] == "early_settlement", "-")
# @unittest.skip("跳过")
def test_112_calculate(self):
"""还款计划试算:退货"""
data = excel_table_byname(self.file, 'calculate')
param = json.loads(data[0]['param'])
param.update(
{
"sourceUserId": self.r.get("jfqyl_6_periods_sourceUserId"),
"transactionId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"sourceProjectId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"projectId": self.r.get("jfqyl_6_periods_projectId"),
"businessType": 3
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.r.set(
"jfqyl_6_periods_return_repayment_plan",
json.dumps(rep['content']['repaymentPlanList'])
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
# @unittest.skipUnless(sys.argv[4] == "repayment_offline", "-")
@unittest.skip("跳过")
def test_113_offline_repay_repayment(self):
"""线下还款流水推送:正常还一期"""
data = excel_table_byname(self.file, 'offline_repay')
param = json.loads(data[0]['param'])
period = 1
plan_pay_date = GetSqlData.get_repayment_detail(
project_id=self.r.get("jfqyl_6_periods_projectId"),
environment=self.env,
period=period,
repayment_plan_type=1
)
repayment_plan_list = self.r.get("jfqyl_6_periods_repayment_plan")
success_amount = 0.00
repayment_detail_list = []
for i in json.loads(repayment_plan_list):
if i['period'] == period:
plan_detail = {
"sourceRepaymentDetailId": Common.get_random("transactionId"),
"payAmount": i['restAmount'],
"planCategory": i['repaymentPlanType']
}
success_amount = round(success_amount + float(plan_detail.get("payAmount")), 2)
repayment_detail_list.append(plan_detail)
param.update(
{
"projectId": self.r.get("jfqyl_6_periods_projectId"),
"transactionId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"sourceProjectId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"sourceRepaymentId": Common.get_random("sourceProjectId"),
"planPayDate": str(plan_pay_date['plan_pay_date']),
"successAmount": success_amount,
"payTime": Common.get_time("-"),
"period": period
}
)
param['repaymentDetailList'] = repayment_detail_list
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
# @unittest.skipUnless(sys.argv[4] == "early_settlement_offline", "-")
@unittest.skip("跳过")
def test_114_offline_nrepay_early_settlement(self):
"""线下还款流水推送:提前全部结清"""
data = excel_table_byname(self.file, 'offline_repay')
param = json.loads(data[0]['param'])
plan_pay_date = GetSqlData.get_repayment_detail(
project_id=self.r.get("jfqyl_6_periods_projectId"),
environment=self.env,
period=1,
repayment_plan_type=1
)
repayment_plan_list = json.loads(self.r.get("jfqyl_6_periods_early_settlement_repayment_plan"))
success_amount = 0.00
repayment_detail_list = []
for i in repayment_plan_list:
plan_detail = {
"sourceRepaymentDetailId": Common.get_random("transactionId"),
"payAmount": i['amount'],
"planCategory": i['repaymentPlanType']
}
success_amount = round(success_amount + plan_detail.get("payAmount"), 2)
repayment_detail_list.append(plan_detail)
param.update(
{
"projectId": self.r.get("jfqyl_6_periods_projectId"),
"transactionId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"sourceProjectId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"sourceRepaymentId": Common.get_random("sourceProjectId"),
"planPayDate": str(plan_pay_date['plan_pay_date']),
"successAmount": success_amount,
"repayType": 2,
"period": repayment_plan_list[0]['period'],
"payTime": Common.get_time("-")
}
)
param['repaymentDetailList'] = repayment_detail_list
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
@unittest.skip("-")
def test_115_debt_transfer(self):
"""上传债转函"""
data = excel_table_byname(self.file, 'contract_sign')
param = Common.get_json_data('data', 'kkd_debt_transfer.json')
param.update(
{
"serviceSn": Common.get_random('serviceSn'),
"sourceUserId": self.r.get('jfqyl_6_periods_sourceUserId'),
"sourceContractId": Common.get_random('userid'),
"transactionId": self.r.get('jfqyl_6_periods_transactionId'),
"associationId": self.r.get('jfqyl_6_periods_projectId')
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.r.set("jfqyl_6_periods_contractId", rep['content']['contractId'])
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
@unittest.skip("-")
def test_116_return(self):
"""退货"""
data = excel_table_byname(self.file, 'offline_repay')
param = json.loads(data[0]['param'])
plan_pay_date = GetSqlData.get_repayment_detail(
project_id=self.r.get("jfqyl_6_periods_projectId"),
environment=self.env,
period=1,
repayment_plan_type=1
)
repayment_plan_list = json.loads(self.r.get("jfqyl_6_periods_return_repayment_plan"))
success_amount = 0.00
repayment_detail_list = []
for i in repayment_plan_list:
plan_detail = {
"sourceRepaymentDetailId": Common.get_random("transactionId"),
"payAmount": i['amount'],
"planCategory": i['repaymentPlanType']
}
success_amount = round(success_amount + plan_detail.get("payAmount"), 2)
repayment_detail_list.append(plan_detail)
param.update(
{
"projectId": self.r.get("jfqyl_6_periods_projectId"),
"transactionId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"sourceProjectId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"sourceRepaymentId": Common.get_random("sourceProjectId"),
"planPayDate": str(plan_pay_date['plan_pay_date']),
"successAmount": success_amount,
"repayType": 3,
"period": repayment_plan_list[0]['period'],
"payTime": Common.get_time("-")
}
)
param['repaymentDetailList'] = repayment_detail_list
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
product="cloudloan",
environment=self.env
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
@unittest.skip("-")
def test_117_capital_flow(self):
"""资金流水推送"""
data = excel_table_byname(self.file, 'cash_push')
param = json.loads(data[0]['param'])
success_amount = GetSqlData.get_repayment_amount(
project_id=self.r.get("jfqyl_6_periods_projectId"),
environment=self.env,
period=1
)
param.update(
{
"serviceSn": Common.get_random("serviceSn"),
"projectId": self.r.get("jfqyl_6_periods_projectId"),
"sourceProjectId": self.r.get("jfqyl_6_periods_sourceProjectId"),
"repaymentPlanId": Common.get_random("sourceProjectId"),
"sucessAmount": success_amount,
"sourceRepaymentId": Common.get_random("sourceProjectId"),
"tradeTime": Common.get_time(),
"finishTime": Common.get_time()
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=param,
environment=self.env,
product="gateway"
)
response_data = json.loads(Common.dencrypt_response(rep.text))
self.assertEqual(response_data['resultCode'], int(data[0]['resultCode']))
@unittest.skip("-")
def test_118_project_cancel(self):
"""进件取消"""
data = excel_table_byname(self.file, 'project_cancel')
param = json.loads(data[0]['param'])
param.update(
{
"projectId": self.r.get("jfqyl_6_periods_projectId"),
"sourceProjectId": self.r.get("jfqyl_6_periods_sourceProjectId")
}
)
if len(data[0]['headers']) == 0:
headers = None
else:
headers = json.loads(data[0]['headers'])
rep = Common.response(
faceaddr=data[0]['url'],
headers=headers,
data=json.dumps(param, ensure_ascii=False),
environment=self.env,
product="cloudloan"
)
self.assertEqual(rep['resultCode'], int(data[0]['resultCode']))
if __name__ == '__main__':
unittest.main()
| [
"bxj3416162@163.com"
] | bxj3416162@163.com |
7f611c84c8e4bd5fbd87fdfe6e15165d7275e17f | f7bdda5ce6026e30f8d2258499d066cec0a9bf6a | /detect_object.py | 03a40e8e061ac0b96d172f6b950f5f922d728bb4 | [] | no_license | AbhishekBose/yolo_docker | cfb3e3fe3dda8092771f614bdd9ce3ea022435e1 | 1377a73b38a95cfdde37ddc215a6f90ecbd407b0 | refs/heads/master | 2022-06-25T21:14:51.702133 | 2019-12-15T15:30:19 | 2019-12-15T15:30:19 | 228,206,788 | 10 | 10 | null | 2022-06-01T20:54:18 | 2019-12-15T15:29:53 | Python | UTF-8 | Python | false | false | 5,336 | py | #%%
from ctypes import *
import random
import argparse
import os
import traceback
import cv2
import functools
import numpy as np
import time
import sys
import imutils
#%%
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
def sample(probs):
    """Draw a random index, weighting each index by its (normalised) probability."""
    total = sum(probs)
    weights = [p / total for p in probs]
    remaining = random.uniform(0, 1)
    for idx, weight in enumerate(weights):
        remaining -= weight
        if remaining <= 0:
            return idx
    # Floating-point round-off can leave a tiny positive remainder; fall back
    # to the last index, exactly like the cumulative walk would.
    return len(weights) - 1
def c_array(ctype, values):
    """Copy *values* into a freshly allocated ctypes array of element type *ctype*."""
    buf = (ctype * len(values))()
    buf[:] = values
    return buf
class BOX(Structure):
    # Mirrors darknet's C `box` struct: centre coordinates plus width/height.
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
class DETECTION(Structure):
    # Mirrors darknet's C `detection` struct: one candidate box with its
    # per-class probabilities and objectness score.
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]
class IMAGE(Structure):
    # Mirrors darknet's C `image` struct: CHW float buffer with dimensions.
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
class METADATA(Structure):
    # Mirrors darknet's C `metadata` struct: class count plus label names.
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]
# Load the darknet shared library and declare ctypes signatures for every
# C function used below. Hard-coded path: the Docker image installs darknet
# under /home/darknet.
libdarknet_path = os.path.join("/home/darknet/libdarknet.so")
lib = CDLL(libdarknet_path, RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
# Raw forward pass on a preprocessed float buffer.
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
# Collect candidate boxes after a forward pass.
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
# Memory management helpers for C-allocated structures.
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
# Load a network from .cfg + .weights files.
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
# Non-maximum suppression variants.
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
# Forward pass taking an IMAGE struct; used by classify() and netdetect().
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def classify(net, meta, im):
    """Run whole-image classification; return (label, score) pairs, best first."""
    scores = predict_image(net, im)
    labelled = [(meta.names[i], scores[i]) for i in range(meta.classes)]
    labelled.sort(key=lambda pair: -pair[1])
    return labelled
def array_to_image(arr):
    """Convert an HxWxC uint8 numpy image into darknet's IMAGE struct.

    Returns the IMAGE plus the backing float array; the caller must keep a
    reference to that array so Python does not free the memory the struct
    points to.
    """
    chw = arr.transpose(2, 0, 1)
    c, h, w = chw.shape[0:3]
    flat = np.ascontiguousarray(chw.flat, dtype=np.float32) / 255.0
    struct = IMAGE(w, h, c, flat.ctypes.data_as(POINTER(c_float)))
    return struct, flat
def netdetect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    # Run YOLO detection on a numpy image and return a list of
    # (label, probability, (x, y, w, h)) tuples sorted by descending
    # probability. Box coordinates are clamped to be non-negative.
    im, image = array_to_image(image)
    rgbgr_image(im)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    # Suppress overlapping boxes unless nms was explicitly disabled (0/None).
    if (nms): do_nms_obj(dets, num, meta.classes, nms);
    res = []
    for j in range(num):
        a = dets[j].prob[0:meta.classes]
        if any(a):
            # Indices of the classes with non-zero probability for this box.
            ai = np.array(a).nonzero()[0]
            for i in ai:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (max(b.x,0), max(b.y,0), max(b.w,0), max(b.h,0))))
    res = sorted(res, key=lambda x: -x[1])
    # NOTE(review): `image` is the float array returned by array_to_image, never
    # bytes, so this free_image call is dead code -- confirm intended lifetime.
    if isinstance(image,bytes): free_image(im)
    free_detections(dets, num)
    return res
#%%
def convert_yolo_normal(x, y, w, h, s1, s2):
    """Convert a YOLO centre/size box into integer (left, right, top, bottom).

    ``s1`` and ``s2`` are accepted for call-site compatibility but unused.
    """
    left = (2 * x - w) / 2
    right = w + left
    top = (2 * y - h) / 2
    bottom = h + top
    return (int(left), int(right), int(top), int(bottom))
#%%
if __name__ == "__main__":
    # Load model paths from the JSON config shipped next to this script.
    import json  # local import: the module header never imported json
    with open('config.json') as f:
        config = json.load(f)
    weights_file = config['weights_file']
    cfg_file = config['cfg_file']
    obj_data = config['obj_file']
    # Image to run detection on, given as the first CLI argument.
    image_name = sys.argv[1]
    img = cv2.imread(image_name)
    netdet = load_net(cfg_file, weights_file, 0)
    metadet = load_meta(obj_data)
    # Bug fix: the original called netdetect(netlp, metalp, veh, 0.7) with
    # undefined names and then iterated with the invalid statement
    # `for obj_res:` (a syntax error). Use the network/metadata/image that
    # were just loaded and iterate the detections directly.
    obj_res = netdetect(netdet, metadet, img, 0.7)
    print('All detected objects are:: ')
    for detection in obj_res:
        # Each detection is (label, probability, box); print the label.
        print(detection[0])
| [
"abose550@gmail.com"
] | abose550@gmail.com |
129e029f51e6c808b38cbff8b551f38366f41e0c | 0726e305f3a7b57e8837ddcd334148ec68e9d2de | /portfolio/settings.py | 79f45073c3745b1bc73328e6685fcf08e83d4536 | [
"MIT"
] | permissive | Brian23-eng/portfolio-1 | 873448172532c0dd82de496911ad509022189db1 | 70ec48288fadf803a166f70728adfb1a61916a6d | refs/heads/master | 2022-09-24T21:26:13.670066 | 2020-01-23T05:36:20 | 2020-01-23T05:36:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,673 | py | """
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two directories above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before deploying.
SECRET_KEY = 'y#k96$zi!2uc9@tj#bvr0smlxx1v)2dcff447#%=kwn)$4(*1i'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'portfolio.myportfolio',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

# WhiteNoise is listed first so it can serve static files efficiently.
MIDDLEWARE = [
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'portfolio.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'portfolio.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# NOTE(review): credentials are hard-coded; django_heroku.settings() below
# overrides this with DATABASE_URL when running on Heroku.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'portfolio',
        'USER': 'moringa',
        'PASSWORD': 'p@$$w0rd',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'

# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

# Compressed, cache-busting storage backend provided by WhiteNoise.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# configuring the location for media
MEDIA_URL = '/project_images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'project_images')

# Configure Django app for Heroku
django_heroku.settings(locals())
"bryomajor@gmail.com"
] | bryomajor@gmail.com |
de47b786715392df67b7ad9f5edf179be7ef70d7 | 038c3cd2b1b480c32bb90dc20acb45352e8b86ce | /book/book/spiders/suning.py | 90a6c140b45e187680d3fe76c2d399573ec6427c | [
"MIT"
] | permissive | huzing2524/spider | 48b7721390185717d82ace74cfeef8d818503eee | df80c89912ad466afab404d54b60b0a479c4901c | refs/heads/master | 2020-04-08T00:11:46.665349 | 2019-06-19T09:59:42 | 2019-06-19T09:59:42 | 158,841,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,184 | py | # -*- coding: utf-8 -*-
import re
from copy import deepcopy
import scrapy
class SuningSpider(scrapy.Spider):
    """Crawl book categories, listings, details and prices from book.suning.com."""
    name = 'suning'
    allowed_domains = ['suning.com']
    start_urls = ['https://book.suning.com/']

    def parse(self, response):
        """Walk the big / middle / small category menus on the home page."""
        # Big (top-level) categories
        div_list = response.xpath("//div[@class='menu-list']/div[@class='menu-item']")
        # Sub-menus, one per big category
        div_sub_list = response.xpath("//div[@class='menu-list']/div[@class='menu-sub']")
        for div in div_list:
            item = dict()
            # Name of the big category
            item["b_cate"] = div.xpath(".//h3/a/text()").extract_first()
            # Sub-menu holding all middle categories of the current big category
            current_sub_div = div_sub_list[div_list.index(div)]
            # Middle category entries
            p_list = current_sub_div.xpath(".//div[@class='submenu-left']/p[@class='submenu-item']")
            for p in p_list:
                # Name of the middle category (e.g. "fiction")
                item["m_cate"] = p.xpath("./a/text()").extract_first()
                # Small category entries
                li_list = p.xpath("./following-sibling::ul[1]/li")
                for li in li_list:
                    # Name and URL of the small category
                    item["s_cate"] = li.xpath("./a/text()").extract_first()
                    item["s_href"] = li.xpath("./a/@href").extract_first()
                    # Request the book list page
                    yield scrapy.Request(
                        item["s_href"],
                        callback=self.parse_book_list,
                        meta={"item": deepcopy(item)}
                    )
                    # Also request the AJAX-loaded second half of list page one
                    next_part_url_temp = "https://list.suning.com/emall/showProductList.do?ci={}&pg=03&cp=0&il=0&iy=0&adNumber=0&n=1&ch=4&sesab=ABBAAA&id=IDENTIFYING&cc=010&paging=1&sub=0"
                    ci = item["s_href"].split("-")[1]
                    next_part_url = next_part_url_temp.format(ci)
                    yield scrapy.Request(
                        next_part_url,
                        callback=self.parse_book_list,
                        meta={"item": deepcopy(item)}
                    )

    def parse_book_list(self, response):
        """Parse one book list page (both the normal and the AJAX half)."""
        item = response.meta["item"]
        # Book entries on the list page
        li_list = response.xpath("//li[contains(@class, 'product book')]")
        for li in li_list:
            # Book title
            item["book_name"] = li.xpath(".//p[@class='sell-point']/a/text()").extract_first().strip()
            # Book URL (protocol-relative / incomplete)
            item["book_href"] = li.xpath(".//p[@class='sell-point']/a/@href").extract_first()
            # Shop name
            item["book_store_name"] = li.xpath(".//p[contains(@class, 'seller oh no-more')]/a/text()").extract_first()
            # Request the detail page
            yield response.follow(
                item["book_href"],
                callback=self.parse_book_detail,
                meta={"item": deepcopy(item)}
            )

    def parse_book_detail(self, response):
        """Parse a book detail page and build the price-service URL."""
        item = response.meta["item"]
        price_temp_url = "https://pas.suning.com/nspcsale_0_000000000{}_000000000{}_{}_10_010_0100101_226503_1000000_9017_10106____{}_{}.html"
        # Product id and shop id come from the detail-page URL itself.
        p1 = response.url.split("/")[-1].split(".")[0]
        p3 = response.url.split("/")[-2]
        # Category ids and weight are embedded in inline javascript.
        p4 = re.findall('"catenIds":"(.*?)",', response.body.decode())
        if len(p4) > 0:
            p4 = p4[0]
        p5 = re.findall('"weight":"(.*?)",', response.body.decode())[0]
        price_url = price_temp_url.format(p1, p1, p3, p4, p5)
        yield scrapy.Request(
            price_url,
            callback=self.parse_book_price,
            meta={"item": item}
        )

    def parse_book_price(self, response):
        # Extract the net price from the price-service response and emit the item.
        item = response.meta["item"]
        item["book_price"] = re.findall('"netPrice":"(.*?)"', response.body.decode())[0]
        yield item
| [
"hu200901959"
] | hu200901959 |
953b813584667bf1bd1e285fa7bdb8d4caa9ffa5 | 9bb18febdfc911a88756afd9490526f8e7929bfc | /spacy/tests/regression/test_issue3951.py | f9912c494ec18b830006793bd039c8ffa525a4cc | [
"MIT"
] | permissive | ashaffer/spaCy | 3c28c7c8422fd4072bd3d472e796994d3269cf9f | ec0beccaf13eef263feec27e820136ad1e270bd4 | refs/heads/master | 2020-07-05T23:42:00.467234 | 2019-08-16T16:39:25 | 2019-08-16T16:39:25 | 202,819,156 | 1 | 0 | MIT | 2019-08-17T01:06:11 | 2019-08-17T01:06:10 | null | UTF-8 | Python | false | false | 585 | py | # coding: utf8
from __future__ import unicode_literals
import pytest
from spacy.matcher import Matcher
from spacy.tokens import Doc
@pytest.mark.xfail
def test_issue3951(en_vocab):
    """Test that combinations of optional rules are matched correctly."""
    matcher = Matcher(en_vocab)
    token_pattern = [
        {"LOWER": "hello"},
        {"LOWER": "this", "OP": "?"},
        {"OP": "?"},
        {"LOWER": "world"},
    ]
    matcher.add("TEST", None, token_pattern)
    doc = Doc(en_vocab, words="Hello my new world".split())
    assert len(matcher(doc)) == 0
| [
"ines@ines.io"
] | ines@ines.io |
6e2a8974f9e92b41c65b5175d2747cf1b240a852 | b0c5388f19e66a76a3e925e3317af9d35fc5f499 | /album/migrations/0004_auto_20210129_2103.py | d30f8ad1cb924718a468c7194aa4170cbc500e16 | [] | no_license | agDevelop/album | 857529a8c3f0b950d072423d1963806eafdc3f68 | f61032a7ae8ccae54179126653c1cd5b7811af30 | refs/heads/master | 2023-02-24T04:06:13.320281 | 2021-01-31T17:35:18 | 2021-01-31T17:35:18 | 334,492,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Generated by Django 3.1.5 on 2021-01-29 18:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines Album.description as a
    # nullable TextField capped at 300 characters.

    dependencies = [
        ('album', '0003_auto_20210129_2101'),
    ]

    operations = [
        migrations.AlterField(
            model_name='album',
            name='description',
            field=models.TextField(max_length=300, null=True),
        ),
    ]
| [
"andrewnikolaevich.2014@gmail.com"
] | andrewnikolaevich.2014@gmail.com |
2b0cf68a644b5d9a8f2428da10b012b86f26607c | f6f51c9c402021d019900e4a2322f4dbbb998d3d | /project/BERI/sw/fpu_benchmarks/.svn/text-base/run_benchmark.py.svn-base | 13366921a039b45fe504dfc72f9019aac5e68df1 | [] | no_license | alpearce/mphil-acd | 011360c399cf0df5bb5bc9c9512ab690ab4691f8 | 93a48be3c44b69f191acc4947cdaf6f622b306f0 | refs/heads/master | 2016-09-05T10:21:19.058750 | 2015-01-13T14:40:24 | 2015-01-13T14:40:24 | 29,138,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | #!/usr/bin/env python
import sys
import array
import subprocess
def extract_name_and_data(line):
    """Split a 'START<name> <payload>END' record into (name, payload)."""
    name = line.split(' ', 1)[0][len('START'):]
    prefix = 'START' + name + ' '
    payload = line.strip()[len(prefix):-len('END')]
    return name, payload
def get_data(text_data):
    """Decode a hex string (two characters per byte) into a list of byte values.

    Bug fix: the original sliced text_data[i:i+1], reading only the first hex
    digit of each pair, so every decoded value was just the high nibble (0-15)
    even though the loop advanced two characters at a time.
    """
    result = []
    for i in range(0, len(text_data), 2):
        byte_hex = text_data[i:i+2]
        result.append(int(byte_hex, 16))
    return result
def main():
    # Scan stdin for 'START<name> ... END' records emitted by the benchmark
    # and dump each payload to a file named '<name.lower()>_data'; every
    # other line is echoed through unchanged.
    for line in [l.rstrip() for l in sys.stdin.readlines()]:
        if line.startswith('START'):
            image_name, image_data = extract_name_and_data(line)
            with open(image_name.lower() + '_data', 'wb') as mandelbrot_file:
                mandelbrot_file.write(image_data)
        else:
            print line  # Python 2 print statement: this script targets Python 2

if __name__ == '__main__':
    main()
| [
"ap819@cam.ac.uk"
] | ap819@cam.ac.uk | |
3de876ed7afa2ae78af950f99460a192208c868f | ef74dfafd7a1846f868c7e5a7d931b80d0469c87 | /CP_Prac1_T7.py | db3525490642e7d0a3e00095aec6b50fbc2476e5 | [] | no_license | FishyPower/CP_2018_Prac_1 | 8ba559f2226ed7a372d95a2fa4609371ebb5b56a | 78b6c001c8ef3464ded56fc2d0b5a089aa856af4 | refs/heads/master | 2021-05-11T14:57:57.758463 | 2018-01-16T17:04:32 | 2018-01-16T17:04:32 | 117,711,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | #Computing Practical 1 - Task 7
# Collect payroll inputs.
name = input("Enter name: ")
hours = float(input("Enter number of hours worked weekly: "))
hourly_pay = float(input("Enter hourly pay rate: "))
cpf_rate = float(input("Enter CPF contribution rate(%): "))
# Bug fix: gross pay is simply hours * rate. The original computed
# gross_pay = hours * hourly_pay * cpf_rate, inflating gross pay by the CPF
# percentage, and then took CPF as gross_pay / 100.
gross_pay = hours * hourly_pay
# CPF contribution is the given percentage of gross pay.
cpf = gross_pay * cpf_rate / 100
net_pay = gross_pay - cpf
print("Payroll statement for {0}".format(name))
print("Number of hours worked in week: {0}".format(hours))
print("Hourly pay rate: $ {0}".format(hourly_pay))
print("Gross pay = $ {0}".format(gross_pay))
print()
print("Net pay = $ {0}".format(net_pay))
"chiang.yuhsuan@dhs.sg"
] | chiang.yuhsuan@dhs.sg |
adf2b3d60aa741b2c39725e4ed34e3c9129c74ae | 3b12495ad076a769b8d722467a4000a5b8b8b537 | /3d-supervoxel/test.py | 0040f3af686878e7f1ef7d709d2bd3fe2fced72a | [] | no_license | yueyang130/superpixel-segment-SLIC | 8c9b2f169ee36711941d0f8c3d9fa834418c3ccb | 118eff0d42a0a3b6e012a5eb0af8c5a6cb9da89f | refs/heads/master | 2022-12-01T23:52:59.558284 | 2020-08-22T16:04:14 | 2020-08-22T16:04:14 | 289,519,878 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 17 15:22:59 2019
@author: Administrator
"""
from os import path
from skimage import io, color
from tqdm import trange,tqdm
import numpy as np
import nibabel as nib
# Load the brain MRI volume (Windows-style path, relative to the CWD).
filename = path.abspath('.') + '\\' + 'mprage_3T_bet_dr.nii'
im = nib.load(filename)
data = im.get_fdata()
# Normalise intensities to 0-255 greyscale (translated from: 归一化).
gray = data/data.max()*255
rgb = color.grey2rgb(gray)
rgb = rgb/rgb.max()
lab = color.rgb2lab(rgb)
# The triple-quoted block below is commented-out debug code that printed
# voxels whose a/b LAB channels exceeded 1.
'''
for i in range(im.shape[0]):
    for j in range(im.shape[1]):
        for k in range(im.shape[2]):
            if lab[i,j,k,1]>1:
                print(i,j,k,1,lab[i,j,k,1],sep=' ')
            if lab[i,j,k,2]>1:
                print(i,j,k,2,lab[i,j,k,2],sep=' ')
'''
"17377416@buaa.edu.cn"
] | 17377416@buaa.edu.cn |
de5ec83749603d84453db9285e68d7d64b0f4369 | 8edd63a42469bf09fcad1c1070995ceda6e49646 | /env/lib/python2.7/site-packages/observations/r/phosphate.py | 6977f1f13f9f2bdb88341269c503dcf9123103b4 | [] | no_license | silky/bell-ppls | fa0b5418f40dab59de48b7220ff30caba5945b56 | 369e7602c810b694a70ac1e875017480c8910ac8 | refs/heads/master | 2020-04-06T08:40:28.588492 | 2018-11-01T06:51:33 | 2018-11-01T06:51:33 | 157,312,221 | 1 | 0 | null | 2018-11-13T03:04:18 | 2018-11-13T03:04:18 | null | UTF-8 | Python | false | false | 1,835 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def phosphate(path):
    """Phosphate Level Data: plasma inorganic phosphate levels from 33 subjects.

    The frame has 33 observations of 9 variables: `group` (a factor with
    levels `control` and `obese`) and the phosphate level at baseline (`t0`)
    and after 0.5, 1, 1.5, 2, 3, 4 and 5 hours (`t0.5` ... `t5`).

    Source: C. S. Davis (2002), *Statistical Methods for the Analysis of
    Repeated Measurements*, Springer, New York.

    Args:
      path: str. Directory which either stores the file or where the file
        will be downloaded and extracted. Filename is `phosphate.csv`.

    Returns:
      Tuple of np.ndarray `x_train` with 33 rows and 9 columns and
      dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd
    path = os.path.expanduser(path)
    filename = 'phosphate.csv'
    csv_path = os.path.join(path, filename)
    # Fetch the CSV on first use only.
    if not os.path.exists(csv_path):
        url = 'http://dustintran.com/data/r/HSAUR/phosphate.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='phosphate.csv',
                                   resume=False)
    frame = pd.read_csv(csv_path, index_col=0, parse_dates=True)
    return frame.values, {'columns': frame.columns}
| [
"akobeid.1@gmail.com"
] | akobeid.1@gmail.com |
c733d23c52b223a95360a77acfa2c1924f9cc877 | 9433ce01c6e2906c694b6f0956a4640e1872d4d2 | /src/main/python/wdbd/codepool/numpy/np_ployfit.py | 7f5cd81f4002e97e948a3b8be95f2be36410229a | [] | no_license | shwdbd/python_codepool | fcd7950fc1339994186461ae18c34cee238938ee | 92a4fb61d060f9a545499b6b7f99a4dc211d5009 | refs/heads/master | 2023-02-20T19:49:23.677824 | 2022-06-15T08:53:51 | 2022-06-15T08:53:51 | 209,431,254 | 0 | 1 | null | 2023-02-15T21:58:53 | 2019-09-19T00:56:03 | Python | UTF-8 | Python | false | false | 344 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   np_ployfit.py
@Time    :   2019/11/06 14:24:55
@Author  :   Jeffrey Wang
@Version :   1.0
@Contact :   shwangjj@163.com
@Desc    :   Polynomial fitting helpers (translated from: 拟合函数)
             1. fit_1z  first-order (linear) fit
             2. fit_2z  second-order (quadratic) fit
             ? TODO: how to evaluate the goodness of fit
'''
# here put the import lib
| [
"shwangjj@163.com"
] | shwangjj@163.com |
4f933506f4af1143b9acc28db9a09d38ec4467de | 4eab0329e5bf8b91e3305eaf9202de107cfe889b | /notebooks/data8_notebooks/lab04/tests/q2_3.py | bd02b5e36213b04e7d67c069acfcddca70a09933 | [
"MIT",
"BSD-3-Clause"
] | permissive | y1ngyang/jupyterhub_AWS_deployment | e638f489ad1b70962204f4355eb2a7c4fc97dc7d | 8172d63d94894774ec29f427ab5eeec637c923f4 | refs/heads/master | 2023-04-15T16:00:11.948307 | 2018-05-18T20:16:37 | 2018-05-18T20:16:37 | 134,009,971 | 0 | 0 | BSD-3-Clause | 2023-04-04T00:27:55 | 2018-05-18T22:33:34 | Jupyter Notebook | UTF-8 | Python | false | false | 375 | py | test = {
'name': '',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> disemvowel("Datascience rules!") == "Dtscnc rls!"
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"jamesfolberth@gmail.com"
] | jamesfolberth@gmail.com |
4a99cb7f8032bf0032b0b8f17537a649423c093f | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/Hadronizer/Hadronizer_SMS_Scans_2jets_Qcut115_TuneZ2star_8TeV_madgraph_tauola_cff.py | 7c2bade42601f1e62d4add7bf522b8596ebcd219 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,753 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
from GeneratorInterface.ExternalDecays.TauolaSettings_cff import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(True),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(1),
comEnergy = cms.double(8000.0),
ExternalDecays = cms.PSet(
Tauola = cms.untracked.PSet(
TauolaPolar,
TauolaDefaultInputCards
),
parameterSets = cms.vstring('Tauola')
),
UseExternalGenerators = cms.untracked.bool(True),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring('MSEL=0 ! User defined processes',
'PMAS(5,1)=4.8 ! b quark mass',
'PMAS(6,1)=172.5 ! t quark mass',
'MDCY(C1000022,1)=0 ! stable neutralino',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_nqmatch = cms.int32(5),
MEMAIN_etaclmax = cms.double(5),
MEMAIN_qcut = cms.double(115),
MEMAIN_minjets = cms.int32(0),
MEMAIN_maxjets = cms.int32(2),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(""),
outTree_flag = cms.int32(0)
)
)
| [
"sha1-91110b754141b4ea1416a095611be3c9b61e195f@cern.ch"
] | sha1-91110b754141b4ea1416a095611be3c9b61e195f@cern.ch |
46d07128a604bf41698eb408598644429535db5b | 77e8b7bc211624687eb61fdb071020642b2c0080 | /machinelearning/f16.py | 843de75f00d51c7741c5309feaa61e2e095f6c40 | [] | no_license | wherby/hackerrank | fab46ea208042ce8055c2755545896bf69f88895 | 84345f56690ea6b1d5db181b12d2a2669007456c | refs/heads/master | 2020-09-26T23:53:06.841052 | 2019-09-15T12:11:43 | 2019-09-15T12:11:43 | 67,225,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
import warnings
from matplotlib import style
from collections import Counter
style.use('fivethirtyeight')
dataset ={'k': [[1,2],[2,3],[3,1]], 'r': [[6,5], [7,7,],[8,6]]}
new_feature = [5,7]
# for i in dataset:
# for ii in dataset[i]:
# plt.scatter(ii[0],ii[1],s =100, color =i)
# [[plt.scatter(ii[0],ii[1],s =100, color =i) for ii in dataset[i]] for i in dataset]
# plt.scatter(new_feature[0], new_feature[1])
# plt.show()
def k_nearest_neighbors(data, predict, k=3):
    """Classify *predict* by majority vote among its k nearest labelled points.

    *data* maps a class label to a list of feature vectors.
    """
    if len(data) >= k:
        warnings.warn('k is set to value less than totoal voting groups')
    distances = []
    for label, members in data.items():
        for point in members:
            gap = np.linalg.norm(np.array(point) - np.array(predict))
            distances.append([gap, label])
    distances.sort()
    nearest_labels = [pair[1] for pair in distances[:k]]
    return Counter(nearest_labels).most_common(1)[0][0]
# Classify the sample point, then plot the data coloured by class and the
# sample coloured by its predicted class.
result = k_nearest_neighbors(dataset, new_feature , k =3)
print result  # Python 2 print statement: this script targets Python 2
[[plt.scatter(ii[0],ii[1],s =100, color =i) for ii in dataset[i]] for i in dataset]
plt.scatter(new_feature[0], new_feature[1],color = result)
plt.show()
"187225577@qq.com"
] | 187225577@qq.com |
69aba4f5261198e29730677cffbbb602b6d534ea | 8cd7efbd3194c8ea6d353178ab7821f080248c8d | /wallee/models/payment_app_refund_configuration_create.py | 6b6ac1c7466b6cecd9855fb5b12c43519e298d39 | [
"Apache-2.0"
] | permissive | wallee-payment/python-sdk | 11bd1bd824cab180f072c39704c431155fd5f73e | 68c4ae3a494865e988b7df03199d87610318fd51 | refs/heads/master | 2023-08-02T15:57:06.298222 | 2023-07-20T14:41:53 | 2023-07-20T14:41:53 | 199,405,576 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,202 | py | # coding: utf-8
import pprint
import six
from enum import Enum
class PaymentAppRefundConfigurationCreate:
    """Swagger/codegen model describing how refunds are processed for a payment-app connector."""

    # attribute name -> swagger type; drives to_dict() serialisation
    swagger_types = {
        'multiple_refunds_supported': 'bool',
        'refund_endpoint': 'str',
        'refund_timeout_in_minutes': 'int',
    }

    # python attribute name -> JSON wire name
    attribute_map = {
        'multiple_refunds_supported': 'multipleRefundsSupported','refund_endpoint': 'refundEndpoint','refund_timeout_in_minutes': 'refundTimeoutInMinutes',
    }

    # Backing fields for the properties below.
    _multiple_refunds_supported = None
    _refund_endpoint = None
    _refund_timeout_in_minutes = None

    def __init__(self, **kwargs):
        """Accept any of the mapped attributes as optional keyword arguments."""
        self.discriminator = None

        self.multiple_refunds_supported = kwargs.get('multiple_refunds_supported', None)
        self.refund_endpoint = kwargs.get('refund_endpoint', None)
        self.refund_timeout_in_minutes = kwargs.get('refund_timeout_in_minutes', None)

    @property
    def multiple_refunds_supported(self):
        """Gets the multiple_refunds_supported of this PaymentAppRefundConfigurationCreate.

        This flag indicates whether the connector supports multiple refunds for a single transaction or not.

        :return: The multiple_refunds_supported of this PaymentAppRefundConfigurationCreate.
        :rtype: bool
        """
        return self._multiple_refunds_supported

    @multiple_refunds_supported.setter
    def multiple_refunds_supported(self, multiple_refunds_supported):
        """Sets the multiple_refunds_supported of this PaymentAppRefundConfigurationCreate.

        This flag indicates whether the connector supports multiple refunds for a single transaction or not.

        :param multiple_refunds_supported: The multiple_refunds_supported of this PaymentAppRefundConfigurationCreate.
        :type: bool
        """
        self._multiple_refunds_supported = multiple_refunds_supported

    @property
    def refund_endpoint(self):
        """Gets the refund_endpoint of this PaymentAppRefundConfigurationCreate.

        The refund endpoint is invoked to request the payment service provider to execute a refund.

        :return: The refund_endpoint of this PaymentAppRefundConfigurationCreate.
        :rtype: str
        """
        return self._refund_endpoint

    @refund_endpoint.setter
    def refund_endpoint(self, refund_endpoint):
        """Sets the refund_endpoint of this PaymentAppRefundConfigurationCreate.

        The refund endpoint is invoked to request the payment service provider to execute a refund.

        :param refund_endpoint: The refund_endpoint of this PaymentAppRefundConfigurationCreate.
        :type: str
        """
        self._refund_endpoint = refund_endpoint

    @property
    def refund_timeout_in_minutes(self):
        """Gets the refund_timeout_in_minutes of this PaymentAppRefundConfigurationCreate.

        When the refund is triggered we expect a feedback about the state of it. This timeout defines after how long we consider the refund as failed without receiving a final state update.

        :return: The refund_timeout_in_minutes of this PaymentAppRefundConfigurationCreate.
        :rtype: int
        """
        return self._refund_timeout_in_minutes

    @refund_timeout_in_minutes.setter
    def refund_timeout_in_minutes(self, refund_timeout_in_minutes):
        """Sets the refund_timeout_in_minutes of this PaymentAppRefundConfigurationCreate.

        When the refund is triggered we expect a feedback about the state of it. This timeout defines after how long we consider the refund as failed without receiving a final state update.

        :param refund_timeout_in_minutes: The refund_timeout_in_minutes of this PaymentAppRefundConfigurationCreate.
        :type: int
        """
        self._refund_timeout_in_minutes = refund_timeout_in_minutes

    def to_dict(self):
        # Recursively serialise the model (including nested models, lists and
        # dicts of models) into plain python types.
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            else:
                result[attr] = value
        if issubclass(PaymentAppRefundConfigurationCreate, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        # Pretty-printed form used by __repr__.
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

    def __eq__(self, other):
        # Models compare equal when all (private) attributes match.
        if not isinstance(other, PaymentAppRefundConfigurationCreate):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| [
"sebastian.bossert@wallee.com"
] | sebastian.bossert@wallee.com |
a5f32522b81ecf21b374bf7698b1b28df3ae94e6 | 58323430fd9d90bcb19dc87f7c89d1980d312796 | /Project 1/src/Job2/job2_mapper1.py | f2b454f53242799e5c03ea0b33b130e84fa9e8ca | [] | no_license | AndreaGiorgi/BigData_Projects | 108e56b4e6c1229c4b3314111e10480c8e8b26bc | 74968f74950db4d254676a19603b5ae15c29373a | refs/heads/main | 2023-08-18T20:08:59.460261 | 2021-09-27T19:28:06 | 2021-09-27T19:28:06 | 378,914,380 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/env python3
"""mapper.py"""
import sys
from datetime import datetime
# Hadoop-streaming mapper: read CSV stock records from stdin and emit a
# tab-separated line for each complete record dated 2009-2018.
for record in sys.stdin:
    line = record.strip()
    elements = line.split(",")
    # Expected columns: ticker,open,close,adj_close,low,high,volume,date
    if (len(elements) == 8):
        try:
            ticker, open_value, close_value, _, low, high, volume, date = elements
            # NOTE(review): `is not None` binds only to `date`; the other names
            # are tested for truthiness (non-empty) -- confirm that is intended.
            if ticker and open_value and close_value and low and high and volume and date is not None:
                date1 = datetime.strptime(date, "%Y-%m-%d")
                # Keep only trading days from 2009 through 2018.
                if date1.year > 2008 and date1.year < 2019:
                    print('%s\t%f\t%f\t%f\t%f\t%i\t%s' %(str(ticker), float(open_value), float(close_value), float(low), float(high), int(volume), str(date)))
        except ValueError:
            # Malformed numbers or dates: skip the record.
            continue
"andrea_giorgi@outlook.it"
] | andrea_giorgi@outlook.it |
290b5944fc385a0618b69f147d5ad0c40b99e7ad | e3a7fba0eb4150fd4c81b6382e789da95ede50dd | /cparse/util.py | c177df4292a70e98a85466ef2ecd7e68d9c169b9 | [
"MIT"
] | permissive | luciancooper/cparse | c3cfab002203d872f413bbca62d6d8dbbfde278d | fd6c5733c821e38f0be46ed930d763107ad8deea | refs/heads/master | 2020-04-17T06:56:32.173104 | 2019-03-25T01:09:47 | 2019-03-25T01:09:47 | 166,346,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,021 | py | import pydecorator
from datetime import datetime
import urllib.request
from contextlib import closing
import re
import sys
import os
import inspect
def getkey(d, key, default=None):
    """Return d[key] if present, otherwise *default*.

    Equivalent to ``dict.get`` — replaces the manual ``in``-then-index
    pattern with the idiomatic single-lookup call.
    """
    return d.get(key, default)
@pydecorator.list
def extract(index, collection):
    """Yield collection[i] for each i in *index*.

    NOTE(review): the ``pydecorator.list`` decorator presumably materializes
    the generator into a list — confirm against the pydecorator package.
    """
    yield from (collection[i] for i in index)
def is_iterable(a):
    """Return True if *a* is iterable, treating strings as non-iterable.

    Uses ``isinstance`` instead of ``type(a) == str`` so that ``str``
    subclasses are also excluded (the original let them through).
    """
    if isinstance(a, str):
        return False
    try:
        iter(a)
    except TypeError:
        return False
    return True
# ============================================ Reduce ============================================ #
def iter_reduce(iterable, init=None):
    """Yield consecutive overlapping pairs from *iterable*.

    When *init* is given, the first pair is ``(init, first_element)``.
    An empty iterable (with no init consumed) yields nothing.
    """
    it = iter(iterable)
    if init is None:
        try:
            prev = next(it)
        except StopIteration:
            return
    else:
        prev = init
    for cur in it:
        yield prev, cur
        prev = cur
def reduce(fn, iterable, init=None):
    """Left-fold *iterable* with binary *fn*, optionally seeded by *init*.

    Returns None for an empty iterable with no seed (unlike
    ``functools.reduce``, which raises).
    """
    it = iter(iterable)
    if init is None:
        try:
            acc = next(it)
        except StopIteration:
            return None
    else:
        acc = init
    for item in it:
        acc = fn(acc, item)
    return acc
# ============================================ Sort ============================================ #
def mergesort(vector, cmp, unique=False):
    """Sort *vector* with comparator *cmp* (negative/zero/positive protocol).

    When *unique* is True, elements comparing equal are collapsed to one.
    Generators are materialized into a list before sorting.
    """
    if inspect.isgenerator(vector):
        vector = list(vector)

    def merge(left, right):
        # Standard two-way merge; on ties keep the left element and,
        # unless deduplicating, the right one as well.
        out = []
        i = j = 0
        n, m = len(left), len(right)
        while i < n and j < m:
            order = cmp(left[i], right[j])
            if order < 0:
                out.append(left[i])
                i += 1
            elif order > 0:
                out.append(right[j])
                j += 1
            else:
                out.append(left[i])
                if not unique:
                    out.append(right[j])
                i += 1
                j += 1
        out.extend(left[i:])
        out.extend(right[j:])
        return out

    def sort(seq):
        if len(seq) <= 1:
            return seq
        mid = len(seq) // 2
        return merge(sort(seq[:mid]), sort(seq[mid:]))

    return sort(vector)
# ============================================ time ============================================ #
def timestamp(ts):
    """Format POSIX timestamp *ts* as a 'YYYY-MM-DD HH:MM:SS' UTC string.

    Uses a timezone-aware conversion: ``datetime.utcfromtimestamp`` is
    deprecated (removed semantics in Python 3.12+) in favour of
    ``fromtimestamp(ts, tz=timezone.utc)``. Output is identical.
    """
    # Local import keeps the module-level `from datetime import datetime` as-is.
    from datetime import timezone as _tz
    return datetime.fromtimestamp(ts, tz=_tz.utc).strftime('%Y-%m-%d %H:%M:%S')
# TODO - timezones
# ============================================ urls ============================================ #
# Compiled once at import time instead of on every call.
_URL_RE = re.compile(r'https?:\/\/')

def is_url(path):
    """Return a truthy regex match when *path* starts with http:// or https://.

    Callers rely only on truthiness, so returning the match object (or None)
    preserves the original interface.
    """
    return _URL_RE.match(path)
def split_url(url):
    """Split *url* into a ``(domain, segment, ...)`` tuple.

    The first element keeps its http/https scheme; remaining elements are
    the '/'-separated path pieces.
    """
    scheme = re.match(r'https?:\/\/', url)
    remainder = url[scheme.end(0):]
    host, *segments = remainder.split('/')
    return (scheme.group(0) + host,) + tuple(segments)
# ============================================ files ============================================ #
def read_file(file):
    """Return the contents of a local file or an http(s) URL as text.

    URLs are downloaded and decoded as UTF-8; local files are read with the
    platform default encoding. (The original had an unreachable bare
    ``return`` after the URL branch; it has been removed.)
    """
    if is_url(file):
        # Download File
        with closing(urllib.request.urlopen(file)) as response:
            return response.read().decode('utf-8')
    with open(file, 'r') as f:
        return f.read()
# ============================================ strings ============================================ #
def str_col(items, align='>'):
    """Stringify *items* padded to a common width.

    *align*: '>' right, '<' left, '^' center (str.format alignment chars).
    Returns [] for empty input — the original raised ValueError from
    ``max()`` on an empty sequence.
    """
    strs = [str(i) for i in items]
    if not strs:
        return []
    width = max(len(s) for s in strs)
    fmt = '{:%s%i}' % (align, width)
    return [fmt.format(s) for s in strs]
def str_table(data, header=None):
    """Render column-oriented *data* as an ASCII table.

    *data* is a list of columns; *header* (optional) supplies one heading
    per column. Cells are right-justified and every row is followed by a
    divider line.
    """
    if header is not None:
        data = [[h] + col for h, col in zip(header, data)]
    columns = [[str(cell) for cell in col] for col in data]
    widths = [max(len(cell) for cell in col) for col in columns]
    columns = [[cell.rjust(w, ' ') for cell in col] for w, col in zip(widths, columns)]
    divider = '+-%s-+' % '-+-'.join('-' * w for w in widths)
    lines = [divider]
    for row in zip(*columns):
        lines.append('| %s |' % ' | '.join(row))
        lines.append(divider)
    return '\n'.join(lines)
# ============================================ cli ============================================ #
# :---------:------:------:------------:----------:
# | Color | Text | BG | BrightText | BrightBG |
# :---------:------:------:------------:----------:
# | Black | 30 | 40 | 30;1 | 40;1 |
# | Red | 31 | 41 | 31;1 | 41;1 |
# | Green | 32 | 42 | 32;1 | 42;1 |
# | Yellow | 33 | 43 | 33;1 | 43;1 |
# | Blue | 34 | 44 | 34;1 | 44;1 |
# | Magenta | 35 | 45 | 35;1 | 45;1 |
# | Cyan | 36 | 46 | 36;1 | 46;1 |
# | White | 37 | 47 | 37;1 | 47;1 |
# :---------:------:------:------------:----------:
# cli color to apply to specified code files
# Mapping from source-file extension to the ANSI 256-color escape code
# (the '38;5;N' foreground form) used when colorizing file names.
ftype_cli = {
    'js':'38;5;11',
    'html':'38;5;208',
    'css':'38;5;26',
    'py':'38;5;226',
    'rb':'38;5;160',
    'json':'38;5;28',
    'xml':'38;5;28',
    'php':'38;5;21',
    'r':'38;5;21',
    'ipynb':'38;5;172',
    'c':'38;5;32',
    'cc':'38;5;32',
    'cpp':'38;5;32',
    'cs':'38;5;32',
    'cxx':'38;5;32',
    'java':'38;5;215'
}
def cli_color(text, *colors):
    """Wrap *text* in one ANSI escape sequence per code in *colors*, then reset."""
    prefix = ''.join('\x1b[%sm' % code for code in colors)
    return '%s%s\x1b[0m' % (prefix, text)
def cli_warning(message):
    """Print *message* to stderr, colored red, prefixed with 'Warning: '."""
    print('\x1b[31mWarning: %s\x1b[0m' % message, file=sys.stderr)
| [
"cooperlucian@gmail.com"
] | cooperlucian@gmail.com |
7fe5c3ec26d8899afa3d50a451db9795d5a2b81e | c34312ca0d4a1219ac5bdb56f89dcb99d43dc97a | /268MissingNumber.py | d7ea3dd0de987d73c310146280460e97747eb64a | [] | no_license | chao813/Leetcode-Practice | ed1ef4f029e57efe1f6bc665d5df36a1a046f706 | d4d923ece10add474ba6cfd4a8080c2a85c9fc1c | refs/heads/master | 2021-01-01T15:22:28.041299 | 2018-12-27T06:59:46 | 2018-12-27T06:59:46 | 97,605,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """Given an array containing n distinct numbers taken from 0, 1, 2, ..., n, find the one that is missing from the array.
Example 1:
Input: [3,0,1]
Output: 2
Example 2:
Input: [9,6,4,2,3,5,7,0,1]
Output: 8"""
def missingNumber(self, nums):
    """
    :type nums: List[int]
    :rtype: int

    nums is a permutation of 0..n with one value missing, so the missing
    value is the expected sum n*(n+1)/2 minus the actual sum.
    """
    n = len(nums)
    # Floor division keeps everything in exact integer arithmetic; the
    # original's int((m*(m+1))/2) round-trips through float and can lose
    # precision for very large n.
    return n * (n + 1) // 2 - sum(nums)
"chao9@purdue.edu"
] | chao9@purdue.edu |
1cb9d669a9f6d421eb4de425cd324a56d283896c | 23e7b0302114743b4c74587ef069b707c173c635 | /dsc_datatool/output/influxdb.py | 109b4d58f311585d601a5065d1bc9cab7c130d65 | [
"BSD-3-Clause"
] | permissive | Zombie-Technology/dsc-datatool | a1099a072b0d7f68baec9744012c2153e59d2fb6 | cfe660eb9d5aa306bdba31b2ec3708ace7661840 | refs/heads/master | 2022-12-30T12:40:50.053081 | 2020-10-21T12:23:23 | 2020-10-21T12:23:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,615 | py | """dsc_datatool.output.influxdb
See `man dsc-datatool-output influxdb`.
Part of dsc_datatool.
:copyright: 2020 OARC, Inc.
"""
import re
import sys
import atexit
from dsc_datatool import Output,args
_re = re.compile(r'([,=\s])')
def _key(key):
return re.sub(_re, r'\\\1', key)
def _val(val):
ret = re.sub(_re, r'\\\1', val)
if ret == '':
return '""'
return ret
def _process(tags, timestamp, dimension, fh):
    """Recursively emit one DSC dimension tree as InfluxDB line-protocol points.

    *tags* accumulates the measurement name and ``key=value`` tag pairs as
    the tree is descended; leaf counters are printed to *fh*, one point per
    (key, value) entry, all stamped with *timestamp*.
    """
    if dimension.dimensions is None:
        return
    if len(dimension.dimensions) > 0:
        # Interior node: extend the tag set — skipping the synthetic
        # 'All=ALL' level — and recurse into each child dimension.
        if not (dimension.name == 'All' and dimension.value == 'ALL'):
            tags += ',%s=%s' % (_key(dimension.name.lower()), _val(dimension.value))
        for d2 in dimension.dimensions:
            _process(tags, timestamp, d2, fh)
        return
    if dimension.values is None:
        return
    if len(dimension.values) > 0:
        # Leaf node: the dimension name becomes the last tag key and each
        # counter key becomes its value.
        tags += ',%s=' % _key(dimension.name.lower())
        for k, v in dimension.values.items():
            print('%s%s value=%s %s' % (tags, _val(k), v, timestamp), file=fh)
class InfluxDB(Output):
    """Output DSC datasets in InfluxDB line protocol.

    See `man dsc-datatool-output influxdb` for the supported options
    (timestamp, file, append, dml, database).
    """

    # When True, points are stamped with the dataset start time; when False,
    # with the dataset end time (the `timestamp` option).
    start_timestamp = True
    fh = None

    def __init__(self, opts):
        """Configure the output target and timestamp mode from *opts*."""
        Output.__init__(self, opts)
        timestamp = opts.get('timestamp', 'start')
        if timestamp == 'start':
            pass
        elif timestamp == 'stop':
            # BUGFIX: was `self.timestamp = False`, which set an unused
            # attribute and left `start_timestamp` True, making the
            # 'stop' option a no-op (process() reads start_timestamp).
            self.start_timestamp = False
        else:
            raise Exception('timestamp option invalid')
        file = opts.get('file', None)
        append = opts.get('append', False)
        if file:
            self.fh = open(file, 'a' if append else 'w')
            # Make sure the file is flushed/closed at interpreter exit.
            atexit.register(self.close)
        else:
            self.fh = sys.stdout
        if opts.get('dml', False):
            print('# DML', file=self.fh)
        database = opts.get('database', None)
        if database:
            print('# CONTEXT-DATABASE: %s' % database, file=self.fh)

    def close(self):
        """Close the output file handle (idempotent; registered via atexit)."""
        if self.fh:
            self.fh.close()
            self.fh = None

    def process(self, datasets):
        """Write every dimension of every dataset as line-protocol points."""
        for dataset in datasets:
            tags = '%s,server=%s,node=%s' % (_key(dataset.name.lower()), args.server, args.node)
            if self.start_timestamp:
                timestamp = dataset.start_time * 1000000000
            else:
                timestamp = dataset.end_time * 1000000000
            for d in dataset.dimensions:
                _process(tags, timestamp, d, self.fh)
# Python 3.5 does not invoke __init_subclass__ automatically on subclass
# creation, so register this plugin with the Output base class by hand.
if sys.version_info[0] == 3 and sys.version_info[1] == 5: # pragma: no cover
    Output.__init_subclass__(InfluxDB)
| [
"lundstrom.jerry@gmail.com"
] | lundstrom.jerry@gmail.com |
75eb45297fe8e25dc8fb53797ae723cb5b776560 | 42de202043de8ff9aa99c2f4f8693b63b4c2e169 | /forms/views.py | 7dede54e5defeb09fb1fab111498496cb7450317 | [] | no_license | danielmwai/marine | 8aa4f180d7dda93e5e94363012911d78370add47 | 22f390ebbcb8484dcc9ec8ae54d75493a0bc3165 | refs/heads/master | 2021-05-16T13:41:55.731219 | 2017-10-01T11:02:36 | 2017-10-01T11:02:36 | 105,435,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,938 | py | """Main forms views."""
from urllib import urlencode
from decimal import Decimal
from django.utils import timezone
from django.shortcuts import render
from django.contrib.sites.models import Site
from auth.forms import LoginForm
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, JsonResponse
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from .models import (
RegPerson, HSCodes, RegCargo, HSCategory,
CustomerOrders, CustomerInvoice, CustomerGoods, RegCompany,
BondsApplication)
from auth.views import profile
from .functions import (
auto_suggest, gen_invoice_number, make_payment, get_persons,
hs_lists, get_user, get_registrations, update_person,
update_company, check_account, get_premium_data, new_person,
new_company, get_bond_data, get_xrate, invoice_data,
create_notes, get_taxes)
from .forms import CargoForm, BondForm
from main.models import RegPorts
from main.views import dashboard
from auth.forms import RegisterForm
from auth.models import AppUser
from auth.functions import create_account, send_verification
from main.functions import get_categories, get_rate, get_duty
def public_search(request):
    """Unauthenticated autocomplete endpoint: return suggestions as JSON.

    NOTE(review): a non-GET request falls through and returns None, which
    Django treats as an error — confirm this endpoint is GET-only.
    """
    try:
        if request.method == 'GET':
            print 'public get'
            data = auto_suggest(request)
            return JsonResponse(data, safe=False)
    except Exception, e:
        raise e
@login_required(login_url='/login/')
def search(request):
    """Authenticated lookup endpoint.

    POST with a ``filter`` of 'ports'/'category'/'goods' returns HTML
    <option> lists; 'currency' returns an exchange rate as JSON.
    GET delegates to the autocomplete helper. Any other case renders the
    generic forms page.
    """
    try:
        if request.method == 'POST':
            vals = ''
            filter_type = request.POST.get('filter')
            if filter_type == 'ports':
                country_id = request.POST.get('country_id')
                ports = RegPorts.objects.filter(country_id=country_id)
                for port in ports:
                    port_id = port.id
                    port_name = port.port_name
                    vals += '<option value="%s">%s</option>' % (
                        port_id, port_name)
                return HttpResponse(vals)
            elif filter_type == 'category':
                section_id = request.POST.get('section_id')
                items = HSCategory.objects.filter(section_id=section_id)
                vals = '<option value="">Please Select Category</option>'
                for item in items:
                    category_id = item.id
                    category_name = item.category_name
                    vals += '<option value="%s">%s</option>' % (
                        category_id, category_name)
                return HttpResponse(vals)
            elif filter_type == 'goods':
                category_id = request.POST.get('category_id')
                items = HSCodes.objects.filter(category_id=category_id)
                vals = hs_lists(items)
                return HttpResponse(vals)
            elif filter_type == 'currency':
                currency_id = request.POST.get('currency_id')
                rate = get_xrate(currency_id)
                return JsonResponse(rate, safe=False)
        elif request.method == 'GET':
            print 'Get customer'
            data = auto_suggest(request)
            return JsonResponse(data, safe=False)
        form = LoginForm()
        return render(request, 'forms/forms.html', {'form': form})
    except Exception, e:
        print 'error - %s' % (str(e))
        raise e
@login_required(login_url='/login/')
def register(request):
    """Register the logged-in user's personal details.

    POST creates a RegPerson tied to the current account. GET redirects to
    the profile page if a person or company record already exists,
    otherwise renders the registration form.
    """
    try:
        account_id = request.user.id
        if request.method == 'POST':
            form = LoginForm(data=request.POST)
            first_name = request.POST.get('first_name')
            middle_name = request.POST.get('middle_name')
            surname = request.POST.get('surname')
            pin_number = request.POST.get('pin_number')
            idpass_number = request.POST.get('id_number')
            mobile_number = request.POST.get('mobile_number')
            postal_address = request.POST.get('postal_address')
            tax_status = request.POST.get('tax_status')
            person_new = RegPerson(first_name=first_name,
                                   middle_name=middle_name,
                                   surname=surname,
                                   pin_number=pin_number,
                                   idpass_number=idpass_number,
                                   mobile_number=mobile_number,
                                   postal_address=postal_address,
                                   tax_status=tax_status,
                                   account_id=account_id,
                                   is_void=False)
            person_new.save()
            msg = 'Person details saved successfully'
            messages.info(request, msg)
            return HttpResponseRedirect(reverse(dashboard))
        # Already registered (as a person or a company)? Go to the profile.
        result = RegPerson.objects.filter(
            account_id=account_id, is_void=False)
        if result:
            return HttpResponseRedirect(reverse(profile))
        else:
            result = RegCompany.objects.filter(
                account_id=account_id, is_void=False)
            if result:
                return HttpResponseRedirect(reverse(profile))
        form = LoginForm()
        return render(request, 'forms/register.html', {'form': form})
    except Exception, e:
        raise e
@login_required(login_url='/login/')
def cargo(request):
    """Create a marine-cargo insurance order.

    POST: builds a CustomerOrders record from the form, generates an
    invoice, copies the user's temporary cargo items (RegCargo) into
    CustomerGoods, records the payment, then redirects — to the KenSwitch
    payment gateway for card payments (mode 2), else to the dashboard.
    GET: renders the cargo entry form.
    """
    try:
        created_by = request.user.id
        categories = get_categories()
        if request.method == 'POST':
            # Save order details first
            transact_type = request.POST.get('transact_type')
            person_id = request.POST.get('person_id')
            origin_port_id = request.POST.get('port_id')
            country_id = request.POST.get('country_id')
            dest_port_id = request.POST.get('dest_port')
            inland_warehouse = request.POST.get('warehouse')
            voyage_start = request.POST.get('voyage_start')
            voyage_end = request.POST.get('voyage_end')
            transport_mode = request.POST.get('transport_mode')
            logistics_type = request.POST.get('logistics_id')
            conso_id = request.POST.get('conso_id')
            handler = request.POST.get('ins_handler')
            agent_id = request.POST.get('agent_id')
            broker_id = request.POST.get('broker_id')
            bank_id = request.POST.get('bank_id')
            pay_bank = request.POST.get('pay_bank')
            vessel = request.POST.get('vessel_name')
            total_cost = float(request.POST.get('total_cost'))
            payable_amount = float(request.POST.get('total_amount'))
            total_freight = float(request.POST.get('total_freight'))
            total_tax = float(request.POST.get('total_tax'))
            total_premium = float(request.POST.get('total_premiums'))
            sum_assured = float(request.POST.get('sum_assured'))
            insurance = request.POST.get('insurance_co')
            payment_mode = request.POST.get('payment_mode')
            # Extras (optional extra-cover flags; blank means 0/off)
            tax_ws = request.POST.get('tax_ws')
            tax_ts = request.POST.get('tax_ts')
            tax_se = request.POST.get('tax_se')
            tax_ov = request.POST.get('tax_ov')
            tax_sl = request.POST.get('tax_sl')
            war_strike = int(tax_ws) if tax_ws else 0
            trans_ship = int(tax_ts) if tax_ts else 0
            storage_ext = int(tax_se) if tax_se else 0
            over_age = int(tax_ov) if tax_ov else 0
            short_land = int(tax_sl) if tax_sl else 0
            vessel_id = int(vessel) if vessel else None
            time_now = timezone.now()
            # Exports (type 2) use a separate set of port/country fields.
            if int(transact_type) == 2:
                origin_port_id = request.POST.get('ex_port_id')
                country_id = request.POST.get('ex_country_id')
                dest_port_id = request.POST.get('ex_dest_port')
                inland_warehouse = request.POST.get('ex_warehouse')
            if not agent_id:
                agent_id = None
            if not broker_id:
                broker_id = None
            if not conso_id:
                conso_id = None
            # NOTE(review): bank_id is REPLACED by pay_bank when empty and
            # cleared when provided — looks inverted; confirm intent.
            if not bank_id:
                bank_id = pay_bank
            else:
                bank_id = None
            new_order = CustomerOrders(
                transaction_type=transact_type,
                person_id=person_id,
                origin_port_id=origin_port_id,
                country_id=country_id,
                dest_port_id=dest_port_id,
                inland_warehouse=inland_warehouse,
                voyage_start=voyage_start,
                voyage_end=voyage_end,
                transport_mode=transport_mode,
                logistics_type=logistics_type,
                consolidator_id=conso_id,
                insurance_id=insurance,
                handler=handler,
                agent_id=agent_id,
                broker_id=broker_id,
                is_paid=False,
                total_tax=total_tax,
                total_cost=total_cost,
                total_premium=total_premium,
                sum_assured=sum_assured,
                war_strike=war_strike,
                storage_ext=storage_ext,
                trans_ship=trans_ship,
                over_age=over_age,
                short_land=short_land,
                bank_id=bank_id,
                vessel_id=vessel_id,
                pay_method=payment_mode,
                total_freight=total_freight,
                created_by_id=created_by,
                created_at=time_now)
            new_order.save()
            order_id = new_order.pk
            invoice_no = gen_invoice_number(insurance)
            trans_id = int(transport_mode)
            total_premiums = Decimal(total_premium)
            stamp_duty = get_duty(trans_id, sum_assured)
            # Statutory levies: PHF/PCF 0.25% and ITL 0.20% of the premium.
            phf = total_premiums * Decimal(0.0025)
            itl = total_premiums * Decimal(0.0020)
            # NOTE(review): the parsed extra-cover amounts above are zeroed
            # here before the invoice is created — confirm this is intended.
            war_strike = 0.00000
            storage_ext = 0.00000
            trans_ship = 0.00000
            over_age = 0.00000
            short_land = 0.00000
            new_invoice = CustomerInvoice(
                invoice_no=invoice_no,
                policy_no=None,
                payable_amount=payable_amount,
                sum_assured=sum_assured,
                total_premium=total_premium,
                stamp_duty=stamp_duty,
                pcf_amount=phf,
                itl_amount=itl,
                war_strike=war_strike,
                storage_ext=storage_ext,
                trans_ship=trans_ship,
                over_age=over_age,
                short_landing=short_land,
                discount=0.0,
                paid_amount=0.000,
                pay_status=False,
                insurance_id=insurance,
                person_id=person_id,
                orders_id=order_id,
                created_by_id=created_by,
                created_at=time_now)
            new_invoice.save()
            inv_id = new_invoice.pk
            # Save this goods to customer goods and delete tmp
            tmps = RegCargo.objects.select_related().filter(
                account_id=created_by, person_id=person_id)
            for tmp in tmps:
                sum_assured = tmp.quantity * tmp.price
                ins_co = tmp.insurance_id
                a_type = tmp.ship_type
                cat_id = tmp.category_id
                ctype = tmp.cover_type
                ptype = tmp.package_type
                freight = tmp.freight_cost
                # Map the numeric cover type to the insurer's a/b/c classes.
                covers = {1: 'a', 2: 'b', 3: 'c'}
                cid = covers[ctype]
                amount = sum_assured + freight
                tentry = tmp.goods
                taxes = get_taxes(amount, tentry, 2)
                rail_dev = taxes['rail_dev']
                sugar_dev = taxes['sugar_dev']
                excise_duty = taxes['excise_duty']
                import_duty = taxes['import_duty']
                cargo_rate = get_rate(ins_co, cat_id, a_type, ptype, cid)
                new_good = CustomerGoods(
                    amount=tmp.quantity,
                    price=tmp.price,
                    freight_cost=tmp.freight_cost,
                    package_type=tmp.package_type,
                    cargo_rate=cargo_rate,
                    import_duty=import_duty,
                    excise_duty=excise_duty,
                    sugardev_levy=sugar_dev,
                    raildev_levy=rail_dev,
                    goods_id=tmp.goods_id,
                    orders_id=order_id,
                    person_id=tmp.person_id,
                    created_by_id=created_by,
                    created_at=time_now)
                new_good.save()
            tmps.delete()
            p_mode = int(payment_mode)
            make_payment(inv_id, payable_amount, int(payment_mode))
            msg = 'Order saved successfully and Invoice created.'
            messages.info(request, msg)
            # return HttpResponseRedirect(reverse(dashboard))
            domain = Site.objects.get_current()
            ret_url = 'https://%s/payment/' % (domain)
            if p_mode == 2:
                # Card payment: hand off to the KenSwitch gateway.
                # Amount is sent in cents.
                pay_amount = int(payable_amount * 100)
                dparts = 'kenswitchpaymentsurface/Payment.aspx?'
                url = 'https://apps.kenswitch.com:8066/' + dparts
                params = {'id': 'bWFyaW5l', 'txnid': invoice_no,
                          'amount': pay_amount, 'rec_bank': 34,
                          'rec_acc': '01001030021701', 'return_url': ret_url}
                dest = urlencode(params)
                full_address = url + dest
                return HttpResponseRedirect(full_address)
            else:
                return HttpResponseRedirect(reverse(dashboard))
        form = CargoForm()
        rform = RegisterForm()
        person, company = get_persons(created_by)
        return render(request, 'forms/cargo.html',
                      {'form': form, 'person': person, 'rform': rform,
                       'categories': categories})
    except Exception, e:
        print 'error on cargo - %s' % (str(e))
        raise e
def premiums(request):
    """Calculate (or remove) a temporary cargo line and return premium data.

    POST with ``item_id`` deletes that temporary RegCargo row; otherwise the
    posted goods line is upserted into RegCargo and the recomputed premium
    summary is returned as JSON.

    NOTE(review): non-POST requests fall through and return None — confirm
    this endpoint is POST-only.
    """
    try:
        # Numeric cover type -> insurer's a/b/c cover class.
        covers = {1: 'a', 2: 'b', 3: 'c'}
        if request.method == 'POST':
            account_id = request.user.id
            item_id = request.POST.get('item_id')
            person_id = int(request.POST.get('person_id'))
            if item_id:
                # Delete request: drop the temporary line and re-summarize.
                tmps = RegCargo.objects.select_related().filter(
                    account_id=account_id, person_id=person_id,
                    pk=item_id)
                tmps.delete()
                data = get_premium_data(request, account_id, person_id)
                return JsonResponse(data)
            # Do the premiums calculation
            x_rate = float(request.POST.get('exchange_rate'))
            ins_co = int(request.POST.get('insurance_co'))
            goods_type = int(request.POST.get('goods_type'))
            quantity = int(request.POST.get('quantity'))
            cover_id = int(request.POST.get('cover'))
            oprice = request.POST.get('price')
            trans_id = int(request.POST.get('transport_mode'))
            category_id = int(request.POST.get('goods_category'))
            packaging = request.POST.get('package_type')
            package_type = int(packaging) if packaging else 0
            fcost = float(request.POST.get('freight_cost'))
            freight_cost = fcost * x_rate
            # Price may include thousands separators; convert to local currency.
            fprice = float(oprice.replace(',', ''))
            price = fprice * x_rate
            cover = covers[cover_id]
            # Check rate first
            rate = get_rate(ins_co, category_id, trans_id, package_type, cover)
            if not rate:
                msg = 'The selected goods are not covered.'
                data = {'status': 9, 'premium': 0,
                        'response': '', 'taxes': 0, 'tax': 0,
                        'sum_assured': 0, 'message': msg}
                return JsonResponse(data)
            # Temporarily save this data (one row per account/goods/person).
            tmpid, created = RegCargo.objects.update_or_create(
                account_id=account_id, goods_id=goods_type,
                person_id=person_id, is_void=False,
                defaults={'insurance_id': ins_co,
                          'goods_id': goods_type,
                          'quantity': quantity,
                          'price': price, 'package_type': package_type,
                          'cover_type': cover_id,
                          'freight_cost': freight_cost,
                          'ship_type': trans_id,
                          'is_void': False,
                          'person_id': person_id,
                          'category_id': category_id,
                          'account_id': account_id})
            data = get_premium_data(request, account_id, person_id)
            return JsonResponse(data)
    except Exception, e:
        print 'premiums error - %s' % (str(e))
        raise e
@login_required(login_url='/login/')
def claims(request, id):
    """Render the claims page for the invoice with primary key *id*.

    NOTE(review): CustomerInvoice.DoesNotExist propagates as a 500 here —
    consider get_object_or_404.
    """
    try:
        claim = CustomerInvoice.objects.get(id=id)
        return render(request, 'forms/claims.html', {'claim': claim})
    except Exception, e:
        raise e
@login_required(login_url='/login/')
def edit_cargo(request, id):
    """Amend an existing cargo invoice.

    GET renders the edit page with the invoice breakdown. POST without
    ``action_id`` saves amendment notes and redirects; POST with
    ``action_id`` recomputes the premium table (optionally with a discount)
    and returns it as an HTML fragment in JSON.
    """
    try:
        idata = invoice_data(request, id)
        if not idata:
            msg = "Invoice not found."
            messages.error(request, msg)
            return HttpResponseRedirect(reverse(dashboard))
        # Unpack the invoice breakdown into locals used below.
        rates = idata['rates']
        charges = idata['charges']
        others = idata['others']
        premium = idata['premium']
        tax_premium = idata['tax_premium']
        insured = idata['insured']
        invoice = idata['invoice']
        tcost = idata['tcost']
        pcost = idata['pcost']
        fcost = idata['fcost']
        cost_freight = idata['cost_freight']
        goods = idata['goods']
        if request.method == 'POST':
            action_id = request.POST.get('action_id')
            if not action_id:
                inv_id = invoice['invoice'].id
                create_notes(request, inv_id)
                # Change the approval status
                msg = "Application ammended successfully."
                messages.info(request, msg)
                return HttpResponseRedirect(reverse(dashboard))
            else:
                dnt = request.POST.get('discount')
                discount = float(dnt) if dnt else 0
                if discount > 0:
                    # Re-price the invoice with the requested discount.
                    idata = invoice_data(request, id, discount)
                    rates = idata['rates']
                    charges = idata['charges']
                    others = idata['others']
                    premium = idata['premium']
                    tax_premium = idata['tax_premium']
                    insured = idata['insured']
                    invoice = idata['invoice']
                    tcost = idata['tcost']
                    pcost = idata['pcost']
                    fcost = idata['fcost']
                    cost_freight = idata['cost_freight']
                    goods = idata['goods']
                # Right-aligned, comma-grouped number strings for the table.
                pf = '{:20,.2f}'.format(premium)
                cff = '{:20,.2f}'.format(cost_freight)
                tcostf = '{:20,.2f}'.format(tcost)
                pcostf = '{:20,.2f}'.format(pcost)
                insuredf = '{:20,.2f}'.format(insured)
                taxpf = '{:20,.2f}'.format(tax_premium)
                dtxt = 'Premium (Discount %s%%)' % (discount)
                dt = '<table class="table">'
                dt += '<thead><tr><th>Item</th><th>Value</th>'
                dt += '<th>Rate</th><th>%s</th></tr>' % (dtxt)
                dt += '</thead><tbody>'
                dt += '<tr><td>Cost and Freight</td>'
                dt += '<td align="right">%s</td>' % (cff)
                dt += '<td align="right">-</td>'
                dt += '<td align="right">%s</td></tr>' % (pcostf)
                dt += '<tr><td>Tax Estimates</td>'
                dt += '<td align="right">%s</td>' % (tcostf)
                dt += '<td align="right">0.00100</td>'
                dt += '<td align="right">%s</td></tr>' % (taxpf)
                dt += '<tr><td>Sum Insured</td>'
                dt += '<td align="right">%s</td>' % (insuredf)
                dt += '<td></td><td></td></tr>'
                # One row per applicable charge (only those actually levied).
                for charge in rates:
                    if charge in charges:
                        itm = rates[charge]
                        nm = itm['name']
                        rt = itm['rate'] if 'rate' in itm else "-"
                        rtf = '{:20,.5f}'.format(rt) if rt != '-' else rt
                        cg = '{:20,.2f}'.format(charges[charge])
                        dt += '<tr><td>%s</td><td></td>' % (nm)
                        dt += '<td align="right">%s</td>' % (rtf)
                        dt += '<td align="right">%s</td></tr>' % (cg)
                dt += '<tr><td><strong>Total Premiums</strong></td><td></td>'
                dt += '<td></td><td align="right">'
                dt += '<strong>%s</strong></td></tr>' % (pf)
                dt += '</tbody></table>'
                result = {'status': 0, 'message': dt}
                return JsonResponse(result)
        inv_data = invoice['invoice']
        return render(request, 'forms/cargo_edit.html',
                      {'invoice': invoice, 'goods': goods, 'freight': fcost,
                       'taxes': tcost, 'others': others, 'insured': insured,
                       'premium': premium, 'data': inv_data})
    except Exception, e:
        raise e
@login_required(login_url='/login/')
def csform(request, id):
    """Render the claim-settlement form for the invoice with pk *id*."""
    try:
        claim = CustomerInvoice.objects.get(id=id)
        return render(request, 'forms/csform.html', {'claim': claim})
    except Exception, e:
        raise e
@login_required(login_url='/login/')
def registrations(request):
    """List the registrations visible to the current user."""
    try:
        # Local name intentionally shadows this view function inside the body.
        registrations = get_registrations(request)
        return render(request, 'forms/registrations.html',
                      {'claim': {}, 'registrations': registrations})
    except Exception, e:
        raise e
@login_required(login_url='/login/')
def reg_validate(request, id):
"""Some default page for Bad request error page."""
try:
user_group = ''
grps = ['User', 'Manager', 'Admin']
if request.method == 'POST':
account_type = int(request.POST.get('account_type'))
if account_type == 1:
# Edit a person
update_person(request)
else:
# Edit a company
update_company(request)
msg = 'Account modified successfully'
messages.info(request, msg)
return HttpResponseRedirect(reverse(registrations))
coid = request.user.company_id
sid = request.user.site_id
user = get_user(id)
regs = get_registrations(request)
if not user:
return render(request, 'forms/registrations.html',
{'registrations': regs})
else:
if coid != user.company_id or sid != user.site_id:
return render(request, 'forms/registrations.html',
{'registrations': regs})
groups = user.groups.all()
for group in groups:
if str(group) in grps:
user_group = str(group)
account = check_account(user)
if account:
account_type = 1
acc_id = user.regperson.id
vals = {'surname': user.regperson.surname, 'email': user.email,
'middle_name': user.regperson.middle_name,
'first_name': user.regperson.first_name,
'staff_number': user.regperson.staff_number,
'id_number': user.regperson.idpass_number,
'mobile_number': user.regperson.mobile_number,
'postal_address': user.regperson.postal_address,
'user_level': user_group}
else:
account_type = 2
acc_id = user.regcompany.id
vals = {'company_name': user.regcompany.company_name,
'email': user.email,
'kra_pin': user.regcompany.pin_number,
'coreg_number': user.regcompany.reg_number,
'etr_number': user.regcompany.etr_number,
'physical_address': user.regcompany.physical_address,
'mobile_number': user.regcompany.mobile_number,
'postal_address': user.regcompany.postal_address,
'user_level': user_group}
form = RegisterForm(data=vals)
return render(request, 'forms/vregistrations.html',
{'form': form, 'registrations': registrations,
'account': account_type, 'account_id': acc_id})
except Exception, e:
raise e
@login_required(login_url='/login/')
def new_user(request):
    """Create a staff user under the current user's company/site.

    POST creates the auth account with a random password, attaches a
    RegPerson profile, adds the 'User' group, and emails the password via
    the verification mail. GET renders the blank registration form.
    """
    try:
        form = RegisterForm()
        if request.method == 'POST':
            form = RegisterForm(data=request.POST)
            first_name = request.POST.get('first_name')
            middle_name = request.POST.get('middle_name')
            surname = request.POST.get('surname')
            staff_number = request.POST.get('staff_number')
            idpass_number = request.POST.get('id_number')
            mobile_number = request.POST.get('mobile_number')
            postal_address = request.POST.get('postal_address')
            email = request.POST.get('email')
            # Make random password
            password = AppUser.objects.make_random_password()
            company_id = request.user.company_id
            site_id = request.user.site_id
            client_type_id = request.user.person_type
            user = create_account(email, client_type_id, password)
            if user:
                account_id = user.pk
                person_new = RegPerson(first_name=first_name,
                                       middle_name=middle_name,
                                       surname=surname,
                                       staff_number=staff_number,
                                       idpass_number=idpass_number,
                                       mobile_number=mobile_number,
                                       postal_address=postal_address,
                                       account_id=account_id,
                                       is_void=False)
                person_new.save()
                # Inherit the creator's company/site/person type.
                user.company_id = company_id
                user.site_id = site_id
                user.person_type = client_type_id
                user.save(update_fields=["company_id", "person_type",
                                         "site_id"])
                # Add group
                group = Group.objects.get(name='User')
                user.groups.add(group)
                msg = 'Person details saved successfully'
                more_txt = '<p>Password: %s</p>' % (password)
                send_verification(request, email, more_txt)
                messages.info(request, msg)
                return HttpResponseRedirect(reverse(registrations))
            else:
                msg = 'Email already registered'
                messages.error(request, msg)
                return render(request, 'forms/registration.html',
                              {'form': form})
        return render(request, 'forms/registration.html',
                      {'form': form})
    except Exception, e:
        print 'error creating account -%s' % (str(e))
        raise e
def cbm_calculator(request):
    """Render the cubic-meter (CBM) calculator page.

    NOTE(review): the trailing `else: pass` on the try is dead — the try
    block always returns.
    """
    try:
        form = {}
        return render(request, 'forms/cbm.html',
                      {'form': form})
    except Exception, e:
        raise e
    else:
        pass
def bonds(request):
    """List all active bond applications."""
    try:
        # Local name intentionally shadows this view function inside the body.
        bonds = BondsApplication.objects.filter(
            is_active=True)
        return render(request, 'forms/bonds.html',
                      {'bonds': bonds})
    except Exception, e:
        raise e
    else:
        pass
def new_bonds(request):
    """Create a new bond application for the current user.

    POST saves a BondsApplication (amount may contain thousands separators)
    and redirects to the bonds listing; GET renders the blank form.
    """
    try:
        form = BondForm()
        account_id = request.user.id
        if request.method == 'POST':
            form = BondForm(data=request.POST)
            bond_amount = request.POST.get('bond_amount')
            bond_class = request.POST.get('bond_class')
            insurance = request.POST.get('insurance_co')
            amount = float(bond_amount.replace(',', ''))
            bond_new = BondsApplication(
                bond_id=bond_class,
                insurance_id=insurance,
                amount=amount,
                client_id=account_id,
                created_by_id=account_id,
                is_active=True)
            bond_new.save()
            msg = 'Bond created successfully awaiting verification '
            msg += 'and approval.'
            messages.info(request, msg)
            return HttpResponseRedirect(reverse(bonds))
        return render(request, 'forms/bonds_new.html',
                      {'form': form})
    except Exception, e:
        raise e
    else:
        pass
def validate_bonds(request, id):
    """Validate a bond application.

    NOTE(review): the POST branch only flashes a success message — no
    persistent state change is made here; confirm whether validation is
    handled elsewhere.
    """
    try:
        form = BondForm()
        # account_id = request.user.id
        bdata = get_bond_data(id)
        bond = bdata if bdata else {}
        if request.method == 'POST':
            msg = 'Bond validated successfully.'
            messages.info(request, msg)
            return HttpResponseRedirect(reverse(bonds))
        return render(request, 'forms/bonds_validate.html',
                      {'form': form, 'bond': bond})
    except Exception, e:
        raise e
    else:
        pass
def new_client(request):
    """Create a customer profile (person or company) and return JSON status.

    ``ctype`` 2 creates a company profile, anything else a person. A random
    password is generated and mailed via the verification email. On any
    failure a generic {'status': 9} payload is returned.
    """
    try:
        status_code = 0
        email = request.POST.get('email')
        ctype = int(request.POST.get('ctype'))
        password = AppUser.objects.make_random_password()
        company_id = request.user.company_id
        site_id = request.user.site_id
        client_type_id = 1
        user = create_account(email, client_type_id, password)
        if user:
            acc_id = user.pk
            if ctype == 2:
                new_company(request, acc_id)
                print 'New company'
            else:
                new_person(request, acc_id)
            # Attach the new client to the creator's company and site.
            user.company_id = company_id
            user.site_id = site_id
            user.person_type = client_type_id
            user.save(update_fields=["company_id", "person_type",
                                     "site_id"])
            msg = 'Customer profile created successfully'
            more_txt = '<p>Password: %s</p>' % (password)
            send_verification(request, email, more_txt)
        else:
            status_code, acc_id = 9, 0
            msg = 'Email already registered in the system.'
        data = {'status': status_code, 'message': msg,
                'person_id': acc_id}
    except Exception, e:
        print 'error creating client - %s' % (str(e))
        data = {'status': 9, 'message': 'Failed', 'person_id': 0}
        return JsonResponse(data)
    else:
        return JsonResponse(data)
| [
"naistechnologies.com@gmail.com"
] | naistechnologies.com@gmail.com |
60d63347ecfd5b38227a460d472a5bf2e5eaec8d | 9c916ed53b11afb232134ae95c6f7f451330d155 | /code/cd.py | f2a84fb3a5f40de6a27e3809bc01569a28b0e623 | [
"MIT"
] | permissive | eggplantbren/CombinatoricDegeneracies | 1c2a156d119310e8b7480dbf57b20f179dc17377 | a58d6440bb8c710b3359aded1e0401ef7c982047 | refs/heads/master | 2021-01-22T00:29:30.396420 | 2016-03-08T18:01:25 | 2016-03-08T18:01:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,358 | py | """
This file is part of the CombinatoricDegeneracies project.
Copyright 2016 David W. Hogg & Dan Foreman-Mackey.
# to-do list:
- Write code to return a marginalized likelihood, given a prior.
- Write exact-sampling code.
# notes / bugs:
- The zero covariance across modes (block-diagonality) is baked-in.
- The `draw_sample()` code is unnecessarily slow.
"""
import itertools as it
import numpy as np
from scipy.misc import logsumexp
def factorial(N):
    """Return N! for a non-negative integer N.

    Uses Python's arbitrary-precision integers: the original
    ``np.prod(range(1, N+1))`` silently overflows int64 for N > 20.
    """
    if N == 0:
        return 1
    assert N > 0
    result = 1
    for k in range(2, N + 1):
        result *= k
    return result

def choose(N, M):
    """Return the binomial coefficient N-choose-M as an exact integer.

    Floor division is exact here (binomial coefficients are integers) and
    avoids the float result / precision loss of true division.
    """
    return factorial(N) // (factorial(M) * factorial(N - M))
class mixture_of_gaussians:
    """
    A weighted mixture of `K` Gaussians in `D` dimensions, supporting
    products of mixtures, sampling, and (marginalized) log-density
    evaluation.

    ## notes:
    - Redundantly, this requires *both* `vars` and `ivars`. Why? Good reasons!
    ## bugs:
    - May not work at `K==1` or `D==1`.
    """
    def __init__(self, amps, means, vars, ivars):
        # amps: (K,) non-negative component amplitudes (need not sum to 1).
        # means: (K, D) component means.
        # vars / ivars: (K, D, D) covariances and their inverses; each pair
        # is checked for symmetry and mutual consistency.
        self.K = len(amps)
        assert amps.shape == (self.K,)
        assert np.all(amps >= 0.)
        self.amps = amps
        KK, D = means.shape
        assert KK == self.K
        self.D = D
        self.means = means
        assert vars.shape == (self.K, self.D, self.D)
        assert ivars.shape == (self.K, self.D, self.D)
        for var, ivar in zip(vars, ivars):
            assert np.all(var.T == var)
            assert np.all(ivar.T == ivar)
            assert np.allclose(np.dot(var, ivar), np.eye(self.D))
        self.vars = vars
        self.ivars = ivars
        # Cache log|var_k| for density evaluation; slogdet sign must be
        # positive (positive-definite covariance).
        self.logdets = np.zeros(self.K)
        for k,var in enumerate(vars):
            s, logdet = np.linalg.slogdet(var)
            assert s > 0.
            self.logdets[k] = logdet
    def __mul__(self, other):
        """
        multiply one by another!

        Returns the (un-normalized) product mixture: every pair of
        components (one from each factor) yields one product component,
        so the result has `self.K * other.K` components.
        ## bugs:
        - Only partially tested.
        """
        assert self.D == other.D
        newK = self.K * other.K
        newamps = np.zeros(newK)
        newmeans = np.zeros((newK, self.D))
        newvars = np.zeros((newK, self.D, self.D))
        newivars = np.zeros((newK, self.D, self.D))
        for k,(sk,ok) in enumerate(it.product(range(self.K), range(other.K))):
            # following https://www.cs.nyu.edu/~roweis/notes/gaussid.pdf
            newivars[k] = self.ivars[sk] + other.ivars[ok]
            newvars[k] = np.linalg.inv(newivars[k])
            newvars[k] = 0.5 * (newvars[k] + newvars[k].T) # symmetrize
            newmeans[k] = np.dot(newvars[k], (np.dot(self.ivars[sk], self.means[sk]) +
                                              np.dot(other.ivars[ok], other.means[ok])))
            s, newlogdet = np.linalg.slogdet(newvars[k])
            assert s > 0.
            # Amplitude of the product component: product of input
            # amplitudes times the Gaussian overlap integral (in log space).
            # NOTE(review): normalization uses pi rather than 2*pi here and
            # in log_value below — confirm the intended convention.
            newlogamp = np.log(self.amps[sk]) + np.log(other.amps[ok]) - 0.5 * self.D * np.log(np.pi) \
                + 0.5 * newlogdet - 0.5 * self.logdets[sk] - 0.5 * other.logdets[ok] \
                - 0.5 * np.dot(self.means[sk], np.dot(self.ivars[sk], self.means[sk])) \
                - 0.5 * np.dot(other.means[ok], np.dot(other.ivars[ok], other.means[ok])) \
                + 0.5 * np.dot(newmeans[k], np.dot(newivars[k], newmeans[k]))
            newamps[k] = np.exp(newlogamp)
        return mixture_of_gaussians(newamps, newmeans, newvars, newivars)
    def draw_sample(self):
        """
        Draw one sample from the mixture.

        This could be sped up by saving a factorization of the `self.vars`.
        # NOTE(review): the component index is chosen uniformly at random,
        # ignoring `self.amps` — confirm this is intended for weighted
        # mixtures.
        """
        k = np.random.randint(self.K)
        return np.random.multivariate_normal(self.means[k], self.vars[k])
    def log_value(self, x):
        """
        Return the log (as in ln) mixture density at point `x` (shape (D,)).
        # NOTE(review): `logsumexp` is imported from `scipy.misc` at the top
        # of this file; that location was removed in modern SciPy (now
        # `scipy.special.logsumexp`) — update the import when upgrading.
        """
        assert x.shape == self.means[0].shape
        # Per-component log densities, summed stably below.
        vals = np.log(self.amps) - 0.5 * self.D * np.log(np.pi) - 0.5 * self.logdets
        for k in range(self.K):
            delta = x - self.means[k]
            vals[k] += -0.5 * np.dot(delta, np.dot(self.ivars[k], delta)) # chi-squared term
        return logsumexp(vals)
    def log_marginalized_value(self, d, xd):
        """
        Same as `log_value` but plotting the one-dimensional function,
        marginalizing out everything except dimension `d`.
        ## notes:
        - Check out `var` not `ivar`.
        """
        # Marginal of each component along dimension d is a 1-D Gaussian
        # with variance vars[k, d, d].
        vals = np.log(self.amps) - np.log(np.pi) - 0.5 * np.log(self.vars[:, d, d])
        for k in range(self.K):
            delta = xd - self.means[k, d]
            vals[k] += -0.5 * delta * delta / self.vars[k, d, d]
        return logsumexp(vals)
    def log_fully_marginalized_value(self):
        # Total (un-normalized) mass of the mixture, in log space.
        return np.log(np.sum(self.amps))
    def __call__(self, x, d=None):
        # Convenience dispatcher: full log density, or the 1-D marginal
        # along dimension `d` when given.
        if d is None:
            return self.log_value(x)
        return self.log_marginalized_value(d, x)
def get_log_likelihood(M, K, D, precision=256., ndof=None):
    """
    Build a log likelihood function for a problem with `K` pigeons,
    each of which gets put in one of `M` holes, each of which has `D`
    adjustable parameters. The output function will take as input a
    numpy array with `K*D` elements.
    ## notes:
    - Makes amplitudes from a flat-in-log pdf.
    - Makes means from a unit-variance Gaussian.
    - Makes inverse variances from a mean of outer products of things.
    - Returns a `mixture_of_gaussians` over the stacked `K*D` space; the
      mixture has (M choose K) * K! block-diagonal components, one per
      ordered assignment of pigeons to holes.
    ## bugs:
    - Should take random state as input.
    - Magic numbers and decisions galore.
    - Doesn't work yet.
    """
    assert int(K) > 0
    assert int(D) > 0
    assert M > K
    # create M D-space Gaussians
    amps = np.exp(np.random.uniform(size=M)) # MAGIC decision
    means = np.random.normal(size=(M, D)) # more MAGIC
    vars = np.zeros((M, D, D))
    ivars = np.zeros((M, D, D))
    if ndof is None:
        ndof = D + 2 # MAGIC
    for m in range(M):
        # Wishart-like draw: covariance is a scaled mean of outer products.
        vecs = np.random.normal(size=(ndof, D)) # more MAGIC
        vars[m] = (1. / precision) * np.mean(vecs[:, :, None] * vecs[:, None, :], axis=0) # mean not sum
        ivars[m] = np.linalg.inv(vars[m])
        ivars[m] = 0.5 * (ivars[m] + ivars[m].T) # symmetrize
        assert np.allclose(np.dot(ivars[m], vars[m]), np.eye(D))
    # create mixture of M-choose-K times K! Gaussians (OMG!)
    Kfac = factorial(K)
    McKKfac = choose(M, K) * Kfac
    KD = K * D
    bigamps = np.zeros(McKKfac)
    bigmeans = np.zeros((McKKfac, KD))
    bigvars = np.zeros((McKKfac, KD, KD))
    bigivars = np.zeros((McKKfac, KD, KD))
    # One component per ordered K-permutation of the M holes; each pigeon's
    # D-block is placed on the diagonal (zero covariance across pigeons).
    for i, p in enumerate(it.permutations(range(M), K)):
        bigamps[i] = 1.
        for k in range(K):
            bigamps[i] *= amps[p[k]]
            bigmeans[i, k * D : k * D + D] = means[p[k]]
            bigvars[i, k * D : k * D + D, k * D : k * D + D] = vars[p[k]]
            bigivars[i, k * D : k * D + D, k * D : k * D + D] = ivars[p[k]]
    # Normalize by the number of components so the total mass is the mean
    # of per-assignment amplitudes.
    bigamps /= McKKfac
    return mixture_of_gaussians(bigamps, bigmeans, bigvars, bigivars)
def get_log_prior(KD):
    """
    Return a "spherical" 1-Gaussian, `KD`-dimensional prior.

    The single component has unit amplitude, a mean vector of all ones,
    and an identity covariance (so covariance and inverse coincide up to
    the `variance` scale, held at 1 here).
    """
    variance = 1.
    amps = np.ones(1)
    means = np.ones((1, KD))
    eye = np.eye(KD).reshape((1, KD, KD))
    covs = variance * eye
    inv_covs = (1. / variance) * eye
    return mixture_of_gaussians(amps, means, covs, inv_covs)
def hogg_savefig(fn):
    # Thin wrapper around `plt.savefig` that logs the output filename first.
    # Relies on the module-global `plt` bound inside the __main__ block below.
    print("writing ", fn)
    return plt.savefig(fn)
if __name__ == "__main__":
    # Demo: build a small pigeonhole posterior, plot 1-D slices and
    # marginals, and make a corner plot from posterior samples.
    import pylab as plt
    import corner
    np.random.seed(42)
    # tM holes, tK pigeons, tD parameters per hole.
    tM, tK, tD = 7, 5, 2
    ln_prior = get_log_prior(tK * tD)
    ln_like = get_log_likelihood(tM, tK, tD, precision=4.)
    ln_post = ln_prior * ln_like # ARGH TERRIBLE TIMES
    # Fully marginalized likelihood = posterior mass / prior mass.
    print("FML:", np.exp(ln_post.log_fully_marginalized_value() - ln_prior.log_fully_marginalized_value()))
    # Slice along the first coordinate, all other coordinates fixed at 0.
    xds = np.arange(-3., 3., 0.01)
    xs = np.zeros((len(xds), tK * tD))
    xs[:,0] = xds
    ln_ps1 = np.array([ln_prior(x) + ln_like(x) for x in xs])
    ln_ps2 = np.array([ln_post(x) for x in xs])
    plt.clf()
    plt.plot(xds, np.exp(ln_ps1 - np.max(ln_ps1)), "k-")
    plt.plot(xds, np.exp(ln_ps2 - np.max(ln_ps1)), "r--")
    hogg_savefig("cd.png")
    # One plot per dimension: 1-D marginals of likelihood and posterior.
    for d in range(ln_like.D):
        ln_Ls = np.array([ln_like(x, d=d) for x in xds])
        ln_ps = np.array([ln_post(x, d=d) for x in xds])
        plt.clf()
        plt.plot(xds, np.exp(ln_ps - np.max(ln_ps)), "k-")
        plt.plot(xds, np.exp(ln_Ls - np.max(ln_Ls)), "r--")
        hogg_savefig("cd{:04d}.png".format(d))
    # 2**15 posterior draws for the corner plot.
    samples = np.array([ln_post.draw_sample() for t in range(2 ** 15)])
    figure = corner.corner(samples)
    cfn = "corner.png"
    print("writing ", cfn)
    figure.savefig(cfn)
| [
"david.hogg@nyu.edu"
] | david.hogg@nyu.edu |
13dbec58035415d1c5f089873bd5406c4922bc9f | 5a13cff8fbb83b4bc47d79f2a9aa07a519a9a1f8 | /basic/conversion_t.py | cc6be4d4a30f0380079832613a54cc0ec5214d0f | [] | no_license | chahayeong/django | 228c6892a999a426ee4f2b4b61e6891e8727ef5d | 41bd5df8e8aed8a7800e079e5ef0f6891801ee4d | refs/heads/master | 2023-05-02T19:12:12.696357 | 2021-06-02T09:55:04 | 2021-06-02T09:55:04 | 370,548,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | import pandas as pd
class Conversion(object):
    """Interactive demo of Python type conversions (tuple/list/dict/DataFrame).

    All conversion helpers are stateless static methods; `main` runs a
    menu-driven loop on stdin.
    """
    @staticmethod
    def create_tuple() -> tuple:
        # Fixed sample data used by the menu exercises below.
        return (1,2,3,4,5,6,7,8,9)
    @staticmethod
    def tuple_to_list(tp) -> list:
        return list(tp)
    @staticmethod
    def int_to_float(ls) -> list:
        return [float(i) for i in ls]
    @staticmethod
    def float_to_int(ls) -> list:
        # Truncates toward zero (int()), it does not round.
        return [int(i) for i in ls]
    @staticmethod
    def list_to_dictionary(ls) -> dict:
        # Keys are the list elements rendered as str, values the elements.
        return dict(zip([str(i) for i in ls], ls))
    @staticmethod
    def hello_to_tuple(st) -> tuple:
        # One tuple element per character of the input string.
        return tuple(list(st))
    @staticmethod
    def hello_to_list(tp) -> list:
        return list(tp)
    @staticmethod
    def dictionary_to_dataframe(dt) -> object:
        # orient='index' makes each dict key a DataFrame row label.
        return pd.DataFrame.from_dict(dt, orient='index')
    @staticmethod
    def main():
        """Menu loop: read an option from stdin and run the matching demo."""
        c = Conversion()
        tp = ()
        ls = []
        while 1:
            m = input('0-exit 1-create tuple\n'
                      '2-convert list\n'
                      '3-convert float-list\n'
                      '4-convert int-list\n'
                      '5-list convert dictionary\n'
                      '6-str convert tuple\n'
                      '7-str tuple convert list')
            if m == '0':
                break
            # Create a tuple holding the elements 1 through 9 (return)
            elif m == '1':
                tp = c.create_tuple()
                print(f'tp의 타입 : {type(tp)}')
                print(tp)
            # Convert the tuple from option 1 into a list (return)
            elif m == '2':
                ls = c.tuple_to_list(tp)
                print(f'ls의 타입 : {type(ls)}')
                print(ls)
            # Convert the list from option 2 into a list of floats (return)
            elif m == '3':
                ls = c.int_to_float(ls)
                print(f'ls의 타입 : {type(ls)}')
                print(ls)
            # Convert the float list from option 3 back into ints (return)
            elif m == '4':
                ls = c.float_to_int(ls)
                print(f'ls의 타입 : {type(ls)}')
                print(ls)
            # Convert the list from option 4 into a dictionary whose keys are
            # the elements converted to str (return)
            elif m == '5':
                dt = c.list_to_dictionary(ls)
                print(f'dt의 타입 : {type(dt)}')
                print(dt)
            # Convert 'hello' into a tuple of characters
            elif m == '6':
                tp = c.hello_to_tuple('hello')
                print(f'tp의 타입 : {type(tp)}')
                print(tp)
            # Convert the tuple from option 6 into a list
            elif m == '7':
                ls = c.tuple_to_list(tp)
                print(f'ls의 타입 : {type(ls)}')
                print(ls)
            # Convert the dictionary from option 5 into a pandas DataFrame
            elif m == '8':
                tp = c.create_tuple()
                ls = c.tuple_to_list(tp)
                dt = c.list_to_dictionary(ls)
                print(dt)
                df = c.dictionary_to_dataframe(dt)
                print(f'df의 타입 : {type(df)}')
                # If using all scalar values, you must pass an index
                print(df)
            else:
                continue
# Launch the interactive conversion demo (blocks reading stdin).
Conversion.main()
"c6prince@naver.com"
] | c6prince@naver.com |
ed3033c9f7d1d085fc22e690975658fc514ef663 | 51bb4639861ac0e804b3286c5bcf80ab21b1eed0 | /crack_interview/primality.py | eafae442a20fd45c5f715b9678b26eaa32fff00d | [] | no_license | kondrashov-do/hackerrank | b6adecd6cb917a111f995cf871468d82335fd235 | c71c7b04850fdef11c65a3a7ccbba3e733769360 | refs/heads/master | 2018-09-27T07:12:09.452131 | 2018-06-07T09:30:08 | 2018-06-07T09:30:08 | 105,986,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | from math import sqrt
def isPrime(p):
    """Return True iff p is prime, by trial division.

    After screening out small cases and multiples of 2 and 3, odd
    candidate divisors are tested while ``i * i <= p``. This pure-integer
    bound replaces ``range(3, int(sqrt(p)+1), 2)``: float sqrt can round
    for very large p and make the scan stop one divisor short.
    """
    if p < 2:
        return False
    elif p <= 3:
        return True
    elif p % 2 == 0 or p % 3 == 0:
        return False
    else:
        i = 3
        while i * i <= p:
            if p % i == 0:
                return False
            i += 2
        return True
# Judge-style driver: first line is the number of test cases, then one
# candidate integer per line; print the classification for each.
p = int(input().strip())
for a0 in range(p):
    n = int(input().strip())
    if isPrime(n):
        print('Prime')
    else:
        print('Not prime')
"kondrashov.do@gmail.com"
] | kondrashov.do@gmail.com |
88408fcb665dc11747215b23168068b6c1b7d849 | be3242e9d1ed4e330ada6812c35902328417c42d | /inference.py | fb8834e5ca68e415e2ffce876667e1b634c52ee7 | [
"Apache-2.0"
] | permissive | ryanleary/mlperf-rnnt-ref | bd254fa4eb395c701febbd5d50b18a79c57bb36a | fe0cc4145c240d4f8a8fe1814f397df63095e220 | refs/heads/master | 2020-09-17T02:47:14.471859 | 2020-01-13T13:54:03 | 2020-01-13T13:54:03 | 223,965,468 | 4 | 4 | Apache-2.0 | 2019-12-13T10:05:20 | 2019-11-25T14:16:35 | Python | UTF-8 | Python | false | false | 10,428 | py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
from typing import List
from tqdm import tqdm
import math
import toml
from dataset import AudioToTextDataLayer
from helpers import process_evaluation_batch, process_evaluation_epoch, Optimization, add_ctc_labels, AmpOptimizations, print_dict, model_multi_gpu, __ctc_decoder_predictions_tensor
from model import AudioPreprocessing, GreedyCTCDecoder, JasperEncoderDecoder
from parts.features import audio_from_file
import torch
import apex
from apex import amp
import random
import numpy as np
import pickle
import time
def parse_args():
    """Build and parse the command-line arguments for Jasper inference.

    Returns an argparse.Namespace. `--ckpt` is the only required flag;
    `--wav` switches the script from dataset evaluation to single-file
    transcription (see eval() below).
    """
    parser = argparse.ArgumentParser(description='Jasper')
    parser.add_argument("--local_rank", default=None, type=int)
    parser.add_argument("--batch_size", default=16, type=int, help='data batch size')
    parser.add_argument("--steps", default=None, help='if not specified do evaluation on full dataset. otherwise only evaluates the specified number of iterations for each worker', type=int)
    parser.add_argument("--model_toml", type=str, help='relative model configuration path given dataset folder')
    parser.add_argument("--dataset_dir", type=str, help='absolute path to dataset folder')
    parser.add_argument("--val_manifest", type=str, help='relative path to evaluation dataset manifest file')
    parser.add_argument("--ckpt", default=None, type=str, required=True, help='path to model checkpoint')
    parser.add_argument("--max_duration", default=None, type=float, help='maximum duration of sequences. if None uses attribute from model configuration file')
    parser.add_argument("--pad_to", default=None, type=int, help="default is pad to value as specified in model configurations. if -1 pad to maximum duration. If > 0 pad batch to next multiple of value")
    parser.add_argument("--fp16", action='store_true', help='use half precision')
    parser.add_argument("--cudnn_benchmark", action='store_true', help="enable cudnn benchmark")
    parser.add_argument("--save_prediction", type=str, default=None, help="if specified saves predictions in text form at this location")
    parser.add_argument("--logits_save_to", default=None, type=str, help="if specified will save logits to path")
    parser.add_argument("--seed", default=42, type=int, help='seed')
    parser.add_argument("--wav", type=str, help='absolute path to .wav file (16KHz)')
    return parser.parse_args()
def eval(
        data_layer,
        audio_processor,
        encoderdecoder,
        greedy_decoder,
        labels,
        multi_gpu,
        args):
    """performs inference / evaluation

    NOTE: the name shadows the builtin `eval`; kept for compatibility with
    existing callers.

    Two modes, selected by `args.wav`:
      - single-file mode: transcribe one wav, print latency and transcript;
      - dataset mode: iterate the data layer, accumulate predictions and
        report WER (optionally saving predictions and logits).

    Args:
        data_layer: data layer object that holds data loader
        audio_processor: data processing module
        encoderdecoder: acoustic model
        greedy_decoder: greedy decoder
        labels: list of labels as output vocabulary
        multi_gpu: true if using multiple gpus
        args: script input arguments
    """
    logits_save_to=args.logits_save_to
    # Inference mode for both modules (e.g. fixes normalization layers).
    audio_processor.eval()
    encoderdecoder.eval()
    with torch.no_grad():
        # Accumulators filled by process_evaluation_batch across the run.
        _global_var_dict = {
            'predictions': [],
            'transcripts': [],
            'logits' : [],
            }
        if args.wav:
            # Single-utterance path: time only the acoustic model forward
            # pass (cuda.synchronize brackets make the wall-clock honest).
            features, p_length_e = audio_processor(audio_from_file(args.wav))
            torch.cuda.synchronize()
            t0 = time.perf_counter()
            t_log_probs_e = encoderdecoder(features)
            torch.cuda.synchronize()
            t1 = time.perf_counter()
            t_predictions_e = greedy_decoder(log_probs=t_log_probs_e)
            hypotheses = __ctc_decoder_predictions_tensor(t_predictions_e, labels=labels)
            print("INFERENCE TIME\t\t: {} ms".format((t1-t0)*1000.0))
            print("TRANSCRIPT\t\t:", hypotheses[0])
            return
        for it, data in enumerate(tqdm(data_layer.data_iterator)):
            # Move the whole batch to the GPU before processing.
            tensors = []
            for d in data:
                tensors.append(d.cuda())
            t_audio_signal_e, t_a_sig_length_e, t_transcript_e, t_transcript_len_e = tensors

            inp = (t_audio_signal_e, t_a_sig_length_e)
            t_processed_signal, p_length_e = audio_processor(x=inp)
            # Conv-mask models also need the feature lengths.
            if args.use_conv_mask:
                t_log_probs_e, t_encoded_len_e = encoderdecoder((t_processed_signal, p_length_e))
            else:
                t_log_probs_e = encoderdecoder(t_processed_signal)
            t_predictions_e = greedy_decoder(log_probs=t_log_probs_e)

            values_dict = dict(
                predictions=[t_predictions_e],
                transcript=[t_transcript_e],
                transcript_length=[t_transcript_len_e],
                output=[t_log_probs_e]
            )
            process_evaluation_batch(values_dict, _global_var_dict, labels=labels)
            # Early exit for partial evaluation runs.
            if args.steps is not None and it + 1 >= args.steps:
                break
        wer, _ = process_evaluation_epoch(_global_var_dict)
        # Only rank 0 reports/saves in the distributed case.
        if (not multi_gpu or (multi_gpu and torch.distributed.get_rank() == 0)):
            print("==========>>>>>>Evaluation WER: {0}\n".format(wer))
            if args.save_prediction is not None:
                with open(args.save_prediction, 'w') as fp:
                    fp.write('\n'.join(_global_var_dict['predictions']))
            if logits_save_to is not None:
                logits = []
                for batch in _global_var_dict["logits"]:
                    for i in range(batch.shape[0]):
                        logits.append(batch[i].cpu().numpy())
                with open(logits_save_to, 'wb') as f:
                    pickle.dump(logits, f, protocol=pickle.HIGHEST_PROTOCOL)
def main(args):
    """Set up data, model and precision from CLI args, then run eval().

    Requires CUDA. When `args.local_rank` is set, initializes NCCL
    distributed evaluation; when `args.wav` is set, skips the data layer
    entirely and transcribes a single file.
    """
    # Seed all RNGs for reproducible evaluation.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = args.cudnn_benchmark
    print("CUDNN BENCHMARK ", args.cudnn_benchmark)
    assert(torch.cuda.is_available())

    if args.local_rank is not None:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')

    multi_gpu = args.local_rank is not None
    if multi_gpu:
        print("DISTRIBUTED with ", torch.distributed.get_world_size())

    # mxprO3 = mixed precision (apex amp), mxprO0 = full fp32.
    if args.fp16:
        optim_level = Optimization.mxprO3
    else:
        optim_level = Optimization.mxprO0

    jasper_model_definition = toml.load(args.model_toml)
    dataset_vocab = jasper_model_definition['labels']['labels']
    # Add the CTC blank symbol to the output vocabulary.
    ctc_vocab = add_ctc_labels(dataset_vocab)

    val_manifest = args.val_manifest
    featurizer_config = jasper_model_definition['input_eval']
    featurizer_config["optimization_level"] = optim_level
    args.use_conv_mask = jasper_model_definition['encoder'].get('convmask', True)

    # CLI overrides for the featurizer settings from the model toml.
    if args.max_duration is not None:
        featurizer_config['max_duration'] = args.max_duration
    if args.pad_to is not None:
        featurizer_config['pad_to'] = args.pad_to if args.pad_to >= 0 else "max"

    print('model_config')
    print_dict(jasper_model_definition)
    print('feature_config')
    print_dict(featurizer_config)
    data_layer = None

    if args.wav is None:
        data_layer = AudioToTextDataLayer(
            dataset_dir=args.dataset_dir,
            featurizer_config=featurizer_config,
            manifest_filepath=val_manifest,
            labels=dataset_vocab,
            batch_size=args.batch_size,
            pad_to_max=featurizer_config['pad_to'] == "max",
            shuffle=False,
            multi_gpu=multi_gpu)
    audio_preprocessor = AudioPreprocessing(**featurizer_config)
    encoderdecoder = JasperEncoderDecoder(jasper_model_definition=jasper_model_definition, feat_in=1024, num_classes=len(ctc_vocab))

    if args.ckpt is not None:
        print("loading model from ", args.ckpt)
        checkpoint = torch.load(args.ckpt, map_location="cpu")
        # Checkpoint stores preprocessor weights under an
        # "audio_preprocessor." prefix; strip it so both modules can load
        # from the same (flattened) state dict.
        for k in audio_preprocessor.state_dict().keys():
            checkpoint['state_dict'][k] = checkpoint['state_dict'].pop("audio_preprocessor." + k)
        audio_preprocessor.load_state_dict(checkpoint['state_dict'], strict=False)
        encoderdecoder.load_state_dict(checkpoint['state_dict'], strict=False)
    greedy_decoder = GreedyCTCDecoder()

    # print("Number of parameters in encoder: {0}".format(model.jasper_encoder.num_weights()))
    if args.wav is None:
        N = len(data_layer)
        step_per_epoch = math.ceil(N / (args.batch_size * (1 if not torch.distributed.is_initialized() else torch.distributed.get_world_size())))

        if args.steps is not None:
            print('-----------------')
            print('Have {0} examples to eval on.'.format(args.steps * args.batch_size * (1 if not torch.distributed.is_initialized() else torch.distributed.get_world_size())))
            print('Have {0} steps / (gpu * epoch).'.format(args.steps))
            print('-----------------')
        else:
            print('-----------------')
            print('Have {0} examples to eval on.'.format(N))
            print('Have {0} steps / (gpu * epoch).'.format(step_per_epoch))
            print('-----------------')
    else:
        # Single-wav mode normalizes features per-feature at runtime.
        audio_preprocessor.featurizer.normalize = "per_feature"

    print ("audio_preprocessor.normalize: ", audio_preprocessor.featurizer.normalize)
    audio_preprocessor.cuda()
    encoderdecoder.cuda()
    if args.fp16:
        encoderdecoder = amp.initialize(
            models=encoderdecoder,
            opt_level=AmpOptimizations[optim_level])
    encoderdecoder = model_multi_gpu(encoderdecoder, multi_gpu)

    eval(
        data_layer=data_layer,
        audio_processor=audio_preprocessor,
        encoderdecoder=encoderdecoder,
        greedy_decoder=greedy_decoder,
        labels=ctc_vocab,
        args=args,
        multi_gpu=multi_gpu)
if __name__=="__main__":
    # Script entry point: parse CLI args, echo them, then run inference.
    args = parse_args()
    print_dict(vars(args))
    main(args)
| [
"rleary@nvidia.com"
] | rleary@nvidia.com |
99cd2f3d04cfbe7b31e152626da03bf3c051cae4 | c88ba27b44d12fcbd021cd4f533693118f4962be | /src/blog/rest.py | 674e8096c71f9b93c78a1a982e54c2b340c6d3b6 | [
"MIT"
] | permissive | dzwiedziu-nkg/azu-python-rest | 4d57810024dab06ed018997feb589aa3596c4565 | 59540ee0e70dafea1e46c50a5ed159db5057dc26 | refs/heads/master | 2020-04-02T01:52:10.829967 | 2018-10-20T10:31:38 | 2018-10-20T10:31:38 | 153,878,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | from rest_framework import viewsets, mixins, permissions, status
from rest_framework.decorators import action, permission_classes
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from blog.models import Article, Comment
from blog.serializers import ArticleListSerializer, ArticleWithCommentsSerializer, CommentSerializer, VoteSerializer
class ArticlesViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for blog articles, plus an unauthenticated `vote` action."""
    queryset = Article.objects.all()
    pagination_class = PageNumberPagination

    def get_serializer_class(self):
        # List views use the lightweight serializer; all other actions
        # (retrieve/create/update) include nested comments.
        if self.action == 'list':
            return ArticleListSerializer
        else:
            return ArticleWithCommentsSerializer

    def perform_create(self, serializer):
        # Stamp the authenticated requester as the article author.
        serializer.save(author=self.request.user)

    def check_object_permissions(self, request, obj: Article):
        # Only superusers or the article's author may modify/delete it.
        if self.action in ['update', 'destroy', 'partial_update']:
            user = self.request.user
            if not user.is_superuser and user.id != obj.author_id:
                self.permission_denied(
                    request, message='You can modify and delete only self articles'
                )
        super().check_object_permissions(request, obj)

    def check_permissions(self, request):
        # Deliberately skip the permission checks for `vote`, making it a
        # public action; every other action keeps the default checks.
        if self.action != 'vote':
            super().check_permissions(request)

    @action(methods=['post'], detail=True)
    def vote(self, request, pk=None):
        """Record a vote for article `pk` from the posted payload."""
        serializer = VoteSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save(article_id=pk)
        return Response({'status': 'voted'}, status=status.HTTP_201_CREATED)
class CommentsViewSet(
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.ListModelMixin,
    GenericViewSet
):
    """Create/retrieve/list comments nested under one article.

    The article is identified by the `article_id` URL kwarg; comments are
    readable and writable by anyone (AllowAny). No update/delete mixins,
    so comments are immutable once posted.
    """
    serializer_class = CommentSerializer
    permission_classes = (permissions.AllowAny,)

    def get_queryset(self):
        # Restrict the queryset to the article addressed in the URL.
        article_id = int(self.kwargs['article_id'])
        return Comment.objects.filter(article_id=article_id)

    def perform_create(self, serializer):
        # Attach the new comment to the article addressed in the URL.
        article_id = int(self.kwargs['article_id'])
        serializer.save(article_id=article_id)
| [
"nkg753@gmail.com"
] | nkg753@gmail.com |
eacb3a5640789a1a017ae13e3bf8c6ff13fa5828 | 0fd25f447daaee571c8bd0f7cb8124d2078bfd68 | /my_site/wsgi.py | f7400cf4d5fb8bf02d41e358bb047a5421c60243 | [] | no_license | tanvir07-ops/1st | c86f14050eb4523c474d3158fd90171ca8baaad3 | f4f4e81591180871266710bbb683f7f229664936 | refs/heads/master | 2023-05-05T22:52:24.440697 | 2021-05-21T12:44:06 | 2021-05-21T12:44:06 | 369,501,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
WSGI config for my_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
# Point Django at the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_site.settings')
# Wrap the Django WSGI application with dj-static's Cling (static-file
# handling middleware) so servers import `application` from this module.
application = Cling(get_wsgi_application())
| [
"tanvir9836859@gmail.com"
] | tanvir9836859@gmail.com |
e7d59612442402e55c3867c1c2936050b7fd829d | d1d92b2eead0113fb0b769198af7b8e3df3118c5 | /src/monte_carlo/__init__.py | 35e5e09b202eaf278d508d245ecfadd3f2fcbbbe | [] | no_license | MengqiYe/pymath | b68bd46dfb30f08d91d59a1752694da834785ba6 | c628f354161d1207f5b26db651c5286339199748 | refs/heads/main | 2023-06-05T00:33:26.396192 | 2021-06-24T02:57:36 | 2021-06-24T02:57:36 | 374,103,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created by Mengqi Ye on 2021/6/23
"""
| [
"mengqi.ye@anlogic.com"
] | mengqi.ye@anlogic.com |
22da572fd8043e924e2a441011cb46136ab73ff5 | e5ace852bfe90d6ceb9f6336bdfd90adb943af13 | /portainer_ce_api/models/portainer_user_access_policies.py | c042897051c557218848d8dcdc0d652a0efa668b | [] | no_license | SirumTME/portainer_ce_api | e88e897bdaef0ac64f66da6491941775aa2df1f0 | 815e95873e8e62144e12710ab756a5227db26de2 | refs/heads/master | 2023-06-20T17:45:32.030954 | 2021-07-27T21:27:11 | 2021-07-27T21:27:11 | 388,707,425 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,373 | py | # coding: utf-8
"""
PortainerCE API
Portainer API is an HTTP API served by Portainer. It is used by the Portainer UI and everything you can do with the UI can be done using the HTTP API. Examples are available at https://documentation.portainer.io/api/api-examples/ You can find out more about Portainer at [http://portainer.io](http://portainer.io) and get some support on [Slack](http://portainer.io/slack/). # Authentication Most of the API endpoints require to be authenticated as well as some level of authorization to be used. Portainer API uses JSON Web Token to manage authentication and thus requires you to provide a token in the **Authorization** header of each request with the **Bearer** authentication mechanism. Example: ``` Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6MSwidXNlcm5hbWUiOiJhZG1pbiIsInJvbGUiOjEsImV4cCI6MTQ5OTM3NjE1NH0.NJ6vE8FY1WG6jsRQzfMqeatJ4vh2TWAeeYfDhP71YEE ``` # Security Each API endpoint has an associated access policy, it is documented in the description of each endpoint. Different access policies are available: - Public access - Authenticated access - Restricted access - Administrator access ### Public access No authentication is required to access the endpoints with this access policy. ### Authenticated access Authentication is required to access the endpoints with this access policy. ### Restricted access Authentication is required to access the endpoints with this access policy. Extra-checks might be added to ensure access to the resource is granted. Returned data might also be filtered. ### Administrator access Authentication as well as an administrator role are required to access the endpoints with this access policy. # Execute Docker requests Portainer **DOES NOT** expose specific endpoints to manage your Docker resources (create a container, remove a volume, etc...). Instead, it acts as a reverse-proxy to the Docker HTTP API. This means that you can execute Docker requests **via** the Portainer HTTP API. 
To do so, you can use the `/endpoints/{id}/docker` Portainer API endpoint (which is not documented below due to Swagger limitations). This endpoint has a restricted access policy so you still need to be authenticated to be able to query this endpoint. Any query on this endpoint will be proxied to the Docker API of the associated endpoint (requests and responses objects are the same as documented in the Docker API). **NOTE**: You can find more information on how to query the Docker API in the [Docker official documentation](https://docs.docker.com/engine/api/v1.30/) as well as in [this Portainer example](https://documentation.portainer.io/api/api-examples/). # noqa: E501
OpenAPI spec version: 2.6.0
Contact: info@portainer.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from portainer_ce_api.configuration import Configuration
class PortainerUserAccessPolicies(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Both maps are empty: this generated model declares no properties of
    # its own; instances carry only the client configuration.
    swagger_types = {
    }

    attribute_map = {
    }

    def __init__(self, _configuration=None):  # noqa: E501
        """PortainerUserAccessPolicies - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared swagger properties (none here), recursively
        # converting nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch for dict-backed models.
        if issubclass(PortainerUserAccessPolicies, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PortainerUserAccessPolicies):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, PortainerUserAccessPolicies):
            return True

        return self.to_dict() != other.to_dict()
| [
"dennis.uhlemann@sirum.de"
] | dennis.uhlemann@sirum.de |
cec9fc1b515742352ed473eb914c808d0f83ae12 | 77cbea7449a6c8d3d20ba785edd800137ddaeed6 | /losses.py | 7f320cdd434c038b8627ecc034b757bc589eeaba | [
"Apache-2.0"
] | permissive | language-ml/multilingual_news | a9b8840639f315beffb9ed8b7e6998fc87c698a3 | 29c916aacb0c361e8e3169f5bf8d76766289a417 | refs/heads/master | 2023-03-17T02:59:51.158889 | 2020-09-18T16:36:56 | 2020-09-18T16:36:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,588 | py | import torch
import torch.nn as nn
def FocalLoss(logits, labels, inverse_normed_freqs):
    """Multi-label focal loss (gamma = 2) with fixed per-class weights.

    `pt` is the predicted probability of the true outcome for each
    (sample, class) entry; hard examples (small `pt`) are up-weighted by
    the (1 - pt)^2 modulating factor before the per-class weights
    `inverse_normed_freqs` are applied. Returns the mean over all entries.
    """
    targets = labels.type(torch.float32)
    p = torch.sigmoid(logits)
    # Probability assigned to the true outcome of each entry.
    pt = targets * p + (1 - targets) * (1 - p)
    modulator = (1 - pt) ** 2
    weighted = -modulator * torch.log(pt) * inverse_normed_freqs
    return weighted.mean()
def FocalLoss2(logits, labels, inverse_normed_freqs):
    """Focal loss (gamma = 2) with alpha-balancing per class.

    Positive entries are weighted by the class weight alpha and negative
    entries by (1 - alpha). Broadcasting of the (num_labels,) weight
    vector replaces the original repeat/view expansion — the elementwise
    values are identical.
    """
    targets = labels.type(torch.float32)
    p = torch.sigmoid(logits)
    pt = targets * p + (1 - targets) * (1 - p)
    focal_term = -((1 - pt) ** 2) * torch.log(pt)
    alpha = inverse_normed_freqs  # (num_labels,), broadcasts over the batch
    balance = targets * alpha + (1 - targets) * (1 - alpha)
    return (focal_term * balance).mean()
def FocalLoss3(logits, labels, weights_0):
    """Focal loss (gamma = 2) with distinct per-class outcome weights.

    `weights_0` weights the negative entries of each class; the positive
    weight is derived as 1/num_labels - weights_0, so the two outcome
    weights of a class always sum to 1/num_labels. Broadcasting of the
    (num_labels,) weight vectors replaces the original repeat/view
    expansion — the elementwise values are identical.
    """
    num_labels = labels.shape[1]  # should be 9
    weights_1 = 1 / num_labels - weights_0
    targets = labels.type(torch.float32)
    p = torch.sigmoid(logits)
    pt = targets * p + (1 - targets) * (1 - p)
    focal_term = -((1 - pt) ** 2) * torch.log(pt)
    outcome_weight = targets * weights_1 + (1 - targets) * weights_0
    return (focal_term * outcome_weight).mean()
def BCELoss(logits, labels, inverse_normed_freqs=None):
    """Plain multi-label binary cross-entropy on raw logits (mean-reduced).

    Both inputs are promoted to float64 before the loss is computed.
    `inverse_normed_freqs` is accepted only so every loss function in this
    module shares the same call signature; it is ignored here.

    Cleanup: removed the unused `num_labels` local and a dead
    commented-out line that referenced a nonexistent `self`.
    """
    loss_fct = nn.BCEWithLogitsLoss()
    return loss_fct(logits.double(), labels.double())
def SoftmaxFocalLoss(logits, labels, inverse_normed_freqs):
    """Softmax cross-entropy with focal modulation and per-class weights.

    Each gold class contributes its log-probability scaled by the focal
    factor (1 - p)^2 and its class weight; per-sample contributions are
    summed over classes, then averaged (negated) over the batch.
    """
    targets = labels.type(torch.float32)
    probs = torch.softmax(logits, dim=1)
    focal_log_probs = (1 - probs) ** 2 * torch.log(probs)
    gold_terms = targets * focal_log_probs * inverse_normed_freqs
    return torch.mean(-torch.sum(gold_terms, dim=1))
def SoftmaxLoss(logits, labels, inverse_normed_freqs):
    """Multi-hot softmax cross-entropy.

    Sums the log softmax probabilities of each sample's gold classes and
    averages the negated sums over the batch. `inverse_normed_freqs` is
    accepted for signature uniformity but unused here.
    """
    targets = labels.type(torch.float32)
    log_probs = torch.log(torch.softmax(logits, dim=1))
    per_sample = torch.sum(targets * log_probs, dim=1)
    return torch.mean(-per_sample)
def SoftmaxWeightedLoss(logits, labels, inverse_normed_freqs):
    """Multi-hot softmax cross-entropy with per-class weights.

    Same as SoftmaxLoss, but each gold class's log-probability is scaled
    by its entry in `inverse_normed_freqs` before the per-sample sum.
    """
    targets = labels.type(torch.float32)
    log_probs = torch.log(torch.softmax(logits, dim=1))
    per_sample = torch.sum(targets * log_probs * inverse_normed_freqs, dim=1)
    return torch.mean(-per_sample)
def NormalizedLogSoftmaxLoss(logits, labels, inverse_normed_freqs):
    """Softmax cross-entropy averaged over each sample's gold labels.

    Dividing every gold log-probability by the sample's gold-label count
    keeps multi-label samples from dominating single-label ones.
    `inverse_normed_freqs` is accepted for signature uniformity but unused.
    """
    targets = labels.type(torch.float32)
    log_probs = torch.log(torch.softmax(logits, dim=1))
    gold_count = targets.sum(dim=1).reshape((-1, 1))
    per_sample = torch.sum(targets * log_probs / gold_count, dim=1)
    return torch.mean(-per_sample)
def LogNormalizedSoftmaxLoss(logits, labels, inverse_normed_freqs):
    """Negative log of the mean softmax probability of the gold labels.

    For each sample, the probabilities assigned to its gold labels are
    averaged; the loss is the batch mean of ``-log`` of that average.

    ``inverse_normed_freqs`` is unused; kept for signature parity.
    """
    gold = labels.type(torch.float32)
    probs = nn.Softmax(dim=1)(logits)
    # Per-sample gold-label count, shaped (batch, 1) for broadcasting.
    gold_counts = gold.sum(dim=1).reshape((-1, 1))
    mean_gold_prob = torch.sum(gold * probs / gold_counts, dim=1)
    return torch.mean(-torch.log(mean_gold_prob))
| [
"feyzatoksal@gmail.com"
] | feyzatoksal@gmail.com |
679c6defaabd2bab83286ceb6db462b55e602843 | f949f48d1c28c85c4f1137ac8948b83fda7ebab3 | /ngraph/python/tests/test_onnx/test_backend.py | e508551b2c04189f4fff8f8ed988fbcb2f03acd7 | [
"Apache-2.0"
] | permissive | wchyu-code/openvino | a8eeb8c94bc7c92b12648b9dfefe0a9fd0c6bb39 | 49c9f2e6b07ab9ce6459c366e71401a0745ea46e | refs/heads/master | 2023-04-21T04:28:54.415082 | 2021-05-04T17:43:04 | 2021-05-04T17:43:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,113 | py | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging
import onnx.backend.test
from tests.test_onnx.utils.onnx_backend import OpenVinoTestBackend
from tests import (BACKEND_NAME,
xfail_issue_33488,
xfail_issue_33512,
xfail_issue_33535,
xfail_issue_33538,
xfail_issue_33581,
xfail_issue_33589,
xfail_issue_33593,
xfail_issue_33595,
xfail_issue_33596,
xfail_issue_33606,
xfail_issue_33633,
xfail_issue_33651,
xfail_issue_38091,
xfail_issue_38699,
xfail_issue_38701,
xfail_issue_38706,
xfail_issue_38708,
xfail_issue_38713,
xfail_issue_38722,
xfail_issue_38723,
xfail_issue_38724,
xfail_issue_38725,
xfail_issue_38732,
xfail_issue_38734,
xfail_issue_38735,
xfail_issue_39656,
xfail_issue_39658,
xfail_issue_39659,
xfail_issue_39662,
xfail_issue_43742,
xfail_issue_44839,
xfail_issue_44848,
xfail_issue_44851,
xfail_issue_44854,
xfail_issue_44858,
xfail_issue_44956,
xfail_issue_44957,
xfail_issue_44958,
xfail_issue_44965,
xfail_issue_44968,
xfail_issue_44976,
xfail_issue_45180,
xfail_issue_45344,
xfail_issue_46762,
xfail_issue_46765,
xfail_issue_47323,
xfail_issue_47337,
xfail_issue_48052,
xfail_issue_49207,
xfail_issue_49750,
xfail_issue_49752,
xfail_issue_49753,
xfail_issue_49754,
xfail_issue_52463)
def expect_fail(test_case_path, xfail):
    # type: (str, object) -> None
    """Mark a generated backend test as expected to fail.

    Args:
        test_case_path: dotted ``"<TestClassName>.<test_name>"`` path into
            the test-case classes injected into this module's globals().
        xfail: pytest xfail marker (decorator) applied to the test method.

    Fixes: the old type comment claimed a single ``str`` parameter; the
    local previously named ``module`` actually holds a test *class*; the
    not-found guard relied on ``hasattr(None, ...)`` being False.
    """
    module_name, test_name = test_case_path.split(".")
    test_class = globals().get(module_name)
    if test_class is not None and hasattr(test_class, test_name):
        xfail(getattr(test_class, test_name))
    else:
        logging.getLogger().warning("Could not mark test as XFAIL, not found: %s", test_case_path)
# Bind the backend wrapper to the device selected for this test session
# (BACKEND_NAME comes from the tests package configuration).
OpenVinoTestBackend.backend_name = BACKEND_NAME

# This is a pytest magic variable to load extra plugins
# Uncomment the line below to enable the ONNX compatibility report
# pytest_plugins = "onnx.backend.test.report",

# import all test cases at global scope to make them visible to python.unittest
backend_test = onnx.backend.test.BackendTest(OpenVinoTestBackend, __name__)

# Tests excluded entirely (not xfailed) before the test cases are generated.
skip_tests_general = [
    # Big model tests (see test_zoo_models.py):
    "test_bvlc_alexnet",
    "test_densenet121",
    "test_inception_v1",
    "test_inception_v2",
    "test_resnet50",
    "test_shufflenet",
    "test_squeezenet",
    "test_vgg19",
    "test_zfnet512",
]

for test in skip_tests_general:
    backend_test.exclude(test)

# NOTE: ALL backend_test.exclude CALLS MUST BE PERFORMED BEFORE THE CALL TO globals().update

# Placeholders for the test-case classes injected by globals().update() below;
# declaring them first keeps linters and readers aware of the names.
OnnxBackendNodeModelTest = None
OnnxBackendSimpleModelTest = None
OnnxBackendPyTorchOperatorModelTest = None
OnnxBackendPyTorchConvertedModelTest = None
globals().update(backend_test.enable_report().test_cases)
tests_expected_to_fail = [
(xfail_issue_49207,
"OnnxBackendNodeModelTest.test_rnn_seq_length_cpu",
"OnnxBackendNodeModelTest.test_simple_rnn_defaults_cpu",
"OnnxBackendNodeModelTest.test_simple_rnn_with_initial_bias_cpu",
"OnnxBackendNodeModelTest.test_gru_defaults_cpu",
"OnnxBackendNodeModelTest.test_gru_seq_length_cpu",
"OnnxBackendNodeModelTest.test_gru_with_initial_bias_cpu",
"OnnxBackendNodeModelTest.test_lstm_defaults_cpu",
"OnnxBackendNodeModelTest.test_lstm_with_initial_bias_cpu",
"OnnxBackendNodeModelTest.test_lstm_with_peepholes_cpu"),
(xfail_issue_49752,
"OnnxBackendNodeModelTest.test_constant_pad_cpu",
"OnnxBackendNodeModelTest.test_edge_pad_cpu",
"OnnxBackendNodeModelTest.test_reflect_pad_cpu"),
(xfail_issue_39656,
"OnnxBackendNodeModelTest.test_reshape_extended_dims_cpu",
"OnnxBackendNodeModelTest.test_reshape_negative_dim_cpu",
"OnnxBackendNodeModelTest.test_reshape_one_dim_cpu",
"OnnxBackendNodeModelTest.test_reshape_reduced_dims_cpu",
"OnnxBackendNodeModelTest.test_reshape_negative_extended_dims_cpu",
"OnnxBackendNodeModelTest.test_reshape_reordered_all_dims_cpu",
"OnnxBackendNodeModelTest.test_reshape_reordered_last_dims_cpu",
"OnnxBackendNodeModelTest.test_reshape_zero_and_negative_dim_cpu",
"OnnxBackendNodeModelTest.test_reshape_zero_dim_cpu"),
(xfail_issue_39658,
"OnnxBackendNodeModelTest.test_tile_cpu",
"OnnxBackendNodeModelTest.test_tile_precomputed_cpu"),
(xfail_issue_39659,
"OnnxBackendNodeModelTest.test_constantofshape_float_ones_cpu",
"OnnxBackendNodeModelTest.test_constantofshape_int_zeros_cpu",
"OnnxBackendNodeModelTest.test_constantofshape_int_shape_zero_cpu"),
(xfail_issue_45344,
"OnnxBackendNodeModelTest.test_nonmaxsuppression_center_point_box_format_cpu",
"OnnxBackendNodeModelTest.test_nonmaxsuppression_flipped_coordinates_cpu",
"OnnxBackendNodeModelTest.test_nonmaxsuppression_identical_boxes_cpu",
"OnnxBackendNodeModelTest.test_nonmaxsuppression_limit_output_size_cpu",
"OnnxBackendNodeModelTest.test_nonmaxsuppression_single_box_cpu",
"OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu",
"OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_cpu",
"OnnxBackendNodeModelTest.test_nonmaxsuppression_two_batches_cpu",
"OnnxBackendNodeModelTest.test_nonmaxsuppression_two_classes_cpu"),
(xfail_issue_39662,
"OnnxBackendNodeModelTest.test_nonmaxsuppression_two_classes_cpu",
"OnnxBackendNodeModelTest.test_scatter_elements_with_negative_indices_cpu",
"OnnxBackendNodeModelTest.test_gather_negative_indices_cpu"),
(xfail_issue_49753,
"OnnxBackendNodeModelTest.test_slice_default_axes_cpu"),
(xfail_issue_49754,
"OnnxBackendNodeModelTest.test_top_k_cpu",
"OnnxBackendNodeModelTest.test_top_k_negative_axis_cpu",
"OnnxBackendNodeModelTest.test_top_k_smallest_cpu"),
(xfail_issue_33633,
"OnnxBackendNodeModelTest.test_maxpool_2d_dilations_cpu"),
(xfail_issue_46762,
"OnnxBackendNodeModelTest.test_min_uint8_cpu",
"OnnxBackendNodeModelTest.test_min_uint16_cpu",
"OnnxBackendNodeModelTest.test_min_uint32_cpu",
"OnnxBackendNodeModelTest.test_min_uint64_cpu"),
(xfail_issue_46765,
"OnnxBackendNodeModelTest.test_argmax_negative_axis_keepdims_example_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmax_keepdims_example_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmax_no_keepdims_example_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_example_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmin_keepdims_example_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmin_no_keepdims_example_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmax_default_axis_example_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmax_default_axis_random_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmax_keepdims_random_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmax_negative_axis_keepdims_random_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmax_no_keepdims_random_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmin_default_axis_example_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmin_default_axis_random_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmin_keepdims_random_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_random_select_last_index_cpu",
"OnnxBackendNodeModelTest.test_argmin_no_keepdims_random_select_last_index_cpu"),
(xfail_issue_38091,
"OnnxBackendNodeModelTest.test_gather_negative_indices_cpu"),
(xfail_issue_52463,
"OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_singleton_broadcast_cpu"),
(xfail_issue_47323,
"OnnxBackendPyTorchOperatorModelTest.test_operator_add_broadcast_cpu",
"OnnxBackendPyTorchOperatorModelTest.test_operator_addconstant_cpu",
"OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_right_broadcast_cpu"),
(xfail_issue_38699,
"OnnxBackendSimpleModelTest.test_gradient_of_add_and_mul_cpu",
"OnnxBackendSimpleModelTest.test_gradient_of_add_cpu"),
(xfail_issue_33596,
"OnnxBackendSimpleModelTest.test_sequence_model5_cpu",
"OnnxBackendSimpleModelTest.test_sequence_model7_cpu",
"OnnxBackendSimpleModelTest.test_sequence_model1_cpu",
"OnnxBackendSimpleModelTest.test_sequence_model3_cpu",
"OnnxBackendSimpleModelTest.test_sequence_model6_cpu",
"OnnxBackendSimpleModelTest.test_sequence_model8_cpu",
"OnnxBackendSimpleModelTest.test_sequence_model4_cpu",
"OnnxBackendSimpleModelTest.test_sequence_model2_cpu"),
(xfail_issue_38701,
"OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_nochangecase_cpu",
"OnnxBackendSimpleModelTest.test_strnorm_model_nostopwords_nochangecase_cpu",
"OnnxBackendSimpleModelTest.test_strnorm_model_monday_empty_output_cpu",
"OnnxBackendSimpleModelTest.test_strnorm_model_monday_insensintive_upper_twodim_cpu",
"OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_lower_cpu",
"OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_upper_cpu",
"OnnxBackendNodeModelTest.test_strnormalizer_nostopwords_nochangecase_cpu",
"OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_nochangecase_cpu",
"OnnxBackendNodeModelTest.test_strnormalizer_export_monday_insensintive_upper_twodim_cpu",
"OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_lower_cpu",
"OnnxBackendNodeModelTest.test_strnormalizer_export_monday_empty_output_cpu",
"OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_upper_cpu",
"OnnxBackendNodeModelTest.test_cast_STRING_to_FLOAT_cpu",
"OnnxBackendNodeModelTest.test_cast_FLOAT_to_STRING_cpu"),
(xfail_issue_33595,
"OnnxBackendNodeModelTest.test_unique_not_sorted_without_axis_cpu",
"OnnxBackendNodeModelTest.test_unique_sorted_with_negative_axis_cpu",
"OnnxBackendNodeModelTest.test_unique_sorted_with_axis_cpu",
"OnnxBackendNodeModelTest.test_unique_sorted_with_axis_3d_cpu",
"OnnxBackendNodeModelTest.test_unique_sorted_without_axis_cpu"),
(xfail_issue_33651,
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip5_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip0_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_only_bigrams_skip0_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_skip5_cpu"),
(xfail_issue_38706,
"OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu"),
(xfail_issue_38708,
"OnnxBackendNodeModelTest.test_slice_default_steps_cpu",
"OnnxBackendNodeModelTest.test_slice_negative_axes_cpu",
"OnnxBackendNodeModelTest.test_slice_neg_steps_cpu",
"OnnxBackendNodeModelTest.test_slice_neg_cpu",
"OnnxBackendNodeModelTest.test_slice_cpu",
"OnnxBackendNodeModelTest.test_slice_end_out_of_bounds_cpu",
"OnnxBackendNodeModelTest.test_slice_start_out_of_bounds_cpu"),
(xfail_issue_33538,
"OnnxBackendNodeModelTest.test_scan_sum_cpu",
"OnnxBackendNodeModelTest.test_scan9_sum_cpu"),
(xfail_issue_49750,
"OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_align_corners_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_cpu",
"OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_cpu",
"OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_align_corners_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_scales_nearest_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_asymmetric_cpu",
"OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_align_corners_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_align_corners_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu",
"OnnxBackendNodeModelTest.test_resize_downsample_scales_nearest_cpu",
"OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_cpu",
"OnnxBackendNodeModelTest.test_resize_downsample_sizes_cubic_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_floor_align_corners_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu",
"OnnxBackendNodeModelTest.test_resize_upsample_sizes_cubic_cpu",
"OnnxBackendNodeModelTest.test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu",
"OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_cpu",
"OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu"),
(xfail_issue_33581,
"OnnxBackendNodeModelTest.test_gather_elements_negative_indices_cpu"),
(xfail_issue_38713,
"OnnxBackendNodeModelTest.test_momentum_cpu",
"OnnxBackendNodeModelTest.test_nesterov_momentum_cpu",
"OnnxBackendNodeModelTest.test_momentum_multiple_cpu"),
(xfail_issue_47337,
"OnnxBackendNodeModelTest.test_onehot_without_axis_cpu",
"OnnxBackendNodeModelTest.test_onehot_with_negative_axis_cpu",
"OnnxBackendNodeModelTest.test_onehot_with_axis_cpu",
"OnnxBackendNodeModelTest.test_onehot_negative_indices_cpu"),
(xfail_issue_33488,
"OnnxBackendNodeModelTest.test_maxunpool_export_with_output_shape_cpu",
"OnnxBackendNodeModelTest.test_maxunpool_export_without_output_shape_cpu"),
(xfail_issue_33589,
"OnnxBackendNodeModelTest.test_isnan_cpu",
"OnnxBackendNodeModelTest.test_isinf_positive_cpu",
"OnnxBackendNodeModelTest.test_isinf_negative_cpu",
"OnnxBackendNodeModelTest.test_isinf_cpu"),
(xfail_issue_33535,
"OnnxBackendNodeModelTest.test_dynamicquantizelinear_min_adjusted_cpu",
"OnnxBackendNodeModelTest.test_dynamicquantizelinear_cpu",
"OnnxBackendNodeModelTest.test_dynamicquantizelinear_max_adjusted_cpu"),
(xfail_issue_38722,
"OnnxBackendNodeModelTest.test_matmulinteger_cpu",
"OnnxBackendNodeModelTest.test_qlinearmatmul_2D_cpu",
"OnnxBackendNodeModelTest.test_qlinearmatmul_3D_cpu"),
(xfail_issue_38723,
"OnnxBackendNodeModelTest.test_qlinearconv_cpu"),
(xfail_issue_38724,
"OnnxBackendNodeModelTest.test_resize_tf_crop_and_resize_cpu"),
(xfail_issue_38725,
"OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_expanded_cpu",
"OnnxBackendNodeModelTest.test_range_float_type_positive_delta_expanded_cpu"),
(xfail_issue_33512,
"OnnxBackendNodeModelTest.test_einsum_transpose_cpu",
"OnnxBackendNodeModelTest.test_einsum_batch_diagonal_cpu",
"OnnxBackendNodeModelTest.test_einsum_batch_matmul_cpu",
"OnnxBackendNodeModelTest.test_einsum_sum_cpu",
"OnnxBackendNodeModelTest.test_einsum_inner_prod_cpu"),
(xfail_issue_33606,
"OnnxBackendNodeModelTest.test_det_2d_cpu",
"OnnxBackendNodeModelTest.test_det_nd_cpu"),
(xfail_issue_38732,
"OnnxBackendNodeModelTest.test_convinteger_with_padding_cpu",
"OnnxBackendNodeModelTest.test_basic_convinteger_cpu"),
(xfail_issue_38734,
"OnnxBackendNodeModelTest.test_adam_multiple_cpu",
"OnnxBackendNodeModelTest.test_adam_cpu"),
(xfail_issue_38735,
"OnnxBackendNodeModelTest.test_adagrad_multiple_cpu",
"OnnxBackendNodeModelTest.test_adagrad_cpu"),
(xfail_issue_48052,
"OnnxBackendNodeModelTest.test_training_dropout_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_mask_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_default_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_default_mask_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_mask_cpu"),
(xfail_issue_45180,
"OnnxBackendNodeModelTest.test_reduce_sum_do_not_keepdims_example_cpu",
"OnnxBackendNodeModelTest.test_reduce_sum_do_not_keepdims_random_cpu",
"OnnxBackendNodeModelTest.test_reduce_sum_keepdims_example_cpu",
"OnnxBackendNodeModelTest.test_reduce_sum_keepdims_random_cpu",
"OnnxBackendNodeModelTest.test_reduce_sum_negative_axes_keepdims_example_cpu"),
(xfail_issue_43742,
"OnnxBackendNodeModelTest.test_if_cpu",
"OnnxBackendNodeModelTest.test_if_seq_cpu"),
(xfail_issue_44839,
"OnnxBackendNodeModelTest.test_logsoftmax_axis_0_cpu",
"OnnxBackendNodeModelTest.test_logsoftmax_axis_1_cpu",
"OnnxBackendNodeModelTest.test_softmax_default_axis_cpu",
"OnnxBackendNodeModelTest.test_hardmax_axis_0_cpu",
"OnnxBackendNodeModelTest.test_hardmax_axis_1_cpu",
"OnnxBackendNodeModelTest.test_hardmax_default_axis_cpu",),
(xfail_issue_44848,
"OnnxBackendNodeModelTest.test_range_float_type_positive_delta_cpu",
"OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_cpu",),
(xfail_issue_44851,
"OnnxBackendNodeModelTest.test_expand_dim_changed_cpu",
"OnnxBackendNodeModelTest.test_expand_dim_unchanged_cpu",
"OnnxBackendSimpleModelTest.test_expand_shape_model1_cpu",
"OnnxBackendSimpleModelTest.test_expand_shape_model2_cpu",
"OnnxBackendSimpleModelTest.test_expand_shape_model3_cpu",
"OnnxBackendSimpleModelTest.test_expand_shape_model4_cpu",),
(xfail_issue_44854,
"OnnxBackendNodeModelTest.test_split_variable_parts_1d_cpu",
"OnnxBackendNodeModelTest.test_split_variable_parts_2d_cpu",
"OnnxBackendNodeModelTest.test_split_variable_parts_default_axis_cpu",),
(xfail_issue_44858,
"OnnxBackendNodeModelTest.test_unsqueeze_axis_0_cpu",
"OnnxBackendNodeModelTest.test_unsqueeze_axis_1_cpu",
"OnnxBackendNodeModelTest.test_unsqueeze_axis_2_cpu",
"OnnxBackendNodeModelTest.test_unsqueeze_negative_axes_cpu",
"OnnxBackendNodeModelTest.test_unsqueeze_three_axes_cpu",
"OnnxBackendNodeModelTest.test_unsqueeze_two_axes_cpu",
"OnnxBackendNodeModelTest.test_unsqueeze_unsorted_axes_cpu",),
(xfail_issue_44956,
"OnnxBackendNodeModelTest.test_loop11_cpu"),
(xfail_issue_44957,
"OnnxBackendNodeModelTest.test_compress_0_cpu",
"OnnxBackendNodeModelTest.test_compress_1_cpu",
"OnnxBackendNodeModelTest.test_compress_default_axis_cpu",
"OnnxBackendNodeModelTest.test_compress_negative_axis_cpu",
"OnnxBackendNodeModelTest.test_nonzero_example_cpu"),
(xfail_issue_44958,
"OnnxBackendNodeModelTest.test_upsample_nearest_cpu"),
(xfail_issue_44965,
"OnnxBackendNodeModelTest.test_loop13_seq_cpu",
"OnnxBackendNodeModelTest.test_sequence_insert_at_back_cpu",
"OnnxBackendNodeModelTest.test_sequence_insert_at_front_cpu",),
(xfail_issue_44968,
"OnnxBackendNodeModelTest.test_squeeze_cpu",
"OnnxBackendNodeModelTest.test_squeeze_negative_axes_cpu",),
(xfail_issue_44976,
"OnnxBackendNodeModelTest.test_quantizelinear_axis_cpu",
"OnnxBackendNodeModelTest.test_dynamicquantizelinear_min_adjusted_expanded_cpu",
"OnnxBackendNodeModelTest.test_dynamicquantizelinear_expanded_cpu",
"OnnxBackendNodeModelTest.test_dynamicquantizelinear_max_adjusted_expanded_cpu",
"OnnxBackendNodeModelTest.test_quantizelinear_cpu"),
(xfail_issue_33593,
"OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_strides_cpu",
"OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_pads_cpu",),
]
# Apply each xfail marker (first tuple element) to every test-case path
# listed after it.
for marker, *case_paths in tests_expected_to_fail:
    for case_path in case_paths:
        expect_fail(case_path, marker)
| [
"noreply@github.com"
] | wchyu-code.noreply@github.com |
ce19ce75c1e14a7d57dfdc6c4f0e7c2ce1c68b70 | 5d87ea3ea5e5ceccf88a3b4425547d02ede68199 | /code/restconf_lab_sdk/restconf_iosxe_main.py | 600929e5eb11fe600cffd543049939c1be0eacc2 | [] | no_license | imadhsissou/spauto | 8b08c39d711c3de67a58c9aa2170897d5e9548b7 | 497346dc9fb46cfc9e493c15ad2389791f156b7a | refs/heads/master | 2023-03-03T07:00:29.856695 | 2021-02-14T19:40:22 | 2021-02-14T19:40:22 | 273,014,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | import sys
import yaml
from restconf_iosxe_classes import TemplateXe, RequestXe, InterfaceXe, AutomateXe
# Module-level setup: configuration is read and session/template objects are
# created at import time.
# NOTE(review): both file handles are opened without a context manager and are
# never closed — consider `with open(...)` blocks.
credentials = open("credentials.yml", "r")
interfaces = open("interfaces.yml", "r")
template = TemplateXe()
# Only the first document entry ([0]) of each YAML file is used.
request = RequestXe(yaml.load(credentials, Loader=yaml.FullLoader)[0])
intf = InterfaceXe(yaml.load(interfaces, Loader=yaml.FullLoader)[0])
instance = AutomateXe()
def create(inst):
    # Run the "create" flow on *inst* with the module-level template, request
    # and interface objects (presumably an HTTP POST — confirm in AutomateXe).
    inst.autoCreate(template, request, intf)
def read(inst):
    # Run the "read" flow on *inst* with the module-level template, request
    # and interface objects (presumably an HTTP GET — confirm in AutomateXe).
    inst.autoRead(template, request, intf)
def delete(inst):
    # Run the "delete" flow on *inst* with the module-level template, request
    # and interface objects (presumably an HTTP DELETE — confirm in AutomateXe).
    inst.autoDelete(template, request, intf)
def autoInstance(crud):
    """Dispatch to create/read/delete (looked up by name in this module's
    globals) on the shared ``instance`` object."""
    action = globals()[crud]
    action(instance)
if __name__ == "__main__":
    # CLI entry point: the first argument selects the CRUD action
    # ("create", "read" or "delete").
    autoInstance(sys.argv[1])
| [
"imad.hsissou@edu.uca.ma"
] | imad.hsissou@edu.uca.ma |
b20ca1afe34ac874e9dd05a205c75d038f6ea7b0 | 1ae03694e6826c2c3224647024f66debcebd62dc | /matlab/+robust_andrews_kernel/balance_scale/metrics.py | d78e052129030ea9de8d3552ad6679f6790d35df | [
"Apache-2.0"
] | permissive | Joaggi/Robust-kernels-for-robust-location-estimation | 5ad7f8f3be9a08e5d4283e03e017e5e3b9b186b8 | 9db62273de90547c982d819dc45e66ac86bfcb58 | refs/heads/master | 2023-04-17T22:41:01.652426 | 2022-08-02T23:43:31 | 2022-08-02T23:43:31 | 27,465,913 | 3 | 1 | null | 2022-08-02T23:39:44 | 2014-12-03T02:49:24 | MATLAB | UTF-8 | Python | false | false | 686 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 16 15:50:05 2014
@author: Alejandro
"""
import scipy.io as sio
import os, sys
lib_path = os.path.abspath('G:/Dropbox/Universidad/Machine Learning')
sys.path.append(lib_path)
import numpy as np
import Algorithms.Python.Metrics as Metrics
import Robustes.Experiments.metrics_over_labels as metrics_over_labels
dictionary = {
'Kernelconvexnmf':0,
'KernelKMeans':0,
'Kernelseminmfnnls':0,
'Kernelseminmfrule':0,
'KMeans':0,
'NNMF':0,
'RMNMF':1
}
labels_name = 'balance-scale-labels'
metrics_over_labels.metrics('G:/Dropbox/Universidad/Machine Learning/Robustes/BalanceScale/',dictionary,labels_name) | [
"joaggi@gmail.com"
] | joaggi@gmail.com |
827fd9929d6986f9f351fd4511a7ae13ade1fb79 | aee2190d46259dcddb02d9951c8527c461f9247a | /rules/deeptools_post_processing.smk | 046830a94ea05f2caebe4c69ac4b3a9e85758753 | [
"MIT"
] | permissive | KoesGroup/Snakemake_ATAC_seq | b5b3bdf37685676550e7aacbfccd6ac9d61caaed | 056603108b92f7526d79394d2dd12adb40800781 | refs/heads/master | 2021-06-17T15:22:19.063536 | 2021-02-01T14:00:04 | 2021-02-01T14:00:04 | 155,196,992 | 0 | 1 | MIT | 2018-11-26T14:15:54 | 2018-10-29T11:00:30 | Python | UTF-8 | Python | false | false | 8,383 | smk | rule bamCoverage:
input:
RESULT_DIR + "mapped/{sample}.shifted.rmdup.sorted.bam"
output:
RESULT_DIR + "bigwig/{sample}.bw"
message:
"Converting {wildcards.sample} bam into bigwig file"
log:
RESULT_DIR + "logs/deeptools/{sample}_bamtobigwig.log"
params:
EFFECTIVEGENOMESIZE = str(config["bamCoverage"]["params"]["EFFECTIVEGENOMESIZE"]), #take argument separated as a list separated with a space
EXTENDREADS = str(config["bamCoverage"]["params"]["EXTENDREADS"]),
binSize = str(config['bamCoverage']["params"]['binSize']),
normalizeUsing = str(config['bamCoverage']["params"]['normalizeUsing']),
ignoreForNormalization = str(config['bamCoverage']["params"]['ignoreForNormalization']),
smoothLength = str(config['bamCoverage']["params"]['smoothLength'])
conda:
"../envs/deeptools.yaml"
shell:
"bamCoverage --bam {input} \
-o {output} \
--effectiveGenomeSize {params.EFFECTIVEGENOMESIZE} \
--extendReads {params.EXTENDREADS} \
--binSize {params.binSize} \
--smoothLength {params.smoothLength} \
--ignoreForNormalization {params.ignoreForNormalization} \
&>{log}"
# Summarize read coverage of all shifted/deduplicated BAMs into one numpy
# archive, consumed downstream by plotCorrelation.
# NOTE(review): unlike the other deepTools rules, this one declares no
# `conda:` environment — confirm whether that is intentional.
rule multiBamSummary:
    input:
        lambda wildcards: expand(RESULT_DIR + "mapped/{sample}.shifted.rmdup.sorted.bam", sample = SAMPLES)
    output:
        RESULT_DIR + "multiBamSummary/MATRIX.npz"
    message:
        "Computing the read coverage into a numpy array "
    threads: 10
    params:
        binSize = str(config['multiBamSummary']['binSize'])
    log:
        RESULT_DIR + "logs/deeptools/multibamsummary/MATRIX.log"
    shell:
        "multiBamSummary bins \
        --bamfiles {input} \
        --numberOfProcessors {threads}\
        --binSize {params.binSize} \
        --centerReads \
        --extendReads \
        -o {output} \
        2> {log}"
# Plot pairwise sample correlations from the multiBamSummary matrix.
rule plotCorrelation:
    input:
        RESULT_DIR + "multiBamSummary/MATRIX.npz"
    output:
        RESULT_DIR + "plotCorrelation/MATRIX.png"
    log:
        RESULT_DIR + "logs/deeptools/plotcorrelation/MATRIX.log"
    params:
        corMethod = str(config['plotCorrelation']['corMethod']),
        whatToPlot = str(config['plotCorrelation']['whatToPlot']),
        color = str(config['plotCorrelation']['color'])
    conda:
        "../envs/deeptools.yaml"
    shell:
        "plotCorrelation \
        --corData {input} \
        --corMethod {params.corMethod} \
        --whatToPlot {params.whatToPlot} \
        --skipZeros \
        --colorMap {params.color} \
        --plotFile {output} \
        --plotNumbers \
        2> {log}"
# Compute a signal matrix anchored at transcription start sites (TSS)
# for heatmap/profile plotting.
rule computeMatrix_ref:
    input:
        bigwig = RESULT_DIR + "bigwig/{sample}.bw",
        bed = WORKING_DIR + "gene_model.gtf"
    output:
        RESULT_DIR + "computematrix/{sample}.TSS.gz"
    threads: 10
    params:
        binSize = str(config['computeMatrix']['binSize']),
        afterRegion = str(config['computeMatrix']['afterRegion']),
        beforeRegion= str(config['computeMatrix']['beforeRegion'])
    conda:
        "../envs/deeptools.yaml"
    log:
        RESULT_DIR + "logs/deeptools/computematrix/{sample}.log"
    shell:
        "computeMatrix \
        reference-point \
        --referencePoint TSS \
        -S {input.bigwig} \
        -R {input.bed} \
        --afterRegionStartLength {params.afterRegion} \
        --beforeRegionStartLength {params.beforeRegion} \
        --numberOfProcessors {threads} \
        --binSize {params.binSize} \
        -o {output} \
        2> {log}"
# Compute a signal matrix over whole gene bodies (scale-regions mode).
# NOTE(review): shares the log file path with computeMatrix_ref for the same
# sample — the two rules may overwrite each other's logs; confirm.
rule computeMatrix_scale:
    input:
        bigwig = RESULT_DIR + "bigwig/{sample}.bw",
        bed = WORKING_DIR + "gene_model.gtf"
    output:
        RESULT_DIR + "computematrix/{sample}.scale-regions.gz"
    threads: 10
    params:
        binSize = str(config['computeMatrix']['binSize']),
        afterRegion = str(config['computeMatrix']['afterRegion']),
        beforeRegion= str(config['computeMatrix']['beforeRegion'])
    conda:
        "../envs/deeptools.yaml"
    log:
        RESULT_DIR + "logs/deeptools/computematrix/{sample}.log"
    shell:
        "computeMatrix \
        scale-regions \
        -S {input.bigwig} \
        -R {input.bed} \
        --afterRegionStartLength {params.afterRegion} \
        --beforeRegionStartLength {params.beforeRegion} \
        --numberOfProcessors {threads} \
        --binSize {params.binSize} \
        -o {output} \
        2> {log}"
# Draw a clustered heatmap from a computeMatrix output; {type} is either
# "TSS" or "scale-regions". Cluster assignments are also written to BED.
rule plotHeatmap:
    input:
        RESULT_DIR + "computematrix/{sample}.{type}.gz"
    output:
        RESULT_DIR + "heatmap/{sample}.{type}.pdf"
    params:
        kmeans = str(config['plotHeatmap']['kmeans']),
        color = str(config['plotHeatmap']['color']),
        plot = str(config['plotHeatmap']['plot']),
        cluster = RESULT_DIR + "heatmap/{sample}.bed"
    conda:
        "../envs/deeptools.yaml"
    log:
        RESULT_DIR + "logs/deeptools/plotHeatmap/{sample}.{type}.log"
    shell:
        "plotHeatmap \
        --matrixFile {input} \
        --outFileName {output} \
        --kmeans {params.kmeans} \
        --colorMap {params.color} \
        --legendLocation best \
        --outFileSortedRegions {params.cluster} \
        2> {log}"
# Fingerprint (cumulative read coverage) plot across all samples.
# NOTE(review): sample labels are hard-coded to six ATAC_* names while the
# input expands over SAMPLES — this breaks if SAMPLES changes; confirm.
rule plotFingerprint:
    input:
        expand(RESULT_DIR + "mapped/{sample}.shifted.rmdup.sorted.bam", sample = SAMPLES)
    output:
        pdf = RESULT_DIR + "plotFingerprint/Fingerplot.pdf"
    params:
        EXTENDREADS = str(config["bamCoverage"]["params"]["EXTENDREADS"]),
        binSize = str(config['bamCoverage']["params"]['binSize'])
    conda:
        "../envs/deeptools.yaml"
    shell:
        "plotFingerprint \
        -b {input}\
        --extendReads {params.EXTENDREADS} \
        --binSize {params.binSize} \
        --labels ATAC_1 ATAC_2 ATAC_3 ATAC_4 ATAC_5 ATAC_6 \
        --plotFile {output} "
# Draw an aggregate signal profile from a computeMatrix output; the k-means
# cluster assignments are written to a BED file alongside the PDF.
rule plotProfile:
    input:
        RESULT_DIR + "computematrix/{sample}.{type}.gz"
    output:
        pdf = RESULT_DIR + "plotProfile/{sample}.{type}.pdf",
        bed = RESULT_DIR + "plotProfile/{sample}.{type}.bed"
    params:
        kmeans = str(config['plotProfile']['kmeans']),
        startLabel = str(config['plotProfile']['startLabel']),
        endLabel = str(config['plotProfile']['endLabel'])
    conda:
        "../envs/deeptools.yaml"
    log:
        RESULT_DIR + "logs/deeptools/plotProfile/{sample}.{type}.log"
    shell:
        "plotProfile \
        --matrixFile {input} \
        --outFileName {output.pdf} \
        --outFileSortedRegions {output.bed} \
        --kmeans {params.kmeans} \
        --startLabel {params.startLabel} \
        --endLabel {params.endLabel} \
        2> {log}"
# Fragment-size distribution of the paired-end ATAC libraries.
# NOTE(review): maxFragmentLength is read from config['bamPEFragmentSize']['binSize']
# — the key name looks wrong (binSize vs. a fragment length); confirm.
# NOTE(review): labels are hard-coded to six ATAC_* names (see plotFingerprint).
rule bamPEFragmentSize:
    input: expand(RESULT_DIR + "mapped/{sample}.shifted.rmdup.sorted.bam", sample = SAMPLES)
    output:
        png = RESULT_DIR + "bamPEFragmentSize/fragmentSize.png",
        RawFragmentLengths = RESULT_DIR + "bamPEFragmentSize/raw",
        table = RESULT_DIR + "bamPEFragmentSize/fragmentSize.tab"
    conda:
        "../envs/deeptools.yaml"
    params:
        binSize = str(config['bamCoverage']["params"]['binSize']),
        #title = "Fragment size of PE ATAC-seq data",
        maxFragmentLength = str(config['bamPEFragmentSize']['binSize'])
    shell:
        "bamPEFragmentSize\
        --histogram {output.png} \
        --maxFragmentLength {params.maxFragmentLength} \
        --bamfiles {input} \
        --samplesLabel ATAC_1 ATAC_2 ATAC_3 ATAC_4 ATAC_5 ATAC_6 \
        --binSize {params.binSize} \
        --outRawFragmentLengths {output.RawFragmentLengths} \
        --table {output.table} "
# Per-sample genome coverage summary plot and raw counts table.
# NOTE(review): labels are hard-coded to six ATAC_* names (see plotFingerprint).
rule plotCoverage:
    input:
        expand(RESULT_DIR + "mapped/{sample}.shifted.rmdup.sorted.bam", sample = SAMPLES)
    output:
        png = RESULT_DIR + "plotCoverage/Coverage.png",
        table = RESULT_DIR + "plotCoverage/coverage.tab"
    conda:
        "../envs/deeptools.yaml"
    shell:
        "plotCoverage \
        --bamfiles {input} \
        --plotFile {output.png} \
        --labels ATAC_1 ATAC_2 ATAC_3 ATAC_4 ATAC_5 ATAC_6 \
        --minMappingQuality 10 \
        --outRawCounts {output.table} "
| [
"22910541+JihedC@users.noreply.github.com"
] | 22910541+JihedC@users.noreply.github.com |
61e5fc5eaa7fc10e263bab29a970e9c80238b546 | 939b3dd77c2a99f86d1dd86b31f43aebace1267c | /etl/test/test_schema_generator.py | 4206ce529bde8365dd86a2b922eebcab224e855c | [
"MIT"
] | permissive | reinvantveer/edna-ld | f886e32e41df351971dcb018a473610ec33c0bcc | 6522e8e3c7eb1b253b72ee60fb9f9885b98a1f5e | refs/heads/master | 2020-06-16T08:41:06.529627 | 2017-06-26T07:52:36 | 2017-06-26T07:52:36 | 75,120,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | import inspect
import unittest
import collections
import os
import sys
sys.path.insert(0, '..')
from lib import SchemaGenerator
from lib import CSVparser
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
def deep_sort(obj):
    """Return a copy of *obj* with every nested dict key-ordered and every
    nested list sorted, recursively; scalars pass through unchanged.
    """
    if isinstance(obj, dict):
        # Rebuild the mapping in sorted-key order, recursing into values.
        return {key: deep_sort(obj[key]) for key in sorted(obj)}
    if isinstance(obj, list):
        # Sort after recursing so nested containers compare canonically.
        return sorted(deep_sort(item) for item in obj)
    return obj
class TestSchemaGenerator(unittest.TestCase):
    """Checks that SchemaGenerator derives correct JSON schemas from CSV fixtures."""

    def test_numpy_float_error(self):
        # Regression test: columns parsed as numpy float64 must map to the
        # JSON-schema type "number", not a numpy-specific type.
        data = CSVparser.to_dict(current_dir + '/mockups/schema/numpy-float64/float64.csv')
        schema = SchemaGenerator.generate_schema(data)
        self.assertEqual(schema, {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'id': {'type': 'number'},
                    'put': {'type': 'number'}
                },
                'required': ['id', 'put']
            }
        })

    def test_schema_generator(self):
        # Happy path: two plain string columns.
        data = CSVparser.to_dict(current_dir + '/mockups/csv/test_simple.csv')
        schema = SchemaGenerator.generate_schema(data)
        self.assertEqual(schema, {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'column1': {'type': 'string'},
                    'column2': {'type': 'string'}
                },
                'required': ['column1', 'column2']
            }
        })

    def test_special_char_escaping(self):
        # Column names containing '.' (escaped as fullwidth U+FF0E) and '$'
        # must survive schema generation; deep_sort makes the comparison
        # insensitive to key/element ordering.
        data = CSVparser.to_dict(current_dir + '/mockups/schema/specialCharacterTest/test.csv')
        schema = SchemaGenerator.generate_schema(data)
        self.assertDictEqual(deep_sort(schema), deep_sort({
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'Data\uff0eColumn1': {'type': 'string'},
                    'Data$Column2': {'type': 'string'}
                },
                'required': ['Data\uff0eColumn1', 'Data$Column2']
            }
        }))
| [
"rein.van.t.veer@geodan.nl"
] | rein.van.t.veer@geodan.nl |
9e60a2a1d6acafa444b6e1933582ab0406c15e4e | fb80cc097a9ccdd2e4c32d5d1cff0296a0d9c2ab | /ProcessedData/ProcessedProto.py | ef083e5295a82291865da678a214e7766b628679 | [] | no_license | oscarcheng105/Nuwa_Robotics | dc1ccde4909bb20bfba339260c07fc788d1dba5b | 3a9da3e198edfc502d877ca9ab9066519f8e6aec | refs/heads/main | 2023-04-29T00:24:18.184071 | 2021-05-13T06:48:54 | 2021-05-13T06:48:54 | 365,201,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,745 | py | import requests,sys,bos_sample_conf
from baidubce.services.bos.bos_client import BosClient
from ProcessedParameters import table as tb
class Flurry:
    """Build a Flurry analytics report URL from CLI tokens, fetch it, and
    upload the raw response text to Baidu BOS.

    All query parts are read from the module-level ``info`` list, which is
    ``sys.argv`` (set at the bottom of this file): info[0] is the script
    name and info[1:] are table / timeGrain / dimension / metric / option
    tokens that are matched against the ``tb`` parameter table imported
    from ProcessedParameters.
    """
    def __init__(self):
        # Assembling the URL, downloading the report and uploading it all
        # happen eagerly in the constructor.
        self.url = 'https://api-metrics.flurry.com/public/v1/data'
        self.token='' # Flurry API token suffix, appended by setParams()
        self.bos_client = BosClient(bos_sample_conf.config)
        self.setParams()
        self.data = self.requestData()
        self.uploadBaidu()
    def setParams(self):
        """Append every URL component in the order the Flurry REST API
        expects: /table/timeGrain/dimensions?metrics=...&dateTime=...&sort=...
        followed by the token."""
        self.table = self.setTable()
        self.time = self.setTimeGrain()
        self.setDimensions()
        self.setMetrics()
        self.setDateRange()
        self.setSort()
        self.setFilter()
        self.setHavings()
        self.url+=self.token
        #print(self.url)
    def requestData(self):
        """Fetch the assembled URL and return the response body as text."""
        d = requests.get(self.url).text
        #for x in d['rows']:
        #print(x)
        return d
    def uploadBaidu(self):
        """Upload the downloaded report to the BOS bucket as a JSON object."""
        file = 'processedSample'+'.json'
        self.bos_client.put_object_from_string('nuwarobotics-flurry-log', file, self.data)
        print('processedSample'+' Upload Complete')
    def setTable(self):
        """Find the first CLI token that names a table in ``tb``; append it
        to the URL and return it (None if no token matches)."""
        for i in range(1,len(info)):
            for x in tb:
                if(x == info[i]):
                    self.url += '/'+info[i]
                    return info[i]
    def setTimeGrain(self):
        """Find the first CLI token that is a valid timeGrain for the chosen
        table; append it and return it (None if no token matches)."""
        for i in range(1,len(info)):
            for x in tb[self.table]['timeGrain']:
                if(x == info[i]):
                    self.url += '/'+info[i]
                    return info[i]
    def setDimensions(self):
        """Append dimension path segments.  Tokens containing '|' are
        sub-field selectors and are delegated to setFields()."""
        for i in range(1,len(info)):
            if(info[i].find('|')>-1):
                self.setFields(i)
            else:
                for x in tb[self.table]['dimension']:
                    if(x == info[i]):
                        self.url += '/'+info[i]
    def setFields(self,index):
        """Handle a 'dimension|subfield' token: append ';show=subfield' the
        first time a sub-field of that dimension appears, ',subfield' after.

        Only applies when the parent dimension token appeared *earlier* on
        the command line (hence the range(1, index) scan).
        """
        main = info[index][0:info[index].find('|')]
        sub = info[index][info[index].find('|')+1:len(info[index])]
        for i in range(1,index):
            if(info[i] == main):
                for x in tb[self.table]['dimension'][main]:
                    if(x == info[index]):
                        if(self.url.find(main+';show=')==-1):
                            self.url += ';show='+sub
                        else:
                            self.url += ','+sub
    def setMetrics(self):
        """Append '?metrics=a,b,...' for every CLI token that is a valid
        metric of the chosen table (ordered by the table definition)."""
        for x in tb[self.table]['metrics']:
            for i in range(1,len(info)):
                if(x == info[i]):
                    if (self.url.find('?metrics=')==-1):
                        self.url+='?metrics='+info[i]
                    else:
                        self.url += ','+info[i]
    def setDateRange(self):
        """Append '&dateTime=...' for the first token shaped like an ISO
        date range, e.g. '2019-01-01/2019-01-31' (checked positionally)."""
        for x in info:
            if (len(x)>=20):
                if(x[4]=='-' and x[7]=='-' and x[10]=='/' and
                   x[15]=='-' and x[18]=='-'):
                    self.url += '&dateTime='+x
                    break
    def setSort(self):
        """Append '&sort=field|...' for 'sort=' tokens whose field part was
        declared earlier on the command line; topN is handled first."""
        self.setTopN()
        for x in range(len(info)):
            if(info[x].find('sort=')>-1):
                field = info[x][info[x].find('=')+1:info[x].find('|')]
                main = info[x][info[x].find('=')+1:len(info[x])]
                for i in range(x):
                    if (info[i]==field):
                        if(self.url.find('&sort=')==-1):
                            self.url+='&sort='+main
                        else:
                            self.url+=','+main
    def setTopN(self):
        """Append '&topN=...' verbatim for any token containing 'topN='."""
        for x in info:
            if(x.find('topN=')>-1):
                self.url += '&'+x
    def setFilter(self):
        """Append '&filters=...' for 'filters=' tokens whose field prefix
        (text before the first '-') appeared earlier on the command line."""
        for x in range(len(info)):
            if(info[x].find('filters=')>-1):
                field = info[x][info[x].find('=')+1:info[x].find('-')]
                main = info[x][info[x].find('=')+1:len(info[x])]
                for i in range(x):
                    if (info[i]==field):
                        if(self.url.find('&filters=')==-1):
                            self.url+='&filters='+main
                        else:
                            self.url+=','+main
    def setHavings(self):
        """Append '&having=...' clauses; same matching rule as setFilter()."""
        for x in range(len(info)):
            if(info[x].find('having=')>-1):
                field = info[x][info[x].find('=')+1:info[x].find('-')]
                main = info[x][info[x].find('=')+1:len(info[x])]
                for i in range(x):
                    if (info[i]==field):
                        if(self.url.find('&having=')==-1):
                            self.url+='&having='+main
                        else:
                            self.url+=','+main
# Script entry point: the CLI arguments drive the whole query (table,
# timeGrain, dimensions, metrics, ...).  NOTE(review): this runs on import
# as well, because there is no `if __name__ == "__main__"` guard.
info = sys.argv
f = Flurry()
| [
"noreply@github.com"
] | oscarcheng105.noreply@github.com |
b21d08e4746f6bfd20a94d26b8969d6bc218791d | cf75b1b565251a75b8ab4ae6e48bdbcad0fbda84 | /why.py | ab6cdd80a4f0df2c23b78983c6b76c52dd695158 | [] | no_license | Shadesoul/i-know-nothing | f5db6115f173328efab29e0aee68272cbf5e91ea | 42aa1c46569fee4d58f29e4d67b92a5a704817b3 | refs/heads/master | 2021-09-05T16:03:44.266167 | 2018-01-29T14:33:18 | 2018-01-29T14:33:18 | 100,176,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | def car(*nums):
sum=0
for n in nums:
sum=sum+n*n
max=sum/n
return max # 为什么计算car(2,3)等会出现奇怪的结果?它是怎么运算的?遍历吗?
def car(*nums):
    """Fold each argument into acc = acc * 2 / n, starting from acc = 1.

    This answers the original (Chinese) comment's question "why is
    car(3, 3) == 0.444444?": the loop applies the update once per
    argument, so car(3, 3) == 1 * 2/3 * 2/3 == 4/9.

    Local renamed from `sum`, which shadowed the builtin.
    """
    acc = 1
    for n in nums:
        acc = acc * 2 / n
    return acc
# encoding=utf-8
def total(a=5,*numbers,**phonebook):
    """Print the first positional arg, extra positionals, and keyword pairs.

    Returns None (implicitly), so the print(total(...)) call below prints
    the phonebook lines followed by "None".
    """
    print('a',a)
    for i in numbers:
        print('i',i)
    # .items() yields (key, value) tuples; iterating the dict directly
    # would yield only the keys — that is why the original author found
    # that the other forms "went wrong" here.
    for x in phonebook.items():
        print(x)
print(total(10,1,2,3,jack=1123,john=2231,inge=1570))
| [
"ruiferest@gmail.com"
] | ruiferest@gmail.com |
05805b1613c6777f828a66f4077f8f0c39bcb3dd | d7bef81a53f18ee31a9add652edc564ee23a2dd4 | /models_automl.py | 45427df053b8e60e700b89b2a1943fba5b63eb36 | [] | no_license | pickyilkis/mandelbot-automl | 9ed78ba45dcdbc18080b2936686ab505e5e07e22 | ba7c13f27996459dc53959e876bb19787a1dcb09 | refs/heads/master | 2020-07-09T00:10:38.205060 | 2019-08-22T19:47:08 | 2019-08-22T19:47:08 | 203,818,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,671 | py | # Modify only these functions for your model
import numpy as np
import pandas as pd
from roarbot.integration.goroar.transform import q_goroar_roarbot, p_roarbot_goroar
from roarbot.models.univariate.automl import AUTOML
from roarbot.datasource import univariate
from roarbot.models import RID, QuestionFactory, Question, QuestionRequest
from datetime import timedelta
from dateutil import parser
## Hyperparamters (Participant can change this to tune their model)
max_size = 100 ## The max size of collected data points
min_size = 10 ## you have to collect more than min_size data point to fit parameters
def on_question(question):
global models
contestID = question["responderInfo"]["contestID"]
test_data = question['units'][0]['value']
print('ContestID:', contestID)
print('Features:', test_data)
try:
roarbot_question = q_goroar_roarbot(question)
model = models[contestID]
print('model is ready')
roarbot_prediction = model.predict(roarbot_question)
print('roarbot prediction is ready')
goroar_prediction = p_roarbot_goroar(roarbot_prediction, question)
print(type(goroar_prediction))
print(goroar_prediction)
print('The prediction is made using mboa!')
except Exception as e:
print('something is wrong')
print(e)
goroar_prediction = question
print('The prediction is the current value!')
print('Prediction:', goroar_prediction['units'][0]['value'])
return goroar_prediction
def on_truth(truth_time=None, values=None, contestID=None, **kwargs):
    """Record a ground-truth observation and (re)train the contest's model.

    Appends (truth_time, values) to the per-contest history DataFrame,
    infers the observation frequency from consecutive timestamps, trims
    the history to `max_size` rows, and once more than `min_size` rows
    exist, fits an AUTOML model and stores it in the module-level `models`
    dict used by on_question().

    Args:
        truth_time: timestamp of the observation (used as the index).
        values: observed quantity for that timestamp.
        contestID: key identifying which contest the truth belongs to.
    """
    global model_gens, models, datas, freqs
    # Lazily create the module-level state dicts on first call.  The
    # original used bare `try: name / except:`; an explicit existence
    # check expresses the intent without swallowing unrelated errors.
    if 'model_gens' not in globals():
        model_gens = {}
    if 'models' not in globals():
        models = {}
    if 'datas' not in globals():
        datas = {}
    if 'freqs' not in globals():
        freqs = {}
    if contestID not in datas:
        datas[contestID] = pd.DataFrame(columns=['value'])
    datas[contestID].loc[truth_time] = values
    if datas[contestID].shape[0] == 2:
        # First interval seen: its magnitude becomes the initial frequency.
        inferred_freq = datas[contestID].index[-1] - datas[contestID].index[-2]
        freqs[contestID] = abs(inferred_freq)
        print('initialized frequency')
    elif datas[contestID].shape[0] > 2:
        # Afterwards, keep the smallest positive interval observed so far.
        inferred_freq = datas[contestID].index[-1] - datas[contestID].index[-2]
        if inferred_freq > timedelta(0):
            freqs[contestID] = min(freqs[contestID], inferred_freq)
    if contestID in freqs:
        # Guarded: with a single row no frequency exists yet.  The original
        # raised KeyError here on the very first truth for a contest.
        print('frequency is: ', freqs[contestID])
        print('type of freq is: ', type(freqs[contestID]))
    print('shape of data is: ', datas[contestID].shape)
    # Bound memory: keep only the most recent max_size observations.
    if datas[contestID].shape[0] > max_size:
        datas[contestID] = datas[contestID][-max_size:]
    if datas[contestID].shape[0] > min_size:
        print('train using m4 benchmark !!!!!')
        # Regularize the series onto the inferred frequency grid (forward
        # filling gaps) before handing it to the model.
        datas[contestID] = datas[contestID].sort_index()
        datas[contestID] = datas[contestID].asfreq(freqs[contestID], method='ffill')
        print('data index after adj: ', datas[contestID].index)
        rid = RID.make(contestID)
        ts = univariate.Univariate(datas[contestID]['value'], rid=rid)
        print('start training')
        model = AUTOML(ts=ts, n_input=5)
        model_gens[contestID] = model
        models[contestID] = model.fit(truth_time)
        print('finish training !!!!!')
    print('ContestID:', contestID, 'Truth_time:', truth_time, 'Quantity:', values)
| [
"hughywang0710@gmail.com"
] | hughywang0710@gmail.com |
0bd72edf9ce1e45ebee7cee32eb1deadb1a1c728 | a618d73b2683f209e9bf4f7b45704c33cbfb99cb | /svm_rfe.py | 76e8cc5082795b79ceabd093e287b82406dbb4c7 | [] | no_license | stxupengyu/SVM-RFE | 140acaebce2fbc36a32d3c345ec6b887b69d0264 | f9ad5140055d0123ade35222777aa7938329e86c | refs/heads/master | 2021-03-04T09:04:07.103806 | 2020-03-09T11:51:44 | 2020-03-09T11:51:44 | 246,022,274 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,036 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import RFE
def _selected_indices(support):
    """Return the column indices RFE kept (positions where support is True)."""
    return [index for index, keep in enumerate(support) if keep]


train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
X_train = train[['HMM', 'SSD', 'OGS']].values
y_train = train[['Target']].values.ravel()
X_test = test[['HMM', 'SSD', 'OGS']].values
y_test = test[['Target']].values.ravel()

# (a) Baseline: linear SVM on all three features.
estimator = svm.SVC(kernel="linear")
estimator.fit(X_train, y_train)
a_result = estimator.predict(X_test)
print("accuracy: ", accuracy_score(y_test, a_result))
print("weights of HMM, SSD, OGS: ", estimator.coef_)

# (b)1 RFE down to 2 features, then refit on the surviving columns.
selector = RFE(estimator, 2, step=1)
selector = selector.fit(X_train, y_train)
summary = _selected_indices(selector.support_)
X_train1 = X_train[:, summary]
X_test1 = X_test[:, summary]
estimator.fit(X_train1, y_train)
a_result = estimator.predict(X_test1)
print("accuracy: ", accuracy_score(y_test, a_result))
print("seleted variable", selector.support_)
print("weights of HMM, SSD, OGS: ", estimator.coef_)

# (b)2 RFE down to a single feature, refit again.
selector = RFE(estimator, 1, step=1)
selector = selector.fit(X_train, y_train)
summary = _selected_indices(selector.support_)
X_train2 = X_train[:, summary]
X_test2 = X_test[:, summary]
estimator.fit(X_train2, y_train)
a_result = estimator.predict(X_test2)
print("accuracy: ", accuracy_score(y_test, a_result))
print("seleted variable", selector.support_)
print("weights of HMM, SSD, OGS: ", estimator.coef_)

# Visualize: the single surviving feature on a line, and the 2-feature
# test set as a 2-d scatter, both colored by the true label.
plt.scatter(X_test2, np.zeros(len(X_test2)), marker='o', c=y_test)
plt.scatter(X_test1[:, 0], X_test1[:, 1], marker='o', c=y_test)
| [
"noreply@github.com"
] | stxupengyu.noreply@github.com |
6dbd412c99a89c09fb193118eabb468c74c0f109 | d0ab2de07ffbb4e3a7c4bcb8950d2473c117d0b8 | /blog/urls.py | 518c3c93f32dd1c456401869b63f4e6aad25f779 | [] | no_license | atom2k17/firstblog | 16892ece58056b6f30fb3a9aeb76b11d7fbe0acb | d91f16e67dc0e33f4b37bc618422c205e7bd5c1e | refs/heads/master | 2023-06-15T08:49:38.470280 | 2021-07-13T17:36:29 | 2021-07-13T17:36:29 | 385,680,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | from django.urls import path
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView
)
from . import views
# Blog URL routes, mapping paths to the class-based views imported above.
urlpatterns = [
    path('', PostListView.as_view(), name='blog-home'),
    # NOTE(review): every other route ends with a trailing slash; this one
    # does not — confirm whether 'user/<str:username>/' was intended.
    path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
    path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
    path('post/new/', PostCreateView.as_view(), name='post-create'),
    path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
    path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
    path('about/', views.about, name='blog-about')
]
"noreply@github.com"
] | atom2k17.noreply@github.com |
55d10b2adb426815413cdb0733266d89da62869f | 03761c90fe87e2af7f692e9d13b592702db63725 | /Programming_Assignment1/Source/kMeans.py | e90c3e2b397d672085c3b998b320cb7729835ac1 | [] | no_license | prathik-sannecy/Machine_Learning | 070ad1120f6a299a5e846c1886a28452d06ab33c | 0b5e2e0bd50beba1d56e76306d6caa3af88fd0f0 | refs/heads/master | 2022-04-11T07:25:52.225262 | 2020-03-24T05:49:06 | 2020-03-24T05:49:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,403 | py | # This file implements kMeans algorithm on a set of 2d data points (GMM_dataset 546.txt in this case).
# The number of clusters (k), can be set by defining the k_values parameter below.
# If many k_values are chosen, the one with the highest k is plotted.
# Written by Prathik Sannecy
# 2/15/2020
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
import random
file_name = r"../Input_Files/GMM_dataset 546.txt" # file with the data set
num_runs = 10 # random restarts per k; the run with the least error wins
k_values = [6] # which k values (cluster counts) to run kmeans on
def get_distance(p0, p1):
    """Return the Euclidean distance between two 2d points."""
    dx = p0[0] - p1[0]
    dy = p0[1] - p1[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def calc_average_cluster(points, default):
    """Return the mean [x, y] of a cluster of 2d points.

    Returns `default` when the cluster is empty, so callers keep a centroid
    defined even after a cluster loses all of its points.  Uses truthiness
    instead of `points == []`, which failed to catch empty non-list
    iterables (and then divided by zero).
    """
    if not points:
        return default
    xs, ys = zip(*points)
    count = len(points)
    return [sum(xs) / count, sum(ys) / count]
def classify_point(p, cluster_averages):
    """Classify a point by its nearest cluster centroid.

    Returns the index into cluster_averages of the closest centroid; ties
    go to the lowest index (min() returns the first minimum, matching the
    original list.index(min(...)) behavior).
    """
    return min(range(len(cluster_averages)),
               key=lambda idx: get_distance(p, cluster_averages[idx]))
def create_new_clusters(data_set, cluster_averages):
    """Assign every point to its nearest centroid.

    Returns one list of points per centroid, in the same order as
    cluster_averages; clusters may be empty.
    """
    clusters = [[] for _ in cluster_averages]
    for point in data_set:
        clusters[classify_point(point, cluster_averages)].append(point)
    return clusters
def get_new_averages(clusters):
    """Returns the centroids/averages of each cluster of data points"""
    # For an empty cluster, calc_average_cluster falls back to the supplied
    # default: the *first point of the first non-empty cluster*.  This keeps
    # every centroid defined even when a cluster lost all of its points.
    # Raises IndexError if every cluster is empty (no default available).
    return [calc_average_cluster(cluster, [nonempty_cluster for nonempty_cluster in clusters if nonempty_cluster != []][0][0]) for cluster in clusters]
def get_data_set(data_set_file):
    """Parse a whitespace-separated text file of 2d points.

    Decimal commas (e.g. "1,5") are normalized to decimal points before
    conversion, so both "1.5" and "1,5" are accepted.  Returns a list of
    [x, y] float lists, one per line.
    """
    with open(data_set_file) as source:
        return [[float(token.replace(',', '.')) for token in line.split()]
                for line in source]
def initialize_centroids(data_set, k):
    """Pick k initial centroids at random from the data set.

    Uses random.sample so the k starting centroids are k *distinct* data
    points.  The original int(random.random() * len(data_set)) indexing
    could pick the same point more than once, which tends to produce
    duplicate centroids and empty clusters.  Requires k <= len(data_set).
    """
    return random.sample(data_set, k)
def run_kmeans(data_set, initial_centroids, r=None):
    """Runs kmeans using k clusters for r iterations (or until centroids don't change)
    Returns the clusters, and the cluster averages"""
    old_centroids = initial_centroids[:]
    # Run the iteration at least once
    clusters = create_new_clusters(data_set, old_centroids)
    centroids = get_new_averages(clusters)
    i = 1  # one assignment/update pass has already happened
    while centroids != old_centroids: # Run until the centroids don't change
        # If a maximum iteration count r was given, stop once it is reached.
        # The check happens before the update, so at most r passes run.
        if r != None:
            if i>= r:
                break
        old_centroids = centroids[:]
        clusters = create_new_clusters(data_set, centroids)
        centroids = get_new_averages(clusters)
        i += 1
    return clusters, centroids
def get_error_squared(p0, p1):
    """Return the squared Euclidean distance between two 2d points."""
    dx = p0[0] - p1[0]
    dy = p0[1] - p1[1]
    return dx * dx + dy * dy
def calc_clusters_error(clusters, centroids):
    """Return the total squared error of a clustering.

    Sums, over every cluster, the squared distance of each member point
    to that cluster's centroid.
    """
    total = 0
    for index in range(len(centroids)):
        centroid = centroids[index]
        total += sum(get_error_squared(point, centroid) for point in clusters[index])
    return total
def get_best_kmeans(data_set, r, k):
    """Run k-means with k clusters over r random restarts.

    Returns the clustering with the smallest total squared error across
    the r runs, together with that error.  Removed the unused
    runs_centroids/runs_clusters accumulators from the original and
    renamed the misspelled best_clusturing local.
    """
    best_clusters = None
    min_run_error = float("inf")
    for _ in range(r):
        initial_centroids = initialize_centroids(data_set, k)
        clusters, centroids = run_kmeans(data_set, initial_centroids)
        run_error = calc_clusters_error(clusters, centroids)
        # Keep the best (lowest-error) run seen so far.
        if run_error < min_run_error:
            min_run_error = run_error
            best_clusters = clusters
    return best_clusters, min_run_error
def main():
    """Load the data set, run k-means for each configured k, and plot the
    clustering that achieved the lowest total squared error.

    Note: error monotonically decreases as k grows, so when several
    k_values are given the largest k is normally the one plotted (as the
    file header comment says).
    """
    global num_runs
    global k_values
    global file_name
    data_set = get_data_set(file_name)
    # Run the kmeans algorithms with different values of k
    min_kmean_error = float("inf")
    for k in k_values:
        clusters, error = get_best_kmeans(data_set, num_runs, k)
        print("number of clusters: "+ str(k) + "; clustering error:" + str(error))
        # Keep track of which value of k gives the smallest error
        if error < min_kmean_error:
            best_kmeans_clustering_model = clusters
            min_kmean_error = error
    # Display the kMeans clusters in a scatter plot, one color per cluster
    # (color list is repeated so more than 5 clusters still get a color).
    colors = 10 * ["r", "g", "c", "b", "k"]
    for cluster_index in range(len(best_kmeans_clustering_model)):
        color = colors[cluster_index]
        for features in best_kmeans_clustering_model[cluster_index]:
            plt.scatter(features[0], features[1], color=color, s=30)
    plt.show()
if __name__ == "__main__":  # run the demo only when executed as a script
    main()
"prathik@pdx.edu"
] | prathik@pdx.edu |
35957f1b9e531790187aaca8d3b755f96324c069 | 73055992dfd8234fec8d4509974e16c8af2fdbb9 | /manage.py | 308803c16b253cd15c7aa02f674087e804b6e59c | [] | no_license | junyangchen/bixplorer | f98b2ddf2a54278fdb190544620ecc4b23b4750a | 70ba798df2337c268b063f061f88e219ea094188 | refs/heads/master | 2021-01-21T23:03:44.697179 | 2014-11-24T20:22:33 | 2014-11-24T20:22:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings, then delegate to the CLI (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bixplorer.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"junyang@vt.edu"
] | junyang@vt.edu |
b6ceef65c9f884380e3f2bea1ee5ea6256f35044 | a97af2ae2b132f4a467c00135e00e7434b722403 | /prac_08/silver_service_taxi.py | ee6817d628603db82f0a81421b61346edcd6ae92 | [] | no_license | daniel-bush/Practicals_CP1404 | a4dec44603d751bc37efcdffb0235c3d757fe777 | 4919d79f86a87969cec38e40cf34dc0f599390e0 | refs/heads/master | 2023-01-04T11:03:46.042788 | 2020-10-28T13:22:21 | 2020-10-28T13:22:21 | 290,442,213 | 0 | 0 | null | 2020-09-03T09:30:54 | 2020-08-26T08:37:46 | Python | UTF-8 | Python | false | false | 612 | py | """Silver Service taxi class."""
from prac_08.taxi import Taxi
class SilverServiceTaxi(Taxi):
"""Represent a silver service taxi."""
flagfall = 4.50
def __init__(self, name, fuel, fanciness):
"""Initialise as SilverServiceTaxi."""
super().__init__(name, fuel)
self.fanciness = fanciness
self.price_per_km *= fanciness
def __str__(self):
return "{} plus flagfall of ${:.2f}".format(super().__str__(), self.flagfall)
def get_fare(self):
"""Return the price for the silver service trip."""
return super().get_fare() + self.flagfall
| [
"Themepw999GH1987"
] | Themepw999GH1987 |
2277163fb77406568bbbbfd4c43fbc3d8f8704ff | 583db8851c609f03f722884557cfc67de0ce564e | /pysmapi/interfaces/Event_Stream_Add.py | 8592834aefd487a4a877f75155c9b4f73ace2267 | [
"Apache-2.0"
] | permissive | lllucius/pysmapi | ab0b4409bfda6a61dab7805e2033d71d09a96493 | c0d802edb58e835e4d48cb9c28ccfccfe5b5c686 | refs/heads/master | 2020-04-20T18:07:46.699611 | 2019-06-25T04:27:41 | 2019-06-25T04:27:41 | 169,009,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py |
# Copyright 2018-2019 Leland Lucius
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from pysmapi.smapi import *
class Event_Stream_Add(Request):
    """SMAPI Event_Stream_Add request.

    Wraps the single `event_info` request parameter and serializes it
    for transmission via pack().
    """
    def __init__(self,
                 event_info = "",
                 **kwargs):
        super(Event_Stream_Add, self).__init__(**kwargs)
        # Request parameters
        self._event_info = event_info
    @property
    def event_info(self):
        """Event data to add to the stream (string)."""
        return self._event_info
    @event_info.setter
    def event_info(self, value):
        self._event_info = value
    def pack(self, **kwargs):
        """Serialize the request parameters to bytes for the wire."""
        # event_info (string,1-maxlength,charNA)
        buf = s2b(self._event_info)
        return buf
| [
"github@homerow.net"
] | github@homerow.net |
0fc6dd9dbb4cfac2a716114c86f891a429c69a81 | c960fdb8b770ab841b2f24db7ae2f6571efc1b64 | /remoteControl/reHaproxy.py | e838c009aac9c2c64b237f7b8c46d144310d5a91 | [] | no_license | ljw-hit/webConf | 2f7dcb3ee0cf87b8f88758bfbf40b59f5ee0ad8b | f670f4f72ebba16309a1f33d86a85b03519135d0 | refs/heads/master | 2022-04-06T04:04:56.082777 | 2020-01-05T09:41:44 | 2020-01-05T09:41:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,005 | py | import os
import paramiko
from manageCFG import transmitCfg
def reStartHaproxy(SSH):
    """Restart the haproxy service on a remote host over SSH.

    Args:
        SSH: dict with "host", "port", "username", "password" keys
             (see transmitCfg.getSSH in __main__ below).
    """
    cmd = "systemctl restart haproxy"
    client = paramiko.SSHClient()
    # Auto-accept unknown host keys (convenient, but skips host verification).
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
    host = SSH["host"]
    port = SSH["port"]
    username = SSH["username"]
    password = SSH["password"]
    client.connect(host, port, username, password)
    try:
        # NOTE(review): timeout is in seconds, so 1000 is ~17 minutes —
        # confirm that is intended.
        stdin, stdout, stderr = client.exec_command(cmd,timeout=1000)
        result = stdout.read().decode('utf-8')
        err = stderr.read().decode('utf-8')
        print(result)
        print(err)
    except Exception as e:
        print(e)
        # Runtime message (Chinese): "failed to restart haproxy".
        print("重启Haproxy失败")
    client.close()
# Measure the latency from the remote server to the VPS.
def getDelay(SSH,ip):
    """Ping `ip` from the remote host described by SSH and return the
    average round-trip time as a string like "23.4ms" (None on failure).
    """
    cmd = "ping -c 5 "+ip
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
    host = SSH["host"]
    port = SSH["port"]
    username = SSH["username"]
    password = SSH["password"]
    client.connect(host, port, username, password)
    try:
        stdin, stdout, stderr = client.exec_command(cmd,timeout=4000)
        result = stdout.read().decode('utf-8')
        lines = result.splitlines()
        # Parse the Linux ping summary line "rtt min/avg/max/mdev = a/b/c/d ms"
        # and take the second (avg) field.  Assumes that output format —
        # a different ping/locale would break this parsing.
        delay = lines[-1].split("=")[1]
        delay = delay.split("/")[1]
        print(delay+"ms")
        return delay+"ms"
    except Exception as e:
        print(e)
        print("getDelay cmd error")
    # Only reached on failure: the success path returns before this close.
    client.close()
# Measure the latency from the local machine to the remote server.
def getLocalDelay(ip):
    """Ping `ip` from the local machine and return the average RTT string.

    Uses "ping -n 5", the Windows count flag, so this branch assumes it
    runs on a Windows host (the remote branch above uses "-c").
    """
    cmd = "ping -n 5 "+ip
    try:
        result = os.popen(cmd)
        resultStr = result.readlines()
        # Parse the Windows ping summary line; the average is the last
        # comma-separated "xxx = Nms" field.  Locale-dependent — verify.
        mess = resultStr[-1].split(",")
        delay = mess[-1].strip()
        delay = delay.split("=")[1].strip()
        print(delay)
        return delay
    except:
        # NOTE(review): bare except hides the real failure; consider
        # narrowing to (IndexError, OSError) and logging the exception.
        print("getDelay cmd error")
if __name__ == '__main__':
    # SECURITY(review): hard-coded host/user/password committed to the
    # repository — these credentials should be rotated and moved to config.
    SSH = transmitCfg.getSSH("47.75.200.81","root","hitcs2019!")
    #reStartHaproxy(SSH)
    getDelay(SSH,"107.173.251.191")
    getLocalDelay("47.75.200.81")
    getLocalDelay("107.173.251.191")
"1360769517@qq.com"
] | 1360769517@qq.com |
00d6156b471f3af1e0d5a9fe6d9d9dc6b7a3d6ab | c4a04bea6bd33cff4cca846e5fa6e11e232f479f | /selfdev/asgi.py | 0df131c5aeb3c523661ad6b6523cce7645e947a4 | [] | no_license | polarfox42/tdd_project | a6197823c340e0223013d67fae3899562c4274c6 | c43531811bcc8665e169c941ada001b7f5389b47 | refs/heads/master | 2022-11-27T03:54:28.840433 | 2020-08-09T11:47:20 | 2020-08-09T11:47:20 | 283,520,148 | 0 | 0 | null | 2020-07-29T15:28:03 | 2020-07-29T14:24:10 | Python | UTF-8 | Python | false | false | 391 | py | """
ASGI config for selfdev project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings before building the ASGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'selfdev.settings')
# Module-level callable that ASGI servers (daphne, uvicorn, ...) import.
application = get_asgi_application()
| [
"vg.mkrtchyan@gmail.com"
] | vg.mkrtchyan@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.