blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ecbea36070dd712629e55b616938b75491ba10b9 | 3a8f8bef453f5eb01cc6f22d8bb140d7791024df | /command/tcommand.py | add4fd8bf60184b1755ced53d3534642b3e2870a | [] | no_license | thomasvs/python-command | 23a68de2ce596a7eed5a2740a5ee1471f62ed569 | 4c31072e9f5f68e22c92cdc8f0a02d911b7e5fc0 | refs/heads/master | 2020-05-02T11:29:24.459355 | 2014-09-07T22:23:58 | 2014-09-07T22:23:58 | 5,668,726 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,146 | py | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
"""
A helper class for Twisted commands.
"""
from twisted.internet import defer
from twisted.python import failure
import command
class TwistedCommand(command.Command):
    """
    I am a Command that integrates with Twisted and its reactor.

    Instead of implementing the do() method, subclasses should implement a
    doLater() method which returns a deferred.
    """

    def installReactor(self, reactor=None):
        """
        Install the given reactor on the nearest ancestor ReactorCommand.

        Override me to install your own reactor in the parent
        ReactorCommand.

        @param reactor: the reactor to install, or None to use the default.
        @raises AssertionError: if no ancestor is a ReactorCommand.
        """
        self.debug('installing reactor %r in ancestor ReactorCommand',
                   reactor)
        c = self
        # Walk up the command hierarchy until we reach a ReactorCommand.
        while c.parentCommand and not isinstance(c, ReactorCommand):
            c = c.parentCommand
        # Bug fix: the original tested `if not c`, which can never be true
        # because the loop always leaves c bound to some command object.
        # Test the type instead so a missing ReactorCommand ancestor is
        # actually reported.
        if not isinstance(c, ReactorCommand):
            raise AssertionError(
                '%r does not have a parent ReactorCommand' % self)
        self.debug('installing reactor %r in ancestor ReactorCommand %r',
                   reactor, c)
        c.installReactor(reactor)

    ### command.Command implementations

    def do(self, args):
        """
        Install the reactor, then delegate the actual work to doLater().

        @rtype: L{defer.Deferred}
        """
        self.debug('%r: installing reactor using method %r', self,
                   self.installReactor)
        self.installReactor()
        d = self.doLater(args)
        return d

    ### command.TwistedCommand methods to implement by subclasses

    def doLater(self, args):
        """
        Perform the command's work and return a deferred firing its result.

        @rtype: L{defer.Deferred}
        """
        raise NotImplementedError
class ReactorCommand(command.Command):
    """
    I am a Command that runs a reactor for its subcommands if they
    return a L{defer.Deferred} from their doLater() method.
    """
    reactor = None            # the installed twisted reactor
    returnValue = None        # exit code, or a failure.Failure to re-raise
    _reactorRunning = False   # True while we own the running reactor
    def installReactor(self, reactor=None):
        """
        Override me to install your own reactor.

        @param reactor: the reactor to use; if None, the default
                        twisted.internet reactor is imported and used.
        """
        self.debug('ReactorCommand: installing reactor %r', reactor)
        if not reactor:
            # importing twisted.internet.reactor installs the default
            # reactor and rebinds the local name
            from twisted.internet import reactor
        self.reactor = reactor
    ### command.Command overrides
    def parse(self, argv):
        """
        I will run a reactor to get the non-deferred result.

        If the chained-up parse returns a Deferred, spin the reactor until
        the Deferred fires, then return (or re-raise) its outcome.
        """
        self.debug('parse: chain up')
        try:
            r = command.Command.parse(self, argv)
        except Exception:
            # get a full traceback to debug here
            f = failure.Failure()
            self.warning('Exception during %r.parse: %r\n%s\n',
                self, f.getErrorMessage(), f.getTraceback())
            self.stderr.write('Exception: %s\n' % f.value)
            raise
        self.debug('parse: result %r', r)
        # if it's not a deferred, return the result as is
        if not isinstance(r, defer.Deferred):
            return r
        # We have a deferred, so we need to run a reactor
        d = r
        # child commands could have installed a reactor
        if not self.reactor:
            self.installReactor()
        def parseCb(ret):
            # Success path: normalize the deferred's result to an exit code
            # and stop the reactor if we started it.
            if ret is None:
                self.debug('parse returned None, defaults to exit code 0')
                ret = 0
            elif ret:
                self.debug('parse returned %r' % ret)
            elif self.parser.help_printed or self.parser.usage_printed:
                ret = 0
            self.debug('parse: cb: done')
            self.returnValue = ret
            if self._reactorRunning:
                self._reactorRunning = False
                self.debug('stopping reactor')
                self.reactor.stop()
            return ret
        def parseEb(failure):
            # Failure path; NOTE: the parameter deliberately shadows the
            # twisted.python.failure module inside this function.
            self.debug('parse: eb: failure: %r\n%s\n',
                failure.getErrorMessage(), failure.getTraceback())
            # we can get here even before we run the reactor below;
            # so schedule a stop instead of doing it here
            # self.reactor.stop()
            self.reactor.callLater(0, self.reactor.stop)
            if failure.check(command.CommandExited):
                self.stderr.write(failure.value.output + '\n')
                reason = failure.value.status
                self.returnValue = reason
                return reason
            self.warning('errback: %r', failure.getErrorMessage())
            self.stderr.write('Failure: %s\n' % failure.value)
            self.returnValue = failure
            # we handled it by storing it for reraising, so don't
            # return it
            return
        d.addCallback(parseCb)
        d.addErrback(parseEb)
        def raiseIfFailure():
            # Re-raise a stored Failure as its original exception so the
            # caller sees a normal Python exception.
            if isinstance(self.returnValue, failure.Failure):
                raise self.returnValue.value
        if self.returnValue is not None:
            self.debug('got return value before reactor ran, returning %r' %
                self.returnValue)
            raiseIfFailure()
            return self.returnValue
        self.debug('running reactor %r', self.reactor)
        self._reactorRunning = True
        self.reactor.run()
        self.debug('ran reactor, got %r' % self.returnValue)
        raiseIfFailure()
        self.debug('ran reactor, returning %r' % self.returnValue)
        return self.returnValue
| [
"thomas (at) apestaart (dot) org"
] | thomas (at) apestaart (dot) org |
9f7bd84d3d6ded16616eb407a8258ccfec9ce5cd | df91a0483fefc9b75de248cc3b3aac55ce706f37 | /efficientnet_pytorch/utils.py | 725e3175e56394978756531cbdfabc515ad45b1a | [] | no_license | dttung2905/kaggle-siim-melanoma-2020 | 19b5685fac57eeb3437efead04913e91dfe617dd | d81677e35cc8bc535c56d40395e3bfa4489893d6 | refs/heads/master | 2022-12-04T22:25:08.299660 | 2020-08-17T15:08:07 | 2020-08-17T15:08:07 | 281,700,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,099 | py | """utils.py - Helper functions for building the model and for loading model parameters.
These helper functions are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
################################################################################
### Help functions for model architecture
################################################################################
# GlobalParams and BlockArgs: Two namedtuples
# Swish and MemoryEfficientSwish: Two implementations of the method
# round_filters and round_repeats:
# Functions to calculate params for scaling model width and depth ! ! !
# get_width_and_height_from_size and calculate_output_image_size
# drop_connect: A structural design
# get_same_padding_conv2d:
# Conv2dDynamicSamePadding
# Conv2dStaticSamePadding
# get_same_padding_maxPool2d:
# MaxPool2dDynamicSamePadding
# MaxPool2dStaticSamePadding
# It's an additional function, not used in EfficientNet,
# but can be used in other model (such as EfficientDet).
# Identity: An implementation of identical mapping
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple(
    "GlobalParams",
    [
        "width_coefficient",
        "depth_coefficient",
        "image_size",
        "dropout_rate",
        "num_classes",
        "batch_norm_momentum",
        "batch_norm_epsilon",
        "drop_connect_rate",
        "depth_divisor",
        "min_depth",
    ],
)
# Parameters for an individual model block (one MBConv stage)
BlockArgs = collections.namedtuple(
    "BlockArgs",
    [
        "num_repeat",
        "kernel_size",
        "stride",
        "expand_ratio",
        "input_filters",
        "output_filters",
        "se_ratio",
        "id_skip",
    ],
)
# Set GlobalParams and BlockArgs's defaults:
# every field defaults to None so callers may specify only what they need.
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
# An ordinary implementation of Swish function
class Swish(nn.Module):
    """Plain Swish activation module: f(x) = x * sigmoid(x)."""

    def forward(self, x):
        # Elementwise swish; the sigmoid acts as a soft gate on the input.
        return torch.sigmoid(x) * x
# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
    """Custom autograd Function computing swish while saving only the raw input."""

    @staticmethod
    def forward(ctx, i):
        # Keep just the input tensor for the backward pass.
        ctx.save_for_backward(i)
        return torch.sigmoid(i) * i

    @staticmethod
    def backward(ctx, grad_output):
        (i,) = ctx.saved_tensors
        sig = torch.sigmoid(i)
        # d/di [i * sigmoid(i)] = sigmoid(i) * (1 + i * (1 - sigmoid(i)))
        return grad_output * (sig * (1 + i * (1 - sig)))
class MemoryEfficientSwish(nn.Module):
    """Swish activation backed by the custom autograd Function.

    Saves memory relative to the plain Swish module because
    SwishImplementation retains only the raw input for backward.
    """
    def forward(self, x):
        return SwishImplementation.apply(x)
def round_filters(filters, global_params):
    """Scale a filter count by the width multiplier and round to the divisor.

    Uses width_coefficient, depth_divisor and min_depth of global_params.

    Args:
        filters (int): Filters number to be calculated.
        global_params (namedtuple): Global params of the model.

    Returns:
        int: the scaled and rounded filter count.
    """
    multiplier = global_params.width_coefficient
    if not multiplier:
        # No width scaling configured: leave the count untouched.
        return filters
    divisor = global_params.depth_divisor
    min_depth = global_params.min_depth or divisor
    scaled = filters * multiplier
    # Round to the nearest multiple of `divisor`, never below `min_depth`
    # (formula transferred from the official TensorFlow implementation).
    rounded = max(min_depth, int(scaled + divisor / 2) // divisor * divisor)
    # Never round down by more than 10% of the scaled value.
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
def round_repeats(repeats, global_params):
    """Scale a block's repeat count by the depth multiplier.

    Uses depth_coefficient of global_params; the result is rounded up,
    following the official TensorFlow implementation.

    Args:
        repeats (int): num_repeat to be calculated.
        global_params (namedtuple): Global params of the model.

    Returns:
        int: the scaled repeat count.
    """
    multiplier = global_params.depth_coefficient
    return int(math.ceil(multiplier * repeats)) if multiplier else repeats
def drop_connect(inputs, p, training):
    """Apply drop connect (per-sample stochastic depth).

    Args:
        inputs (tensor: BCWH): Input of this structure.
        p (float: 0.0~1.0): Probability of drop connection.
        training (bool): The running mode.

    Returns:
        The input scaled by 1/keep_prob with whole samples randomly zeroed
        (training mode), or the untouched input (inference mode).
    """
    assert p >= 0 and p <= 1, "p must be in range of [0,1]"
    if not training:
        return inputs
    keep_prob = 1 - p
    batch_size = inputs.shape[0]
    # One Bernoulli draw per sample: floor(keep_prob + U[0,1)) is 1 with
    # probability keep_prob and 0 otherwise.
    noise = torch.rand(
        [batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device
    )
    binary_mask = torch.floor(keep_prob + noise)
    return inputs / keep_prob * binary_mask
def get_width_and_height_from_size(x):
    """Obtain height and width from x.

    Args:
        x (int, tuple or list): Data size.

    Returns:
        A (H, W) tuple when x is an int, otherwise x itself.

    Raises:
        TypeError: if x is neither an int, a list, nor a tuple.
    """
    if isinstance(x, int):
        return x, x
    if isinstance(x, (list, tuple)):
        return x
    raise TypeError()
def calculate_output_image_size(input_image_size, stride):
    """Compute the output size of Conv2dSamePadding for a given stride.

    Necessary for static padding. Thanks to mannatsingh for pointing this out.

    Args:
        input_image_size (int, tuple or list): Size of input image.
        stride (int, tuple or list): Conv2d operation's stride.

    Returns:
        A list [H, W], or None when input_image_size is None.
    """
    if input_image_size is None:
        return None
    height, width = get_width_and_height_from_size(input_image_size)
    # A square stride is assumed; take the first component if given a pair.
    step = stride if isinstance(stride, int) else stride[0]
    return [int(math.ceil(height / step)), int(math.ceil(width / step))]
# Note:
# The following 'SamePadding' functions make output size equal ceil(input size/stride).
# Only when stride equals 1, can the output size be the same as input size.
# Don't be confused by their function names ! ! !
def get_same_padding_conv2d(image_size=None):
    """Select a 'SAME'-padding Conv2d class.

    Static padding (fixed image size) is necessary for ONNX exporting of
    models; with no image size the padding is computed dynamically on every
    forward pass.

    Args:
        image_size (int or tuple): Size of the image.

    Returns:
        Conv2dDynamicSamePadding, or Conv2dStaticSamePadding partially
        applied with the given image size.
    """
    if image_size is not None:
        return partial(Conv2dStaticSamePadding, image_size=image_size)
    return Conv2dDynamicSamePadding
class Conv2dDynamicSamePadding(nn.Conv2d):
    """2D Convolutions like TensorFlow, for a dynamic image size.
    The padding is operated in forward function by calculating dynamically.
    """
    # Tips for 'SAME' mode padding.
    #     Given: i = width or height, s = stride, k = kernel size,
    #            d = dilation, p = padding.
    #     Output after Conv2d: o = floor((i+p-((k-1)*d+1))/s+1)
    #     For o == ceil(i/s) we need: p = (o-1)*s+((k-1)*d+1)-i
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        dilation=1,
        groups=1,
        bias=True,
    ):
        # Padding is forced to 0 here; the real padding is applied in forward.
        super().__init__(
            in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias
        )
        # Normalize stride to a 2-element sequence.
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
    def forward(self, x):
        """Pad x so the output spatial size is ceil(input/stride), then convolve."""
        ih, iw = x.size()[-2:]
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        oh, ow = (
            math.ceil(ih / sh),
            math.ceil(iw / sw),
        )  # target output size under 'SAME' semantics
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # Asymmetric padding: the extra pixel (if any) goes right/bottom.
            x = F.pad(
                x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
            )
        return F.conv2d(
            x,
            self.weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
class Conv2dStaticSamePadding(nn.Conv2d):
    """2D Convolutions like TensorFlow's 'SAME' mode, with the given input image size.
    The padding module is calculated in the constructor, then used in forward.
    """
    # With the same calculation as Conv2dDynamicSamePadding
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        image_size=None,
        **kwargs,
    ):
        super().__init__(in_channels, out_channels, kernel_size, stride, **kwargs)
        # Normalize stride to a 2-element sequence.
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
        # Calculate padding based on image size and save it
        assert image_size is not None
        ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # Asymmetric padding: the extra pixel (if any) goes right/bottom.
            self.static_padding = nn.ZeroPad2d(
                (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)
            )
        else:
            self.static_padding = Identity()
    def forward(self, x):
        """Apply the precomputed 'SAME' padding, then convolve."""
        x = self.static_padding(x)
        x = F.conv2d(
            x,
            self.weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
        return x
def get_same_padding_maxPool2d(image_size=None):
    """Select a 'SAME'-padding MaxPool2d class.

    Static padding (fixed image size) is necessary for ONNX exporting of
    models; without an image size the padding is computed dynamically on
    every forward pass.

    Args:
        image_size (int or tuple): Size of the image.

    Returns:
        MaxPool2dDynamicSamePadding, or MaxPool2dStaticSamePadding partially
        applied with the given image size.
    """
    if image_size is not None:
        return partial(MaxPool2dStaticSamePadding, image_size=image_size)
    return MaxPool2dDynamicSamePadding
class MaxPool2dDynamicSamePadding(nn.MaxPool2d):
    """2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size.
    The padding is operated in forward function by calculating dynamically.
    """
    def __init__(
        self,
        kernel_size,
        stride,
        padding=0,
        dilation=1,
        return_indices=False,
        ceil_mode=False,
    ):
        super().__init__(
            kernel_size, stride, padding, dilation, return_indices, ceil_mode
        )
        # Normalize stride/kernel_size/dilation to 2-element sequences.
        self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
        self.kernel_size = (
            [self.kernel_size] * 2
            if isinstance(self.kernel_size, int)
            else self.kernel_size
        )
        self.dilation = (
            [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation
        )
    def forward(self, x):
        """Pad x so the output spatial size is ceil(input/stride), then max-pool."""
        ih, iw = x.size()[-2:]
        kh, kw = self.kernel_size
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # Asymmetric padding: the extra pixel (if any) goes right/bottom.
            x = F.pad(
                x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
            )
        return F.max_pool2d(
            x,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            self.ceil_mode,
            self.return_indices,
        )
class MaxPool2dStaticSamePadding(nn.MaxPool2d):
    """2D MaxPooling like TensorFlow's 'SAME' mode, with the given input image size.
    The padding module is calculated in the constructor, then used in forward.
    """
    def __init__(self, kernel_size, stride, image_size=None, **kwargs):
        super().__init__(kernel_size, stride, **kwargs)
        # Normalize stride/kernel_size/dilation to 2-element sequences.
        self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
        self.kernel_size = (
            [self.kernel_size] * 2
            if isinstance(self.kernel_size, int)
            else self.kernel_size
        )
        self.dilation = (
            [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation
        )
        # Calculate padding based on image size and save it
        assert image_size is not None
        ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
        kh, kw = self.kernel_size
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # Asymmetric padding: the extra pixel (if any) goes right/bottom.
            self.static_padding = nn.ZeroPad2d(
                (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)
            )
        else:
            self.static_padding = Identity()
    def forward(self, x):
        """Apply the precomputed 'SAME' padding, then max-pool."""
        x = self.static_padding(x)
        x = F.max_pool2d(
            x,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            self.ceil_mode,
            self.return_indices,
        )
        return x
class Identity(nn.Module):
    """Identity mapping.

    Send input to output directly.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input):
        # No-op module, useful as a placeholder (e.g. zero padding needed).
        return input
################################################################################
### Helper functions for loading model params
################################################################################
# BlockDecoder: A Class for encoding and decoding BlockArgs
# efficientnet_params: A function to query compound coefficient
# get_model_params and efficientnet:
# Functions to get BlockArgs and GlobalParams for efficientnet
# url_map and url_map_advprop: Dicts of url_map for pretrained weights
# load_pretrained_weights: A function to load pretrained weights
class BlockDecoder(object):
    """Block Decoder for readability,
    straight from the official TensorFlow repository.
    """

    @staticmethod
    def _decode_block_string(block_string):
        """Get a block through a string notation of arguments.

        Args:
            block_string (str): A string notation of arguments.
                Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'.

        Returns:
            BlockArgs: The namedtuple defined at the top of this file.
        """
        assert isinstance(block_string, str)
        ops = block_string.split("_")
        options = {}
        for op in ops:
            # Split each token into a key and its numeric tail,
            # e.g. 'k3' -> ('k', '3'); bare flags like 'noskip' are skipped.
            splits = re.split(r"(\d.*)", op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value
        # Check stride: one digit, or two identical digits.
        assert ("s" in options and len(options["s"]) == 1) or (
            len(options["s"]) == 2 and options["s"][0] == options["s"][1]
        )
        return BlockArgs(
            num_repeat=int(options["r"]),
            kernel_size=int(options["k"]),
            stride=[int(options["s"][0])],
            expand_ratio=int(options["e"]),
            input_filters=int(options["i"]),
            output_filters=int(options["o"]),
            se_ratio=float(options["se"]) if "se" in options else None,
            id_skip=("noskip" not in block_string),
        )

    @staticmethod
    def _encode_block_string(block):
        """Encode a block to a string.

        Args:
            block (namedtuple): A BlockArgs type argument.

        Returns:
            block_string: A String form of BlockArgs.
        """
        # Bug fix: the field is named 'stride' (not 'strides'), and decode()
        # stores it as a single-element list; accept int, 1- or 2-element
        # sequences here.
        stride = block.stride
        if isinstance(stride, int):
            sh = sw = stride
        elif len(stride) == 1:
            sh = sw = stride[0]
        else:
            sh, sw = stride[0], stride[1]
        args = [
            "r%d" % block.num_repeat,
            "k%d" % block.kernel_size,
            "s%d%d" % (sh, sw),
            "e%s" % block.expand_ratio,
            "i%d" % block.input_filters,
            "o%d" % block.output_filters,
        ]
        # Bug fix: guard against se_ratio=None, which the original compared
        # directly against numbers (TypeError on Python 3).
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append("se%s" % block.se_ratio)
        if block.id_skip is False:
            args.append("noskip")
        return "_".join(args)

    @staticmethod
    def decode(string_list):
        """Decode a list of string notations to specify blocks inside the network.

        Args:
            string_list (list[str]): A list of strings, each string is a notation of block.

        Returns:
            blocks_args: A list of BlockArgs namedtuples of block args.
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args

    @staticmethod
    def encode(blocks_args):
        """Encode a list of BlockArgs to a list of strings.

        Args:
            blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args.

        Returns:
            block_strings: A list of strings, each string is a notation of block.
        """
        block_strings = []
        for block in blocks_args:
            block_strings.append(BlockDecoder._encode_block_string(block))
        return block_strings
def efficientnet_params(model_name):
    """Look up the compound-scaling coefficients for an EfficientNet variant.

    Args:
        model_name (str): Model name to be queried.

    Returns:
        A (width_coefficient, depth_coefficient, resolution, dropout_rate) tuple.

    Raises:
        KeyError: if *model_name* is not a known variant.
    """
    coefficients = {
        # name: (width, depth, resolution, dropout)
        "efficientnet-b0": (1.0, 1.0, 224, 0.2),
        "efficientnet-b1": (1.0, 1.1, 240, 0.2),
        "efficientnet-b2": (1.1, 1.2, 260, 0.3),
        "efficientnet-b3": (1.2, 1.4, 300, 0.3),
        "efficientnet-b4": (1.4, 1.8, 380, 0.4),
        "efficientnet-b5": (1.6, 2.2, 456, 0.4),
        "efficientnet-b6": (1.8, 2.6, 528, 0.5),
        "efficientnet-b7": (2.0, 3.1, 600, 0.5),
        "efficientnet-b8": (2.2, 3.6, 672, 0.5),
        "efficientnet-l2": (4.3, 5.3, 800, 0.5),
    }
    return coefficients[model_name]
def efficientnet(
    width_coefficient=None,
    depth_coefficient=None,
    image_size=None,
    dropout_rate=0.2,
    drop_connect_rate=0.2,
    num_classes=1000,
):
    """Create BlockArgs and GlobalParams for efficientnet model.

    Args:
        width_coefficient (float)
        depth_coefficient (float)
        image_size (int)
        dropout_rate (float)
        drop_connect_rate (float)
        num_classes (int)
        Meaning as the name suggests.

    Returns:
        blocks_args, global_params.
    """
    # Blocks args for the whole model(efficientnet-b0 by default)
    # It will be modified in the construction of EfficientNet Class according to model
    # String notation: r=num_repeat, k=kernel_size, s=stride, e=expand_ratio,
    # i=input_filters, o=output_filters, se=squeeze-and-excite ratio.
    blocks_args = [
        "r1_k3_s11_e1_i32_o16_se0.25",
        "r2_k3_s22_e6_i16_o24_se0.25",
        "r2_k5_s22_e6_i24_o40_se0.25",
        "r3_k3_s22_e6_i40_o80_se0.25",
        "r3_k5_s11_e6_i80_o112_se0.25",
        "r4_k5_s22_e6_i112_o192_se0.25",
        "r1_k3_s11_e6_i192_o320_se0.25",
    ]
    blocks_args = BlockDecoder.decode(blocks_args)
    global_params = GlobalParams(
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        image_size=image_size,
        dropout_rate=dropout_rate,
        num_classes=num_classes,
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        drop_connect_rate=drop_connect_rate,
        depth_divisor=8,
        min_depth=None,
    )
    return blocks_args, global_params
def get_model_params(model_name, override_params):
    """Get the block args and global params for a given model name.

    Args:
        model_name (str): Model's name.
        override_params (dict): A dict to modify global_params.

    Returns:
        blocks_args, global_params

    Raises:
        NotImplementedError: if model_name is not an 'efficientnet*' name.
    """
    if model_name.startswith("efficientnet"):
        w, d, s, p = efficientnet_params(model_name)
        # note: all models have drop connect rate = 0.2
        blocks_args, global_params = efficientnet(
            width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s
        )
    else:
        raise NotImplementedError("model name is not pre-defined: %s" % model_name)
    if override_params:
        # ValueError will be raised here if override_params has fields not included in global_params.
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
# train with Standard methods
# check more details in paper(EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks)
url_map = {
"efficientnet-b0": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth",
"efficientnet-b1": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth",
"efficientnet-b2": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth",
"efficientnet-b3": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth",
"efficientnet-b4": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth",
"efficientnet-b5": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth",
"efficientnet-b6": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth",
"efficientnet-b7": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth",
}
# train with Adversarial Examples(AdvProp)
# check more details in paper(Adversarial Examples Improve Image Recognition)
url_map_advprop = {
"efficientnet-b0": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth",
"efficientnet-b1": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth",
"efficientnet-b2": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth",
"efficientnet-b3": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth",
"efficientnet-b4": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth",
"efficientnet-b5": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth",
"efficientnet-b6": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth",
"efficientnet-b7": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth",
"efficientnet-b8": "https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth",
}
# TODO: add the petrained weights url map of 'efficientnet-l2'
def load_pretrained_weights(
    model, model_name, weights_path=None, load_fc=True, advprop=False
):
    """Loads pretrained weights from weights path or download using url.

    Args:
        model (Module): The whole model of efficientnet.
        model_name (str): Model name of efficientnet.
        weights_path (None or str):
            str: path to pretrained weights file on the local disk.
            None: use pretrained weights downloaded from the Internet.
        load_fc (bool): Whether to load pretrained weights for fc layer at the end of the model.
        advprop (bool): Whether to load pretrained weights
            trained with advprop (valid when weights_path is None).

    Raises:
        AssertionError: if the checkpoint and the model do not line up.
    """
    if isinstance(weights_path, str):
        state_dict = torch.load(weights_path)
    else:
        # AutoAugment or Advprop (different preprocessing)
        url_map_ = url_map_advprop if advprop else url_map
        state_dict = model_zoo.load_url(url_map_[model_name])
    if load_fc:
        ret = model.load_state_dict(state_dict, strict=False)
        assert (
            not ret.missing_keys
        ), f"Missing keys when loading pretrained weights: {ret.missing_keys}"
    else:
        # Drop the classifier weights; the caller supplies its own head.
        state_dict.pop("_fc.weight")
        state_dict.pop("_fc.bias")
        ret = model.load_state_dict(state_dict, strict=False)
        assert set(ret.missing_keys) == set(
            ["_fc.weight", "_fc.bias"]
        ), f"Missing keys when loading pretrained weights: {ret.missing_keys}"
    # Bug fix: this message previously said "Missing keys" while reporting
    # unexpected_keys, which made checkpoint mismatches confusing to debug.
    assert (
        not ret.unexpected_keys
    ), f"Unexpected keys when loading pretrained weights: {ret.unexpected_keys}"
    print("Loaded pretrained weights for {}".format(model_name))
| [
"thanhtung.dao@rakuten.com"
] | thanhtung.dao@rakuten.com |
a0cb1eee0ce7279e519465175cbaff109ed4fb60 | e3365a497b6f3afa7afc36381f7a7d1752f09610 | /.venv/bin/jupyter-notebook | 70ee2fde73f1c59914cde9b01c22c06f382ee6ce | [] | no_license | MohamadSheikhAlshabab/Chess_Board- | 4229f7044831b79a8b8b6662a2aea5753d11c7dc | ee2e69d4567b69559584d0b074d91a25793db2f7 | refs/heads/master | 2022-12-08T05:10:59.482582 | 2020-09-04T16:34:18 | 2020-09-04T16:34:18 | 291,529,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | #!/home/mohamad/401/chess_board/.venv/bin/python3
# -*- coding: utf-8 -*-
# Console-script entry point for the Jupyter notebook server.
import re
import sys
from notebook.notebookapp import main
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix Windows launchers append to
    # argv[0] so the app sees a clean program name, then hand off control.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"alshabab.moh@gmail.com"
] | alshabab.moh@gmail.com | |
967b5276ffd71007c6b33334b7fccee2155aa798 | b38eb298c91f7e74e7857a1c1c421f60eba1ba29 | /Day4/program1.py | 767e2340845b14766d27b7744e32521f91de6e37 | [] | no_license | bhartiyadavdel/tathastu_week_of_code | bf8467183f949dacfbeaebf21603100e1a061f27 | 6d74704760bbf232186cb90e5b4087ec929f7961 | refs/heads/master | 2022-10-14T05:03:23.249916 | 2020-06-10T18:46:04 | 2020-06-10T18:46:04 | 270,074,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | size=int(input("enter the size of the tuple"))
print("enter the elements of the tuple")
# Collect `size` elements (size is read from stdin above) into a list,
# then freeze it into a tuple.
tup=[]
for i in range(size):
    # input() returns str, so all elements are stored as strings.
    tup.append(input())
tup=tuple(tup)
e=input("enter the element whose count you wish to obtain")
# tuple.count compares by equality; both sides are strings here.
print("count of the element is",tup.count(e),"times")
| [
"noreply@github.com"
] | noreply@github.com |
79f50a378ab45f7801f359d695045b821ff47443 | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/sudoku_20201101154742.py | c093972aa62bcc31bf99b51feb72a76950605747 | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,829 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
from math import floor
import pygame as pg
import numpy as np
# For error highlighting
def set_highlight(row, col, blk, lock):
global input_lock
input_lock = lock
global row_index
row_index = row
global col_index
col_index = blk
global blk_index
blk_index = col
def get_cord(pos):
    # Convert a pixel position (x, y) into board cell indices, stored as
    # module globals. TOP_LX/TOP_LY/BLOCK_SIZE come from `config`
    # (imported with *) — presumably the grid origin and cell size in
    # pixels; TODO confirm.
    global box_index_x
    box_index_x = (pos[0] - TOP_LX)//BLOCK_SIZE
    global box_index_y
    box_index_y = (pos[1] - TOP_LY)//BLOCK_SIZE
def valid(grid, x, y, val, increase):
    """Return True if `val` can be placed at grid[x][y] without conflicts.

    On conflict, records the clashing row/column/block cells via
    set_highlight() and returns False.

    NOTE(review): `increase` is unused here — the caller passes the display
    object; consider removing or using it.
    """
    input_lock = 0
    row = col = blk = (0, 0)
    for index in range(9):
        # Check if value in column
        if grid[x][index] == val:
            col = (x, index)
            input_lock = 1
        # Check if value in row
        if grid[index][y] == val:
            row = (index, y)
            input_lock = 1
    # Finds the block
    index_x = x // 3 # integer division
    index_y = y // 3
    # Check if value in block
    for i in range(index_x * 3, index_x * 3 + 3):
        for j in range (index_y * 3, index_y * 3 + 3):
            if grid[i][j] == val:
                blk = (i, j)
                input_lock = 1
    if input_lock == 1:
        # Publish the conflicting cells for highlighting and reject the move.
        set_highlight(row, col, blk, input_lock)
        return False
    return True
class Main():
    """Owns the pygame window and runs the interactive sudoku loop."""

    def __init__(self):
        self.board = []
        self.run()

    def run(self):
        """Create the window, then loop: handle events, validate input, redraw."""
        # Shared with set_highlight()/valid(). The original code used a plain
        # local here, which shadowed the flag those helpers set, so the
        # error-lock branch below could never trigger.
        global input_lock
        pg.init()
        self.screen = pg.display.set_mode(SCREEN_RES)
        pg.display.set_caption('Sudoku solver')
        display = Display_board(self.screen)
        flag1 = 0
        val = 0          # digit typed by the user; 0 means "nothing pending"
        pos = (0, 0)
        blink = False    # True once a cell has been selected with the mouse
        input_lock = 0
        get_cord((0, 0))
        set_highlight((0, 0), (0, 0), (0, 0), input_lock)
        board = create_board().board
        while 1:
            for event in pg.event.get():
                if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
                    exit()
                if event.type == pg.MOUSEBUTTONDOWN:
                    flag1 = 1
                    pos = pg.mouse.get_pos()
                    get_cord(pos)
                    blink = True
                if event.type == pg.KEYDOWN and input_lock != 1:
                    # Number keys 1-9 set the pending value for the cell.
                    number_keys = {
                        pg.K_1: 1, pg.K_2: 2, pg.K_3: 3,
                        pg.K_4: 4, pg.K_5: 5, pg.K_6: 6,
                        pg.K_7: 7, pg.K_8: 8, pg.K_9: 9,
                    }
                    if event.key in number_keys:
                        val = number_keys[event.key]
                elif event.type == pg.KEYDOWN and input_lock == 1:
                    # While a conflict is highlighted, only backspace is
                    # accepted: it clears the value and releases the lock.
                    if event.key == pg.K_BACKSPACE:
                        val = 0
                        set_highlight((0, 0), (0, 0), (0, 0), 0)
            if val != 0:
                display.draw_val(val, box_index_x, box_index_y)
                if valid(board, int(box_index_x), int(box_index_y), val, display):
                    board[int(box_index_x)][int(box_index_y)] = val
                else:
                    # Invalid entry: clear the cell; valid() has set the
                    # highlight globals and raised input_lock.
                    board[int(box_index_x)][int(box_index_y)] = 0
                val = 0
            pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
            self.screen.fill(BEIGE)
            display.draw(board)
            if blink:
                cell = display.find_cell(box_index_x, box_index_y)
                alpha = display.blink()
                print("start pos x: ", floor(cell[0]), "start pos y: ", floor(cell[1]), "end pos x: ", floor(cell[2]), "end pos y: ", floor(cell[3]))
                cell_width = int(cell[2])
                cell_height = int(cell[3])
                start_pos_X = int(cell[0])
                start_pos_y = int(cell[1])
                rect = pg.Surface((cell_width, cell_height))
                rect.set_alpha(alpha)
                # Bug fix: pg.Surface has no .x/.y attributes, so blitting at
                # (rect.x, rect.y) raised AttributeError. Blit at the cell's
                # top-left corner instead.
                self.screen.blit(rect, (start_pos_X, start_pos_y))
            if input_lock == 1:
                display.update(board, row_index, col_index, blk_index)
            # display.draw_box()
            pg.display.update()
        # NOTE(review): unreachable — the while loop above never breaks.
        self.solution = solve_board(board)
        self.solution.assign_flags(board)
# Entry point: constructing Main() immediately starts the game loop.
if __name__ == '__main__':
    Main()
| [
"jle040@uit.no"
] | jle040@uit.no |
5ee65120d959f54b2c17ed09696f5d2d319a54ca | 0337d1ae26b2d8ab61d7484ed00eb2744e9a41d2 | /core/views.py | afb8f3102f8b1839f3764911e6d0c7bc9337a5c7 | [] | no_license | prafullsinha/Company-Employee-Management | 80d4e58954667f8bbbe1e379687f43e9130e92c3 | 002c5c42fe047e1738810c0e822a3eb41e5b407e | refs/heads/master | 2020-09-07T23:11:22.563192 | 2019-11-11T09:06:44 | 2019-11-11T09:06:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,677 | py | from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import TemplateView, ListView, DetailView, CreateView
from django.contrib.auth.models import User
from core.forms import ProfileForm, RegisterForm, AddProfileForm
from .models import Profile, Company
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.auth.hashers import make_password
@login_required()
def ManagerMemberView(request, email):
    """Edit the Profile belonging to company `email`.

    GET renders a bound ProfileForm; a valid POST saves the changes and
    redirects home. An invalid POST falls through and re-renders the bound
    form with its errors.
    """
    # NOTE(review): .get() raises Profile.DoesNotExist (500) when no profile
    # matches `email` — get_object_or_404 is imported above; confirm intent.
    post = Profile.objects.get(company_id=email)
    if request.method == "POST":
        form = ProfileForm(request.POST or None, request.FILES or None, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.save()
            return redirect('home')
    else:
        form = ProfileForm(instance=post)
    template = 'core/member.html'
    context = {
        'form': form,
        'post': post
    }
    return render(request, template, context)
class HomeView(TemplateView):
    """Public landing page.

    Authenticated users are redirected: accounts whose username matches a
    Company email go to 'profile', all other authenticated users go to
    'company'. Anonymous visitors see the home template with the company
    list in context key 'q'.
    """
    template_name = 'core/home.html'

    def get(self, request, *args, **kwargs):
        companies = Company.objects.all()
        if request.user.is_authenticated:
            # Idiomatic replacement for the original count-flag loop.
            is_company_account = any(
                request.user.username == company.email for company in companies
            )
            return redirect('profile' if is_company_account else 'company')
        context = {
            'q': companies
        }
        return render(request, self.template_name, context)
@method_decorator(login_required, name='dispatch')
class ManagerView(ListView):
    """Dashboard for manager-type employees.

    Context: 'p' is the logged-in user's Profile, 'q' all companies.
    """
    template_name = 'core/manager.html'
    def get(self, request, *args, **kwargs):
        p = Profile.objects.get(user=request.user)
        queryset = Company.objects.all()
        context = {
            'q': queryset,
            'p': p
        }
        return render(request, self.template_name, context)
@method_decorator(login_required, name='dispatch')
class NormalView(ListView):
    """Dashboard for non-manager employees; context 'p' is their Profile."""
    template_name = 'core/normal.html'
    def get(self, request, *args, **kwargs):
        p = Profile.objects.get(user=request.user)
        context = {
            'p': p
        }
        return render(request, self.template_name, context)
@method_decorator(login_required, name='dispatch')
class CompanyView(DetailView):
    """Company dashboard: list companies and add employee profiles.

    POST creates a Profile from AddProfileForm plus a matching User account
    whose username is the new employee's email.
    """
    template_name = 'core/company.html'

    def get(self, request, *args, **kwargs):
        p = Company.objects.all()
        form = AddProfileForm()
        context = {
            'form': form,
            'p': p
        }
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        # SECURITY(review): every new employee account gets the same default
        # password "database"; consider a random password + reset email.
        hashed_password = make_password("database")
        form = AddProfileForm(request.POST)
        if form.is_valid():
            # (Removed dead `profile = Profile()` / `user = User()`
            # assignments that were immediately overwritten.)
            profile = form.save(commit=False)
            profile.cname = request.user.first_name
            profile.save()
            # objects.create() already saves, so no extra user.save() needed.
            User.objects.create(username=form.cleaned_data['email'], password=hashed_password,
                                first_name=form.cleaned_data['name'])
            return redirect('company')
        else:
            form = AddProfileForm()
        context = {
            'form': form
        }
        return render(request, self.template_name, context)
@method_decorator(login_required, name='dispatch')
class ProfileView(TemplateView):
    """Profile creation page for company accounts.

    GET: if the logged-in user already has a Profile, redirect to the
    manager or normal dashboard depending on their company type; otherwise
    show an empty ProfileForm. POST: save a new Profile bound to the user
    and their Company.
    """
    template_name = 'core/profile.html'
    def get(self, request, *args, **kwargs):
        # NOTE(review): iterates every Profile to find the current user's;
        # Profile.objects.filter(user=request.user) would avoid the scan.
        p = Profile.objects.all()
        q = Company.objects.get(email=request.user.username)
        for e in p:
            if e.user == request.user:
                # Company type 'M' appears to mean "manager" — confirm.
                if e.company.type == 'M':
                    return redirect('manager')
                else:
                    return redirect('normal')
        form = ProfileForm()
        context = {
            'form': form,
            'q': q
        }
        return render(request, self.template_name, context)
    def post(self, request, *args, **kwargs):
        p = Company.objects.get(email=request.user.username)
        # NOTE(review): `profile` is never used, and `picture` is the class
        # field descriptor (not an instance value) — it is only passed to the
        # template on the invalid-form path. Likely leftover code; confirm.
        profile = Profile()
        picture = Profile.picture
        form = ProfileForm(request.POST or None, request.FILES or None)
        if form.is_valid():
            profile1 = form.save(commit=False)
            profile1.user = request.user
            profile1.name = request.user.first_name
            profile1.company = p
            profile1.save()
            return redirect('profile')
        else:
            # Invalid submission: reset to an empty form (errors discarded).
            form = ProfileForm()
        context = {
            'picture': picture,
            'form': form,
            'p': p
        }
        return render(request, self.template_name, context)
class Signup2(TemplateView):
    """Company sign-up: create a User keyed by email and log them in."""
    template_name = 'registration/signup2.html'
    def get(self, request, *args, **kwargs):
        form = RegisterForm()
        context = {
            'form': form
        }
        return render(request, self.template_name, context)
    def post(self, request, *args, **kwargs):
        form = RegisterForm(request.POST)
        if form.is_valid():
            # Password is hashed explicitly because objects.create() stores
            # the given value verbatim (unlike create_user()).
            user = User.objects.create(username=form.cleaned_data['email'],
                                       password=make_password(form.cleaned_data['password1']),
                                       first_name=form.cleaned_data['first_name'])
            login(request, user)
            return redirect('company')
        # Invalid form: re-render with the bound form so errors are shown.
        context = {
            'form': form
        }
        return render(request, self.template_name, context)
| [
"h2so4007@gmail.com"
] | h2so4007@gmail.com |
ea00d3edacff3f5e008bcd8667e5f284c3ce6a44 | 100d91f89d0a2523aa199d7ac090a5cc71f0d6d0 | /task07/safest_asin.py | e17a0dd6cdf763f9349eeceb68f3ea7f55481864 | [] | no_license | DiLysenk/PythonProjects | 574653d1d3e4e14edcc141fda15ed9a7a0814222 | 53c8fe5f2bd8466dad26f0f386ae962e3709a6de | refs/heads/master | 2021-01-02T04:44:37.287916 | 2020-03-06T14:50:33 | 2020-03-06T14:50:33 | 239,494,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py | #!/usr/bin/env python3
# Поймай исключение, если сможешь
#
# Функция asin определена на области [-1, 1] и возвращает
# угол для которого синус равне аргументу
#
# Задача: узнать, что происходит, когда аргумент не имеет
# математического смысла и при помощи обработки исключений
# сделать функцию "безопасной": возвращающей None в случае вне области
# определения
def safe_asin(*args, **kwargs):
from math import asin
return asin(*args, **kwargs)
# Задекорируй меня полностью
#
# Реализовать четыре декоратора:
#
# Декоратор: check_single_arg: проверяет, что на вход функции подали
# только один позиционный аргумент и ни одного опционального
#
# Декоратор: check_float_arg: проверяет, что позиционный аргумент - вещественное число
# (см isinstace(x, float))
#
# Декоратор: check_ge_minus_1: проверяет, что на вход функции подали
# аргумент, не меньший -1.0
# Декоратор: check_le_plus_1: проверяет, что на вход функции подали
# аргумент, не больший 1.0
#
# С их помощью задекорировать функцию safest_asin для работы с произвольными аргументами
# с аналогичным safe_asin поведению
#
# Пример декоратора:
# def check_positive(func):
# def wrapper(x):
# if x > 0:
# return func(x)
# else:
# return None
# return wrapper
#
# Пример использования:
# # @check_positive
# def safe_sqrt(x):
# from math import sqrt
# return sqrt(x)
def safest_asin(x):
from math import asin
return asin(x)
import unittest
class TaskDecoratorSolutionTest(unittest.TestCase):
cases = (
(0.0, (0.0,), {}),
(__import__("math").asin(0.123), (0.123,), {}),
(None, (-2,), {}),
(None, ("abc",), {}),
(None, (0.0, 1.0), {}),
(None, tuple(), {"arg": 0.5}),
)
def test_safe_asin(self):
self.__execute_equal_subcases(safe_asin, self.cases)
def test_safest_asin(self):
self.__execute_equal_subcases(safest_asin, self.cases)
def __gen_message(self, got, expected, function, *args, **kwargs):
return """
Функция {name} для аргументов {arg} вернула неожидаемое значение:
Значение: {got}
Ожидалось: {expected}""".format(
name=function.__name__,
arg=(args, kwargs),
got=got,
expected=expected)
def __execute_equal_subcases(self, function, cases):
for case in cases:
with self.subTest(case=case):
self.assertEqual(*self.__build_equal_test_args(function, case[0], *case[1], **case[2]))
def __build_equal_test_args(self, function, expected, *args, **kwargs):
got = function(*args, **kwargs)
return (
got,
expected,
self.__gen_message(got, expected, function, *args, **kwargs)
)
if __name__ == "__main__":
unittest.main(verbosity=2, exit=False)
| [
"46103863+DiLysenk@users.noreply.github.com"
] | 46103863+DiLysenk@users.noreply.github.com |
c271e9f700049aa637ff956270d6564aaaf8d504 | d5790c787fffb1e825ed31c226052d4da653e468 | /CRF效果评价.py | ee19ccb420dde791bb6446a1971fc6ee526f4c11 | [] | no_license | RealLuhx/Routine-python-projects | a27008db198c3130bf8cc592cd3933f2e78ff308 | 008d52551af8151e51ac44d58d628beebbae7a77 | refs/heads/master | 2020-03-07T22:29:08.552936 | 2018-04-02T13:25:16 | 2018-04-02T13:25:16 | 127,755,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 21 14:08:37 2017
@author: AloneNotLongly
"""
import time
def PRF(name):
    """Compute precision/recall/F1 for tag `name` from a CRF output file.

    Reads a fixed tab-separated file (token, gold tag, predicted tag) and
    writes the three scores to a per-tag result file.
    NOTE(review): if `name` never appears in the gold or predicted column,
    the divisions below raise ZeroDivisionError — confirm inputs.
    """
    path2 = r'D:\learnPython\第十二次任务\十折交叉验证(最终结果)\(10)\output.txt'
    with open(path2, 'r', encoding='GBK')as f:
        linep = f.readlines()
    num = 0          # true positives: gold == predicted == name
    countname1 = 0   # gold occurrences of name (recall denominator)
    countname2 = 0   # predicted occurrences of name (precision denominator)
    for i in linep:
        i = str(i).split('\t')
        if len(i) == 3:
            i[2] = i[2].replace('\n', '')
            if i[1] == name:
                countname1 += 1
            if i[2] == name:
                countname2 += 1
            if i[1] == name and i[2] == name:
                num += 1
    P = (num / countname2)  # precision
    R = (num / countname1)  # recall
    F = (2 * P * R) / (P + R)  # harmonic mean (F1)
    with open(r'D:\learnPython\SRT\SRT中期汇报\CRF评价\%s的PRF值.txt' % name, 'w', encoding='utf8')as f:
        f.write('正确率(P)' + '\t' + '%.2f%%' % (P * 100) + '\n')
        f.write('召回率(R)' + '\t' + '%.2f%%' % (R * 100) + '\n')
        f.write('F值(F)' + '\t' + '%.2f%%' % (F * 100))
def main():
    """Run the PRF evaluation for each tag and report elapsed time."""
    print('开始............')
    # Bug fix: time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the documented replacement for interval timing.
    begin = time.perf_counter()
    all_ = ['AB', 'AM', 'BE']
    for name in all_:
        PRF(name)
    end = time.perf_counter()
    print('结束')
    print('用时为:' + str(end - begin))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
1bfb44b6968b4c337b143375947e1baa358cd736 | c7f963d4b006d1850db39832a6575c8dfe34dfdd | /workout_app/migrations/0072_auto_20210503_2351.py | 25dcff81a697f0f64bc682a958d31c2396856d4b | [] | no_license | calrh007/project-a-07-personal-copy | 45ec109c574bdefc8d20161ab6922e3d91e2e296 | d215c028b92a21a012a0d682a44c28c0746ef3ce | refs/heads/main | 2023-05-29T05:17:58.072964 | 2021-06-10T05:46:05 | 2021-06-10T05:46:05 | 375,151,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # Generated by Django 3.1.7 on 2021-05-04 03:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Allow NULL profile FKs on workout models and PROTECT them on delete."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('workout_app', '0071_auto_20210502_2052'),
    ]
    operations = [
        migrations.AlterField(
            model_name='workouttype',
            name='profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='workouttypecount',
            name='profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"calrh007@gmail.com"
] | calrh007@gmail.com |
4f894e9fe2c37bd7fdafa11e65ab60ec680c72a2 | 95498d1594e5112d8b44a3d6e0182fe20c374a9d | /simann/verification.py | 3856bcdf550ae0fcca5f7323cea16cad912f0b16 | [] | no_license | jeroenvanriel/job-scheduling | 6ab919dacc6100d25ca4f57d0d0f2a065c39cee2 | 69ee6a63068e31fb9d3bf6db59dff776f96726a0 | refs/heads/main | 2023-02-12T19:16:19.937820 | 2021-01-15T20:56:25 | 2021-01-15T20:56:25 | 313,915,230 | 0 | 0 | null | 2021-01-15T20:56:04 | 2020-11-18T11:35:00 | Python | UTF-8 | Python | false | false | 673 | py | from simann import SimulatedAnnealing
from experiment import Experiment
from p_functions import *
# problem defenition
m = 2 # number of machines
jobs_file = "../ptimes_boundary.txt" # file that contains the jobs
instance = SimulatedAnnealing.fromFile(jobs_file, m)
# output filename
output_prefix = "./experiments/verification_exp"
# run experiments and save their result plots to file
exp1 = instance.start(100, localSearch, 0, 8e10, 4e12)
exp1.plotSchedule(instance, file_name="./experiments/verification_exp1_schedule.png")
exp2 = instance.start(100, localSearch, 1, 8e10, 4e12)
exp2.plotSchedule(instance, file_name="./experiments/verification_exp2_schedule.png") | [
"38719718+willempie167@users.noreply.github.com"
] | 38719718+willempie167@users.noreply.github.com |
09dd43029927a441b357dfb67a390c25dd502a97 | 409224d3dab778d02050dc91f52b11ba5382d478 | /myvenv/lib/python3.6/site-packages/otree/checks/__init__.py | b56457f96a16249df7203c766f7ddfe86d2ddf51 | [
"MIT"
] | permissive | Miyanorococo/yagetapro | f931d2f9080bb54aaa6e3d377654df486a42b405 | c68b5814c1845c0a5722e46e6d8f25f60c916def | refs/heads/master | 2020-04-02T03:31:55.031818 | 2018-10-21T04:17:06 | 2018-10-21T04:17:06 | 153,970,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,919 | py | import glob
import inspect
import io
import os
from otree import common_internal
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.core.checks import register, Error, Warning
from django.template import Template
from django.template import TemplateSyntaxError
import django.db.models.fields
from otree.api import (
BasePlayer, BaseGroup, BaseSubsession, Currency, WaitPage, Page)
from otree.common_internal import _get_all_configs
from pathlib import Path
class AppCheckHelper:
    """Wrapper around a Django AppConfig used by the check functions.

    Collects Error/Warning objects into the shared `errors` list and offers
    path/module/template lookups relative to the app.
    """
    def __init__(self, app_config, errors):
        self.app_config = app_config
        self.errors = errors
    def add_error(self, title, numeric_id: int, **kwargs):
        # IDs are rendered as e.g. otree.E007 (zero-padded to 3 digits).
        issue_id = 'otree.E' + str(numeric_id).zfill(3)
        kwargs.setdefault('obj', self.app_config.label)
        return self.errors.append(Error(title, id=issue_id, **kwargs))
    def add_warning(self, title, numeric_id: int, **kwargs):
        kwargs.setdefault('obj', self.app_config.label)
        issue_id = 'otree.W' + str(numeric_id).zfill(3)
        return self.errors.append(Warning(title, id=issue_id, **kwargs))
    # Helper methods
    def get_path(self, name):
        # Absolute path of `name` inside the app directory.
        return os.path.join(self.app_config.path, name)
    def get_rel_path(self, name):
        # Path of `name` relative to the current working directory.
        basepath = os.getcwd()
        return os.path.relpath(name, basepath)
    def get_module(self, name):
        return import_module(self.app_config.name + '.' + name)
    def get_template_names(self):
        # All .html files anywhere under the app's templates/ directory.
        path = self.get_path('templates')
        template_names = []
        for root, dirs, files in os.walk(path):
            for filename in [f for f in files if f.endswith('.html')]:
                template_names.append(os.path.join(root, filename))
        return template_names
    def module_exists(self, module):
        try:
            self.get_module(module)
            return True
        except ImportError as e:
            return False
    def class_exists(self, module, name):
        # True when `module` defines a class attribute called `name`.
        module = self.get_module(module)
        cls = getattr(module, name, None)
        return inspect.isclass(cls)
# CHECKS
def files(helper: AppCheckHelper, **kwargs):
    """Check required files exist and templates live in the right subfolder."""
    # don't check views.py because it might be pages.py
    for fn in ['models.py']:
        if not os.path.isfile(helper.get_path(fn)):
            helper.add_error(
                'No "%s" file found in game folder' % fn,
                numeric_id=102
            )
    templates_dir = Path(helper.get_path('templates'))
    app_label = helper.app_config.label
    if templates_dir.is_dir():
        # check for files in templates/, but not in templates/<label>
        misplaced_files = list(templates_dir.glob('*.html'))
        if misplaced_files:
            hint = (
                'Move template files from "{app}/templates/" '
                'to "{app}/templates/{app}" subfolder'.format(
                    app=app_label)
            )
            helper.add_error(
                "Templates files in wrong folder",
                hint=hint, numeric_id=103,
            )
        # templates/<app_label>/ is the only expected subfolder name.
        all_subfolders = set(templates_dir.glob('*/'))
        correctly_named_subfolders = set(
            templates_dir.glob('{}/'.format(app_label)))
        other_subfolders = all_subfolders - correctly_named_subfolders
        if other_subfolders and not correctly_named_subfolders:
            msg = (
                "The 'templates' folder has a subfolder called '{}', "
                "but it should be renamed '{}' to match the name of the app. "
            ).format(other_subfolders.pop().name, app_label)
            helper.add_error(msg, numeric_id=104)
# Attribute names inherited from the oTree base model classes; anything on a
# user's model that is NOT in these sets was defined by the user.
base_model_attrs = {
    'Player': set(dir(BasePlayer)),
    'Group': set(dir(BaseGroup)),
    'Subsession': set(dir(BaseSubsession)),
}
# Suggested model-field replacement for a plain class attribute of each type.
model_field_substitutes = {
    int: 'IntegerField',
    float: 'FloatField',
    bool: 'BooleanField',
    str: 'CharField',
    Currency: 'CurrencyField',
    type(None): 'IntegerField'
    # not always int, but it's a reasonable suggestion
}
def model_classes(helper: AppCheckHelper, **kwargs):
    """Check the app defines Player/Group/Subsession and flag bad attributes.

    Flags: plain class attributes that should be model fields (E111),
    mutable class attributes (E112), and field classes missing parentheses
    (E113).
    """
    for name in ['Subsession', 'Group', 'Player']:
        try:
            helper.app_config.get_model(name)
        except LookupError:
            helper.add_error(
                'MissingModel: Model "%s" not defined' % name, numeric_id=110)
    app_config = helper.app_config
    Player = app_config.get_model('Player')
    Group = app_config.get_model('Group')
    Subsession = app_config.get_model('Subsession')
    for Model in [Player, Group, Subsession]:
        for attr_name in dir(Model):
            # Only inspect attributes the user added, not inherited ones.
            if attr_name not in base_model_attrs[Model.__name__]:
                try:
                    attr_value = getattr(Model, attr_name)
                    _type = type(attr_value)
                except AttributeError:
                    # I got "The 'q_country' attribute can only be accessed
                    # from Player instances."
                    # can just filter/ignore these.
                    pass
                else:
                    if _type in model_field_substitutes.keys():
                        msg = (
                            'NonModelFieldAttr: '
                            '{} has attribute "{}", which is not a model field, '
                            'and will therefore not be saved '
                            'to the database.'.format(Model.__name__,
                                                      attr_name))
                        helper.add_error(
                            msg,
                            numeric_id=111,
                            hint='Consider changing to "{} = models.{}(initial={})"'.format(
                                attr_name, model_field_substitutes[_type],
                                repr(getattr(Model, attr_name)))
                        )
                    # if people just need an iterable of choices for a model field,
                    # they should use a tuple, not list or dict
                    elif _type in {list, dict, set}:
                        warning = (
                            'MutableModelClassAttr: '
                            '{ModelName}.{attr} is a {type_name}. '
                            'Modifying it during a session (e.g. appending or setting values) '
                            'will have unpredictable results; '
                            'you should use '
                            'session.vars or participant.vars instead. '
                            'Or, if this {type_name} is read-only, '
                            "then it's recommended to move it outside of this class "
                            '(e.g. put it in Constants).'
                        ).format(ModelName=Model.__name__,
                                 attr=attr_name,
                                 type_name=_type.__name__)
                        helper.add_error(warning, numeric_id=112)
                    # isinstance(X, type) means X is a class, not instance
                    elif (isinstance(attr_value, type) and
                            issubclass(attr_value,
                                       django.db.models.fields.Field)):
                        msg = (
                            '{}.{} is missing parentheses.'
                        ).format(Model.__name__, attr_name)
                        helper.add_error(
                            msg, numeric_id=113,
                            hint=(
                                'Consider changing to "{} = models.{}()"'
                            ).format(attr_name, attr_value.__name__)
                        )
def constants(helper: AppCheckHelper, **kwargs):
    """Validate the Constants class in the app's models.py."""
    if not helper.module_exists('models'):
        return
    if not helper.class_exists('models', 'Constants'):
        helper.add_error(
            'models.py does not contain Constants class', numeric_id=11
        )
        return
    models = helper.get_module('models')
    Constants = getattr(models, 'Constants')
    # These three attributes are mandatory on every Constants class.
    attrs = ['name_in_url', 'players_per_group', 'num_rounds']
    for attr_name in attrs:
        if not hasattr(Constants, attr_name):
            msg = "models.py: 'Constants' class needs to define '{}'"
            helper.add_error(msg.format(attr_name), numeric_id=12)
    ppg = Constants.players_per_group
    if ppg == 0 or ppg == 1:
        helper.add_error(
            "models.py: Constants.players_per_group cannot be {}. You "
            "should set it to None, which makes the group "
            "all players in the subsession.".format(ppg),
            numeric_id=13
        )
    if ' ' in Constants.name_in_url:
        helper.add_error(
            "models.py: Constants.name_in_url must not contain spaces",
            numeric_id=14
        )
def orphan_methods(helper: AppCheckHelper, **kwargs):
    """Flag page-lifecycle functions defined at module level in pages.py.

    These names only work as methods on a Page class; defining them at
    module level is a common mistake and they are silently ignored.
    """
    pages_module = common_internal.get_pages_module(helper.app_config.name)
    lifecycle_names = ('vars_for_template', 'is_displayed',
                       'after_all_players_arrive')
    misplaced = [name for name in lifecycle_names
                 if hasattr(pages_module, name)]
    for name in misplaced:
        helper.add_error(
            'pages.py has a function {} that is not inside a class.'.format(
                name),
            numeric_id=70
        )
def pages_function(helper: AppCheckHelper, **kwargs):
    """Validate the page_sequence of the app's pages/views module.

    Checks each entry is a Page/WaitPage subclass with a sensible name,
    enforces the group_by_arrival_time placement rules, and delegates
    attribute-spelling checks to ensure_no_misspelled_attributes().
    """
    pages_module = common_internal.get_pages_module(helper.app_config.name)
    # Module may be named either views.py (legacy) or pages.py.
    views_or_pages = pages_module.__name__.split('.')[-1]
    try:
        page_list = pages_module.page_sequence
    except:
        helper.add_error(
            '{}.py is missing the variable page_sequence.'.format(
                views_or_pages),
            numeric_id=21
        )
        return
    else:
        for i, ViewCls in enumerate(page_list):
            # there is no good reason to include Page in page_sequence.
            # As for WaitPage: even though it works fine currently
            # and can save the effort of subclassing,
            # we should restrict it, because:
            # - one user had "class WaitPage(Page):".
            # - if someone makes "class WaitPage(WaitPage):", they might
            #   not realize why it's inheriting the extra behavior.
            # overall, I think the small inconvenience of having to subclass
            # once per app
            # is outweighed by the unexpected behavior if someone subclasses
            # it without understanding inheritance.
            # BUT: built-in Trust game had a wait page called WaitPage.
            # that was fixed on Aug 24, 2017, need to wait a while...
            # see below in ensure_no_misspelled_attributes,
            # we can get rid of a check there also
            if ViewCls.__name__ == 'Page':
                msg = (
                    "page_sequence cannot contain "
                    "a class called 'Page'."
                )
                helper.add_error(msg, numeric_id=22)
            if ViewCls.__name__ == 'WaitPage':
                msg = (
                    "page_sequence cannot contain "
                    "a class called 'WaitPage'."
                )
                helper.add_warning(msg, numeric_id=221)
            if issubclass(ViewCls, WaitPage):
                if ViewCls.group_by_arrival_time:
                    # group_by_arrival_time pages must come first and cannot
                    # also wait for all groups.
                    if i > 0:
                        helper.add_error(
                            '"{}" has group_by_arrival_time=True, so '
                            'it must be placed first in page_sequence.'.format(
                                ViewCls.__name__), numeric_id=23)
                    if ViewCls.wait_for_all_groups:
                        helper.add_error(
                            'Page "{}" has group_by_arrival_time=True, so '
                            'it cannot have wait_for_all_groups=True also.'.format(
                                ViewCls.__name__), numeric_id=24)
                # alternative technique is to not define the method on WaitPage
                # and then use hasattr, but I want to keep all complexity
                # out of views.abstract
                elif (
                        ViewCls.get_players_for_group != WaitPage.get_players_for_group):
                    helper.add_error(
                        'Page "{}" defines get_players_for_group, '
                        'but in order to use this method, you must set '
                        'group_by_arrival_time=True'.format(
                            ViewCls.__name__), numeric_id=25)
            elif issubclass(ViewCls, Page):
                pass  # ok
            else:
                msg = '"{}" is not a valid page'.format(ViewCls)
                helper.add_error(msg, numeric_id=26)
            ensure_no_misspelled_attributes(ViewCls, helper)
def ensure_no_misspelled_attributes(ViewCls: type, helper: AppCheckHelper):
    """Flag attributes on a page class that oTree will not recognize.

    Compares the class's own members against its bases, excuses the
    dynamic form-validation hooks (*_error_message, *_min, *_max,
    *_choices), and reports anything else as a likely misspelling or a
    Page/WaitPage mix-up.
    """
    # this messes with the logic of base classes.
    # do this instead of ViewCls == WaitPage, because _builtin already
    # subclasses it, so you would get a warning like:
    # Page "WaitPage" has the following method that is not recognized by oTree:
    # "z_autocomplete".
    if ViewCls.__name__ == 'WaitPage' or ViewCls.__name__ == 'Page':
        return
    # make sure no misspelled attributes
    base_members = set()
    for Cls in ViewCls.__bases__:
        base_members.update(dir(Cls))
    child_members = set(dir(ViewCls))
    child_only_members = child_members - base_members
    dynamic_form_methods = set()  # needs to be a set
    for member in child_only_members:
        # error_message, not _error_message
        for valid_ending in ['error_message', '_min', '_max', '_choices']:
            if member.endswith(valid_ending):
                dynamic_form_methods.add(member)
    invalid_members = child_only_members - dynamic_form_methods
    if invalid_members:
        # Escape hatch: a class may opt out of this check entirely.
        ALLOW_CUSTOM_ATTRIBUTES = '_allow_custom_attributes'
        if getattr(ViewCls, ALLOW_CUSTOM_ATTRIBUTES, False):
            return
        page_attrs = set(dir(Page))
        wait_page_attrs = set(dir(WaitPage))
        ATTRS_ON_PAGE_ONLY = page_attrs - wait_page_attrs
        ATTRS_ON_WAITPAGE_ONLY = wait_page_attrs - page_attrs
        for member in invalid_members:
            # this assumes that ViewCls is a Page or WaitPage
            if member in ATTRS_ON_PAGE_ONLY:
                assert issubclass(ViewCls, WaitPage), (ViewCls, member)
                msg = (
                    'WaitPage "{ViewClsName}" has the attribute "{member}" that is not '
                    'allowed on a WaitPage. '
                )
                numeric_id = 27
            elif member in ATTRS_ON_WAITPAGE_ONLY:
                assert issubclass(ViewCls, Page), (ViewCls, member)
                msg = (
                    'Page "{ViewClsName}" has the attribute "{member}" that is '
                    'only allowed on a WaitPage, not a regular Page. '
                )
                numeric_id=271
            elif callable(getattr(ViewCls, member)):
                msg = (
                    'Page "{ViewClsName}" has the following method that is not '
                    'recognized by oTree: "{member}". '
                    'Consider moving it into '
                    'the Player class in models.py. '
                )
                numeric_id=28
            else:
                msg = (
                    'Page "{ViewClsName}" has the following attribute that is not '
                    'recognized by oTree: "{member}". '
                )
                numeric_id=29
            fmt_kwargs = {
                'ViewClsName': ViewCls.__name__,
                'FLAG': ALLOW_CUSTOM_ATTRIBUTES,
                'member': member,
            }
            # when i make this an error, should add this workaround.
            # msg += 'If you want to keep it here, you need to set '
            # '{FLAG}=True on the page class.'
            # at first, just make it a warning.
            helper.add_error(msg.format(**fmt_kwargs), numeric_id)
def template_content_is_in_blocks(template_name: str, helper: AppCheckHelper):
    """Warn about template text that sits outside any {% block %}.

    Such text is silently dropped when the template extends a base, so the
    user never sees it rendered.
    """
    from otree.checks.templates import get_unreachable_content
    from otree.checks.templates import has_valid_encoding
    from otree.checks.templates import format_source_snippet
    # Only test files that are valid templates.
    if not has_valid_encoding(template_name):
        return
    try:
        with io.open(template_name, 'r', encoding='utf8') as f:
            # when we upgraded to Django 1.11, we got an error
            # if someone used "{% include %}" with a relative
            # path (like ../Foo.html):
            # File "c:\otree\ve_dj11\lib\site-packages\django\template\loader_tags.py", line 278, in construct_relative_path
            #    posixpath.dirname(current_template_name.lstrip('/')),
            # AttributeError: 'NoneType' object has no attribute 'lstrip'
            # can fix this by passing a dummy 'Origin' param.
            # i tried also with Engin.get_default().from_string(template_name),
            # but got the same error.
            class Origin:
                name = ''
                template_name = ''
            compiled_template = Template(f.read(), origin=Origin)
    except (IOError, OSError, TemplateSyntaxError):
        # When we used Django 1.8
        # we used to show the line from the source that caused the error,
        # but django_template_source was removed at some point,
        # so it's better to let the yellow error page show the error nicely
        return
    def format_content(text):
        # Quote each offending line with "> " for the error message.
        text = text.strip()
        lines = text.splitlines()
        lines = ['> {0}'.format(line) for line in lines]
        return '\n'.join(lines)
    contents = get_unreachable_content(compiled_template)
    content_bits = '\n\n'.join(
        format_content(bit)
        for bit in contents)
    # note: this seems to not detect unreachable content
    # if the template has a relative include,
    # like {% include "../Foo.html" %}
    # not sure why, but that's not common usage.
    if contents:
        helper.add_error(
            'Template contains the following text outside of a '
            '{% block %}. This text will never be displayed.'
            '\n\n' + content_bits,
            obj=os.path.join(helper.app_config.label,
                             helper.get_rel_path(template_name)),
            numeric_id=7)
def templates_valid(helper: AppCheckHelper, **kwargs):
    """Run the unreachable-content check on every template in the app."""
    for template_path in helper.get_template_names():
        template_content_is_in_blocks(template_path, helper)
def unique_sessions_names(helper: AppCheckHelper, **kwargs):
    """Flag SESSION_CONFIGS entries whose 'name' is used more than once."""
    seen_names = set()
    for config in settings.SESSION_CONFIGS:
        config_name = config["name"]
        if config_name in seen_names:
            helper.add_error(
                "Duplicate SESSION_CONFIG name '{}'".format(config_name),
                numeric_id=40)
        else:
            seen_names.add(config_name)
def unique_room_names(helper: AppCheckHelper, **kwargs):
    """Flag ROOMS entries whose 'name' is used more than once."""
    seen_names = set()
    # ROOMS is optional in settings, so default to an empty list.
    for room in getattr(settings, 'ROOMS', []):
        room_name = room["name"]
        if room_name in seen_names:
            helper.add_error(
                "Duplicate ROOM name '{}'".format(room_name),
                numeric_id=50)
        else:
            seen_names.add(room_name)
def template_encoding(helper: AppCheckHelper, **kwargs):
    """Report any app template that is not valid UTF-8."""
    from otree.checks.templates import has_valid_encoding
    for template_name in helper.get_template_names():
        if has_valid_encoding(template_name):
            continue
        helper.add_error(
            'The template {template} is not UTF-8 encoded. '
            'Please configure your text editor to always save files '
            'as UTF-8. Then open the file and save it again.'
            .format(template=helper.get_rel_path(template_name)),
            numeric_id=60,
        )
def make_check_function(func):
    """Adapt a per-app check into a Django system-check callable."""
    def check_function(app_configs, **kwargs):
        # Django passes an explicit app list (e.g. `otree check app1 app2`)
        # or None, which means "run the check on every app".
        errors = []
        for app_config in (app_configs or _get_all_configs()):
            func(AppCheckHelper(app_config, errors), **kwargs)
        return errors
    return check_function
def make_check_function_run_once(func):
    """Adapt a project-wide check so it runs once, against the otree app."""
    def check_function(app_configs, **kwargs):
        otree_app_config = apps.get_app_config('otree')
        # ignore app_configs list -- just run once
        errors = []
        helper = AppCheckHelper(otree_app_config, errors)
        func(helper, **kwargs)
        return errors
    return check_function
def register_system_checks():
    """Register all oTree checks with Django's system-check framework.

    Project-wide checks run once; per-app checks run once per app config.
    """
    for func in [
        unique_room_names,
        unique_sessions_names,
    ]:
        check_function = make_check_function_run_once(func)
        register(check_function)
    for func in [
        model_classes,
        files,
        constants,
        pages_function,
        templates_valid,
        template_encoding,
        orphan_methods,
    ]:
        check_function = make_check_function(func)
        register(check_function)
| [
"miyanorococo@yahoo.co.jp"
] | miyanorococo@yahoo.co.jp |
6d683963e34d212a5774a06488179ecaf8155383 | 42b0d71d2f411723a6aefcb2d9b971cfd49d3526 | /Final/CS336/Lib/site-packages/ebcli/operations/sshops.py | cfc951048480671998542264ed255546f9b243de | [] | no_license | c4r1sk1m/CS336-Final-Project | 436eb2e55a3f163f48a6b123039afb417c06cb37 | 2a00ae12b65d4d3577b981d5deb2585960fd0ec3 | refs/heads/master | 2021-04-03T06:42:35.913889 | 2018-12-12T21:17:46 | 2018-12-12T21:17:46 | 124,703,745 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,812 | py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import subprocess
import os
from cement.utils.misc import minimal_logger
from ..lib import ec2, utils
from ..objects.exceptions import NoKeypairError, NotFoundError, CommandError, InvalidOptionsError
from ..resources.strings import strings, prompts
from ..core import io, fileoperations
from . import commonops
LOG = minimal_logger(__name__)
def prepare_for_ssh(env_name, instance, keep_open, force, setup, number,
                    keyname=None, no_keypair_error_message=None,
                    custom_ssh=None, command=None):
    """Resolve which EC2 instance to SSH into, then open the session.

    With ``setup`` truthy only a keypair is configured on the environment.
    ``instance`` and ``number`` are mutually exclusive ways to pick an
    instance; with neither, a single-instance environment is used directly
    and a multi-instance one prompts the user.
    """
    if setup:
        setup_ssh(env_name, keyname)
        return

    if instance and number:
        raise InvalidOptionsError(strings['ssh.instanceandnumber'])

    if not instance:
        instances = commonops.get_instance_ids(None, env_name)
        if number is not None:
            # 1-based index into the environment's instance list.
            if not 1 <= number <= len(instances):
                raise InvalidOptionsError(
                    'Invalid index number (' + str(number) +
                    ') for environment with ' + str(len(instances)) +
                    ' instances')
            instance = instances[number - 1]
        elif len(instances) == 1:
            instance = instances[0]
        else:
            io.echo()
            io.echo('Select an instance to ssh into')
            instance = utils.prompt_for_item_in_list(instances)

    try:
        ssh_into_instance(instance, keep_open=keep_open, force_open=force,
                          custom_ssh=custom_ssh, command=command)
    except NoKeypairError:
        # Environment has no keypair configured; report and bail.
        io.log_error(no_keypair_error_message or prompts['ssh.nokey'])
def setup_ssh(env_name, keyname):
    """Attach an EC2 keypair to *env_name* so its instances accept SSH."""
    io.log_warning(
        prompts['ssh.setupwarn'].replace('{env-name}', env_name))
    chosen = prompt_for_ec2_keyname(env_name=env_name, keyname=keyname)
    if not chosen:
        return
    option = {
        'Namespace': 'aws:autoscaling:launchconfiguration',
        'OptionName': 'EC2KeyName',
        'Value': chosen,
    }
    # Rolls the new launch configuration out to the environment.
    commonops.update_environment(env_name, [option], False)
def ssh_into_instance(instance_id, keep_open=False, force_open=False, custom_ssh=None, command=None):
    """SSH into the EC2 instance *instance_id*.

    Temporarily opens port 22 on the instance's security group when needed,
    runs ``ssh`` (or *custom_ssh*) as a subprocess, and closes the port again
    afterwards unless *keep_open* is set or the open rule pre-existed.

    Raises:
        NoKeypairError: the instance has no keypair attached.
        NotFoundError: the instance exposes no usable IP/DNS name.
        CommandError: ssh is missing or exits non-zero.
    """
    instance = ec2.describe_instance(instance_id)
    try:
        keypair_name = instance['KeyName']
    except KeyError:
        raise NoKeypairError()
    try:
        ip = instance['PublicIpAddress']
    except KeyError:
        # Now allows access to private subnet
        if 'PrivateIpAddress' in instance and 'PrivateDnsName' in instance:
            ip = instance['PrivateDnsName']
        else:
            raise NotFoundError(strings['ssh.noip'])
    security_groups = instance['SecurityGroups']
    user = 'ec2-user'

    # Get security group to open
    ssh_group = None
    has_restriction = False
    rule_existed_before = False
    group_id = None
    for group in security_groups:
        group_id = group['GroupId']
        # see if group has ssh rule
        group = ec2.describe_security_group(group_id)
        for permission in group.get('IpPermissions', []):
            if permission.get('ToPort', None) == 22:
                # SSH Port group
                ssh_group = group_id
                for rng in permission.get('IpRanges', []):
                    ip_restriction = rng.get('CidrIp', None)
                    if ip_restriction is not None:
                        if ip_restriction != '0.0.0.0/0':
                            # Port 22 already open, but to a limited CIDR.
                            has_restriction = True
                        elif ip_restriction == '0.0.0.0/0':
                            # World-open rule already present; don't revoke later.
                            rule_existed_before = True

    if has_restriction and not force_open:
        io.log_warning(strings['ssh.notopening'])
    elif group_id:
        # Open up port for ssh
        io.echo(strings['ssh.openingport'])
        ec2.authorize_ssh(ssh_group or group_id)
        io.echo(strings['ssh.portopen'])

    # do ssh
    try:
        if custom_ssh:
            custom_ssh = custom_ssh.split()
        else:
            ident_file = _get_ssh_file(keypair_name)
            custom_ssh = ['ssh', '-i', ident_file]

        custom_ssh.extend([user + '@' + ip])
        if command:
            custom_ssh.extend(command.split())

        io.echo('INFO: Running ' + ' '.join(custom_ssh))
        returncode = subprocess.call(custom_ssh)
        if returncode != 0:
            LOG.debug(custom_ssh[0] + ' returned exitcode: ' + str(returncode))
            raise CommandError('An error occurred while running: ' + custom_ssh[0] + '.')
    except OSError:
        # BUGFIX: the exception was previously constructed but never raised,
        # silently swallowing the "ssh binary not found" failure.
        raise CommandError(strings['ssh.notpresent'])
    finally:
        # Close port for ssh
        if keep_open:
            pass
        elif (not has_restriction or force_open) and group_id and not rule_existed_before:
            ec2.revoke_ssh(ssh_group or group_id)
            io.echo(strings['ssh.closeport'])
def _get_ssh_file(keypair_name):
    """Return the local private-key path for *keypair_name*.

    Looks in the user's ssh folder for the bare name first, then for a
    ``.pem`` variant; raises NotFoundError when neither file exists.
    """
    base = fileoperations.get_ssh_folder() + keypair_name
    for candidate in (base, base + '.pem'):
        if os.path.exists(candidate):
            return candidate
    raise NotFoundError(strings['ssh.filenotfound'].replace(
        '{key-name}', keypair_name))
def prompt_for_ec2_keyname(env_name=None, message=None, keyname=None):
    """Interactively choose (or create) an EC2 keypair name.

    Returns the selected key name, or None if the user declines SSH setup.
    When *env_name* is given, a terminate-style confirmation is required
    instead of a yes/no prompt.
    """
    if message is None:
        message = prompts['ssh.setup']

    if env_name:
        # User must type the environment name to confirm the action.
        io.validate_action(prompts['terminate.validate'], env_name)
    else:
        io.echo(message)
        ssh = io.get_boolean_response()
        if not ssh:
            return None

    keys = [k['KeyName'] for k in ec2.get_key_pairs()]
    # Default to the last menu entry (the "create new" option appended below).
    default_option = len(keys)
    if keyname:
        # Pre-select the caller-supplied key if it already exists.
        for index, key in enumerate(keys):
            if key == keyname:
                # The selection is between 1 and len(keys)
                default_option = index + 1

    if len(keys) < 1:
        # No keypairs exist yet; generate one without showing a menu.
        keyname = _generate_and_upload_keypair(keys)
    else:
        new_key_option = '[ Create new KeyPair ]'
        keys.append(new_key_option)
        io.echo()
        io.echo(prompts['keypair.prompt'])
        keyname = utils.prompt_for_item_in_list(keys, default=default_option)

        if keyname == new_key_option:
            keyname = _generate_and_upload_keypair(keys)

    return keyname
def _generate_and_upload_keypair(keys):
    """Prompt for a key name, run ssh-keygen locally, and upload the key.

    *keys* is the list of existing EC2 key names, used only to derive a
    unique default name.  Returns the chosen key name; raises CommandError
    when ssh-keygen is missing or fails.
    """
    # Get filename
    io.echo()
    io.echo(prompts['keypair.nameprompt'])
    unique = utils.get_unique_name('aws-eb', keys)
    keyname = io.prompt('Default is ' + unique, default=unique)
    file_name = fileoperations.get_ssh_folder() + keyname

    try:
        exitcode = subprocess.call(
            ['ssh-keygen', '-f', file_name, '-C', keyname]
        )
    except OSError:
        # ssh-keygen binary not found on PATH.
        raise CommandError(strings['ssh.notpresent'])

    if exitcode == 0 or exitcode == 1:
        # if exitcode is 1, the file most likely exists, and they are
        ## just uploading it
        commonops.upload_keypair_if_needed(keyname)
        return keyname
    else:
        LOG.debug('ssh-keygen returned exitcode: ' + str(exitcode) +
                  ' with filename: ' + file_name)
        raise CommandError('An error occurred while running ssh-keygen.')
| [
"chrisk0208@gmail.com"
] | chrisk0208@gmail.com |
2f0460d311c801ced483431269b0ee3a6c8d76c4 | 44431ab9299367df80667fc5e7b38c0dd916bf98 | /Django/mainMultiApps/apps/Books_Authors/migrations/0002_auto_20180518_2339.py | a3aaf33e6ebd9a3478fcf3c9c583c709c00a4510 | [] | no_license | isoetandar/DojoAssignments | 14df2b0cefe08be8f39975c6ffa1a617dfba243c | ef4e6d75b77d69ccffc0eca91e9083e6e034d4dd | refs/heads/master | 2020-03-11T15:33:04.825745 | 2018-07-06T00:41:32 | 2018-07-06T00:41:32 | 130,087,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-05-18 23:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the explicit Books_Authors
    # through-model and replaces it with a plain ManyToManyField on Author.

    dependencies = [
        ('Books_Authors', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='books_authors',
            name='author',
        ),
        migrations.RemoveField(
            model_name='books_authors',
            name='book',
        ),
        migrations.AddField(
            model_name='author',
            name='books',
            field=models.ManyToManyField(related_name='books', to='Books_Authors.Book'),
        ),
        migrations.DeleteModel(
            name='Books_Authors',
        ),
    ]
| [
"irwansoetandar@Irwans-MacBook-Pro.local"
] | irwansoetandar@Irwans-MacBook-Pro.local |
5d9474e45b5f91ba0d0d583662c0c565381d1aa4 | b65ef73dd8f20d16c1abeb810a5c70016fcc73ab | /picam_server.py | 807dc2b322e45d4f8da3b0574730ac2dd6328532 | [] | no_license | japhsc/CameraStream | 381fbbeb1136c372161e5ca6eb60e33b1a098f11 | 2b9c58c26e4163c5872ee6ce222649ee68a9cb33 | refs/heads/main | 2023-07-14T14:48:22.189599 | 2020-11-27T12:11:25 | 2020-11-27T12:11:25 | 316,491,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | import time
from stream import stream_server, stream_stack, grayscale, unity
from cam import VideoStream
#framerate = 32
framerate = 46
#resolution = (640, 480)
#resolution = (1920, 1080)
resolution = (320, 240)
video_format = 'bgr'
vs = VideoStream( resolution=resolution, \
video_format=video_format, \
framerate=framerate)
vs.flip(False, True)
vs.info()
server = stream_server()
#stack = stream_stack(server, process=grayscale)
#stack = stream_stack(server, process=unity)
stack = stream_stack(server)
print('Start cam stream')
i,t1,t = 0,0,0
t0 = time.time()
while True:
stack.append(data=vs.raw(), size=vs.size)
i += 1
t1 = time.time()
t += t1-t0
t0 = t1
print('fps', int((i+1)/t), 'queue', len(stack), end='\r', flush=True)
| [
"jan.philipp.schroeder@physik.uni-freiburg.de"
] | jan.philipp.schroeder@physik.uni-freiburg.de |
fa783454ecad923d942564347649cccdb36ec8f1 | 3023cc5b5319e2344b10da6536e640152e66df21 | /Modules/Scripted/RigidAlignmentModule/RigidAlignmentModule.py | fea8661411d68569cd900b6be3e91a2e379dd69f | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | laurapascal/GROUPS | e8f70bc05b5d6fac92a37257797a40874421e89b | 6fb88b7828157b2d8ba2893c7af5cfe4ea3e3f83 | refs/heads/master | 2021-04-30T05:18:23.427791 | 2018-02-12T21:55:24 | 2018-02-12T21:55:24 | 121,412,721 | 0 | 0 | null | 2018-02-13T17:26:57 | 2018-02-13T17:26:57 | null | UTF-8 | Python | false | false | 13,135 | py | import os, sys
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import csv
import platform
import time
import urllib
import shutil
from CommonUtilities import *
#
# RigidAlignmentModule
#
class RigidAlignmentModule(ScriptedLoadableModule):
    """Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def __init__(self, parent):
        # Module metadata shown in Slicer's module selector and help panel.
        ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = "Rigid Alignment Module"
        self.parent.categories = ["Groups"]
        self.parent.dependencies = []
        self.parent.contributors = ["Mahmoud Mostapha (UNC)"]
        self.parent.helpText = """
    Rigid alignment of the landmarks on the unit sphere: the input models share the same unit sphere
    and their landmarks are defined as spacial coordinates (x,y,z) of the input model.
    """
        self.parent.acknowledgementText = """
    This work was supported by NIH NIBIB R01EB021391
    (Shape Analysis Toolbox for Medical Image Computing Projects).
    """
#
# RigidAlignmentModuleWidget
#
class RigidAlignmentModuleWidget(ScriptedLoadableModuleWidget):
    """Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def setup(self):
        # Load the Qt .ui layout from Resources/UI and wire its widgets.
        ScriptedLoadableModuleWidget.setup(self)
        #
        # Interface
        #
        loader = qt.QUiLoader()
        self.moduleName = 'RigidAlignmentModule'
        scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower())
        scriptedModulesPath = os.path.dirname(scriptedModulesPath)
        path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' % self.moduleName)
        qfile = qt.QFile(path)
        qfile.open(qt.QFile.ReadOnly)
        widget = loader.load(qfile, self.parent)
        self.layout = self.parent.layout()
        self.widget = widget
        self.layout.addWidget(widget)
        # Global variables of the Interface
        # Directories
        self.CollapsibleButton_Directories = self.getWidget('CollapsibleButton_Directories')
        self.RigidAlignmentInputModelsDirectory = self.getWidget('DirectoryButton_RigidAlignmentInputModelsDirectory')
        self.RigidAlignmentInputFiducialFilesDirectory = self.getWidget('DirectoryButton_RigidAlignmentInputFiducialFilesDirectory')
        self.RigidAlignmentCommonSphereDirectory = self.getWidget('DirectoryButton_RigidAlignmentCommonSphereDirectory')
        self.RigidAlignmentOutputSphericalModelsDirectory = self.getWidget('DirectoryButton_RigidAlignmentOutputSphericalModelsDirectory')
        self.RigidAlignmentOutputModelsDirectory = self.getWidget('DirectoryButton_RigidAlignmentOutputModelsDirectory')
        # Apply CLIs
        self.ApplyButton = self.getWidget('pushButton_RigidAlignment')
        # Connections
        # Directories
        # NOTE(review): onSelectedCollapsibleButtonOpen is not defined in this
        # file -- presumably provided elsewhere; confirm before relying on it.
        self.CollapsibleButton_Directories.connect('clicked()',
                                                   lambda: self.onSelectedCollapsibleButtonOpen(
                                                       self.CollapsibleButton_Directories))
        self.RigidAlignmentInputModelsDirectory.connect('directoryChanged(const QString &)', self.onSelect)
        self.RigidAlignmentInputFiducialFilesDirectory.connect('directoryChanged(const QString &)', self.onSelect)
        self.RigidAlignmentCommonSphereDirectory.connect('directoryChanged(const QString &)', self.onSelect)
        self.RigidAlignmentOutputSphericalModelsDirectory.connect('directoryChanged(const QString &)', self.onSelect)
        self.RigidAlignmentOutputModelsDirectory.connect('directoryChanged(const QString &)', self.onSelect)
        # Apply CLIs
        self.ApplyButton.connect('clicked(bool)', self.onApplyButton)
        # Refresh Apply button state
        self.onSelect()

    def cleanup(self):
        pass

    # Functions to recover the widget in the .ui file
    def getWidget(self, objectName):
        return self.findWidget(self.widget, objectName)

    def findWidget(self, widget, objectName):
        # Depth-first search of the Qt widget tree by objectName.
        if widget.objectName == objectName:
            return widget
        else:
            for w in widget.children():
                resulting_widget = self.findWidget(w, objectName)
                if resulting_widget:
                    return resulting_widget
            return None

    #
    # Directories
    #
    def onSelect(self):
        # Cache each chosen directory and enable Apply only once all five
        # differ from the "." placeholder default.
        InputModelsDirectory = self.RigidAlignmentInputModelsDirectory.directory.encode('utf-8')
        self.InputModelsDirectory = InputModelsDirectory
        InputFiducialFilesDirectory = self.RigidAlignmentInputFiducialFilesDirectory.directory.encode('utf-8')
        self.InputFiducialFilesDirectory = InputFiducialFilesDirectory
        CommonSphereDirectory = self.RigidAlignmentCommonSphereDirectory.directory.encode('utf-8')
        self.CommonSphereDirectory = CommonSphereDirectory
        OutputSphericalModelsDirectory = self.RigidAlignmentOutputSphericalModelsDirectory.directory.encode('utf-8')
        self.OutputSphericalModelsDirectory = OutputSphericalModelsDirectory
        OutputModelsDirectory = self.RigidAlignmentOutputModelsDirectory.directory.encode('utf-8')
        self.OutputModelsDirectory = OutputModelsDirectory
        # Check if each directory has been choosen
        self.ApplyButton.enabled = self.InputModelsDirectory != "." and self.InputFiducialFilesDirectory != "." and self.CommonSphereDirectory != "." and self.OutputSphericalModelsDirectory != "." and self.OutputModelsDirectory != "."

    def onApplyButton(self):
        logic = RigidAlignmentModuleLogic()
        endRigidAlignment = logic.runRigidAlignment(modelsDir=self.InputModelsDirectory, fiducialDir=self.InputFiducialFilesDirectory, sphereDir=self.CommonSphereDirectory, outputsphereDir=self.OutputSphericalModelsDirectory, outputsurfaceDir=self.OutputModelsDirectory)
        ## RigidAlignment didn't run because of invalid inputs
        # NOTE(review): runRigidAlignment has no return statement, so this value
        # is always None and the branch below always triggers; self.errorLabel
        # is also never created in this file. Both look like latent bugs --
        # confirm intended behavior.
        if not endRigidAlignment:
            self.errorLabel.show()
#
# RigidAlignmentModuleLogic
#
class RigidAlignmentModuleLogic(ScriptedLoadableModuleLogic):
    """This class should implement all the actual
    computation done by your module.  The interface
    should be such that other python code can import
    this class and make use of the functionality without
    requiring an instance of the Widget.
    Uses ScriptedLoadableModuleLogic base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def runRigidAlignment(self, modelsDir, fiducialDir, sphereDir, outputsphereDir, outputsurfaceDir):
        # Pipeline: preview inputs in ShapePopulationViewer, run the
        # RigidAlignment CLI, remesh each surface onto the aligned sphere
        # (SurfRemesh CLI), copy the _paraPhi scalars over, then preview the
        # results in ShapePopulationViewer again.
        # NOTE(review): this method returns None; the widget's onApplyButton
        # tests its return value -- consider returning True on success.
        # ------------------------------------ #
        # ---------- RigidAlignment ---------- #
        # ------------------------------------ #
        print "--- function runRigidAlignment() ---"
        """
        Calling RigidAlignment CLI
        Arguments:
        --mesh [<std::string> input models directory]
        --landmark [<std::string> input fiducial files directory]
        --sphere [<std::string> common unit sphere]
        --output [<std::string> output sphers directory]
        """
        print "--- Inspecting Input Data---"
        # List all the vtk files in the modelsDir
        listMesh = os.listdir(modelsDir)
        if listMesh.count(".DS_Store"):
            listMesh.remove(".DS_Store")
        # Creation of a CSV file to load the vtk files in ShapePopulationViewer
        #filePathCSV = os.path.join( slicer.app.temporaryPath, 'PreviewForVisualizationInSPV.csv')
        filePathCSV = slicer.app.temporaryPath + '/' + 'PreviewForVisualizationInSPV.csv'
        file = open(filePathCSV, 'w')
        cw = csv.writer(file, delimiter=',')
        cw.writerow(['VTK Files'])
        # Add the path of the vtk files
        for i in range(0, len(listMesh)):
            #VTKfilepath = os.path.join( modelsDir, listMesh[i])
            VTKfilepath = modelsDir + '/' + listMesh[i]
            if os.path.exists(VTKfilepath):
                cw.writerow([VTKfilepath])
        file.close()
        # Creation of the parameters of SPV
        parameters = {}
        parameters["CSVFile"] = filePathCSV
        # If a binary of SPV has been installed
        if hasattr(slicer.modules, 'shapepopulationviewer'):
            SPV = slicer.modules.shapepopulationviewer
        # If SPV has been installed via the Extension Manager
        elif hasattr(slicer.modules, 'launcher'):
            SPV = slicer.modules.launcher
        # Launch SPV
        slicer.cli.run(SPV, None, parameters, wait_for_completion=True)
        # Deletion of the CSV files in the Slicer temporary directory
        if os.path.exists(filePathCSV):
            os.remove(filePathCSV)
        # Creation of a file name for the common unit sphere
        listUnitSphere = os.listdir(sphereDir)
        if listUnitSphere.count(".DS_Store"):
            listUnitSphere.remove(".DS_Store")
        #UnitSphere = os.path.join(sphereDir, listUnitSphere[0])
        UnitSphere = sphereDir + '/' + listUnitSphere[0]
        print "--- Rigid Alignment Running ---"
        # Creation of the parameters of Rigid Alignment
        RigidAlignment_parameters = {}
        RigidAlignment_parameters["mesh"] = modelsDir
        RigidAlignment_parameters["landmark"] = fiducialDir
        RigidAlignment_parameters["sphere"] = UnitSphere
        RigidAlignment_parameters["output"] = outputsphereDir
        RA = slicer.modules.rigidwrapper
        # Launch Rigid Alignment
        slicer.cli.run(RA, None, RigidAlignment_parameters, wait_for_completion=True)
        print "--- Rigid Alignment Done ---"
        # ------------------------------------ #
        # ------------ SurfRemesh ------------ #
        # ------------------------------------ #
        print "--- function runSurfRemesh() ---"
        """
        Calling SurfRemesh CLI
        Arguments:
        --tempModel [<std::string> input sphere]
        --input [<std::string> input surface]
        --ref [<std::string> common unit sphere]
        --output [<std::string> output surface]
        """
        listSphere = os.listdir(outputsphereDir)
        if listSphere.count(".DS_Store"):
            listSphere.remove(".DS_Store")
        # NOTE(review): listMesh and listSphere are paired by index --
        # presumably both directories sort identically; confirm.
        for i in range(0,len(listMesh)):
            Mesh = modelsDir + '/' + listMesh[i]
            Sphere = outputsphereDir + '/' + listSphere[i]
            #Mesh = os.path.join(outputsurfaceDir, listSphere[i].split("_rotSphere.vtk",1)[0] + '_aligned.vtk')
            OutputMesh = outputsurfaceDir + '/' + listSphere[i].split("_rotSphere.vtk",1)[0] + '_aligned.vtk'
            # Creation of the parameters of SurfRemesh
            SurfRemesh_parameters = {}
            SurfRemesh_parameters["tempModel"] = Sphere
            SurfRemesh_parameters["input"] = Mesh
            SurfRemesh_parameters["ref"] = UnitSphere
            SurfRemesh_parameters["output"] = OutputMesh
            SR = slicer.modules.SRemesh
            # Launch SurfRemesh
            slicer.cli.run(SR, None, SurfRemesh_parameters, wait_for_completion=True)
            print "--- Surface " + str(i) + " Remesh Done ---"
            # ------------------------------------ #
            # ------------ Color Maps ------------ #
            # ------------------------------------ #
            # Copy the _paraPhi point scalars from the input mesh onto the
            # remeshed output so the color map is preserved.
            reader_in = vtk.vtkPolyDataReader()
            reader_in.SetFileName(str(Mesh))
            reader_in.Update()
            init_mesh = reader_in.GetOutput()
            phiArray = init_mesh.GetPointData().GetScalars("_paraPhi")
            reader_out = vtk.vtkPolyDataReader()
            reader_out.SetFileName(str(OutputMesh))
            reader_out.Update()
            new_mesh = reader_out.GetOutput()
            new_mesh.GetPointData().SetActiveScalars("_paraPhi")
            new_mesh.GetPointData().SetScalars(phiArray)
            new_mesh.Modified()
            # write circle out
            polyDataWriter = vtk.vtkPolyDataWriter()
            polyDataWriter.SetInputData(new_mesh)
            polyDataWriter.SetFileName(str(OutputMesh))
            polyDataWriter.Write()
        print "--- Surf Remesh Done ---"
        print "--- Inspecting Results ---"
        # List all the vtk files in the outputsurfaceDir
        listOutputMesh = os.listdir(outputsurfaceDir)
        if listOutputMesh.count(".DS_Store"):
            listOutputMesh.remove(".DS_Store")
        # Creation of a CSV file to load the output vtk files in ShapePopulationViewer
        #filePathCSV = os.path.join( slicer.app.temporaryPath, 'PreviewForVisualizationInSPV.csv')
        filePathCSV = slicer.app.temporaryPath + '/' + 'PreviewForVisualizationInSPV.csv'
        file = open(filePathCSV, 'w')
        cw = csv.writer(file, delimiter=',')
        cw.writerow(['VTK Files'])
        # Add the path of the vtk files
        for i in range(0, len(listOutputMesh)):
            #VTKfilepath = os.path.join( outputsurfaceDir, listOutputMesh[i])
            VTKfilepath = outputsurfaceDir + '/' + listOutputMesh[i]
            if os.path.exists(VTKfilepath):
                cw.writerow([VTKfilepath])
        file.close()
        # Creation of the parameters of SPV
        parameters = {}
        parameters["CSVFile"] = filePathCSV
        # Launch SPV
        slicer.cli.run(SPV, None, parameters, wait_for_completion=True)
        # Deletion of the CSV files in the Slicer temporary directory
        if os.path.exists(filePathCSV):
            os.remove(filePathCSV)
| [
"mahmoudm@email.unc.edu"
] | mahmoudm@email.unc.edu |
2637560bd42c72e37758270f085ace4411c325c5 | f6d1d1a4aec526ffb53aa2f932fc0144e20a1cfb | /MachineLearning/natural-language-processing/bangla-words/bangla-word-generator.py | b97cff0bec952217c2b767c908ccc129da8faf2c | [] | no_license | AsifKHasan/archived-codes | 088698fa72d44a15de01ad02cff80d1202caa11a | 1fce54ab01b2d47c649d3998d42a99c73ca1992e | refs/heads/master | 2022-12-08T19:42:17.589544 | 2020-08-29T20:32:23 | 2020-08-29T20:32:23 | 291,284,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,641 | py | #!/usr/bin/env python3
'''
from command line
------------------
./bangla-word-generator.py --data './data/bangla-words.txt' --outdir './output' --startswith 'অ'
from py files
------------------
'''
import argparse
import datetime
import time
import json
import re
import os
# Ordered regex clean-up passes applied to the raw word list before it is
# split into lines (see WordGenerator.generate_words).
patterns = [
    {"search-for": "[ ]*\[.+\][ ]*", "replace-with": " "},
    {"search-for": "[ ]*\(.+\)[ ]*", "replace-with": " "},
    {"search-for": "[, ]*কি০[ ]*", "replace-with": " "},
    {"search-for": "[, ]*তু০[ ]*", "replace-with": " "},
    {"search-for": "[, ]*দ্র০[ ]*", "replace-with": " "},
    {"search-for": "[, ]*যে০[ ]*", "replace-with": " "},
    {"search-for": "[,.;:?‘'’]\n", "replace-with": "\n"},
    {"search-for": "[‘'’]", "replace-with": ""},
    {"search-for": " [ ]+", "replace-with": " "},
    {"search-for": "[ ]+\n", "replace-with": "\n"},
    {"search-for": "\n[ ]+", "replace-with": "\n"},
    {"search-for": "\n[\n]+", "replace-with": "\n"},
    {"search-for": "\n,", "replace-with": ","},
    {"search-for": " ,", "replace-with": ","},
]


class WordGenerator(object):
    """Clean a raw Bangla word list and write out one word per line.

    Lines still containing commas after clean-up are treated as multi-entry
    rows and dropped; the remaining single-word lines are written to
    ``<outdir>/<datafile-stem>-<startswith><datafile-ext>``.
    """

    def __init__(self, datafile, outdir, startswith):
        self._start_time = int(round(time.time() * 1000))
        self._DATA_FILE = datafile
        self._OUTPUT_DIR = outdir
        self._STARTSWITH = startswith
        # NOTE(review): _START_LIST is parsed but never used to filter the
        # words -- presumably the startswith filter was planned but not
        # implemented. TODO confirm intended behavior before adding it.
        self._START_LIST = startswith.split(',')

        # Output file name (BUGFIX: os.path.splitext keeps the dot in the
        # extension, so the old '{}.{}' format produced a doubled dot).
        f_name, f_ext = os.path.splitext(self._DATA_FILE)
        f_name = os.path.basename(f_name)
        self._output_file_path = os.path.join(
            self._OUTPUT_DIR, '{}-{}{}'.format(f_name, self._STARTSWITH, f_ext))

    def run(self):
        """Execute the full pipeline: setup, read, clean, write."""
        self.set_up()
        self.read_data()
        self.generate_words()
        self.tear_down()

    def set_up(self):
        # No preparation currently needed; kept for pipeline symmetry.
        pass

    def read_data(self):
        # The word list is Bangla text; always decode as UTF-8 rather than
        # the platform default.
        with open(self._DATA_FILE, "r", encoding="utf-8") as f:
            self._INPUT = f.read()

    def generate_words(self):
        # Apply the clean-up regexes in order, then keep only the lines that
        # contain no comma (single-word entries).
        text = self._INPUT
        for p in patterns:
            text = re.sub(p["search-for"], p["replace-with"], text)
        self._OUTPUT = [line for line in text.split('\n') if ',' not in line]

    def tear_down(self):
        # Join the surviving lines and write the result, then report timing.
        if not isinstance(self._OUTPUT, str):
            self._OUTPUT = '\n'.join(self._OUTPUT)

        with open(self._output_file_path, "w", encoding="utf-8") as f:
            f.write(self._OUTPUT)

        self._end_time = int(round(time.time() * 1000))
        print("Script took {} seconds".format((self._end_time - self._start_time)/1000))
if __name__ == '__main__':
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--datafile", required=True, help="data file where the raw list is")
    ap.add_argument("-o", "--outdir", required=True, help="output directory where output word list will be stored")
    ap.add_argument("-s", "--startswith", required=True, help="list of char/word as the begining sequence for words to be extracted")
    args = vars(ap.parse_args())

    # Run the full clean-and-extract pipeline once per invocation.
    processor = WordGenerator(args["datafile"], args["outdir"], args["startswith"])
    processor.run()
| [
"asifhasan@gmail.com"
] | asifhasan@gmail.com |
25df8e23285df7d461532fca359f86f047702c6f | 5a6253dc8f08bf2186c64a14e512c714d36f3460 | /workspace_PJM/api_ex1.py | 1dc2212dd32bc540c52979a8c9d14ef61eb599e1 | [] | no_license | NicolasMonnier/N-Body | 711d7f1c8aa6188df93abbd66deeeb2e5947c12e | 6fd3edb9f0d5515cdfb808dbdc6c84cd258d47e9 | refs/heads/master | 2020-05-07T09:44:47.840082 | 2019-04-30T15:29:19 | 2019-04-30T15:29:19 | 180,390,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import zmq
import time
from qcg.appscheduler.api.manager import Manager
from qcg.appscheduler.api.job import Jobs
#m = Manager("tcp://127.0.0.1:5555")
# Connect to a QCG-PilotJob manager service (default local endpoint).
#m = Manager("tcp://127.0.0.1:5555")
m = Manager()
print("connected")
print("available resources:\n%s\n" % str(m.resources()))
#print("submited jobs:\n%s\n" % str(m.list().names()))
#j = Jobs()
#j.add( 'j1', { 'exec': '/bin/date' } )
# Submit two simple jobs; each job's stdout is redirected to a file.
ids = m.submit(Jobs().
    add( name = 'j1', exec = '/bin/date', stdout = 'j1.stdout' ).
    add( name = 'j2', exec = '/bin/hostname', args = [ '--fqdn'], stdout = 'j2.stdout')
    )
# NOTE(review): the intermediate status values are overwritten and unused --
# presumably left over from interactive experimentation.
status = m.status(ids)
status = m.status('j1')
time.sleep(2)
status = m.status(ids)
# Block until all submitted jobs complete, then clean up and disconnect.
m.wait4all()
#m.wait4(ids)
#info = m.info(ids)
m.remove(ids)
m.finish()
time.sleep(1)
| [
"nicolas.monnier44@gmail.com"
] | nicolas.monnier44@gmail.com |
c2515c2721e9a09956edc6813e56c7a57a91842a | 611e5481b709f22d01c7afad4fddd1bb291efcf0 | /User_dashboard/apps/MessageWall/apps.py | 5f0c252112ecc9856af7e466d729a8dc3d46feae | [] | no_license | AaronDasani/Django | 2ffc5ffc69266d59570c3faa19cfde8e13cdd307 | 7acf0721744accb333f15013d9988f5fe3b1ad5c | refs/heads/master | 2021-07-23T23:30:23.457589 | 2019-01-14T19:23:57 | 2019-01-14T19:23:57 | 150,312,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class MessagewallConfig(AppConfig):
    """Django AppConfig for the MessageWall app."""
    name = 'MessageWall'
| [
"maad@Aarons-iMac.local"
] | maad@Aarons-iMac.local |
05b2155ccf0ee0eac6eba7f388116873fc63c888 | 18ad10a325238fa9f7b4643ace1cb64455f62567 | /Class 1.0/Example 1.0.py | bc0816b71a4456b511d549449f32f5a25f87df64 | [] | no_license | BrendanStringer/CS021 | 754c618010f3574dfd72ad17fb73fa023d0b801c | e91da90f8a398e7e02a464a8a72ca68d281cb349 | refs/heads/master | 2016-08-03T18:12:57.195492 | 2015-05-08T12:00:57 | 2015-05-08T12:00:57 | 32,987,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | print('Nudge Nudge')
# Print the remaining catchphrase lines in order.
for phrase in ('Wink Wink', 'Know what I mean?'):
    print(phrase)
| [
"bstringe@uvm.edu"
] | bstringe@uvm.edu |
523392d1551862017fef37d6b4359a0894cdf717 | 35f74b095b0949752dffc0ba3c883168eeb52390 | /tests/test_requests.py | b7f92897d1d0ad0997a57e7221827d59cbd1941c | [
"MIT"
] | permissive | ezdookie/mashina | 89e7dd1da86ccd0d3f104b889a2f8e506349e240 | c018a4e5ee006dc45a89f2ee9e7cf554420b490e | refs/heads/master | 2021-06-05T20:02:23.234262 | 2020-05-18T05:36:27 | 2020-05-18T05:36:27 | 147,943,488 | 0 | 0 | MIT | 2021-03-25T22:53:20 | 2018-09-08T15:02:18 | Python | UTF-8 | Python | false | false | 885 | py | def test_crud(falcon_client):
# testing create
result = falcon_client.simulate_post('/todos', json={
'name': 'testing'
})
assert result.status_code == 200
assert 'name' in result.json
# testing list
result = falcon_client.simulate_get('/todos')
assert result.status_code == 200
assert result.json['count'] == 1
# testing retrieve
todo_id = result.json['results'][0]['id']
todo_url = '/todos/%s' % todo_id
result = falcon_client.simulate_get(todo_url)
assert result.status_code == 200
assert result.json['id'] == todo_id
# testing update
result = falcon_client.simulate_patch(todo_url, json={'name': 'new name'})
assert result.status_code == 200
assert result.json['name'] == 'new name'
# testing delete
result = falcon_client.simulate_delete(todo_url)
assert result.status_code == 200
| [
"brian@bleax.com"
] | brian@bleax.com |
cc31086004a058c6128821d98e1ecfd66a959004 | d49e18cbf59e5200375e33a62243824d3a26366c | /Random_Spike.py | 244ca2dd9bf33daa822bfae3ca5deb0cf0809217 | [] | no_license | ryscet/Cell_Models | 5b64bd2bdb76aee525312f097a5c20a0b984d598 | 34174f96098a74f351e6be0da8fac5043fd0a268 | refs/heads/master | 2022-02-23T11:40:19.196234 | 2022-02-05T16:33:37 | 2022-02-05T16:33:37 | 35,093,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 5 11:11:54 2015
@author: ryszardcetnarski
"""
import numpy as np
from pylab import *
from random import sample
class RandomSpike:
    'Generates spikes in a given frequency with uniform noise'

    ## setup parameters and state variables
    T = 1000 # total time to simulate (msec)
    ## Random Spike variables
    spike_amp = 1.5  # membrane-potential value written at each spike index

    def __init__(self, _firingRate):
        # _firingRate: number of spikes placed in the T-msec window; spike
        # times are drawn uniformly without replacement via random.sample.
        self.Vm = np.zeros(self.T) # potential (V) trace over time
        self.firingRate = _firingRate
        self.spike_t = sample(range(0, self.T), self.firingRate) # select a random index for a spike. Firing rate determines the amount of spikes within the simulation time. They are uniformly distributed.
        self.Vm[self.spike_t] = self.spike_amp # At the selected random indexes mark a spike

    #def Simulate(self, timestep):
        #return self.Vm

    def Plot(self):
        ## plot membrane potential trace
        # NOTE(review): relies on pylab's star import (figure, plot, title,
        # ...) performed at file scope.
        figure()
        time = np.linspace(0,self.T,1000)  # local name shadows any 'time' module import
        plot(time, self.Vm)
        title('Random Spike')
        ylabel('Membrane Potential (V)')
        xlabel('Time (msec)')
        ylim([0,2])
        show()
| [
"cetnarski.ryszard@gmail.com"
] | cetnarski.ryszard@gmail.com |
88311225732da2fab0381d52abffa64ea94d489c | 6ef2e4ca72ba593724198777f1e0e6f553576c1f | /Extractor.py | 9c0e29f86102acdb39488026a13ca43f6943a39b | [
"MIT"
] | permissive | nikhil-mat/Notion-Token_v2-Extractor | 151d1cb5fd68cb3188faad3f8d7d952fa078a27f | 0ac059ad17363665968b7a27c48b1579042e95d5 | refs/heads/master | 2022-12-22T11:35:47.653811 | 2020-09-15T14:32:37 | 2020-09-15T14:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | import selenium #Is the webscraping framework
import os # Used to get the username in order to locate the default Chrome profile
from selenium import webdriver
from tkinter import *

# Extracts the Notion 'token_v2' cookie from the local Chrome profile.
# before running this please make sure chrome is not running in the background with the default profile on
# In this version of the implementation the user must have had already logged in to the account
# NOTE(review): Windows-only paths below (profile dir and chromedriver.exe).
usern = os.getlogin() #getting the username
doka = f'user-data-dir=C:\\Users\\{usern}\\AppData\\Local\\Google\\Chrome\\User Data' # opening chrome's default profile
ch_options = webdriver.ChromeOptions()
ch_options.add_argument(doka) #Path to your chrome profile
driver = webdriver.Chrome(executable_path="C:\Program Files (x86)\Google\chromedriver.exe", options=ch_options)

#getting the cookie
driver.get("https://notion.so") #opening Notion
ilist= driver.get_cookie('token_v2') #searching for the cookie

#searching for the value of the cookie
# NOTE(review): if the cookie is absent, get_cookie returns None and the
# v[0] below raises -- presumably the user is assumed to be logged in.
tokenv2 = {'value'}
v = [ilist[i] for i in tokenv2 if i in ilist ]
result = v[0]

#saving result to file and printing
with open('tokenv2.txt','w') as file:
    file.write(result)

driver.quit() #closing chrome

'''
#remove quotation marks in order to print value window
window = Tk()
window.title('Your Notion token')
lbl = Label(window, text = result,font=("Arial", 25))
lbl.grid(column=0, row =0)
window.mainloop()
'''
| [
"nikmat5525@gmail.com"
] | nikmat5525@gmail.com |
dec72522fe21ad2176789e058e07d827abab8e49 | 5820a5e30b2a20e453524ccd3c7367fd52fde4f1 | /admin/cafe/migrations/0011_auto_20200424_1848.py | 6b12b152c935fe3c325bb80e3da03b7d5f567253 | [] | no_license | giulianojordao/maid-cafe | 37b51edddee9ec3bd112df226f3742cc3fb1103f | 843357e485cf3bce269d0833e9341c3e81c10f7c | refs/heads/master | 2022-12-10T19:39:47.489486 | 2020-05-14T00:28:27 | 2020-05-14T00:28:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-04-24 18:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: makes Request.order a CASCADE FK with related_name
    # 'requests' / related_query_name 'request'.

    dependencies = [
        ('cafe', '0010_order_end_at'),
    ]

    operations = [
        migrations.AlterField(
            model_name='request',
            name='order',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='requests', related_query_name='request', to='cafe.Order'),
        ),
    ]
| [
"sy.fen0@gmail.com"
] | sy.fen0@gmail.com |
ebab8ea00d95993626f94c0ecca900fdf0545178 | 28925c7001008b93f16971363e34d6c28c9eee55 | /blog/migrations/0001_initial.py | 5ae2881f510f3bb4857bd40ce638e1676b052670 | [] | no_license | mikenrowland/miniBlog | 0e97ce3042e4d4353085878edfc76fcedc9c430b | c133c57c99c9072f1329e97db4fac925ff96a002 | refs/heads/main | 2023-04-23T14:09:08.414911 | 2021-05-17T18:09:59 | 2021-05-17T18:09:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # Generated by Django 3.2 on 2021-04-24 15:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('body', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"mykhelmyers@gmail.com"
] | mykhelmyers@gmail.com |
d4533f4cdf53a8a902ef0e5e52f13d6ae690bf32 | cfc3fa658f826d02308453e557d82758895399c2 | /datasets/id_newspapers_2018/id_newspapers_2018.py | 96a294e8fc22502654396c9ba5f85efe68734ddd | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | meehawk/datasets | cac530ec0e17514c01cdff30302521d6303ed93b | b70141e3c5149430951773aaa0155555c5fb3e76 | refs/heads/master | 2023-03-29T12:51:54.700891 | 2021-04-08T17:22:53 | 2021-04-08T17:22:53 | 355,996,122 | 9 | 0 | Apache-2.0 | 2021-04-08T17:31:03 | 2021-04-08T17:31:02 | null | UTF-8 | Python | false | false | 4,123 | py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Indonesian Newspapers 2018"""
from __future__ import absolute_import, division, print_function
import glob
import json
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{id_newspapers_2018,
author = {},
title = {Indonesian Newspapers 2018},
year = {2019},
url = {https://github.com/feryandi/Dataset-Artikel},
}
"""
_DESCRIPTION = """\
The dataset contains around 500K articles (136M of words) from 7 Indonesian newspapers: Detik, Kompas, Tempo,
CNN Indonesia, Sindo, Republika and Poskota. The articles are dated between 1st January 2018 and 20th August 2018
(with few exceptions dated earlier). The size of uncompressed 500K json files (newspapers-json.tgz) is around 2.2GB,
and the cleaned uncompressed in a big text file (newspapers.txt.gz) is about 1GB. The original source in Google Drive
contains also a dataset in html format which include raw data (pictures, css, javascript, ...)
from the online news website
"""
_HOMEPAGE = "https://github.com/feryandi/Dataset-Artikel"
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International Public License"
_URLs = ["http://cloud.uncool.ai/index.php/s/kF83dQHfGeS2LX2/download"]
class IdNewspapers2018Config(datasets.BuilderConfig):
"""BuilderConfig for IdNewspapers2018"""
def __init__(self, **kwargs):
"""BuilderConfig for IdNewspapers2018.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(IdNewspapers2018Config, self).__init__(**kwargs)
class IdNewspapers2018(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
IdNewspapers2018Config(
name="id_newspapers_2018",
version=VERSION,
description="IdNewspapers2018 dataset",
),
]
def _info(self):
features = datasets.Features(
{
"id": datasets.Value("string"),
"url": datasets.Value("string"),
"date": datasets.Value("string"),
"title": datasets.Value("string"),
"content": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
my_urls = _URLs[0]
data_dir = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"article_dir": os.path.join(data_dir, "newspapers"),
"split": "train",
},
)
]
def _generate_examples(self, article_dir, split):
logger.info("⏳ Generating %s examples from = %s", split, article_dir)
id = 0
for path in sorted(glob.glob(os.path.join(article_dir, "**/*.json"), recursive=True)):
with open(path, encoding="utf-8") as f:
data = json.load(f)
yield id, {
"id": str(id),
"url": data["url"],
"date": data["date"],
"title": data["title"],
"content": data["content"],
}
id += 1
| [
"noreply@github.com"
] | noreply@github.com |
1ffe7508a2c77980239428715f48e1ac4775dab5 | f902c86bdb2f6d34cd36d6a136ce3900a27615c0 | /pic/views.py | 9a705fbd785838c91bf51de8746200f5f4b432e1 | [] | no_license | easy-spider/easyspider-web | a0e14bc49035d380c1d9bd0a2d3996938cb74b1e | a68b8d5f8cc5bdc8e0e1c839f2406dac6d4d3417 | refs/heads/master | 2021-05-17T00:08:15.419969 | 2020-06-15T08:17:50 | 2020-06-15T08:17:50 | 250,528,868 | 1 | 0 | null | 2020-06-15T08:17:51 | 2020-03-27T12:29:50 | HTML | UTF-8 | Python | false | false | 1,978 | py | import os
from django.http import HttpResponseForbidden, HttpResponse, HttpResponseNotFound
from EasySpiderWeb import settings
PIC_DIR = os.path.join(settings.BASE_DIR, 'upload', 'templatepics')
def read_pic(rel_path):
"""从PIC_DIR下读取图片,返回HttpResponse对象。
:param rel_path: PIC_DIR下的相对路径
:return: 200 - 正常读取;404 - 文件不存在;403 - 文件位置超出范围
"""
path = os.path.abspath(os.path.join(PIC_DIR, rel_path))
if not path.startswith(PIC_DIR):
return HttpResponseForbidden()
try:
with open(path, 'rb') as f:
return HttpResponse(f.read(), content_type='image/jpeg')
except FileNotFoundError:
return HttpResponseNotFound()
def save_pic(pic_file, dst_path):
"""将图片保存到PIC_DIR目录,不存在的目录将被自动创建
:param pic_file: 图片文件对象
:param dst_path: 目标文件在PIC_DIR下的相对路径
:exception ValueError: 如果文件位置超出范围
"""
path = os.path.abspath(os.path.join(PIC_DIR, dst_path))
if not path.startswith(PIC_DIR):
raise ValueError('dst out of directory')
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as f:
for chunk in pic_file.chunks():
f.write(chunk)
def site_logo(request, name):
"""网站图标"""
return read_pic(os.path.join(name, 'logo.jpg'))
def template_logo(request, site_name, template_name):
"""模板图标"""
return read_pic(os.path.join(site_name, template_name, 'logo.jpg'))
def template_field(request, site_name, template_name, field_name):
"""采集字段预览图片"""
return read_pic(os.path.join(site_name, template_name, 'field', field_name + '.jpg'))
def template_param(request, site_name, template_name, param_name):
"""模板参数预览图片"""
return read_pic(os.path.join(site_name, template_name, 'param', param_name + '.jpg'))
| [
"979481894@qq.com"
] | 979481894@qq.com |
b0324e908caf5c8d0f997eda2f80a6bd333b38e5 | d2e79ea4373cb3c41dfdaad3418572b301f24eff | /recnici.py | 179c8ee75232e58ca54a7129b0a71ac96f2b6f3d | [] | no_license | kidju/bedni-pokusaj-pocetnika | ef30a9ed58189daf1da789d3dd26941c46a46758 | 57eeba767394f1bd30f678f5c8dd4f93bdc247f6 | refs/heads/master | 2021-06-22T14:30:08.670828 | 2018-07-09T08:04:57 | 2018-07-09T08:04:57 | 95,883,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # -*- coding: utf-8 -*-
telefonski_imenik = {"Paja Patak": 123456, "Mini Maus": 234567, "Šilja": 345678}
vukajlija = {"sojanica": "Posna pljeskavica. Garantovano bez trihinele.", "jahanje": "Omiljena aktivnost šefova za koju je potrebno da radnik ima konjske živce.", "šef": "Čovek koji nema smisao za umor."}
vrste_reci = {"imenice": ["polaznik", "seminar", "lingvistika", "Isidora"], "glagoli": ["slušati", "crtati", "jesti"], "zamenice": ["on", "ona", "ono"]}
print(telefonski_imenik["Mini Maus"])
print(vrste_reci["imenice"])
print(vukajlija["šef"])
print(telefonski_imenik["Paja Patak"])
print(vrste_reci["glagoli"][2])
print(vukajlija["sojanica"]) | [
"noreply@github.com"
] | noreply@github.com |
0550f4a3e869ec8fdcb5f4204c9d9166b5e5527a | 39ce76a939e083a08b11f35b7482f45ddeab5415 | /ex3.py | 174ca67e2410bf0b4efc1631f7c0d353f0755fae | [
"MIT"
] | permissive | wf539/LearnPythonHardWay | b3624f24c1792e089d29d171e88f329599de685d | 854fb5324465f49d163c901c6e2eee5788df172a | refs/heads/master | 2021-02-26T08:25:12.097487 | 2020-03-06T20:26:03 | 2020-03-06T20:26:03 | 245,510,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | # Print a line of text
print "I will now count my chickens:"
print "Hens", 25 + 30 / 6
print "Roosters", 100 - 25 * 3 % 4
# Print a line of text
print "I will now count the eggs:"
# Perform calculation
print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6
# Print a line of text
print "Is it true that 3 + 2 < 5 - 7?"
# Return the result of the evaluation of an expression
print 3 + 2 < 5 - 7
# Print two lines of text
# Append to each line the result of the evaluation of an expression
print "What is 3 + 2?:", 3 + 2
print "What is 5 - 7?:", 5 - 7
# Print two lines of text
print "Oh, that's why it's False."
print "How about some more?"
# Print three lines of text
# Append to each line the result of the evaluation of an inequality
print "Is it greater?", 5 > -2
print "Is it greater or equal?", 5 >= -2
print "Is it less than or equal?", 5 <= -2
| [
"willetfarm@yahoo.com"
] | willetfarm@yahoo.com |
17154043547b14982f365d99f3c9ecf178e27f2c | 700d4302a675b6aaaa7514a87d87ccd614051712 | /electrum_dash/gui/qt/dash_net_dialog.py | 99e82390ee338bfa47233139df37e07b96842c65 | [
"MIT"
] | permissive | bynicolas/electrum-pac | ce37033c6274f671674bf5d707010d31dab254b5 | 8c61fc2e14fc307f40d1cc785f2a604ab4a2be04 | refs/heads/master | 2023-03-11T14:29:36.624470 | 2021-02-23T23:25:20 | 2021-02-23T23:25:20 | 340,194,875 | 0 | 0 | MIT | 2021-02-24T00:02:54 | 2021-02-18T22:32:44 | Python | UTF-8 | Python | false | false | 17,492 | py | # -*- coding: utf-8 -*-
import time
from enum import IntEnum
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtWidgets import (QGridLayout, QDialog, QVBoxLayout, QCheckBox,
QTabWidget, QWidget, QLabel, QSpinBox, QLineEdit,
QTreeWidget, QTreeWidgetItem, QMenu, QHeaderView)
from electrum_dash import constants
from electrum_dash.dash_net import MIN_PEERS_LIMIT, MAX_PEERS_LIMIT
from electrum_dash.i18n import _
from electrum_dash.logging import get_logger
from .util import Buttons, CloseButton
_logger = get_logger(__name__)
MATCH_STR_CS = Qt.MatchFixedString | Qt.MatchCaseSensitive
class DashPeersWidget(QTreeWidget):
class Columns(IntEnum):
PEER = 0
UAGENT = 1
PING = 2
READ = 3
WRITE = 4
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Peer'), _('User Agent'), _('Ping time (ms)'),
_('Received KiB'), _('Sent KiB')])
h = self.header()
mode = QHeaderView.ResizeToContents
h.setSectionResizeMode(self.Columns.PEER, mode)
h.setSectionResizeMode(self.Columns.UAGENT, mode)
h.setSectionResizeMode(self.Columns.PING, mode)
h.setSectionResizeMode(self.Columns.READ, mode)
h.setSectionResizeMode(self.Columns.WRITE, mode)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
dash_net = self.parent.network.dash_net
peer = item.text(self.Columns.PEER)
menu = QMenu()
menu.addAction(_('Disconnect'), lambda: self.disconnect(peer))
if not dash_net.use_static_peers:
menu.addAction(_('Ban'),
lambda: self.disconnect(peer, 'ban from gui'))
menu.exec_(self.viewport().mapToGlobal(position))
def disconnect(self, peer, msg=None):
dash_net = self.parent.network.dash_net
dash_peer = dash_net.peers.get(peer)
if dash_peer:
if msg:
dash_peer.ban(msg)
dash_peer.close()
def update(self, event=None, args=None):
dash_net = self.parent.network.dash_net
peers = dash_net.peers
if event is None:
self.clear()
for peer, dash_peer in sorted(list(peers.items())):
self.add_peer(peer, dash_peer)
elif event == 'dash-peers-updated':
action, peer = args
if action == 'added':
dash_peer = peers.get(peer)
if dash_peer:
self.add_peer(peer, dash_peer, insert=True)
elif action == 'removed':
items = self.findItems(peer, MATCH_STR_CS)
if items:
idx = self.indexOfTopLevelItem(items[0])
self.takeTopLevelItem(idx)
elif event == 'dash-net-activity':
for peer, dash_peer in sorted(list(peers.items())):
items = self.findItems(peer, MATCH_STR_CS)
if items:
ping_time = str(dash_peer.ping_time)
read_kbytes = str(round(dash_peer.read_bytes/1024, 1))
write_kbytes = str(round(dash_peer.write_bytes/1024, 1))
for i in items:
i.setText(self.Columns.PING, ping_time)
i.setText(self.Columns.READ, read_kbytes)
i.setText(self.Columns.WRITE, write_kbytes)
super().update()
def add_peer(self, peer, dash_peer, insert=False):
dash_net = self.parent.network.dash_net
peers = dash_net.peers
v = dash_peer.version
user_agent = v.user_agent.decode('utf-8')
ping_time = str(dash_peer.ping_time)
read_kbytes = str(round(dash_peer.read_bytes/1024, 1))
write_kbytes = str(round(dash_peer.write_bytes/1024, 1))
peers_item = QTreeWidgetItem([peer, user_agent, ping_time,
read_kbytes, write_kbytes])
if peers:
sorted_peers = sorted(list(peers.keys()))
if peer in sorted_peers:
idx = sorted_peers.index(peer)
self.insertTopLevelItem(idx, peers_item)
else:
self.addTopLevelItem(peers_item)
else:
self.addTopLevelItem(peers_item)
class SporksWidget(QTreeWidget):
class Columns(IntEnum):
NAME = 0
ACTIVE = 1
VALUE = 2
DEFAULT = 3
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Spork'), _('Active'), _('Value'), ''])
h = self.header()
mode = QHeaderView.ResizeToContents
h.setSectionResizeMode(self.Columns.NAME, mode)
h.setSectionResizeMode(self.Columns.ACTIVE, mode)
h.setSectionResizeMode(self.Columns.VALUE, mode)
h.setSectionResizeMode(self.Columns.DEFAULT, mode)
def update(self):
dash_net = self.parent.network.dash_net
sporks_dict = dash_net.sporks.as_dict()
self.clear()
for k in sorted(list(sporks_dict.keys())):
name = sporks_dict[k]['name']
active = str(sporks_dict[k]['active'])
value = str(sporks_dict[k]['value'])
default = _('Default') if sporks_dict[k]['default'] else ''
spork_item = QTreeWidgetItem([name, active, value, default])
self.addTopLevelItem(spork_item)
super().update()
class BanlistWidget(QTreeWidget):
class Columns(IntEnum):
PEER = 0
UA = 1
MSG = 2
AT = 3
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent
self.setHeaderLabels([_('Peer'), _('User Agent'),
_('Message'), _('Ban time')])
h = self.header()
mode = QHeaderView.ResizeToContents
h.setSectionResizeMode(self.Columns.PEER, mode)
h.setSectionResizeMode(self.Columns.UA, mode)
h.setSectionResizeMode(self.Columns.MSG, mode)
h.setSectionResizeMode(self.Columns.AT, mode)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
peer = item.text(self.Columns.PEER)
menu = QMenu()
menu.addAction(_('Remove'), lambda: self.unban(peer))
menu.exec_(self.viewport().mapToGlobal(position))
def unban(self, peer):
dash_net = self.parent.network.dash_net
if peer:
dash_net._remove_banned_peer(peer)
def update(self, event=None, args=None):
dash_net = self.parent.network.dash_net
banlist = dash_net.banlist
if event is None:
self.clear()
for peer in sorted(list(banlist.keys())):
self.add_peer(peer)
else:
action, peer = args
if action == 'added':
self.add_peer(peer, insert=True)
elif action == 'removed':
items = self.findItems(peer, MATCH_STR_CS)
if items:
idx = self.indexOfTopLevelItem(items[0])
self.takeTopLevelItem(idx)
super().update()
def add_peer(self, peer, insert=False):
dash_net = self.parent.network.dash_net
banlist = dash_net.banlist
ua = banlist[peer]['ua']
at = str(time.ctime(banlist[peer]['at']))
msg = str(banlist[peer]['msg'])
banlist_item = QTreeWidgetItem([peer, ua, msg, at])
if banlist:
sorted_banlist = sorted(list(banlist.keys()))
if peer in sorted_banlist:
idx = sorted_banlist.index(peer)
self.insertTopLevelItem(idx, banlist_item)
else:
self.addTopLevelItem(banlist_item)
else:
self.addTopLevelItem(banlist_item)
class DashNetDialogLayout(object):
def __init__(self, network, config, parent):
self.parent = parent
self.network = network
self.config = config
self.tabs = tabs = QTabWidget()
dash_net_tab = QWidget()
sporks_tab = QWidget()
banlist_tab = QWidget()
bls_speed_tab = QWidget()
tabs.addTab(dash_net_tab, _('Dash Network'))
tabs.addTab(sporks_tab, _('Sporks'))
tabs.addTab(banlist_tab, _('Banlist'))
if parent.is_testnet:
tabs.addTab(bls_speed_tab, _('BLS Speed'))
self.min_t = 1000
self.max_t = 0
self.n_measures = -1
def min_str():
return _('Min time') + f': {self.min_t}'
def max_str():
return _('Max time') + f': {self.max_t}'
self.min_label = QLabel(min_str())
self.max_label = QLabel(max_str())
vbox = QVBoxLayout(bls_speed_tab)
vbox.addWidget(self.min_label)
vbox.addWidget(self.max_label)
self.timer = QTimer()
self.timer.setInterval(500)
def update_bls_speed():
if self.parent.isVisible() and bls_speed_tab.isVisible():
start_t = time.time()
res = self.network.dash_net.test_bls_speed()
res_t = time.time() - start_t
_logger.info(f'Test BLS Speed: res={res}, time={res_t}')
self.min_t = min(self.min_t, res_t)
self.max_t = max(self.max_t, res_t)
self.min_label.setText(min_str())
self.max_label.setText(max_str())
self.n_measures += 1
if self.n_measures >= 100:
self.timer.stop()
self.timer.timeout.connect(update_bls_speed)
def on_tabs_current_changed(*args):
cur_widget = self.tabs.currentWidget()
if cur_widget == bls_speed_tab and self.n_measures < 0:
self.n_measures = 0
self.timer.start()
tabs.currentChanged.connect(on_tabs_current_changed)
# Dash Network tab
grid = QGridLayout(dash_net_tab)
grid.setSpacing(8)
dash_net = self.network.dash_net
net = self.network
# row 0
self.both_kb = QLabel()
self.read_kb = QLabel()
self.write_kb = QLabel()
grid.addWidget(self.both_kb, 0, 0, 1, 2)
grid.addWidget(self.read_kb, 0, 2, 1, 2)
grid.addWidget(self.write_kb, 0, 4, 1, 2)
self.run_dash_net_cb = QCheckBox(_('Enable Dash Network'))
self.run_dash_net_cb.setChecked(self.config.get('run_dash_net', True))
run_dash_net_modifiable = self.config.is_modifiable('run_dash_net')
self.run_dash_net_cb.setEnabled(run_dash_net_modifiable)
def on_run_dash_net_cb_clicked(run_dash_net):
self.config.set_key('run_dash_net', run_dash_net, True)
net.run_from_another_thread(net.dash_net.set_parameters())
self.run_dash_net_cb.clicked.connect(on_run_dash_net_cb_clicked)
grid.addWidget(self.run_dash_net_cb, 0, 6, 1, 2)
# row 1
is_cmd_dash_peers = dash_net.is_cmd_dash_peers
use_static_peers = dash_net.use_static_peers
static_peers_label = QLabel(_('Static Peers:'))
grid.addWidget(static_peers_label, 1, 0, 1, 1)
self.dash_peers_e = QLineEdit()
self.dash_peers_e.setText(dash_net.dash_peers_as_str())
self.dash_peers_e.setReadOnly(is_cmd_dash_peers)
def on_dash_peers_editing_end():
if is_cmd_dash_peers:
return
res = dash_net.dash_peers_from_str(self.dash_peers_e.text())
if type(res) == str:
self.err_label.setText(f'Error: {res}')
else:
self.config.set_key('dash_peers', res, True)
if dash_net.use_static_peers:
net.run_from_another_thread(net.dash_net.set_parameters())
self.dash_peers_e.editingFinished.connect(on_dash_peers_editing_end)
def on_dash_peers_changed():
self.err_label.setText('')
self.dash_peers_e.textChanged.connect(on_dash_peers_changed)
grid.addWidget(self.dash_peers_e, 1, 1, 1, 5)
self.use_static_cb = QCheckBox(_('Use Static Peers'))
self.use_static_cb.setChecked(use_static_peers)
self.use_static_cb.setEnabled(not is_cmd_dash_peers)
def on_use_static_cb_clicked(use_static):
self.config.set_key('dash_use_static_peers', use_static, True)
net.run_from_another_thread(net.dash_net.set_parameters())
self.use_static_cb.clicked.connect(on_use_static_cb_clicked)
grid.addWidget(self.use_static_cb, 1, 6, 1, 2)
# row 2 with error msg
self.err_label = QLabel('')
self.err_label.setObjectName('err-label')
grid.addWidget(self.err_label, 2, 0, 1, -1)
# row 3
self.status_label = QLabel('')
grid.addWidget(self.status_label, 3, 0, 1, 6)
max_peers_label = _('Max Peers:')
grid.addWidget(QLabel(max_peers_label), 3, 6, 1, 1)
self.max_peers = QSpinBox()
self.max_peers.setValue(dash_net.max_peers)
self.max_peers.setRange(MIN_PEERS_LIMIT, MAX_PEERS_LIMIT)
grid.addWidget(self.max_peers, 3, 7, 1, 1)
def on_change_max_peers(max_peers):
dash_net.max_peers = max_peers
self.max_peers.valueChanged.connect(on_change_max_peers)
# row 4
self.dash_peers_list = DashPeersWidget(self)
grid.addWidget(self.dash_peers_list, 4, 0, 1, -1)
# Dash Sporks tab
vbox = QVBoxLayout(sporks_tab)
sporks_label = QLabel(_('Dash Sporks Values'))
self.sporks_list = SporksWidget(self)
vbox.addWidget(sporks_label)
vbox.addWidget(self.sporks_list)
# Dash Banlist tab
vbox = QVBoxLayout(banlist_tab)
banlist_label = QLabel(_('Banned Dash Peers'))
self.banlist_list = BanlistWidget(self)
vbox.addWidget(banlist_label)
vbox.addWidget(self.banlist_list)
# init layout
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
self.update()
def update(self, event=None, args=None):
is_visible = self.parent.isVisible()
if event is not None and not is_visible:
return
if event is None:
self.update_dash_net_tab()
self.sporks_list.update()
self.banlist_list.update()
elif event in ['dash-peers-updated', 'dash-net-activity']:
self.update_dash_net_tab(event, args)
elif event == 'sporks-activity':
self.sporks_list.update()
elif event == 'dash-banlist-updated':
self.banlist_list.update(event, args)
def update_dash_net_tab(self, event=None, args=None):
dash_net = self.network.dash_net
self.dash_peers_list.update(event, args)
if event in [None, 'dash-net-activity']:
read_bytes = dash_net.read_bytes
write_bytes = dash_net.write_bytes
both_kb = round((write_bytes + read_bytes)/1024, 1)
read_kb = round(read_bytes/1024, 1)
write_kb = round(write_bytes/1024, 1)
self.both_kb.setText(_('Total') + f': {both_kb} KiB')
self.read_kb.setText(_('Received') + f': {read_kb} KiB')
self.write_kb.setText(_('Sent') + f': {write_kb} KiB')
if event in [None, 'dash-peers-updated']:
status = _('Connected Peers') + f': {len(dash_net.peers)}'
self.status_label.setText(status)
def layout(self):
return self.layout_
class DashNetDialog(QDialog):
def __init__(self, network, config, dash_net_sobj):
QDialog.__init__(self)
self.setWindowTitle(_('Dash Network'))
self.setMinimumSize(700, 400)
self.is_testnet = constants.net.TESTNET
self.network = network
self.dnlayout = DashNetDialogLayout(network, config, self)
self.dash_net_sobj = dash_net_sobj
vbox = QVBoxLayout(self)
vbox.addLayout(self.dnlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.dash_net_sobj.dlg.connect(self.on_updated)
def show(self):
super(DashNetDialog, self).show()
if self.network:
self.network.dash_net.register_callback(self.on_dash_net,
['dash-peers-updated',
'dash-net-activity',
'sporks-activity',
'dash-banlist-updated'])
def closeEvent(self, e):
if self.dnlayout.err_label.text():
e.ignore()
if self.network:
self.network.dash_net.unregister_callback(self.on_dash_net)
def on_dash_net(self, event, *args):
self.dash_net_sobj.dlg.emit(event, args)
def on_updated(self, event=None, args=None):
self.dnlayout.update(event, args)
| [
"zebra.lucky@gmail.com"
] | zebra.lucky@gmail.com |
81bba9ac8e716ccd0b7d2bf41bd3fcb05d531648 | 7b4c3e5ea038860ced5b4bea060572538f131e0e | /HW04/hw4b.py | 7bae7345b4bbf9cc27936bc23c0a09c85ab10c9d | [] | no_license | DeepDand/TheMachineLearningS18 | a79de594e110c48fb3ee12b0d426f46a6edf2c95 | 1c6a5b261808057b0d7e46d92d8478f8837fb544 | refs/heads/master | 2021-09-14T15:21:02.035768 | 2018-05-15T15:26:59 | 2018-05-15T15:26:59 | 118,802,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | import numpy as np
from matplotlib import pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
def genDataSet(N):
x = np.random.normal(0, 1, N)
ytrue = (np.cos(x) + 2) / (np.cos(x*1.4) +2)
noise = np.random.normal(0, 0.2, N)
y = ytrue + noise
return x, y, ytrue
N = 100
x, y, ytrue = genDataSet(N)
x = np.array(x)
y = np.array(y)
'''
for train_index,test_index in kf.split(x):
#print("Train: ",train_index,"Test: ",test_index)
X_train, X_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
'''
i=1
k = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]
scores = []
cv_scores = []
#print("Train Index: ",len(train_index))
#print("Test Index: ",len(test_index))
for i in k:
#print("K IS: ",i)
neigh = KNeighborsRegressor(n_neighbors=i)
x = x.reshape((-1,1))
y = y.reshape((-1,1))
ytrue = ytrue.reshape((-1,1))
neigh.fit(x, ytrue)
score = neigh.score(x, ytrue)
cv_score = cross_val_score(neigh, x, ytrue, cv=10)
scores.append(score)
cv_scores.append(np.mean(cv_score))
print ("R^2 scores: %s" %scores)
print ("CV scores: %s" %cv_scores)
plt.plot(x,y,'.')
plt.plot(x,ytrue,'rx')
#plt.show()
#print x
#print y
#print ytrue
| [
"deep.dand1992@gmail.com"
] | deep.dand1992@gmail.com |
94e2c2a401b125a43cee98d701cd7ec13826b551 | 773dc03117f8b0d51f7a10e2a4577229c8be6ba3 | /migrations/models/36_20230108160220_update.py | 825e7924ebf0640cde169d23190cb1cc5555254b | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | tcprescott/sahasrahbot | 382cdff058d63feb5f42dbbd7729eb4b08c4d1bd | 64a125d948873d0faa5ea3f2d306075ad9e013be | refs/heads/master | 2023-08-31T15:33:01.533206 | 2023-08-31T01:58:48 | 2023-08-31T01:58:48 | 178,310,225 | 22 | 43 | MIT | 2023-09-01T08:45:52 | 2019-03-29T01:34:45 | Python | UTF-8 | Python | false | false | 380 | py | from tortoise import BaseDBAsyncClient
async def upgrade(db: BaseDBAsyncClient) -> str:
return """
ALTER TABLE `ranked_choice_election` ADD `private` BOOL NOT NULL DEFAULT 0;
DROP TABLE IF EXISTS `twitch_channels`;"""
async def downgrade(db: BaseDBAsyncClient) -> str:
return """
ALTER TABLE `ranked_choice_election` DROP COLUMN `private`;"""
| [
"tcprescott@gmail.com"
] | tcprescott@gmail.com |
25eb424a85781706e1be215f02b2a1b5c0335aaa | fb6ef73b6c44b1530c2ae75900285118abf8fe99 | /耿皓宇_李正一/model2/xbg1.py | b758a7fe748bec2ca171b2eb7a82f1b799149a59 | [] | no_license | Kiwisher/stock-price-prediction | d4faf1eabf8362771ed1e997bbd18b3f933601f4 | 999435e50123449704e730cdf66386f9108a9bef | refs/heads/master | 2020-04-14T05:35:45.561759 | 2019-01-01T15:58:00 | 2019-01-01T15:58:00 | 163,664,023 | 0 | 1 | null | 2019-01-01T15:58:01 | 2018-12-31T11:36:13 | Python | UTF-8 | Python | false | false | 8,829 | py | import os
import json
import time
import math
import pandas as pd
import matplotlib.pyplot as plt
from keras import backend as K
import time
import warnings
import numpy as np
from numpy import newaxis
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import matplotlib.pyplot as plt
import keras
import tensorflow as tf
from sklearn import preprocessing
import csv
from keras.regularizers import l2
import xgboost as xgb
import random
warnings.filterwarnings("ignore")
X_seq1 = [[], [], [], [], [], [], []]
train_data_norm = [[], [], [], [], [], [], []]
y = []
X = []
s = 10
empty = 20
split = 1
gap_pos = [0, 4773, 9519, 14246, 18864, 23582, 28357, 33068, 37844, 42555, 47289, 52056, 56856, 61636, 66400, 71152,
75916, 80687, 85573, 90352, 95146, 99942, 104738, 109489, 114277, 119071, 123857, 128626, 133495, 138368,
143228, 147997, 152856, 157730, 162593, 167473, 172308, 177096, 181915, 186748, 191555, 196385, 201203,
206085, 210940, 215777, 220623, 225479, 230350, 235196, 240035, 244875, 249715, 254571, 259360, 264240,
273819, 283375, 288143, 297724, 302485, 312072, 316837, 321660, 326502, 331348, 336191, 341026, 345867,
350700, 355539, 360369, 365158, 369994, 374845, 379678, 385492, 390335, 395234, 401288, 406096, 412221,
418919, 425174]
result = [0 for i in range(1000)]
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true)))
def min_max(train_data, low, high):
global train_data_norm
column_number = 5
for k in range(column_number):
train_data_norm[k] += train_data[:, k][low:high].tolist()
train_data_norm = np.array(train_data_norm)
# print(train_data_norm)
train_data_norm = np.diff(train_data_norm, axis=1)
# print(train_data_norm)
# print(train_data[:,0])
for k in range(column_number):
c_min = 2147483647
c_max = -2147483647
min_index = 0
# print(train_data_norm[k])
length = len(train_data_norm[k])
for i in range(length):
if (train_data_norm[k][i] > c_max):
c_max = train_data_norm[k][i]
if (train_data_norm[k][i] < c_min):
c_min = train_data_norm[k][i]
min_index = i
for i in range(length):
train_data_norm[k][i] = (train_data_norm[k][i] - c_min) / (c_max - c_min)
# print("---------------------")
# print(c_min,c_max,min_index)
# print(train_data_norm[k])
train_data_norm = np.array(train_data_norm).T
# for i in range(50):
# print(train_data_norm[i])
return c_min, c_max
def get_train_data(train_data, low, high): #
global train_data_norm
column_number = 7 # lll
# print(train_data[0])
for k in range(column_number):
train_data_norm[k] += train_data[:, k][low:high].tolist()
train_data_norm = np.array(train_data_norm)
train_data_norm = train_data_norm.T
# print(train_data_norm)
l = len(train_data_norm)
# print(l)
for i in range(l - s - empty + 1):
tmp = train_data_norm[i:i + s, :]
tmp = np.array(tmp)
# print(tmp)
# for j in range(column_number):
# if ((j == 2) or (j == 4) or (j == 6)):
# tmp[j] = np.diff(tmp[j],axis=0)# according to column
# print(tmp)
scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
# tmp1=np.diff(tmp,axis=0)# according to column lll
# tmp1=scaler.fit_transform(tmp1)
tmp = scaler.fit_transform(tmp)
# tmp=np.vstack((tmp,tmp1))#lll
# if(i<3):
# print(i)
#
# print(tmp)
X.append(tmp)
def get_perdi(train_data, low, high):
global X_seq1
column_number = 7
# print(X_seq1)
for k in range(column_number):
X_seq1[k] += train_data[:, k][low:high].tolist()
shift = X_seq1[0][-1]
X_seq1 = np.array(X_seq1)
# print(X_seq1)
# for i in range(column_number):
# if((i==2) or (i==4) or (i==6)):
# X_seq1[i]=np.diff(X_seq1[i],axis=1)
# print(X_seq1)
scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
# tmp1=np.diff(X_seq1,axis=1)#according to row lll
# tmp1=np.array(tmp1).T
# tmp1=scaler.fit_transform(tmp1)
X_seq1 = np.array(X_seq1).T
X_seq1 = scaler.fit_transform(X_seq1)
# X_seq1=np.vstack((X_seq1,tmp1))#lll
# print(X_seq1)
return shift
def get_y(train_data, low, high):
# print("get,y_------------------------------------")
mid_price = train_data[:, 0][low:high] #
l = len(mid_price)
for i in range(s, l - empty + 1):
sum = 0
for j in range(empty):
sum += mid_price[i + j]
# print(i+j)
sum /= empty
sum -= mid_price[i - 1]
# f=(random.uniform(0,1)-0.5)/200
# print(f)
# sum+=f
y.append(sum)
# print(sum)
# print("###########################")
# print(mid_price.tolist())
# print("y",y)
def fenduan(data1, train_data, low, high):
global train_data_norm
global y
train_data = np.array(train_data)
get_y(train_data, low, high)
# y=y[1:] lll
# print(y)
# get_x(train_data_norm,0,high)
get_train_data(train_data, low, high)
# print(X)
length = len(y)
split_pos = int(split * length)
X_train = np.array(X[:split_pos])
y_train = np.array(y[:split_pos])
X_test = np.array(X[split_pos:])
y_test = np.array(y[split_pos:])
trainlen = len(X_train)
tmp = []
for i in range(trainlen):
tmp.append(np.ndarray.flatten(X_train[i]))
# X_train[i]=np.ndarray.flatten(X_train[i])
# X_train=np.ndarray.flatten(X_train)
print(y_train)
model = xgb.XGBRegressor(silent=True, max_depth=6, learning_rate=0.1, n_estimators=300, subsample=0.6)
# tmp=np.array(tmp)
# y=np.array(y)
# s=np.arange(tmp.shape[0])
# np.random.shuffle(s)
# tmp = tmp[s]
# y_train = y_train[s]
model.fit(tmp, y_train)
# model.compile(loss=rmse, optimizer="adam")
# model.fit(X_train, y_train, nb_epoch=14, batch_size=16,verbose=2)
# print(model.evaluate(X_test, y_test, batch_size=32, verbose=2, sample_weight=None))
# result = model.predict(X_test, batch_size=32, verbose=0)
for i in range(1000):
global X_seq1
X_seq1 = [[], [], [], [], [], [], []]
# print(data1)
shift = get_perdi(data1, i * 11, 11 * i + 10)
# print(X_seq1)
# h=[]
# h.append(X_seq1.tolist())
# h=np.array(h)
X_seq1 = np.array(X_seq1)
# print("!!!",shift)
# print(h)
# r = model.predict(h, batch_size=32, verbose=0)
# testlen=len(X_seq1)
# for i in range(testlen):
# X_seq1[i]=np.ndarray.flatten(X_seq1[i])
testline = []
testline.append(np.ndarray.flatten(X_seq1).tolist())
# print(testline)
# X_seq1=np.ndarray.flatten(X_seq1)
r = model.predict(testline)
# print(r)
result[i] += (r + shift)
# if(i<30):
# print("r",r)
# if(r[0][0]!=0):
# print("jjjjjjj")
# print(X_seq1)
def main():
    """Entry point: read train/test CSVs, run fenduan over segments
    3..82 (80 runs, accumulating into the global ``result``), average,
    and write predictions for case ids 143..1000 to xgb2.csv.
    """
    train_data = pd.read_csv('train_data.csv',
                             usecols=['MidPrice', "LastPrice", "AskPrice", "BidPrice", "Volume", "BidVolume1",
                                      "AskVolume1"])
    data1 = pd.read_csv('test_data.csv',
                        usecols=['MidPrice', "LastPrice", "AskPrice", "BidPrice", "Volume", "BidVolume1", "AskVolume1"])
    data1 = np.array(data1)
    # fenduan(train_data,0,2000)
    number_duan = len(gap_pos)
    for i in range(3, 83):
        print(i)
        fenduan(data1, train_data, gap_pos[i], gap_pos[i + 1])
        # Reset the per-segment module-level scratch state before the
        # next segment is processed.
        global X_seq1, X, y, train_data_norm
        X_seq1 = [[], [], [], [], [], [], []]
        train_data_norm = [[], [], [], [], [], [], []]
        y = []
        X = []
    # print(result)
    # Average the 80 accumulated segment predictions per test case.
    for i in range(1000):
        result[i] /= (80)
    b = [i for i in range(1000)]
    with open('xgb2.csv', 'w') as fout:
        fieldnames = ['caseid', 'midprice']
        writer = csv.DictWriter(fout, fieldnames=fieldnames)
        writer.writeheader()
        for i in range(len(result)):
            # Cases 0..141 are skipped -- only ids 143..1000 are written.
            if (i <= 141):
                continue
            writer.writerow({'caseid': str(b[i] + 1), 'midprice': float(result[i])})
main() | [
"noreply@github.com"
] | noreply@github.com |
3ea0f5a9c0a242beb6b04a7db790cbd01df22022 | 248efa4b49b69fe38aaf6a8066c30dcde6e7937c | /NadoCoding/함수/함수 퀴즈.py | 85c16b04024196d9af79fef67235f13f1e8cf891 | [] | no_license | apasionesmj/PythonWorkspace | ca6d9d127d8ed4b1f93c2ac472d0ba1ecfe331dd | a1add6fcdb6843a2142232c5884c3e3278a34012 | refs/heads/master | 2023-03-06T19:08:05.728150 | 2021-02-19T04:47:22 | 2021-02-19T04:47:22 | 334,065,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | # Quiz) 표준 체중을 구하는 프로그램을 작성하시오
# * 표준 체중 : 각 개인의 키에 적당한 체중
# (성별에 따른 공식)
# 남자 : 키(m) x 키(m) x 22
# 여자 : 키(m) x 키(m) x 21
# 조건1 : 표준 체중은 별도의 함수 내에서 계산
# * 함수명 : std_weight
# * 전달값 : 키(height), 성별(gender)
# 조건2 : 표준 체중은 소수점 둘째자리까지 표시
# (출력 예제)
# 키 175cm 남자의 표준 체중은 67.38kg 입니다.
def std_weight(height, gender):
    """Return the standard weight in kg for a height given in metres.

    Uses a factor of 22 for "남자" (male) and 21 for any other gender
    value (e.g. "여자", female).
    """
    factor = 22 if gender == "남자" else 21
    return height * height * factor
# Example usage: compute and print the standard weight for a 175 cm male.
height = 175  # height in cm (converted to metres below)
gender = "남자"
weight = round(std_weight(height / 100, gender), 2)
# Prints: "키 175cm 남자의 표준 체중은 67.38kg 입니다."
print("키 {0}cm {1}의 표준 체중은 {2}kg 입니다.".format(height,gender,weight))
| [
"apasionesmj@naver.com"
] | apasionesmj@naver.com |
baea42f652d2a81af4158f799a25e5c2c0c2de43 | 6718af489ca7d4e41c69f0fa367b98072d426620 | /Lab_9/lab3-2_template.py | 98ffe46441c51366070352d8ea30752fc2d52572 | [] | no_license | liamcannon/CSI-275 | 67aa98359d6ed8ffe9d783f599547c60ecb023cb | 9612a1ff8748d9d58cc929a937a6001e8f0a2494 | refs/heads/main | 2023-04-15T12:34:08.596803 | 2021-04-30T18:20:06 | 2021-04-30T18:20:06 | 333,964,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | """Liam Cannons Code for Lab 9.
Author: Liam Cannon
Class: CSI-275-01/02
Assignment: Lab/HW 5 -- JSON Client/Server
Certification of Authenticity:
I certify that this is entirely my own work, except where I have given
fully-documented references to the work of others. I understand the definition
and consequences of plagiarism and acknowledge that the assessor of this
assignment may, for the purpose of assessing this assignment:
- Reproduce this assignment and provide a copy to another member of academic
- staff; and/or Communicate a copy of this assignment to a plagiarism checking
- service (which may then retain a copy of this assignment on its database for
- the purpose of future plagiarism checking)
"""
import json
import socket
import zlib
def build_list():
    """Prompt the user for a sort type and a series of numbers.

    The first element of the returned list is the sort-type letter the
    user entered ('a', 'd' or 's', case-insensitive check, raw value
    stored); the remaining elements are the numbers entered, stored as
    int where possible and float otherwise.  Entry stops at "done".
    """
    values = []

    # Keep asking until a valid sort-type letter is supplied.
    choice = ""
    while choice.lower() not in ('a', 'd', 's'):
        choice = input("Please enter sort type, a, d, or s: ")
    values.append(choice)

    # Collect numbers until the user types "done"; reject anything
    # that parses as neither int nor float.
    entry = ""
    while entry != "done":
        entry = input("Please enter a number, or 'done' to stop.")
        try:
            values.append(int(entry))
        except ValueError:
            try:
                values.append(float(entry))
            except ValueError:
                if entry != "done":
                    print("ERROR: Non-numeric input provided.")

    return values
def sort_list(unsorted_list):
    """Handles compressing and sending the lists to the server."""
    # Serialise the list (sort-type letter first, then numbers) as
    # UTF-8 JSON bytes.
    json_data = json.dumps(unsorted_list).encode('utf-8')
    #json_encoded = json_data.encode('utf-8')
    print("Uncompressed: ", len(json_data))
    # zlib-compress the payload before sending (server decompresses).
    json_data = zlib.compress(json_data)
    print("Compressed: ", len(json_data))
    json_sock = socket.socket()
    json_sock.connect(("localhost", 7778))
    print(json_data)
    json_sock.sendall(json_data)
    # Single recv of up to 4096 bytes -- assumes the compressed reply
    # fits in one read; confirm against the server implementation.
    recv_data = json_sock.recv(4096)
    recv_data = zlib.decompress(recv_data)
    print("Decompressed", len(recv_data))
    recv_data = recv_data.decode('utf-8')
    #recv_json = recv_data.decode('utf-8')
    recv_dict = json.loads(recv_data)
    json_sock.close()
    print(recv_dict)
def main():
    """Call the build_list and sort_list functions, and print the result."""
    # (the printing of the sorted result happens inside sort_list)
    #sort_type = input("Enter a sort type, a, d, or s: ")
    number_list = build_list()
    sort_list(number_list)
    #for x in range(20):
    #    sort_list([i for i in range(1, x)])
# Answer the following question in your submission:
#   At what value of x does the compressed JSON
#   become equal to or smaller than the uncompressed JSON?
#   Answer: 16

# Run interactively only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"liamcannon@Liams-MacBook-Pro-2.local"
] | liamcannon@Liams-MacBook-Pro-2.local |
c5254d3511dcc1be95864f63f9271b580734eb6f | 13d9168377d701e482509d530b354e0fd70d66cc | /IPMI_2021_model/MTGNN_local_multiscale_temporal_FC.py | d99a8df766f6b0390cb97c4ac0034f69c998d0a9 | [] | no_license | NareshNandakumarJHU/Multiscale-Spatial-Attention-Applied-To-Dynamic-Connectivity | 1697ed97865dfae9da7a4e3b9d8b7ca7f046c9f3 | d068cdf6a97375207098236e68cd9f8736c99fca | refs/heads/main | 2023-06-01T07:55:57.466680 | 2021-06-25T08:57:48 | 2021-06-25T08:57:48 | 380,181,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,988 | py | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn
from torch.autograd import Variable
import scipy
from data_loader import data_test
from data_loader import data_train
from sklearn.metrics import roc_auc_score
import pickle
import os.path
from scipy import io
import sys
use_cuda = torch.cuda.is_available()
class E2EBlock(torch.nn.Module):
    """Edge-to-edge convolution block (cross-shaped filter).

    Applies a (1, d) and a (d, 1) convolution to a d x d connectivity
    map and tiles each result back to d x d before summing, so every
    output entry pools over its full row and column.
    """

    def __init__(self, in_planes, planes, example, bias=True):
        super(E2EBlock, self).__init__()
        # Spatial size d is taken from the example tensor's last dim.
        self.d = example.size(3)
        self.cnn1 = torch.nn.Conv2d(in_planes, planes, (1, self.d), bias=bias)
        self.cnn2 = torch.nn.Conv2d(in_planes, planes, (self.d, 1), bias=bias)

    def forward(self, x):
        row_out = self.cnn1(x)   # shape (N, planes, d, 1)
        col_out = self.cnn2(x)   # shape (N, planes, 1, d)
        tiled_rows = torch.cat(self.d * [row_out], dim=3)
        tiled_cols = torch.cat(self.d * [col_out], dim=2)
        return tiled_rows + tiled_cols
class Multiscale_dynamic_model(torch.nn.Module):
    """Multiscale spatial/temporal attention model over dynamic connectivity.

    The forward pass assumes batch size 1 and the hard-coded sizes
    below: input ``x`` of shape (1, 1, 1000, 1000, 21), i.e. 21
    sliding-window 1000x1000 connectivity matrices.  It produces four
    1000x3 outputs (Fi, Fo, T motor scores and L language score) plus
    the spatial attention map and intermediate conv features.
    """

    def __init__(self, example, num_classes=10):
        super(Multiscale_dynamic_model, self).__init__()
        self.in_planes = example.size(1)
        self.d = example.size(3)
        # Edge-to-edge feature extractor shared across the 21 windows.
        self.e2econv1 = E2EBlock(1, 50, example)
        # Pool across all 50 feature maps x 21 windows at once.
        self.max = torch.nn.MaxPool3d((50*21,1,1))
        self.avg = torch.nn.AvgPool3d((50*21,1,1))
        # Spatial-attention convolutions at three kernel scales (3/7/11).
        self.featureCNN1 = torch.nn.Conv2d(2,1,3,padding=1)
        self.featureCNN2 = torch.nn.Conv2d(2,1,7,padding=3)
        self.featureCNN3 = torch.nn.Conv2d(2,1,11,padding=5)
        self.CNNmax = torch.nn.MaxPool3d((3,1,1))
        # Edge-to-node and node-to-graph reductions.
        self.E2N = torch.nn.Conv2d(50, 50, (1, self.d))
        self.N2G = torch.nn.Conv1d(50, 50, (self.d, 1))
        # Temporal attention over the 21 windows; per the forward pass,
        # column 0 is used for the language branch, column 1 for motor.
        self.temporalFC = torch.nn.Linear(50,2)
        self.sm = torch.nn.Softmax(dim=0)
        self.fc1 = torch.nn.Linear(50, 250)
        self.fc2 = torch.nn.Linear(250, 100)
        # One 3-class head per score.
        self.fc3_Fi = torch.nn.Linear(100, 3)
        self.fc3_Fo = torch.nn.Linear(100, 3)
        self.fc3_T = torch.nn.Linear(100, 3)
        self.fc3_L = torch.nn.Linear(100, 3)

    def forward(self, x):
        # Scratch tensors for per-window results (sizes hard-coded).
        e2e = torch.empty(1, 21, 50, 1000, 1000)
        temporal_FC_in = torch.empty(21, 50)
        FC_Fi = torch.empty(1000, 3, 21)
        FC_Fo = torch.empty(1000, 3, 21)
        FC_T = torch.empty(1000, 3, 21)
        FC_L = torch.empty(1000, 3, 21)
        # Pass each of the 21 windows through the shared E2E block.
        # (``map`` shadows the builtin; kept as in the original.)
        for i in range(21):
            map = x[:, :, :, :, i]
            out = self.e2econv1(map)
            out = F.leaky_relu(out, negative_slope=0.1)
            e2e[:,i,:,:,:] = out
        # Collapse windows x channels into one axis for 2-D pooling/conv.
        e2e_reshape = e2e.view(e2e.size(0),e2e.size(1)*e2e.size(2),e2e.size(3),e2e.size(4))
        m_pool = self.max(e2e_reshape)
        a_pool = self.avg(e2e_reshape)
        # 2-channel (max, avg) descriptor fed to the attention convs.
        feature = torch.cat((m_pool, a_pool), 1)
        Conv_feature1 = self.featureCNN1(feature)
        Conv_feature_1 = Conv_feature1.view(1,Conv_feature1.size(2)*Conv_feature1.size(3))
        Conv_feature2 = self.featureCNN2(feature)
        Conv_feature_2 = Conv_feature2.view(1,Conv_feature2.size(2) * Conv_feature2.size(3))
        Conv_feature3 = self.featureCNN3(feature)
        Conv_feature_3 = Conv_feature3.view(1,Conv_feature3.size(2) * Conv_feature3.size(3))
        # Fuse the three scales by channel-wise max, then softmax over
        # all spatial positions to obtain the spatial attention map.
        total_conv = torch.cat((Conv_feature1,Conv_feature2,Conv_feature3),1)
        avg_conv = self.CNNmax(total_conv)
        attn = torch.softmax(avg_conv.view(avg_conv.size(2)*avg_conv.size(3)),0)
        attn = attn.view(Conv_feature1.size(0), Conv_feature1.size(1), Conv_feature1.size(2), Conv_feature1.size(3))
        # Re-weight every window's feature maps by the attention map.
        e2e_reshape = e2e_reshape * attn
        e2e_back = e2e_reshape.view(e2e.size(0), e2e.size(1), e2e.size(2), e2e.size(3), e2e.size(4))
        # Per-window heads: E2N -> per-node FC heads, N2G -> temporal feature.
        for i in range(21):
            out = e2e_back[:,i,:,:,:]
            out = self.E2N(out)
            out = F.leaky_relu(out, negative_slope=0.1)
            out_FC = out.view(out.size(2), out.size(1))
            out_FC = self.fc1(out_FC)
            out_FC = F.leaky_relu(out_FC, negative_slope=0.1)
            out_FC = self.fc2(out_FC)
            out_FC = F.leaky_relu(out_FC, negative_slope=0.1)
            FC_Fi[:, :, i] = F.leaky_relu(self.fc3_Fi(out_FC), negative_slope=0.1)
            FC_Fo[:, :, i] = F.leaky_relu(self.fc3_Fo(out_FC), negative_slope=0.1)
            FC_T[:, :, i] = F.leaky_relu(self.fc3_T(out_FC), negative_slope=0.1)
            FC_L[:, :, i] = F.leaky_relu(self.fc3_L(out_FC), negative_slope=0.1)
            out_temporal_branch = self.N2G(out)
            out_temporal_branch = F.leaky_relu(out_temporal_branch, negative_slope=0.1)
            temporal_FC_in[i, :] = out_temporal_branch.view(out_temporal_branch.size(0), out_temporal_branch.size(1))
        # Temporal attention weights: one 2-vector per window.
        Attn = self.temporalFC(temporal_FC_in)
        # first column for language, second for motor
        out_Fi = torch.matmul(FC_Fi, Attn[:, 1])
        out_Fo = torch.matmul(FC_Fo, Attn[:, 1])
        out_T = torch.matmul(FC_T, Attn[:, 1])
        out_L = torch.matmul(FC_L, Attn[:, 0])
        return out_Fi, out_Fo, out_T, out_L, attn, Conv_feature1, Conv_feature2, Conv_feature3, Attn
# ---- experiment configuration and data/model setup ----
import torch.utils.data.dataset

test_index = 1      # index handed to data_train/data_test -- presumably
                    # selects the held-out split; confirm in data_loader
lr = 0.005          # SGD learning rate
nbepochs = 140      # number of training epochs
BATCH_SIZE = 1
# Per-class loss weights (used below to build the CrossEntropyLoss
# weight vectors; M/L appear to denote motor/language -- verify).
class_0 = 0.22
class_M = 1.57
class_L = 2.31
class_2 = 0.42
trainset = data_train(index=test_index,fold=8)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=1)
testset = data_test(index=test_index)
testloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=False, num_workers=1)
net = Multiscale_dynamic_model(trainset.X)
if use_cuda:
    net = net.cuda(0)
    net = torch.nn.DataParallel(net, device_ids=[0])
momentum = 0.9      # SGD momentum (Nesterov, see optimizer below)
wd = 0.00005        # weight decay
def init_weights_he(m):
    """He-style uniform initialisation for Linear layers.

    Intended for use with ``net.apply(init_weights_he)``: for each
    ``torch.nn.Linear`` module, re-initialise its weights uniformly in
    ``[-sqrt(6)/fan_in, sqrt(6)/fan_in]``.  Non-Linear modules are only
    printed and left untouched.

    Bug fixed: the original read ``net.dense1.in_features`` -- the
    global model defines no ``dense1`` attribute, so the function would
    raise AttributeError on any Linear layer.  The fan-in must come
    from the layer being initialised.  (Note the original bound is
    sqrt(6)/fan_in, not the textbook sqrt(6/fan_in); kept as-is.)
    """
    print(m)
    if isinstance(m, torch.nn.Linear):
        fan_in = m.in_features  # was: net.dense1.in_features (no such attr)
        he_lim = np.sqrt(6) / fan_in
        m.weight.data.uniform_(-he_lim, he_lim)
        print(m.weight)
# 3-class weighted cross-entropy losses: criterion1 is applied to the
# motor-score branches (Fi/Fo/T) and criterion2 to the language branch
# (L) inside train() below.
class_weight_M = torch.FloatTensor([class_0, class_M, class_2])
criterion1 = torch.nn.CrossEntropyLoss(weight=class_weight_M)
class_weight_L = torch.FloatTensor([class_0, class_L, class_2])
criterion2 = torch.nn.CrossEntropyLoss(weight=class_weight_L)
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=momentum, nesterov=True, weight_decay=wd)
def train(epoch,alpha=1.0):
    """Run one training epoch over ``trainloader``.

    Uses module-level state: net, trainloader, optimizer, criterion1/2,
    use_cuda.  ``alpha`` and ``epoch`` are currently unused inside the
    loop body except as the conventional signature.
    """
    net.train()
    for batch_idx, (X, L, Fi, Fo, T) in enumerate(trainloader):
        if use_cuda:
            X, L,Fi,Fo,T = X.cuda(), L.cuda(), Fi.cuda(), Fo.cuda(), T.cuda()
        optimizer.zero_grad()
        X, L, Fi, Fo, T = Variable(X), Variable(L), Variable(Fi), Variable(Fo), Variable(T)
        out_Fi, out_Fo, out_T, out_L, attn, Conv_feature1, Conv_feature2, Conv_feature3, Attn = net(X)
        # Flatten each label tensor to a 1-D class vector for the losses.
        L = L.view(L.size(0) * L.size(1), 1)
        L = np.squeeze(L)
        L = Variable(L)
        Fi = Fi.view(Fi.size(0) * Fi.size(1), 1)
        Fi = np.squeeze(Fi)
        Fi = Variable(Fi)
        Fo = Fo.view(Fo.size(0) * Fo.size(1), 1)
        Fo = np.squeeze(Fo)
        Fo = Variable(Fo)
        T = T.view(T.size(0) * T.size(1), 1)
        T = np.squeeze(T)
        T = Variable(T)
        # one way to code multi-branch loss with missing data: a label
        # value of 6 appears to mark a missing score, so that branch's
        # loss is skipped (contributes 0) -- confirm in data_loader.
        loss1 = criterion2((out_L),L)
        if Fi[0] == 6:
            loss2 = 0
        else:
            loss2 = criterion1((out_Fi),Fi)
        if Fo[0] == 6:
            loss3 = 0
        else:
            loss3 = criterion1((out_Fo),Fo)
        if T[0] == 6:
            loss4 = 0
        else:
            loss4 = criterion1((out_T),T)
        loss_total = loss1 + loss2 + loss3 + loss4
        loss_total.backward()
        optimizer.step()
    return
def test(alpha=1.0):
    """Evaluate ``net`` on ``testloader`` and collect per-sample outputs.

    Returns
    -------
    tuple of six lists (one entry per test sample):
        Fi-, Fo- and T-branch outputs and the L-branch output (moved to
        CPU numpy arrays), the spatial attention maps, and the temporal
        attention weights.

    Notes
    -----
    Bug fixed: the original ``return`` statement returned
    ``total_Fi_out`` twice and never returned ``total_Fo_out`` even
    though it was populated.  Also removed: unused ``test_loss`` /
    ``running_loss`` / ``total_conv*`` accumulators, a redundant
    duplicate ``.cuda()`` transfer before the ``no_grad`` block, and a
    pointless ``optimizer.zero_grad()`` inside evaluation.
    """
    net.eval()
    total_Fi_out = []
    total_Fo_out = []
    total_T_out = []
    total_L_out = []
    total_attn = []
    total_temp_attn = []
    for batch_idx, (X, L, Fi, Fo, T) in enumerate(testloader):
        with torch.no_grad():
            if use_cuda:
                X, L, Fi, Fo, T = X.cuda(), L.cuda(), Fi.cuda(), Fo.cuda(), T.cuda()
            X, L, Fi, Fo, T = Variable(X), Variable(L), Variable(Fi), Variable(Fo), Variable(T)
            out_Fi, out_Fo, out_T, out_L, attn, Conv_feature1, Conv_feature2, Conv_feature3, Attn = net(X)
            # Detach branch outputs to CPU numpy for downstream analysis.
            total_Fi_out.append(out_Fi.cpu().data.numpy())
            total_Fo_out.append(out_Fo.cpu().data.numpy())
            total_T_out.append(out_T.cpu().data.numpy())
            total_L_out.append(out_L.cpu().data.numpy())
            total_attn.append(attn)
            total_temp_attn.append(Attn)
    return total_Fi_out, total_Fo_out, total_T_out, total_L_out, total_attn, total_temp_attn
# Train for nbepochs epochs, then collect the test-set outputs once.
for epoch in range(nbepochs):
    train(epoch)
    print(epoch)
out_Fi, out_Fo, out_T, out_L, attn, temp_attn = test()
| [
"noreply@github.com"
] | noreply@github.com |
4bc7ae67343462bd970a1473cb0dca24ebe4406a | fe941ec9e6d25cb8f6d01a230ca3f63fba3cf0cd | /ortcfront/rules/urls.py | 40a221410c4595be61288cb1b20c33c2d99d12c5 | [] | no_license | rodo/ortcfront | 2b8f4d28ca43704374a9067ccb00cae2ee8c80ac | c71ec3b7e9cd88145b06c278830a16d4a063364b | refs/heads/master | 2021-03-12T22:17:13.020654 | 2014-12-14T11:08:40 | 2014-12-14T11:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,007 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Rodolphe Quiédeville <rodolphe@quiedeville.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from .views import DomainNewView, DomainEditView, DomainView
from .views import RuleNewView, RuleEditView, RuleView, RuleListView, RuleFeed
# URL routes for the rules app: public list/feed/detail views plus
# login-protected create/edit views for rules and domains.
urlpatterns = patterns('',
                       url(r'^s/feed/$', RuleFeed()),
                       # NOTE(review): the stray "]$" inside the <action>
                       # group below looks like a regex typo -- verify the
                       # intended pattern (probably just "...|all").
                       url(r'^s/(?P<element>node|way|relation|all)/(?P<action>create|delete|modify|all]$)/', RuleListView.as_view()),
                       url(r'^s/(?P<element>node|way|relation|all)/$', RuleListView.as_view()),
                       url(r'^s/$', RuleListView.as_view()),
                       url(r'^s/domain/new/$', login_required(DomainNewView.as_view()), name='domain_new'),
                       url(r'^s/domain/(?P<pk>\d+)/edit/$', login_required(DomainEditView.as_view()), name='domain_edit'),
                       url(r'^s/domain/(?P<pk>\d+)/$', DomainView.as_view()),
                       url(r'^/new/$', login_required(RuleNewView.as_view()), name='rule_new'),
                       url(r'^/(?P<pk>\d+)/edit/$', login_required(RuleEditView.as_view()), name='rule_edit'),
                       url(r'^/(?P<pk>\d+)/$', RuleView.as_view()),
                       )
| [
"rodolphe@quiedeville.org"
] | rodolphe@quiedeville.org |
4571461aa58e4b431da3c8b25a6f62412e3ef47a | 8f2ec98795b8873c384cec8cfb46521d66184467 | /chem_utilities.py | f324c9c13a7612b48e414fdb26f17c2c9d9fca83 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kyleniemeyer/create_rate_subs | c84b6535fe37cf8d9f36f7f62d3dd6a77a02d9b5 | 1d87991ccc5dd6ed5dc332a71a7192c489e004cc | refs/heads/master | 2021-01-21T19:35:11.525619 | 2016-01-05T01:05:00 | 2016-01-05T01:05:00 | 7,443,919 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 9,801 | py | """Module containing element dict, species and reaction classes, and constants.
"""
# Python 2 compatibility
from __future__ import division
# Standard libraries
import math
import numpy as np
__all__ = ['RU', 'RUC', 'RU_JOUL', 'PA', 'get_elem_wt',
'ReacInfo', 'SpecInfo', 'calc_spec_smh']
# universal gas constants, SI units
RU = 8314.4621 # J/(kmole * K)
RU_JOUL = 8.3144621
RUC = (RU / 4.18400) # cal/(mole * K)
# Avogadro's number
AVAG = 6.0221367e23
# pressure of one standard atmosphere [Pa]
PA = 101325.0
class CommonEqualityMixin(object):
    """Mixin providing attribute-wise ``__eq__``/``__ne__``.

    Two instances compare equal when every attribute of ``self`` is
    present on ``other`` with an equal value; numpy arrays are compared
    element-wise via ``np.array_equal``.  Note the check is one-sided
    (extra attributes on ``other`` are ignored) -- behaviour preserved
    from the original.

    Fixed: the original used Python-2-only syntax
    (``except Exception, e`` and ``dict.iteritems``), which is a
    SyntaxError on Python 3 despite the file's stated cross-version
    intent; ``except Exception as e``/``items()`` work on both.
    """

    def __eq__(self, other):
        try:
            for key, value in self.__dict__.items():
                if key not in other.__dict__:
                    return False
                if isinstance(value, np.ndarray):
                    if not np.array_equal(value, other.__dict__[key]):
                        return False
                elif value != other.__dict__[key]:
                    return False
            return True
        except Exception:
            # Anything without a __dict__ (or otherwise incomparable)
            # is simply unequal, matching the original's behaviour.
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
def get_elem_wt():
    """Returns dict with built-in element names and atomic weights [kg/kmol].

    Attributes
    ----------
    None

    Returns
    -------
    elem_wt : dict
        Dictionary with lowercase element-symbol keys and atomic weight
        [kg/kmol] values.  The last two entries ('d', 'e') are not
        periodic-table elements -- judging by their masses they appear
        to be deuterium and the electron; confirm before relying on it.
    """
    elem_wt = dict([
        ('h', 1.00794), ('he', 4.00260), ('li', 6.93900),
        ('be', 9.01220), ('b', 10.81100), ('c', 12.0110),
        ('n', 14.00674), ('o', 15.99940), ('f', 18.99840),
        ('ne', 20.18300), ('na', 22.98980), ('mg', 24.31200),
        ('al', 26.98150), ('si', 28.08600), ('p', 30.97380),
        ('s', 32.06400), ('cl', 35.45300), ('ar', 39.94800),
        ('k', 39.10200), ('ca', 40.08000), ('sc', 44.95600),
        ('ti', 47.90000), ('v', 50.94200), ('cr', 51.99600),
        ('mn', 54.93800), ('fe', 55.84700), ('co', 58.93320),
        ('ni', 58.71000), ('cu', 63.54000), ('zn', 65.37000),
        ('ga', 69.72000), ('ge', 72.59000), ('as', 74.92160),
        ('se', 78.96000), ('br', 79.90090), ('kr', 83.80000),
        ('rb', 85.47000), ('sr', 87.62000), ('y', 88.90500),
        ('zr', 91.22000), ('nb', 92.90600), ('mo', 95.94000),
        ('tc', 99.00000), ('ru', 101.07000), ('rh', 102.90500),
        ('pd', 106.40000), ('ag', 107.87000), ('cd', 112.40000),
        ('in', 114.82000), ('sn', 118.69000), ('sb', 121.75000),
        ('te', 127.60000), ('i', 126.90440), ('xe', 131.30000),
        ('cs', 132.90500), ('ba', 137.34000), ('la', 138.91000),
        ('ce', 140.12000), ('pr', 140.90700), ('nd', 144.24000),
        ('pm', 145.00000), ('sm', 150.35000), ('eu', 151.96000),
        ('gd', 157.25000), ('tb', 158.92400), ('dy', 162.50000),
        ('ho', 164.93000), ('er', 167.26000), ('tm', 168.93400),
        ('yb', 173.04000), ('lu', 174.99700), ('hf', 178.49000),
        ('ta', 180.94800), ('w', 183.85000), ('re', 186.20000),
        ('os', 190.20000), ('ir', 192.20000), ('pt', 195.09000),
        ('au', 196.96700), ('hg', 200.59000), ('tl', 204.37000),
        ('pb', 207.19000), ('bi', 208.98000), ('po', 210.00000),
        ('at', 210.00000), ('rn', 222.00000), ('fr', 223.00000),
        ('ra', 226.00000), ('ac', 227.00000), ('th', 232.03800),
        ('pa', 231.00000), ('u', 238.03000), ('np', 237.00000),
        ('pu', 242.00000), ('am', 243.00000), ('cm', 247.00000),
        ('bk', 249.00000), ('cf', 251.00000), ('es', 254.00000),
        ('fm', 253.00000), ('d', 2.01410), ('e', 5.48578e-4)
        ])
    return elem_wt
class ReacInfo(CommonEqualityMixin):
    """Reaction class.

    Contains all information about a single reaction.

    Attributes
    ----------
    rev : bool
        True if reversible reaction, False if irreversible.
    reactants : list of str
        List of reactant species names.
    reac_nu : list of int/float
        List of reactant stoichiometric coefficients, either int or float.
    products : list of str
        List of product species names.
    prod_nu : list of int/float
        List of product stoichiometric coefficients, either int or float.
    A : float
        Arrhenius pre-exponential coefficient.
    b : float
        Arrhenius temperature exponent.
    E : float
        Arrhenius activation energy.
    rev_par : list of float, optional
        List of reverse Arrhenius coefficients (default empty).
    dup : bool, optional
        Duplicate reaction flag (default False).
    thd : bool, optional
        Third-body reaction flag (default False).
    thd_body : list of list of [str, float], optional
        List of third body names and efficiencies (default empty).
    pdep : bool, optional
        Pressure-dependence flag (default False).
    pdep_sp : str, optional
        Name of specific third-body or 'M' (default '').
    low : list of float, optional
        List of low-pressure-limit Arrhenius coefficients (default empty).
    high : list of float, optional
        List of high-pressure-limit Arrhenius coefficients (default empty).
    troe : bool, optional
        Troe pressure-dependence formulation flag (default False).
    troe_par : list of float, optional
        List of Troe formulation constants (default empty).
    sri : bool, optional
        SRI pressure-dependence formulation flag (default False).
    sri_par : list of float, optional
        List of SRI formulation constants (default empty).

    Notes
    -----
    `rev` does not require `rev_par`; if no explicit coefficients, the
    reverse reaction rate will be calculated through the equilibrium
    constant.

    Only one of [`low`,`high`] can be defined.

    If `troe` and `sri` are both False, then the Lindemann is assumed.

    NOTE(review): the attribute names in the docstring above do not all
    match the code: the instance stores ``reac``/``prod`` (not
    ``reactants``/``products``), and ``thd_body`` below is a boolean
    flag while the [species, efficiency] pairs live in
    ``thd_body_eff``; no ``thd`` attribute is set.  Verify against the
    rest of the package before renaming anything.
    """

    def __init__(self, rev, reactants, reac_nu, products, prod_nu, A, b, E):
        self.reac = reactants
        self.reac_nu = reac_nu
        self.prod = products
        self.prod_nu = prod_nu

        ## Arrhenius coefficients
        # pre-exponential factor [m, kmol, s]
        self.A = A
        # Temperature exponent [-]
        self.b = b
        # Activation energy, stored as activation temperature [K]
        self.E = E

        # reversible reaction properties
        self.rev = rev
        self.rev_par = []  # reverse A, b, E

        # duplicate reaction
        self.dup = False

        # third-body efficiencies (flag + [species, efficiency] pairs)
        self.thd_body = False
        self.thd_body_eff = []  # in pairs with species and efficiency

        # pressure dependence
        self.pdep = False
        self.pdep_sp = ''
        self.low = []
        self.high = []

        self.troe = False
        self.troe_par = []

        self.sri = False
        self.sri_par = []

        # Parameters for pressure-dependent reaction parameterized by
        # bivariate Chebyshev polynomial in temperature and pressure.
        self.cheb = False
        # Number of temperature values over which fit computed.
        self.cheb_n_temp = 0
        # Number of pressure values over which fit computed.
        self.cheb_n_pres = 0
        # Pressure limits for Chebyshev fit [Pa]
        self.cheb_plim = [0.001 * PA, 100. * PA]
        # Temperature limits for Chebyshev fit [K]
        self.cheb_tlim = [300., 2500.]
        # 2D array of Chebyshev fit coefficients
        self.cheb_par = None

        # Parameters for pressure-dependent reaction parameterized by
        # logarithmically interpolating between Arrhenius rate expressions at
        # various pressures.
        self.plog = False
        # List of arrays with [pressure [Pa], A, b, E]
        self.plog_par = None
class SpecInfo(CommonEqualityMixin):
    """Container for a single chemical species.

    Attributes
    ----------
    name : str
        Name of the species.
    elem : list of list of [str, float]
        Elemental composition as [element, count] pairs.
    mw : float
        Molecular weight [kg/kmol].
    hi : numpy.ndarray
        Seven thermodynamic (NASA polynomial) coefficients for the
        high-temperature range.
    lo : numpy.ndarray
        Seven thermodynamic (NASA polynomial) coefficients for the
        low-temperature range.
    Trange : list of float
        Temperatures [K] bounding the polynomial fits
        (low, middle, high); defaults to [300, 1000, 5000].
    """

    def __init__(self, name):
        self.name = name
        # Temperature range [K] for the two polynomial fits.
        self.Trange = [300.0, 1000.0, 5000.0]
        # Elemental composition, filled in later by the parser.
        self.elem = []
        # Molecular weight [kg/kmol]; computed elsewhere.
        self.mw = 0.0
        # High- and low-temperature coefficient arrays (7 each).
        self.hi = np.zeros(7)
        self.lo = np.zeros(7)
def calc_spec_smh(T, specs):
    """Calculate standard-state entropies minus enthalpies for all species.

    Parameters
    ----------
    T : float
        Temperature of gas mixture.
    specs : list of SpecInfo
        List of species.

    Returns
    -------
    list of float
        Standard-state entropy-minus-enthalpy value per species.
    """
    log_T = math.log(T)
    # Pre-scaled temperature powers shared by every species
    # (same arithmetic ordering as the polynomial below).
    t1 = T / 2.0
    t2 = (T * T) / 6.0
    t3 = (T * T * T) / 12.0
    t4 = (T * T * T * T) / 20.0

    results = []
    for sp in specs:
        # Pick the low- or high-temperature coefficient set depending
        # on which side of the fit's midpoint T falls.
        c = sp.lo if T <= sp.Trange[1] else sp.hi
        results.append(
            c[0] * (log_T - 1.0) + c[1] * t1 + c[2] * t2 +
            c[3] * t3 + c[4] * t4 - (c[5] / T) + c[6]
        )
    return results
| [
"kyle.niemeyer@gmail.com"
] | kyle.niemeyer@gmail.com |
3e8dd0a9d0771d0fae58782d615439e782653aa0 | 63dcdb7594dd3f6f6421276178f8308e71b98583 | /day9/Programmers42579.py | 98b7edaa818d1d99c738caa20433b790475b0100 | [] | no_license | KoSangWon/NKLCB_Algorithm | 0a9c87c58fc4fa17a8cb2a5a215bad48bdf97b9e | 68b6cd9b76f03aef2af2ba17bd0ddd61e9cb37ab | refs/heads/master | 2023-04-01T04:04:35.874073 | 2021-04-09T11:35:42 | 2021-04-09T11:35:42 | 350,288,471 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | # https://programmers.co.kr/learn/courses/30/lessons/42579
def solution(genres, plays):
    """Best-album problem: return song indices, grouped by genre in
    descending order of total plays, taking the top two songs of each
    genre by play count (stable sort keeps earlier song ids first on
    ties).  Debug prints preserved from the original.
    """
    answer = []
    totals = dict()      # genre -> total play count
    per_genre = dict()   # genre -> [(song index, play count), ...]
    for idx, (genre, count) in enumerate(zip(genres, plays)):
        if genre not in totals:
            totals[genre] = 0
            per_genre[genre] = []
        totals[genre] += count
        per_genre[genre].append((idx, count))
    # Genres ranked by total plays, descending.
    ranked = sorted(list(totals.items()), key=lambda kv: -kv[1])
    print(ranked)
    print(per_genre)
    for genre, _total in ranked:
        # Stable sort: equal play counts keep their original order,
        # so the lower song id wins ties automatically.
        songs = sorted(per_genre[genre], key=lambda s: -s[1])
        print(songs)
        answer += [song_id for song_id, _plays in songs[:2]]
    return answer
solution(["classic", "pop", "classic", "classic", "pop"], [500, 600, 150, 800, 2500]) | [
"jackoss@naver.com"
] | jackoss@naver.com |
3d4133db6fc7dd14344cf295141712edf3862127 | 64bef15359bc341e2270b452e4800aca8a11f41a | /buildings/ml_tro_funcs.py | 11d6ca63fa652fcb21ac28de82361a028c2ebfa5 | [] | no_license | aa3222119/bi_for_ | 3677685b5f10f88b4db3761f7f837deaed038689 | cd80d76894e7c01f6877137a85ebf8d788a2ff38 | refs/heads/master | 2020-08-29T06:33:58.446968 | 2020-08-08T07:32:30 | 2020-08-08T07:32:30 | 217,955,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,239 | py | import scipy as sp
import numpy as np
# 不用科学计数法显示
np.set_printoptions(suppress=True)
np.set_printoptions(precision=3)
import random
from sklearn.linear_model import LinearRegression
# sklearn.linear_model.LinearRegression http://www.ppvke.com/Blog/archives/19208
from sklearn.cluster import KMeans
# LinearRegression http://blog.csdn.net/viewcode/article/details/8794401
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^(-x)); works element-wise on arrays."""
    return 1.0 / (1.0 + np.exp(-x))
def SSE(y_test, y):
    """Sum of squared errors between predictions ``y_test`` and targets ``y``.

    Fixed: ``sp.square`` relied on scipy's deprecated (and since
    removed) NumPy-alias functions in the top-level ``scipy``
    namespace; use numpy directly.
    """
    return (np.square(y_test - y)).sum()
def RMSE(y_test, y):
    """Root-mean-square error.

    Parameters
    ----------
    y_test : array_like
        Predicted values.
    y : array_like
        Ground-truth values.

    Notes
    -----
    Fixed: ``sp.sqrt``/``sp.mean``/``sp.square`` relied on scipy's
    deprecated (and since removed) NumPy aliases; use numpy directly.
    """
    return np.sqrt(np.mean(np.square(y_test - y)))
def SSR(y_test, y):
    """Regression sum of squares: squared deviations of ``y_test`` from
    the mean of ``y``.

    Fixed: ``sp.square`` relied on scipy's deprecated (and since
    removed) NumPy aliases; use numpy directly.
    """
    return (np.square(y_test - y.mean())).sum()
def SST(y):
    """Total sum of squares: squared deviations of ``y`` from its mean.

    Fixed: ``sp.square`` relied on scipy's deprecated (and since
    removed) NumPy aliases; use numpy directly.
    """
    return (np.square(y - y.mean())).sum()
def R2(y_test, y):
    """Coefficient of determination (R^2): ``1 - SSE/SST``.

    See http://blog.sciencenet.cn/blog-651374-975670.html for the
    definition referenced by the original author.
    """
    error_ratio = SSE(y_test, y) / SST(y)
    return 1 - error_ratio
def nth_ladder_create(mat, n=3, col=-1):
    """Build lagged "ladder" features (flattening an n-step RNN history
    into plain columns for a feed-forward model).

    Parameters
    ----------
    mat : numpy.ndarray
        Factor matrix, shape (samples, features).
    n : int
        Number of lag steps to prepend.
    col : int
        Index of the column used to build the lagged features.

    Returns
    -------
    numpy.ndarray
        Matrix with n lagged copies of column ``col`` prepended; the
        first n rows are dropped (they lack full history).
    """
    laddered = mat[n:, :]
    for lag in range(1, n + 1):
        shifted_col = mat[n - lag:-lag, [col]]
        laddered = np.hstack((shifted_col, laddered))
    return laddered
# def kmeans_dprocess(x_var, n_clusters=4):
# if x_var.__len__() < n_clusters:
# return 'simple size small than n_clusters(%s)' % n_clusters
# kmeans = KMeans(n_clusters=n_clusters)
# r_kmeans = kmeans.fit(x_var)
# x_mean = x_var.mean()
# ind = r_kmeans.labels_ == kmeans.n_clusters
# for mi in range(kmeans.n_clusters):
# indt = r_kmeans.labels_ == mi
# if r_kmeans.cluster_centers_[mi] > 2 * x_mean and r_kmeans.labels_[indt].__len__() / r_kmeans.labels_.__len__() < 0.144:
# # print(uid,r_kmeans.cluster_centers_[mi],x_mean,r_kmeans.labels_[indt].__len__(),r_kmeans.labels_.__len__())
# ind = ind | indt
# if r_kmeans.cluster_centers_[mi] > max_dt:
# ind = ind | indt
# return ind, kmeans
| [
"502202879@qq.com"
] | 502202879@qq.com |
4490770ffae1ec806372d5879e8807f03353bf35 | 702da340ab7a1b5de466d6dd3833774e509aa278 | /Практика/4 задание 2 день.py | d4c264a60317eb0bced3ee32e059068e5b911f92 | [] | no_license | ForemanAqua/PythonFiles1 | 7499b6b6104edf27e911915c27e8165bbd12fb04 | f9ff96041f35e7ef98518a8148c1ff9b77f10705 | refs/heads/master | 2020-03-20T21:02:57.909968 | 2018-06-20T19:05:23 | 2018-06-20T19:05:23 | 137,720,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | import datetime
def printTimeStamp(name):
    """Print the program author's name and the current wall-clock time."""
    author_line = 'Автор програми: ' + name
    print(author_line)
    timestamp_line = 'Час компіляції: ' + str(datetime.datetime.now())
    print(timestamp_line)
printTimeStamp("Valeriy Neroznak")
list1 = [1, 5, 4, 3, 1, 4, 2, 1, 4, 9, 0, 6, 6, 7, 6, 5, 1, 2, 0]
# Pair each distinct value with its occurrence count: [value, count].
list2 = [[x,list1.count(x)] for x in set(list1)]
list3 = []
# Keep values that occur between 1 and 5 times (inclusive).
for i in range(len(list2)):
    if list2[i][1] >= 1 and list2[i][1] <= 5:
        list3.append(list2[i][0])
# Deduplicate (set order, so the printed order is unspecified).
list3 = list(set(list3))
print(list3)
| [
"noreply@github.com"
] | noreply@github.com |
cccef99a3a9e32625f96e0d65b6b0c797aa20efe | 4d6753c1900bd63d707cc6a036ed85009d582d5b | /blog/apps/users/admin.py | ac96f6b9985d4d9c98b7b5b3e38ce3b72e176c03 | [] | no_license | realDNA/blog | b07cfca3c93b601780669bd965f96134ee73fc79 | dcfc54695df107457d431b9fcd4b381b7d510d21 | refs/heads/master | 2022-12-13T20:43:14.288090 | 2020-09-13T12:00:58 | 2020-09-13T12:00:58 | 293,294,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
    # Django admin configuration for the project's custom user model.
    model = CustomUser
    # Columns shown in the admin change-list.
    list_display = ["username", "email", "is_staff"]


# Expose the custom user model in the Django admin site.
admin.site.register(CustomUser, CustomUserAdmin)
| [
"34567092+Aaron-jpt@users.noreply.github.com"
] | 34567092+Aaron-jpt@users.noreply.github.com |
c2ee27335ec1db4df52d38e9bcdabfb39e334cc2 | 8239e45b6b031839dcd464bc80a6c8d17ed2f7b7 | /cloudarmy/contrib/conditions/environment.py | 53177af7d135ab208dc6fbe1359908ca766b4a45 | [] | no_license | geeknam/cloudarmy | 401efaee8c8e5e916ddff757edcc657698d9687f | 4363d5bdf8719a8f8bab8104c8ea7d2247d15746 | refs/heads/master | 2021-07-11T19:44:41.769661 | 2016-03-14T12:43:47 | 2016-03-14T12:43:47 | 52,852,867 | 3 | 1 | null | 2021-03-25T21:40:17 | 2016-03-01T06:11:43 | Python | UTF-8 | Python | false | false | 278 | py | from troposphere import Ref, Equals
class EnvironmentCondition(object):
    """Troposphere/CloudFormation conditions keyed off the
    ``EnvironmentType`` template parameter.

    Presumably mixed into template builders elsewhere in the package so
    resources can be gated on ``IsProduction``/``IsStaging`` -- confirm
    against the consuming code.
    """

    conditions = {
        "IsProduction": Equals(
            Ref("EnvironmentType"), "production"
        ),
        "IsStaging": Equals(
            Ref("EnvironmentType"), "staging"
        ),
    }
| [
"emoinrp@gmail.com"
] | emoinrp@gmail.com |
b8bc61b600c691bdefd5643f36e0c023f5d21088 | ba4cbf3dfde6138b9ab7ef71f23b86d02430af91 | /cetaa/about1/apps.py | 3efaaf5ae95b5671d351291dbef39715e8a857c9 | [] | no_license | soorajbuday/cetaaconnect1 | b52a2bb60c67d1a517d27fb9e2b4c44e950a1bdf | c6b1a91dfad4828780b723aac018a1551d654e66 | refs/heads/master | 2020-03-20T13:43:09.996522 | 2016-09-26T10:11:19 | 2016-09-26T10:11:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class About1Config(AppConfig):
    # Django application configuration for the "about1" app.
    name = 'about1'
| [
"26shame@gmail.com"
] | 26shame@gmail.com |
3fe21eecf2598ae7479b29ddce155256c9cd28be | 225543bcaa194360aa66c738a99b7ad5c291434b | /main_210610.py | 2a8f1f54bfe2a777202b6b4753473944607604b4 | [] | no_license | m0100434/zendlijsten | f0eecf12ab3fc90c1db9b5c22f1163a92dcdf6f7 | 171e1c427db71dad01408072081c85035c57a2b2 | refs/heads/main | 2023-06-19T05:04:31.619139 | 2021-07-17T07:51:46 | 2021-07-17T07:51:46 | 349,770,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,358 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 11:56:59 2021
@author: ArxXi
"""
from selenium import webdriver
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
import pickle
from datetime import date
def save_cookie(driver, path):
    """Snapshot the WebDriver's current cookies to *path* as a pickle."""
    cookies = driver.get_cookies()
    out = open(path, 'wb')
    try:
        pickle.dump(cookies, out)
    finally:
        out.close()
def load_cookie(driver, path):
    """Restore previously pickled cookies from *path* into the WebDriver."""
    with open(path, 'rb') as fh:
        saved = pickle.load(fh)
    for entry in saved:
        driver.add_cookie(entry)
def remove_entry(index):
    # Drop the start-time entry for a skipped channel. `ourtime` and
    # `entries_deleted` are module-level globals assigned by the scraper
    # script below; the offset compensates for earlier removals so the
    # caller's original index still maps onto the shrunken list.
    ourtime.pop(index-entries_deleted)
    # print("time which is going to be deleted = "+ ourtime[index])
    # ourtime[index] = "-"
"""
Een v
VTM v
Vier v
Canvas v
Vitaya = vtm 4 v
Q2 v
Vijf v
CAZ = vtm 3 v
Zes v
Ketnet v
La Une v
RTL-TVI v
AB3 ?
La Deux v
Club RTL v
Plug RTL ?
La Trois v
Nickelodeon FR ?
"""
def channel_identifier(anchor_link):
    """Map a programme-detail URL to its channel name.

    The channel slug is the 5th "/"-separated component of the URL
    (index 4 after splitting).  Known slugs map to their display name;
    an unrecognised slug — or a URL with too few components — yields
    "null" so the caller can skip the entry.
    """
    # Slug -> display name for every channel the scraper cares about.
    channel_names = {
        "een": "een",
        "canvas": "canvas",
        "vtm": "vtm",
        "vier": "vier",
        "vijf": "vijf",
        "zes": "zes",
        "rtl-tvi-hd": "RTI TVI HD",
        "la-une": "LA UNE",
        "la-deux": "LA DEUX",
        "ketnet": "KETNET",
        "vtm2": "vtm2",
        "vtm3": "vtm3",
        "club-rtl": "club-rtl",
        "vtm4": "vtm4",
        "caz-2": "caz-2",
        "la-trois": "la-trois",
    }
    parts = anchor_link.split("/")
    if len(parts) < 5:
        # Malformed link: previously this raised IndexError; treat as unknown.
        return "null"
    return channel_names.get(parts[4], "null")
# options = FirefoxOptions()
# options.add_arguments("--headless")
# driver = webdriver.Firefox(options=options)
#0 click een, canvas,vtm, vier
#1 click vjtf
#2 click zes
#9 click la une , la deux, ketnet, la trois
#14 click
date_of_movie = ""
links_traveresed = 0
default_link = "https://www.demorgen.be/tv-gids/dag/10-06-2021"
if(len(default_link.split("/")) ==6):
date_of_movie =default_link.split("/")[5]
print("got true")
else:
date_of_movie = date.today()
date_of_movie = date_of_movie.strftime('%d/%m/%y')
driver = webdriver.Firefox()
driver.maximize_window()
driver.get(default_link)
# driver.implicitly_wait(15)
delay = 10 # seconds
try:
myElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.ID, 'sp_message_iframe_404503')))
print("Iframe element ready")
except TimeoutException:
print("Iframe not loaded issue")
a = driver.find_element_by_tag_name("iframe")
driver.switch_to.frame(1)
print("switching to iframe done")
green_button = driver.find_element_by_xpath('//button[text()="Akkoord"]')
green_button.click()
time.sleep(10)
print("It will be on schedule website")
driver.switch_to.default_content()
#declarration
iteration = 0
ourtime = []
channel_names = []
ad_index = 82
associated_channel_name = []
production_date = []
show_title = []
current_episode = []
total_episode = []
season_number = []
myepisode_number = ""
description = []
genre = []
series_movie = []
actors = []
episode_text = " "
entries_deleted = 0
number_of_clicks = [0,1,2,6,9,14]
links = []
while (iteration != (len(number_of_clicks))):
try:
myElem = WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.XPATH, '/html/body/main/div/div/div[2]/div/div/div[1]/div[2]/button[2]')))
next_button = driver.find_element_by_xpath("/html/body/main/div/div/div[2]/div/div/div[1]/div[2]/button[2]")
for i in range(0, number_of_clicks[iteration]):
print("next button should be clicked")
next_button.click()
driver.implicitly_wait(2)
print("Next Button located")
except TimeoutException:
print("Next Button Not Located")
a = driver.find_elements_by_class_name("tvgm-channel__logo-placeholder")
#Getting channel names on current page
for i in range(0,len(a)):
ourlink = a[i].get_property("href")
distributed = ourlink.split("/")
channel = distributed[4]
channel_names.append(channel)
#time of shows
b = driver.find_elements_by_class_name("tvgm-broadcast-teaser__time")
for i in range(0,len(b)):
ourtime.append(b[i].text)
c = driver.find_elements_by_class_name("tvgm-broadcast-teaser__link")
for i in range(0,len(c)):
if((c[i].get_property("href")) not in links):
links.append(c[i].get_property("href"))
#getting link
for i in range(links_traveresed,len(links)):
tmp = links[i]
episode_text = " "
if(channel_identifier(tmp) != "null"):
associated_channel_name.append(channel_identifier(tmp))
driver.get(tmp)
#Page visited
try:
production_date.append(driver.find_element_by_class_name("tvgm-broadcast-detail__productionyear").text)
except NoSuchElementException:
print("Production Date not found")
production_date.append("-")
try:
show_title.append(driver.find_element_by_class_name("tvgm-broadcast-detail__title").text)
except NoSuchElementException:
print("Show title not found")
show_title.append("-")
try:
description.append(driver.find_element_by_class_name("tvgm-broadcast-detail__description").text)
except NoSuchElementException:
print("Description not found")
description.append("-")
try:
actors.append(driver.find_element_by_class_name("tvgm-broadcast-detail__castandcrew").text)
except NoSuchElementException:
print("Actors not found")
actors.append("-")
try:
temp = driver.find_element_by_class_name("tvgm-broadcast-detail__info-playable").text
temp = temp.split(",")
if(len(temp) == 2):
series_movie.append(temp[0])
genre.append(temp[1])
print("This got executed (Genre)")
if (len(temp) == 1):
series_movie.append(temp[0])
genre.append("-")
except NoSuchElementException:
print("Series/Movie not found")
series_movie.append("-")
genre.append("-")
try:
driver.find_element_by_class_name("tvgm-broadcast-detail__episode-numbers")
myepisode_number = driver.find_element_by_class_name("tvgm-broadcast-detail__episode-numbers").text
tmp = myepisode_number.split(" ")
season_number.append(tmp[1])
#changing done
if(len(tmp)>2):
combined_episode_number = tmp[3].split("/")
if(len(combined_episode_number) ==2):
current_episode.append(combined_episode_number[0])
total_episode.append(combined_episode_number[1])
print("This got executed (Episodes)")
if (len(combined_episode_number) == 1):
current_episode.append(combined_episode_number[0])
total_episode.append("-")
else:
#if both not available
total_episode.append("-")
current_episode.append("-")
print("Epsisode starting and ending exist ")
except NoSuchElementException:
print("Starting ending Episode not exist")
season_number.append("-")
current_episode.append("-")
total_episode.append("-")
#tester
#break
else:
#not interested in this channel
remove_entry(i)
entries_deleted = entries_deleted +1
print("****** ENTRY SKIPPED ********")
links_traveresed = len(links)
#tester
# if(i == ad_index):
# break
driver.get(default_link)
iteration = iteration+1
driver.close()
# print("Starting time = " + ourtime[ad_index])
# print("Actors = " + actors[ad_index])
# print("Associated Channel Name = " + associated_channel_name[ad_index])
# print("Production Date = " + production_date[ad_index])
# print("Show title = " + show_title[ad_index])
# print("Current Episode = " + current_episode[ad_index])
# print("Total Episode = " + total_episode[ad_index])
# print("Genre = " + genre[ad_index])
# print("Series_Movie = " + series_movie[ad_index])
# print("Season Number = " + season_number[ad_index])
# for i in range(0,len(ourtime)):
# if(ourtime[i] == "-"):
# del(ourtime[i])
print(ourtime)
print(actors)
print(associated_channel_name)
print(production_date)
print(show_title)
print(current_episode)
print(total_episode)
print(genre)
print(series_movie)
print(season_number)
print(len(ourtime))
print(len(actors))
print(len(associated_channel_name))
print(len(production_date))
print(len(show_title))
print(len(current_episode))
print(len(total_episode))
print(len(genre))
print(len(series_movie))
print(len(season_number))
import csv
with open('channel_data_210610.csv', mode='w',newline='') as employee_file:
employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(0,len(ourtime)):
if(i==0):
employee_writer.writerow(["Date of Movie","Starting Time","Actors","Channel Name","Production Date","Title of Show","Current Episode","Total Episodes","Genre","Series/Movie","Season Number"])
employee_writer.writerow([date_of_movie,ourtime[i],actors[i],associated_channel_name[i],production_date[i],show_title[i],current_episode[i],total_episode[i],genre[i],series_movie[i],season_number[i]])
| [
"m.verschuere@ailvis.com"
] | m.verschuere@ailvis.com |
95907f7c9ac9ff8ba364dcae91b64148eeed71a5 | 53649e3ecb7023935d612a37ecf5ad45568bbb8d | /Aplikace_1_0/Source/ewitis/gui/DEF_COLUMN.py | e47296468af7ab7e9831c98858c5e460564ed47d | [] | no_license | liuqingchn/ew_aplikace | 157fbc7e0564b29ffe4035724c63d8fc3861512f | efaea537385f9fa90e7f4b4bec430a842c9f7ef6 | refs/heads/master | 2021-01-13T07:20:08.738298 | 2016-04-26T18:54:51 | 2016-04-26T18:54:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,556 | py | # -*- coding: utf-8 -*-
'''
Created on 27.12.2011
@author: Meloun
'''
""" WIDTHS """
WIDTH_NUMBER_4DIGIT = 40
WIDTH_NUMBER_3DIGIT = 35
"""
RUNS
"""
RUNS = {}
""" table collumns """
RUNS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write": False},
#"date" : {"index": 1, "name": "date", "default": "0.0. 2000 00:00:00", "width": 70, "write": True},
#"description" : {"index": 2, "name": "description", "default": "", "width": 10, "write": True}
}
"""
TIMES
"""
TIMES = {}
""" table collumn for times, mode race """
TIMES['table'] = {
"id" : {"index": 0, "name": "id", "name_cz": u"id", "type":"number", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"nr" : {"index": 1, "name": "nr", "name_cz": u"Číslo", "type":"number", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"cell" : {"index": 2, "name": "cell", "name_cz": u"Buňka", "default": 250, "width": 35, "write":True },
"status" : {"index": 3, "name": "status", "name_cz": u"Status", "default": "race", "width": 60, "write":True },
"time1" : {"index": 4, "name": "time1", "name_cz": u"Čas1", "default": "", "width": 80, "write":False },
"lap1" : {"index": 5, "name": "lap1", "name_cz": u"Okruhy1", "default": "", "width": 50, "write":False },
"time2" : {"index": 6, "name": "time2", "name_cz": u"Čas2", "default": "", "width": 80, "write":False },
"lap2" : {"index": 7, "name": "lap2", "name_cz": u"Okruhy2", "default": "", "width": 50, "write":False },
"time3" : {"index": 8, "name": "time3", "name_cz": u"Čas3", "default": "", "width": 80, "write":False },
"lap3" : {"index": 9, "name": "lap3", "name_cz": u"Okruhy3", "default": "", "width": 50, "write":False },
"time4" : {"index": 10, "name": "time4", "name_cz": u"Čas4", "default": "", "width": 80, "write":False },
"lap4" : {"index": 11, "name": "lap4", "name_cz": u"Okruhy4", "default": "", "width": 50, "write":False },
"name" : {"index": 12, "name": "name", "name_cz": u"Jméno", "default": "unknow", "width": 150, "write":False },
"category" : {"index": 13, "name": "category", "name_cz": u"Kategorie", "default": "unknown", "width": 100, "write":False },
"order1" : {"index": 14, "name": "order1", "name_cz": u"Pořadí1", "type":"number", "default": "", "width": 60, "write":False },
"order2" : {"index": 15, "name": "order2", "name_cz": u"Pořadí2", "type":"number", "default": "", "width": 60, "write":False },
"order3" : {"index": 16, "name": "order3", "name_cz": u"Pořadí3", "type":"number", "default": "", "width": 60, "write":False },
"start_nr" : {"index": 17, "name": "start", "name_cz": u"Start", "default": 1, "width": 50, "write":False },
"points1" : {"index": 18, "name": "points1", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"points2" : {"index": 19, "name": "points2", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"points3" : {"index": 20, "name": "points3", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"points4" : {"index": 21, "name": "points4", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"points5" : {"index": 22, "name": "points5", "name_cz": u"Body", "type":"number", "default": "", "width": 60, "write":False },
"un1" : {"index": 23, "name": "un1", "name_cz": u"un1", "default": "", "width": WIDTH_NUMBER_3DIGIT, "write":True },
"un2" : {"index": 24, "name": "un2", "name_cz": u"un2", "default": "", "width": WIDTH_NUMBER_3DIGIT, "write":True },
"un3" : {"index": 25, "name": "un3", "name_cz": u"un3", "default": "", "width": WIDTH_NUMBER_3DIGIT, "write":True },
"us1" : {"index": 26, "name": "us1", "name_cz": u"us1", "default": "", "width": 80, "write":True },
#!! nedavat 'time_raw' => stejne jmeno s tabulkou a kreje se
"timeraw" : {"index": 27, "name": "timeraw", "name_cz": u"Čas Raw", "default": 161, "width": 100, "write":True },
}
"""
USERS
"""
USERS = {}
""" table collumns """
USERS['table'] = { "id" : {"index": 0, "name": "id", "name_cz": u"id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"nr" : {"index": 1, "name": "nr", "name_cz": u"Číslo", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"status" : {"index": 2, "name": "status", "name_cz": u"Status", "default": "race", "width": WIDTH_NUMBER_4DIGIT, "write":True },
"name" : {"index": 3, "name": "name", "name_cz": u"Jméno", "default": "unknown", "width": 100, "write":True },
"first_name" : {"index": 4, "name": "first_name", "name_cz": u"Nevím", "default": "unknown", "width": 100, "write":True },
"category" : {"index": 5, "name": "category", "name_cz": u"Kategorie", "default": "unknown", "width": 100, "write":True },
"club" : {"index": 6, "name": "club", "name_cz": u"Klub", "default": "", "width": 200, "write":True },
"year" : {"index": 7, "name": "year", "name_cz":u"Ročník", "default": "", "width": 70, "write":True },
"sex" : {"index": 8, "name": "sex", "name_cz":u"Pohlaví", "default": "", "width": None, "write":True },
"email" : {"index": 9, "name": "email", "name_cz": u"Email", "default": "", "width": None, "write":True },
"symbol" : {"index": 10, "name": "symbol", "name_cz": u"Nevím", "default": "", "width": None, "write":True },
"paid" : {"index": 11, "name": "paid", "name_cz": u"Nevím", "default": "", "width": None, "write":True },
"note" : {"index": 12, "name": "note", "name_cz": u"Nevím", "default": "", "width": None, "write":True },
"o1" : {"index": 13, "name": "o1", "name_cz":u"#1", "default": "", "width": None, "write":True },
"o2" : {"index": 14, "name": "o2", "name_cz":u"#2", "default": "", "width": None, "write":True },
"o3" : {"index": 15, "name": "o3", "name_cz":u"#3", "default": "", "width": None, "write":True },
"o4" : {"index": 16, "name": "o4", "name_cz":u"#4", "default": "", "width": 10, "write":True },
}
"""
CATEGORIES
"""
CATEGORIES = {}
""" table collumns """
CATEGORIES['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"name" : {"index": 1, "name": "name", "default": "unknown", "width": 200, "write":True },
"description" : {"index": 2, "name": "description", "default": "", "width": 350, "write":True },
"start_nr" : {"index": 3, "name": "start_nr", "default": 1, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g1" : {"index": 4, "name": "g1", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g2" : {"index": 5, "name": "g2", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g3" : {"index": 6, "name": "g3", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g4" : {"index": 7, "name": "g4", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g5" : {"index": 8, "name": "g5", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g6" : {"index": 9, "name": "g6", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g7" : {"index": 10, "name": "g7", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g8" : {"index": 11, "name": "g8", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g9" : {"index": 12, "name": "g9", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
"g10" : {"index": 13, "name": "g10", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True },
#"#" : {"index": 14, "name": "#", "width":0},
}
"""
CATEGORY GROUPS
"""
CGROUPS = {}
""" table collumns """
CGROUPS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"label" : {"index": 1, "name": "label", "default": "gx", "width": 300, "write":True },
"name" : {"index": 2, "name": "name", "default": "", "width": 300, "write":True },
"description" : {"index": 3, "name": "description", "default": "", "width": 300, "write":True },
}
"""
TAGS
"""
TAGS = {}
""" table collumns """
TAGS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"tag_id" : {"index": 1, "name": "tag_id", "default": 0, "width": 160, "write":True },
"printed_nr" : {"index": 2, "name": "printed_nr", "default": 0, "width": 80, "write":True },
"user_nr" : {"index": 3, "name": "user_nr", "default": 0, "width": 80, "write":True },
#"#1" : {"index": 4, "name": "", "width":80},
}
"""
ALLTAGS
"""
ALLTAGS = {}
""" database columns """
ALLTAGS['database'] = {
"id" : {"index": 0, "name": "id", "default": 0},
"tag_id" : {"index": 1, "name": "tag_id", "default": 0},
"printed_nr" : {"index": 2, "name": "printed_nr", "default": 0},
"description" : {"index": 3, "name": "description", "default": ""}
}
""" table collumns """
ALLTAGS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False },
"tag_id" : {"index": 1, "name": "tag_id", "default": 0, "width": 160, "write":True },
"printed_nr" : {"index": 2, "name": "printed_nr", "default": 0, "width": 100, "write":True },
"description" : {"index": 3, "name": "description", "default": "", "width": 300, "write":True }
}
"""
POINTS
"""
POINTS = {}
""" table collumns """
POINTS['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"order_" : {"index": 1, "name": "order", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True},
"points" : {"index": 2, "name": "points", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":True},
"description" : {"index": 3, "name": "description", "default": "", "width": 160, "write":True},
}
"""
RACE INFO
"""
RACEINFO = {}
""" table collumns """
RACEINFO['table'] = {
"id" : {"index": 0, "name": "id", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"name" : {"index": 1, "name": "id", "default": "unknown", "width": 300, "write":False},
"startlist" : {"index": 2, "name": "startlist", "default": 0, "width": 2*WIDTH_NUMBER_4DIGIT, "write":False},
"dns" : {"index": 3, "name": "dns" , "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"finished" : {"index": 4, "name": "finished", "default": 0, "width": 2*WIDTH_NUMBER_4DIGIT, "write":False},
"dnf" : {"index": 5, "name": "dnf", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"dq" : {"index": 6, "name": "dq", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"race" : {"index": 7, "name": "race", "default": 0, "width": 2*WIDTH_NUMBER_4DIGIT, "write":False},
"check" : {"index": 8, "name": "check", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
"-" : {"index": 9, "name": "-", "default": 0, "width": WIDTH_NUMBER_4DIGIT, "write":False},
}
| [
"lubos.melichar@gmail.com"
] | lubos.melichar@gmail.com |
e96c4e354274771e2cc5a75656aabed1495eac89 | 68e2d732dba6fe560e17008d8db1a1dac14a9de2 | /images.py | 33dd614da568844a3bfd7c32045d4ed71b4a3f33 | [] | no_license | MariaGlushikhina/Tormc19 | cd7a5a24ce4d964f09cb41c2636ccc644aad9536 | a9e02a6f2e88dc022d70e2805d838f1722419e0e | refs/heads/master | 2020-08-14T00:42:18.259172 | 2020-03-05T16:39:21 | 2020-03-05T16:39:21 | 215,065,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,707 | py | #!/home/maria/anaconda2/bin/python
import matplotlib as plb
import numpy as np
import pandas as pd
import os
from mpl_toolkits.mplot3d import axes3d
from matplotlib import pyplot as plt
from matplotlib import cm
from radmc3dPy.image import * # Make sure that the shell variable PYTHONPATH points to the RADMC-3D python directory
from radmc3dPy.analyze import * # Make sure that the shell variable PYTHONPATH points to the RADMC-3D python directory
from radmc3dPy import *
pc = 3.08572e18
#dust density contours
fig1=plb.figure()
q = analyze.readGrid(sgrid = True)
xx = q.x[:]
yy = q.y[:]
zz = q.z[:]
#qq = np.meshgrid(xx, yy, zz, indexing='ij')
#xc = qq[0]
#yc = qq[1]
#zc = qq[2]
#x1 = xc[:,:,0]
#y1 = yc[:,:,0]
#z1 = zc[:,:,0]
#z2 = np.pi/2 - y1
data = analyze.readData(ddens=True, binary=False)
v = data.rhodust[0,:,:,0]
c = plb.contourf(zz/natconst.pc, xx/natconst.pc, (data.rhodust[0,:,:,0]), 290)
#plb.xlabel('r [au]')
#plb.axis([-500000, 500000, -250000, 250000])
#plb.ylabel(r' [au]')
#plb.xscale('log')
#plb.yscale('log')
cb = plb.colorbar(c)
cb.set_label(r'$\log_{10}{\rho}$', rotation=270.)
# View a 2-D slice of the 3-D array of the setup
#
#fig2 = plt.figure()
#q = analyze.readGrid(sgrid = True)
#v = analyze.readData(ddens=True, binary=False)
#ax = fig2.add_subplot(111, projection='3d')
#ax.plot_wireframe((y1), (x1), (v), rstride=1, cstride=1)
#surf = ax.plot_surface((y1), (x1) , (v), cmap=cm.coolwarm,linewidth=0, antialiased=False)# change from data1 to zz
#fig2.colorbar(surf, shrink=0.5, aspect=5)
# Make and plot an example image
#
#fig2 = plt.figure()
#makeImage(npix=200,incl=60.,phi=30.,wav=30.,sizeau=300) # This calls radmc3d
#a=readImage()
#plotImage(a,log=True,au=True,maxlog=6,cmap='hot')
# dust temperature contours
#opac = analyze.readOpac(ext=['silicate'])
#fig2=plb.figure()
#data = analyze.readData(dtemp=True)
#c= plb.contourf(data.grid.x/natconst.pc, data.grid.y/natconst.pc, (data.dusttemp[:,:,0,0].T), 298)
#plb.xlabel('r [pc]')
#plb.ylabel(r'$\pi$')
#plb.xscale('log')
#plb.yscale('log')
#plb.axis([-2.5, 2.5, -2, 4])
#cb = plb.colorbar(c)
#cb.set_label('T [K]', rotation=270.)
#c= plb.contour(data.grid.x/natconst.pc, np.pi/2.-data.grid.y/natconst.pc, (data.dusttemp[:,:,0,0].T), 100, colors='k', linestyles='solid')
#plb.clabel(c, inline=1, fontsize=10)
#Plotting it "by hand", the SED as seen at 1 pc distance
#
#fig2 = plt.figure()
#s = readSpectrum()
#lam = s[:,0]
#nu = 1e4*cc/lam
#fnu = s[:,1]
#nufnu = nu*fnu
#plt.plot(lam,nufnu)
#plt.xscale('log')
#plt.yscale('log')
#plt.axis([1e-3, 1e6, 1e+1, 5e17])
#plt.xlabel('$\lambda\; [\mu \mathrm{m}$]')
#plt.ylabel('$\\nu F_\\nu \; [\mathrm{erg}\,\mathrm{cm}^{-2}\,\mathrm{s}^{-1}]$')
# Use the radmc3dPy.analyze tool set for plotting the SED,
# this time let's plot nuLnu in units of Lsun
#
#fig3 = plt.figure()
#plotSpectrum(s,nulnu=True,lsun=True,xlg=True,ylg=False,micron=True)
#plotSpectrum(s,nulnu=True,lsun=True,xlg=True,ylg=True,kev=True)
#plt.axis([1e-8,1e+2,1e+6,3.0e16])
#plot of GIVEN spectrum
#fig4 = plt.figure()
#table = pd.read_csv('inputSpectrum.inp')
#x = table.values[:,0]
#y = table.values[:,1]
#plt.figure(figsize=(15, 7))
#plt.plot(x,y)
#plt.xscale('log')
#plt.yscale('log')
#plt.axis([1e-3, 1e6, 1e+39, 5e+45])
#plt.xlabel('$\lambda')
#plt.ylabel('$\\ \lambda L_\\lambda')
#plot L_lambda dlambda/ L_bol
#fig5 = plt.figure()
#table1 = pd.read_csv('inputSpectrum1.inp')
#x = table1.values[:,0]
#y = table1.values[:,1]
#L_l = y*4*3.1415*(0.001*pc)**2
#L_bol = 10**42
#delta_L = L_l/L_bol
#plt.plot(x,y)
#plt.xscale('log')
#plt.yscale('linear')
#plt.axis([1e-4, 1e6, 9.0e+34, 15.1e+35])
#plt.xlabel('$\lambda')
#plt.ylabel('$\\ L_\\nu')
plb.show()
| [
"noreply@github.com"
] | noreply@github.com |
df5640e1150c5a12ee6d2bf2c276be49536fdbfa | c338b6edd016ce737ada4f46b36eb31d40e44ff8 | /python/gitosis/serve.py | 1dea1853ddea71c29cde79c055f91d328ec89c6b | [] | no_license | HankCoder/workspace_hank | 11636d8a76616ae4a7e7bddae73aa73b477128d4 | 3d4bf554ce84bd5fb29480d1767e9ee73a23c3cb | refs/heads/master | 2020-12-25T15:17:34.965284 | 2016-11-24T02:54:41 | 2016-11-24T02:54:41 | 66,057,839 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,112 | py | """
Enforce git-shell to only serve repositories allowed by the access
control policy, sitting in a given directory. The client should refer
to them without any extra directory prefix. Repository names are forced
to match ALLOW_RE.
"""
import logging
import sys, os, re
from gitosis import access
from gitosis import repository
from gitosis import gitweb
from gitosis import gitdaemon
from gitosis import app
from gitosis import util
from gitosis import snagit
log = logging.getLogger('gitosis.serve')
# Repository path argument received over SSH must match this pattern: a
# single-quoted path of safe characters; leading slashes are allowed but
# excluded from the captured 'path' group.
ALLOW_RE = re.compile("^'/*(?P<path>[a-zA-Z0-9][a-zA-Z0-9@._-]*(/[a-zA-Z0-9][a-zA-Z0-9@._-]*)*)'$")
# Both the dashed (old) and spaced (new) spellings of each git command.
COMMANDS_READONLY = [
    'git-upload-pack',
    'git upload-pack',
    ]
# Commands that push data into the repository; these require write access.
COMMANDS_WRITE = [
    'git-receive-pack',
    'git receive-pack',
    ]
class ServingError(Exception):
    """Serving error"""

    def __str__(self):
        # Each subclass's docstring doubles as its user-facing message.
        return str(self.__doc__)
# Raised for commands with embedded newlines; the docstring is the
# runtime error message (rendered by ServingError.__str__).
class CommandMayNotContainNewlineError(ServingError):
    """Command may not contain newline"""
# Raised for verbs outside the read/write command whitelists; the
# docstring is the runtime error message (via ServingError.__str__).
class UnknownCommandError(ServingError):
    """Unknown command denied"""
# Raised when the repository path fails the ALLOW_RE safety pattern; the
# docstring is the runtime error message (via ServingError.__str__).
class UnsafeArgumentsError(ServingError):
    """Arguments to command look dangerous"""
# Base class for authorization failures; the docstring is the runtime
# error message (via ServingError.__str__).
class AccessDenied(ServingError):
    """Access denied to repository"""
# Raised when a push is attempted with at most read access; the
# docstring is the runtime error message (via ServingError.__str__).
class WriteAccessDenied(AccessDenied):
    """Repository write access denied"""
# Raised when the user has no access to the repository at all; the
# docstring is the runtime error message (via ServingError.__str__).
class ReadAccessDenied(AccessDenied):
    """Repository read access denied"""
def serve(
    cfg,
    user,
    command,
    ):
    """Validate and rewrite a git-shell *command* for *user*.

    Checks the command verb against the read/write whitelists, the
    repository path against ALLOW_RE, and the user's permissions via the
    access-control config.  On success returns the command with the
    repository path rewritten to its absolute on-disk location; on any
    failure raises a ServingError subclass.  For an authorized push to a
    repository that does not exist on disk yet, the bare repository is
    created on the fly (side effects: mkdir, git init, gitweb/gitdaemon
    metadata regeneration).
    """
    if '\n' in command:
        raise CommandMayNotContainNewlineError()
    try:
        verb, args = command.split(None, 1)
    except ValueError:
        # all known "git-foo" commands take one argument; improve
        # if/when needed
        raise UnknownCommandError()
    if verb == 'git':
        try:
            subverb, args = args.split(None, 1)
        except ValueError:
            # all known "git foo" commands take one argument; improve
            # if/when needed
            raise UnknownCommandError()
        # Normalize "git upload-pack" style to a single two-word verb.
        verb = '%s %s' % (verb, subverb)
    if (verb not in COMMANDS_WRITE
        and verb not in COMMANDS_READONLY):
        raise UnknownCommandError()
    match = ALLOW_RE.match(args)
    if match is None:
        raise UnsafeArgumentsError()
    path = match.group('path')
    # write access is always sufficient
    newpath = access.haveAccess(
        config=cfg,
        user=user,
        mode='writable',
        path=path)
    if newpath is None:
        # didn't have write access; try once more with the popular
        # misspelling
        newpath = access.haveAccess(
            config=cfg,
            user=user,
            mode='writeable',
            path=path)
        if newpath is not None:
            log.warning(
                'Repository %r config has typo "writeable", '
                +'should be "writable"',
                path,
                )
    if newpath is None:
        # didn't have write access
        newpath = access.haveAccess(
            config=cfg,
            user=user,
            mode='readonly',
            path=path)
        if newpath is None:
            raise ReadAccessDenied()
        if verb in COMMANDS_WRITE:
            # didn't have write access and tried to write
            raise WriteAccessDenied()
    # haveAccess returns (top-level dir, repo path relative to it).
    (topdir, relpath) = newpath
    assert not relpath.endswith('.git'), \
        'git extension should have been stripped: %r' % relpath
    repopath = '%s.git' % relpath
    fullpath = os.path.join(topdir, repopath)
    if (not os.path.exists(fullpath)
        and verb in COMMANDS_WRITE):
        # it doesn't exist on the filesystem, but the configuration
        # refers to it, we're serving a write request, and the user is
        # authorized to do that: create the repository on the fly
        # create leading directories
        p = topdir
        for segment in repopath.split(os.sep)[:-1]:
            p = os.path.join(p, segment)
            util.mkdir(p, 0750)
        repository.init(path=fullpath)
        gitweb.set_descriptions(
            config=cfg,
            )
        generated = util.getGeneratedFilesDir(config=cfg)
        gitweb.generate_project_list(
            config=cfg,
            path=os.path.join(generated, 'projects.list'),
            )
        gitdaemon.set_export_ok(
            config=cfg,
            )
    # put the verb back together with the new path
    newcmd = "%(verb)s '%(path)s'" % dict(
        verb=verb,
        path=fullpath,
        )
    return newcmd
class Main(app.App):
    """Command-line entry point: gatekeep git-shell behind gitosis ACLs."""

    def create_parser(self):
        """Extend the base option parser with usage/description text."""
        parser = super(Main, self).create_parser()
        parser.set_usage('%prog [OPTS] USER')
        parser.set_description(
            'Allow restricted git operations under DIR')
        return parser
    def handle_args(self, parser, cfg, options, args):
        """Validate the SSH-forwarded command and exec git-shell.

        Expects exactly one positional argument (the authenticated USER)
        and the original client command in $SSH_ORIGINAL_COMMAND.  On
        success this process is replaced by git-shell via execvp and
        never returns; on any failure it exits with status 1.
        """
        try:
            (user,) = args
        except ValueError:
            parser.error('Missing argument USER.')
        main_log = logging.getLogger('gitosis.serve.main')
        # Ensure group/other cannot write anything we create.
        os.umask(0022)
        cmd = os.environ.get('SSH_ORIGINAL_COMMAND', None)
        if cmd is None:
            main_log.error('Need SSH_ORIGINAL_COMMAND in environment.')
            sys.exit(1)
        main_log.debug('Got command %(cmd)r' % dict(
            cmd=cmd,
            ))
        # Repository paths are resolved relative to the gitosis home dir.
        os.chdir(os.path.expanduser('~'))
        if (cmd == "snagit list-repos"):
            # Special-cased non-git command: list readable repositories.
            try:
                snagit.list_repos(cfg, user, cmd)
                sys.exit(0)
            except Exception, e:
                main_log.error('%s', e)
                sys.exit(1)
        try:
            newcmd = serve(
                cfg=cfg,
                user=user,
                command=cmd,
            )
        except ServingError, e:
            main_log.error('%s', e)
            sys.exit(1)
        main_log.debug('Serving %s', newcmd)
        # Replaces the current process; the lines below run only if exec fails.
        os.execvp('git', ['git', 'shell', '-c', newcmd])
        main_log.error('Cannot execute git-shell.')
        sys.exit(1)
"hank199025@gmail.com"
] | hank199025@gmail.com |
dcf8cbefa814888cbcccbb0828952ebf898bce4b | bb852f14d2b1a64ef360e3d31a7db6dcd9e5fba0 | /static/src/document_finder.py | 37f473e391f96dce7f578dac47885affa6949248 | [] | no_license | Damianus04/mlapplications-v3 | 58f97fdd07d44dc690d04bd3e4d7f77870b3adc6 | 4d4f60772f89fa1abd0e9ba266f1207be05bd53b | refs/heads/main | 2023-04-28T15:14:59.571233 | 2021-05-08T16:25:12 | 2021-05-08T16:25:12 | 365,561,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,316 | py | import pandas as pd
from sklearn.metrics.pairwise import cosine_distances
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
def train_tfidf(filepath, col_name, stopwords=None):
    """Fit a unigram+bigram TF-IDF vectorizer on one CSV column.

    Returns the fitted vectorizer and the resulting document-term matrix.
    """
    corpus = pd.read_csv(filepath, encoding='iso-8859-1')[col_name]
    vectorizer = TfidfVectorizer(ngram_range=(1, 2), stop_words=stopwords)
    matrix = vectorizer.fit_transform(corpus.values.astype('U'))
    return vectorizer, matrix
def train_bow(filepath, col_name):
    """Fit a bag-of-words CountVectorizer on one CSV column."""
    documents = pd.read_csv(filepath, encoding='iso-8859-1')[col_name]
    return CountVectorizer().fit(documents)
def document_prediction(query, filepath, col_name, tfidf, tfidf_matrix):
    """Return the 20 documents from the CSV most similar to *query*.

    Similarity is cosine distance between the query's TF-IDF vector and
    the precomputed document matrix (smaller distance = better match).
    """
    df = pd.read_csv(filepath, encoding='iso-8859-1')
    query_vec = tfidf.transform([query])
    distances = cosine_distances(query_vec, tfidf_matrix)
    # argsort is ascending, so the closest documents come first.
    top_indices = distances.argsort()[0, :20].tolist()
    return df[col_name][top_indices].tolist()
STOPWORDS = [
'ada', 'adalah', 'adanya', 'adapun',
'agak',
'agaknya',
'agar',
'akan',
'akankah',
'akhir',
'akhiri',
'akhirnya',
'aku',
'akulah',
'amat',
'amatlah',
'anda',
'andalah',
'antar',
'antara',
'antaranya',
'apa',
'apaan',
'apabila',
'apakah',
'apalagi',
'apatah',
'artinya',
'asal',
'asalkan',
'atas',
'atau',
'ataukah',
'ataupun',
'awal',
'awalnya',
'bagai',
'bagaikan',
'bagaimana',
'bagaimanakah',
'bagaimanapun',
'bagi',
'bagian',
'bahkan',
'bahwa',
'bahwasanya',
'baik',
'bakal',
'bakalan',
'balik',
'banyak',
'bapak',
'baru',
'bawah',
'beberapa',
'begini',
'beginian',
'beginikah',
'beginilah',
'begitu',
'begitukah',
'begitulah',
'begitupun',
'bekerja',
'belakang',
'belakangan',
'belum',
'belumlah',
'benar',
'benarkah',
'benarlah',
'berada',
'berakhir',
'berakhirlah',
'berakhirnya',
'berapa',
'berapakah',
'berapalah',
'berapapun',
'berarti',
'berawal',
'berbagai',
'berdatangan',
'beri',
'berikan',
'berikut',
'berikutnya',
'berjumlah',
'berkali-kali',
'berkata',
'berkehendak',
'berkeinginan',
'berkenaan',
'berlainan',
'berlalu',
'berlangsung',
'berlebihan',
'bermacam',
'bermacam-macam',
'bermaksud',
'bermula',
'bersama',
'bersama-sama',
'bersiap',
'bersiap-siap',
'bertanya',
'bertanya-tanya',
'berturut',
'berturut-turut',
'bertutur',
'berujar',
'berupa',
'besar',
'betul',
'betulkah',
'biasa',
'biasanya',
'bila',
'bilakah',
'bisa',
'bisakah',
'boleh',
'bolehkah',
'bolehlah',
'buat',
'bukan',
'bukankah',
'bukanlah',
'bukannya',
'bulan',
'bung',
'cara',
'caranya',
'cukup',
'cukupkah',
'cukuplah',
'cuma',
'dahulu',
'dalam',
'dan',
'dapat',
'dari',
'daripada',
'datang',
'dekat',
'demi',
'demikian',
'demikianlah',
'dengan',
'depan',
'di',
'dia',
'diakhiri',
'diakhirinya',
'dialah',
'diantara',
'diantaranya',
'diberi',
'diberikan',
'diberikannya',
'dibuat',
'dibuatnya',
'didapat',
'didatangkan',
'digunakan',
'diibaratkan',
'diibaratkannya',
'diingat',
'diingatkan',
'diinginkan',
'dijawab',
'dijelaskan',
'dijelaskannya',
'dikarenakan',
'dikatakan',
'dikatakannya',
'dikerjakan',
'diketahui',
'diketahuinya',
'dikira',
'dilakukan',
'dilalui',
'dilihat',
'dimaksud',
'dimaksudkan',
'dimaksudkannya',
'dimaksudnya',
'diminta',
'dimintai',
'dimisalkan',
'dimulai',
'dimulailah',
'dimulainya',
'dimungkinkan',
'dini',
'dipastikan',
'diperbuat',
'diperbuatnya',
'dipergunakan',
'diperkirakan',
'diperlihatkan',
'diperlukan',
'diperlukannya',
'dipersoalkan',
'dipertanyakan',
'dipunyai',
'diri',
'dirinya',
'disampaikan',
'disebut',
'disebutkan',
'disebutkannya',
'disini',
'disinilah',
'ditambahkan',
'ditandaskan',
'ditanya',
'ditanyai',
'ditanyakan',
'ditegaskan',
'ditujukan',
'ditunjuk',
'ditunjuki',
'ditunjukkan',
'ditunjukkannya',
'ditunjuknya',
'dituturkan',
'dituturkannya',
'diucapkan',
'diucapkannya',
'diungkapkan',
'dong',
'dua',
'dulu',
'empat',
'enggak',
'enggaknya',
'entah',
'entahlah',
'guna',
'gunakan',
'hal',
'hampir',
'hanya',
'hanyalah',
'hari',
'harus',
'haruslah',
'harusnya',
'hendak',
'hendaklah',
'hendaknya',
'hingga',
'ia',
'ialah',
'ibarat',
'ibaratkan',
'ibaratnya',
'ibu',
'ikut',
'ingat',
'ingat-ingat',
'ingin',
'inginkah',
'inginkan',
'ini',
'inikah',
'inilah',
'itu',
'itukah',
'itulah',
'jadi',
'jadilah',
'jadinya',
'jangan',
'jangankan',
'janganlah',
'jauh',
'jawab',
'jawaban',
'jawabnya',
'jelas',
'jelaskan',
'jelaslah',
'jelasnya',
'jika',
'jikalau',
'juga',
'jumlah',
'jumlahnya',
'justru',
'kala',
'kalau',
'kalaulah',
'kalaupun',
'kalian',
'kami',
'kamilah',
'kamu',
'kamulah',
'kan',
'kapan',
'kapankah',
'kapanpun',
'karena',
'karenanya',
'kasus',
'kata',
'katakan',
'katakanlah',
'katanya',
'ke',
'keadaan',
'kebetulan',
'kecil',
'kedua',
'keduanya',
'keinginan',
'kelamaan',
'kelihatan',
'kelihatannya',
'kelima',
'keluar',
'kembali',
'kemudian',
'kemungkinan',
'kemungkinannya',
'kenapa',
'kepada',
'kepadanya',
'kesampaian',
'keseluruhan',
'keseluruhannya',
'keterlaluan',
'ketika',
'khususnya',
'kini',
'kinilah',
'kira',
'kira-kira',
'kiranya',
'kita',
'kitalah',
'kok',
'kurang',
'lagi',
'lagian',
'lah',
'lain',
'lainnya',
'lalu',
'lama',
'lamanya',
'lanjut',
'lanjutnya',
'lebih',
'lewat',
'lima',
'luar',
'macam',
'maka',
'makanya',
'makin',
'malah',
'malahan',
'mampu',
'mampukah',
'mana',
'manakala',
'manalagi',
'masa',
'masalah',
'masalahnya',
'masih',
'masihkah',
'masing',
'masing-masing',
'mau',
'maupun',
'melainkan',
'melakukan',
'melalui',
'melihat',
'melihatnya',
'memang',
'memastikan',
'memberi',
'memberikan',
'membuat',
'memerlukan',
'memihak',
'meminta',
'memintakan',
'memisalkan',
'memperbuat',
'mempergunakan',
'memperkirakan',
'memperlihatkan',
'mempersiapkan',
'mempersoalkan',
'mempertanyakan',
'mempunyai',
'memulai',
'memungkinkan',
'menaiki',
'menambahkan',
'menandaskan',
'menanti',
'menanti-nanti',
'menantikan',
'menanya',
'menanyai',
'menanyakan',
'mendapat',
'mendapatkan',
'mendatang',
'mendatangi',
'mendatangkan',
'menegaskan',
'mengakhiri',
'mengapa',
'mengatakan',
'mengatakannya',
'mengenai',
'mengerjakan',
'mengetahui',
'menggunakan',
'menghendaki',
'mengibaratkan',
'mengibaratkannya',
'mengingat',
'mengingatkan',
'menginginkan',
'mengira',
'mengucapkan',
'mengucapkannya',
'mengungkapkan',
'menjadi',
'menjawab',
'menjelaskan',
'menuju',
'menunjuk',
'menunjuki',
'menunjukkan',
'menunjuknya',
'menurut',
'menuturkan',
'menyampaikan',
'menyangkut',
'menyatakan',
'menyebutkan',
'menyeluruh',
'menyiapkan',
'merasa',
'mereka',
'merekalah',
'merupakan',
'meski',
'meskipun',
'meyakini',
'meyakinkan',
'minta',
'mirip',
'misal',
'misalkan',
'misalnya',
'mula',
'mulai',
'mulailah',
'mulanya',
'mungkin',
'mungkinkah',
'nah',
'naik',
'namun',
'nanti',
'nantinya',
'nyaris',
'nyatanya',
'oleh',
'olehnya',
'pada',
'padahal',
'padanya',
'pak',
'paling',
'panjang',
'pantas',
'para',
'pasti',
'pastilah',
'penting',
'pentingnya',
'per',
'percuma',
'perlu',
'perlukah',
'perlunya',
'pernah',
'persoalan',
'pertama',
'pertama-tama',
'pertanyaan',
'pertanyakan',
'pihak',
'pihaknya',
'pukul',
'pula',
'pun',
'punya',
'rasa',
'rasanya',
'rata',
'rupanya',
'saat',
'saatnya',
'saja',
'sajalah',
'saling',
'sama',
'sama-sama',
'sambil',
'sampai',
'sampai-sampai',
'sampaikan',
'sana',
'sangat',
'sangatlah',
'satu',
'saya',
'sayalah',
'se',
'sebab',
'sebabnya',
'sebagai',
'sebagaimana',
'sebagainya',
'sebagian',
'sebaik',
'sebaik-baiknya',
'sebaiknya',
'sebaliknya',
'sebanyak',
'sebegini',
'sebegitu',
'sebelum',
'sebelumnya',
'sebenarnya',
'seberapa',
'sebesar',
'sebetulnya',
'sebisanya',
'sebuah',
'sebut',
'sebutlah',
'sebutnya',
'secara',
'secukupnya',
'sedang',
'sedangkan',
'sedemikian',
'sedikit',
'sedikitnya',
'seenaknya',
'segala',
'segalanya',
'segera',
'seharusnya',
'sehingga',
'seingat',
'sejak',
'sejauh',
'sejenak',
'sejumlah',
'sekadar',
'sekadarnya',
'sekali',
'sekali-kali',
'sekalian',
'sekaligus',
'sekalipun',
'sekarang',
'sekarang',
'sekecil',
'seketika',
'sekiranya',
'sekitar',
'sekitarnya',
'sekurang-kurangnya',
'sekurangnya',
'sela',
'selain',
'selaku',
'selalu',
'selama',
'selama-lamanya',
'selamanya',
'selanjutnya',
'seluruh',
'seluruhnya',
'semacam',
'semakin',
'semampu',
'semampunya',
'semasa',
'semasih',
'semata',
'semata-mata',
'semaunya',
'sementara',
'semisal',
'semisalnya',
'sempat',
'semua',
'semuanya',
'semula',
'sendiri',
'sendirian',
'sendirinya',
'seolah',
'seolah-olah',
'seorang',
'sepanjang',
'sepantasnya',
'sepantasnyalah',
'seperlunya',
'seperti',
'sepertinya',
'sepihak',
'sering',
'seringnya',
'serta',
'serupa',
'sesaat',
'sesama',
'sesampai',
'sesegera',
'sesekali',
'seseorang',
'sesuatu',
'sesuatunya',
'sesudah',
'sesudahnya',
'setelah',
'setempat',
'setengah',
'seterusnya',
'setiap',
'setiba',
'setibanya',
'setidak-tidaknya',
'setidaknya',
'setinggi',
'seusai',
'sewaktu',
'siap',
'siapa',
'siapakah',
'siapapun',
'sini',
'sinilah',
'soal',
'soalnya',
'suatu',
'sudah',
'sudahkah',
'sudahlah',
'supaya',
'tadi',
'tadinya',
'tahu',
'tahun',
'tak',
'tambah',
'tambahnya',
'tampak',
'tampaknya',
'tandas',
'tandasnya',
'tanpa',
'tanya',
'tanyakan',
'tanyanya',
'tapi',
'tegas',
'tegasnya',
'telah',
'tempat',
'tengah',
'tentang',
'tentu',
'tentulah',
'tentunya',
'tepat',
'terakhir',
'terasa',
'terbanyak',
'terdahulu',
'terdapat',
'terdiri',
'terhadap',
'terhadapnya',
'teringat',
'teringat-ingat',
'terjadi',
'terjadilah',
'terjadinya',
'terkira',
'terlalu',
'terlebih',
'terlihat',
'termasuk',
'ternyata',
'tersampaikan',
'tersebut',
'tersebutlah',
'tertentu',
'tertuju',
'terus',
'terutama',
'tetap',
'tetapi',
'tiap',
'tiba',
'tiba-tiba',
'tidak',
'tidakkah',
'tidaklah',
'tiga',
'tinggi',
'toh',
'tunjuk',
'turut',
'tutur',
'tuturnya',
'ucap',
'ucapnya',
'ujar',
'ujarnya',
'umum',
'umumnya',
'ungkap',
'ungkapnya',
'untuk',
'usah',
'usai',
'waduh',
'wah',
'wahai',
'waktu',
'waktunya',
'walau',
'walaupun',
'wong',
'yaitu',
'yakin',
'yakni',
'yang',
'i',
'me',
'my',
'myself',
'we',
'our',
'ours',
'ourselves',
'you',
"you're",
"you've",
"you'll",
"you'd",
'your',
'yours',
'yourself',
'yourselves',
'he',
'him',
'his',
'himself',
'she',
"she's",
'her',
'hers',
'herself',
'it',
"it's",
'its',
'itself',
'they',
'them',
'their',
'theirs',
'themselves',
'what',
'which',
'who',
'whom',
'this',
'that',
"that'll",
'these',
'those',
'am',
'is',
'are',
'was',
'were',
'be',
'been',
'being',
'have',
'has',
'had',
'having',
'do',
'does',
'did',
'doing',
'a',
'an',
'the',
'and',
'but',
'if',
'or',
'because',
'as',
'until',
'while',
'of',
'at',
'by',
'for',
'with',
'about',
'against',
'between',
'into',
'through',
'during',
'before',
'after',
'above',
'below',
'to',
'from',
'up',
'down',
'in',
'out',
'on',
'off',
'over',
'under',
'again',
'further',
'then',
'once',
'here',
'there',
'when',
'where',
'why',
'how',
'all',
'any',
'both',
'each',
'few',
'more',
'most',
'other',
'some',
'such',
'no',
'nor',
'not',
'only',
'own',
'same',
'so',
'than',
'too',
'very',
's',
't',
'can',
'will',
'just',
'don',
"don't",
'should',
"should've",
'now',
'd',
'll',
'm',
'o',
're',
've',
'y',
'ain',
'aren',
"aren't",
'couldn',
"couldn't",
'didn',
"didn't",
'doesn',
"doesn't",
'hadn',
"hadn't",
'hasn',
"hasn't",
'haven',
"haven't",
'isn',
"isn't",
'ma',
'mightn',
"mightn't",
'mustn',
"mustn't",
'needn',
"needn't",
'shan',
"shan't",
'shouldn',
"shouldn't",
'wasn',
"wasn't",
'weren',
"weren't",
'won',
"won't",
'wouldn',
"wouldn't",
'!',
'"',
'#',
'$',
'%',
'&',
"'",
'(',
')',
'*',
'+',
',',
'-',
'.',
'/',
':',
';',
'<',
'=',
'>',
'?',
'@',
'[',
'\\',
']',
'^',
'_',
'`',
'{',
'|',
'}',
'~']
| [
"damianus.deni@gmail.com"
] | damianus.deni@gmail.com |
3570f49c1da012c350d5d4c2dcf29dd5c6a06d95 | 364d517aedbca646a819ae2c6d894192916a419b | /QIK-Videos/QIK_Web/util/clip_caption_generator.py | 3254096bbf37c88360e18f3ff7754d5f5b318386 | [
"Apache-2.0"
] | permissive | MU-Data-Science/QIK | ac3c3cc8d3d934572ad4a840424df459ab7ff1d2 | 148b3e41e65cf4482535f253e3a9f0985a0339bf | refs/heads/master | 2023-05-25T19:56:58.711646 | 2023-04-25T14:25:16 | 2023-04-25T14:25:16 | 246,338,228 | 3 | 1 | Apache-2.0 | 2023-05-23T03:08:33 | 2020-03-10T15:28:54 | Python | UTF-8 | Python | false | false | 11,416 | py | # Setup
# pip install transformers==4.15.0
# cd $QIK_HOME && pip install git+https://github.com/openai/CLIP.git
# pip install scikit-image==0.17.2
# pip install gdown==4.2.0
# mkdir /mydata/QIK-Videos/QIK_Web/pretrained_models && gdown --id 14pXWwB4Zm82rsDdvbGguLfx9F8aM7ovT -O /mydata/QIK-Videos/QIK_Web/pretrained_models/model_wieghts.pt
import clip
from torch import nn
import numpy as np
import torch
import torch.nn.functional as nnf
from typing import Tuple, List, Union, Optional
from transformers import GPT2Tokenizer, GPT2LMHeadModel, AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm, trange
import skimage.io as io
import PIL.Image
DEVICE = "cpu"
N = type(None)
V = np.array
ARRAY = np.ndarray
ARRAYS = Union[Tuple[ARRAY, ...], List[ARRAY]]
VS = Union[Tuple[V, ...], List[V]]
VN = Union[V, N]
VNS = Union[VS, N]
T = torch.Tensor
TS = Union[Tuple[T, ...], List[T]]
TN = Optional[T]
TNS = Union[Tuple[TN, ...], List[TN]]
TSN = Optional[TS]
TA = Union[T, ARRAY]
D = torch.device
CPU = torch.device('cpu')
MODEL_PATH = "/mydata/QIK-Videos/QIK_Web/pretrained_models/model_wieghts.pt"
PREFIX_LENGTH = 10
# Global Variables
is_init = False
clip_model = None
preprocess = None
tokenizer = None
model = None
def init():
    """Lazily load CLIP, the GPT-2 tokenizer and the caption model (once)."""
    global is_init, clip_model, preprocess, tokenizer, model
    if is_init:
        return
    clip_model, preprocess = clip.load("ViT-B/32", device=DEVICE, jit=False)
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    caption_model = ClipCaptionModel(PREFIX_LENGTH)
    caption_model.load_state_dict(torch.load(MODEL_PATH, map_location=CPU))
    # Inference only: eval mode, pinned to the configured device.
    model = caption_model.eval().to(DEVICE)
    is_init = True
class MLP(nn.Module):
    """Feed-forward network: Linear layers with an activation between each pair.

    `sizes` gives the width of every layer; no activation follows the
    final Linear layer.
    """

    def __init__(self, sizes: Tuple[int, ...], bias=True, act=nn.Tanh):
        super(MLP, self).__init__()
        layers = []
        last = len(sizes) - 2
        for i, (d_in, d_out) in enumerate(zip(sizes[:-1], sizes[1:])):
            layers.append(nn.Linear(d_in, d_out, bias=bias))
            # Skip the activation after the last linear layer.
            if i < last:
                layers.append(act())
        self.model = nn.Sequential(*layers)

    def forward(self, x: T) -> T:
        return self.model(x)
class ClipCaptionModel(nn.Module):
    """GPT-2 caption decoder conditioned on a CLIP embedding via a learned prefix."""

    def __init__(self, prefix_length: int, prefix_size: int = 512):
        super(ClipCaptionModel, self).__init__()
        self.prefix_length = prefix_length
        self.gpt = GPT2LMHeadModel.from_pretrained('gpt2')
        # Width of GPT-2's token-embedding table.
        self.gpt_embedding_size = self.gpt.transformer.wte.weight.shape[1]
        if prefix_length > 10:  # not enough memory for the MLP variant
            self.clip_project = nn.Linear(prefix_size, self.gpt_embedding_size * prefix_length)
        else:
            out_dim = self.gpt_embedding_size * prefix_length
            self.clip_project = MLP((prefix_size, out_dim // 2, out_dim))

    def get_dummy_token(self, batch_size: int, device: D) -> T:
        """Zero token ids used as placeholder labels for the prefix positions."""
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def forward(self, tokens: T, prefix: T, mask: Optional[T] = None, labels: Optional[T] = None):
        """Run GPT-2 on [projected CLIP prefix ++ caption token embeddings]."""
        token_embeddings = self.gpt.transformer.wte(tokens)
        prefix_embeddings = self.clip_project(prefix).view(
            -1, self.prefix_length, self.gpt_embedding_size)
        inputs = torch.cat((prefix_embeddings, token_embeddings), dim=1)
        if labels is not None:
            # Prepend dummy labels so the label length matches prefix+tokens.
            dummy = self.get_dummy_token(tokens.shape[0], tokens.device)
            labels = torch.cat((dummy, tokens), dim=1)
        return self.gpt(inputs_embeds=inputs, labels=labels, attention_mask=mask)
class ClipCaptionPrefix(ClipCaptionModel):
    """Variant that freezes GPT-2: only the CLIP projection head is trained."""

    def parameters(self, recurse: bool = True):
        # Expose only the projection head to the optimizer.
        return self.clip_project.parameters()

    def train(self, mode: bool = True):
        # Toggle training mode as usual, but keep GPT-2 permanently in eval.
        super().train(mode)
        self.gpt.eval()
        return self
def generate_beam(model, tokenizer, beam_size: int = 5, prompt=None, embed=None,
                  entry_length=67, temperature=1., stop_token: str = '.'):
    """Beam-search caption decoding with GPT-2.

    Seeds generation from either `prompt` (text) or `embed` (precomputed
    prefix embeddings).  Returns `beam_size` candidate strings sorted by
    length-normalized log-probability, best first.
    """
    model.eval()
    stop_token_index = tokenizer.encode(stop_token)[0]
    tokens = None
    scores = None
    device = next(model.parameters()).device
    # Per-beam generated length and "already emitted stop token" flags.
    seq_lengths = torch.ones(beam_size, device=device)
    is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
    with torch.no_grad():
        if embed is not None:
            generated = embed
        else:
            if tokens is None:
                tokens = torch.tensor(tokenizer.encode(prompt))
                tokens = tokens.unsqueeze(0).to(device)
                generated = model.gpt.transformer.wte(tokens)
        for i in range(entry_length):
            outputs = model.gpt(inputs_embeds=generated)
            logits = outputs.logits
            # Last position only, temperature-scaled, converted to log-probs.
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                # First step: fan the single seed out into `beam_size` beams.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Finished beams may only "emit" token 0 at zero cost, so
                # their cumulative score stays frozen.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                # Rank all (beam, vocab) candidates by average log-prob.
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                # Recover which source beam each flat candidate came from,
                # then gather that beam's state.
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                # Store un-normalized cumulative scores for the next step.
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            # Append the chosen token's embedding to every beam.
            next_token_embed = model.gpt.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
    # Final ranking uses length-normalized scores.
    scores = scores / seq_lengths
    output_list = tokens.cpu().numpy()
    output_texts = [tokenizer.decode(output[:int(length)]) for output, length in zip(output_list, seq_lengths)]
    order = scores.argsort(descending=True)
    output_texts = [output_texts[i] for i in order]
    return output_texts
def generate2(
        model,
        tokenizer,
        tokens=None,
        prompt=None,
        embed=None,
        entry_count=1,
        entry_length=67,  # maximum number of words
        top_p=0.8,
        temperature=1.,
        stop_token: str = '.',
):
    """Top-p (nucleus) caption decoding with GPT-2.

    Seeds from `embed` (prefix embeddings), `tokens`, or `prompt` text
    and returns the first generated string.  Within the nucleus the
    highest-probability token is chosen (argmax, not sampling), so the
    output is deterministic.
    """
    model.eval()
    generated_num = 0  # NOTE(review): unused
    generated_list = []
    stop_token_index = tokenizer.encode(stop_token)[0]
    filter_value = -float("Inf")
    device = next(model.parameters()).device
    with torch.no_grad():
        for entry_idx in trange(entry_count):
            if embed is not None:
                generated = embed
            else:
                if tokens is None:
                    tokens = torch.tensor(tokenizer.encode(prompt))
                    tokens = tokens.unsqueeze(0).to(device)
                generated = model.gpt.transformer.wte(tokens)
            for i in range(entry_length):
                outputs = model.gpt(inputs_embeds=generated)
                logits = outputs.logits
                logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
                # Sort descending and accumulate probability mass.
                sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                cumulative_probs = torch.cumsum(nnf.softmax(sorted_logits, dim=-1), dim=-1)
                # Mask everything past the top-p nucleus; the shift keeps
                # the first token that crosses the threshold.
                sorted_indices_to_remove = cumulative_probs > top_p
                sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[
                    ..., :-1
                ].clone()
                sorted_indices_to_remove[..., 0] = 0
                indices_to_remove = sorted_indices[sorted_indices_to_remove]
                logits[:, indices_to_remove] = filter_value
                # Greedy pick inside the remaining nucleus.
                next_token = torch.argmax(logits, -1).unsqueeze(0)
                next_token_embed = model.gpt.transformer.wte(next_token)
                if tokens is None:
                    tokens = next_token
                else:
                    tokens = torch.cat((tokens, next_token), dim=1)
                generated = torch.cat((generated, next_token_embed), dim=1)
                if stop_token_index == next_token.item():
                    break
            output_list = list(tokens.squeeze().cpu().numpy())
            output_text = tokenizer.decode(output_list)
            generated_list.append(output_text)
    return generated_list[0]
def clean_captions(caption):
    """Post-process a generated caption.

    Trims a trailing period, keeps only the text after a "prefix : "
    lead-in, and drops the second comma-separated clause when there are
    at most two commas.

    Bug fix: the original tested ``"." in caption`` and then chopped the
    last character, which corrupted captions containing an *interior*
    period (e.g. "Mr. Smith" -> "Mr. Smit").  Only a trailing period is
    removed now.
    """
    stripped = caption.strip()
    if stripped.endswith("."):
        caption = stripped[:-1].strip()
    if " : " in caption:
        # Keep only the text after the last " : " separator.
        return caption.split(" : ")[-1]
    if "," in caption:
        splits = caption.split(",")
        if len(splits) > 3:
            print("clip_caption_generator :: clean_captions :: Caption has more than two comas")
        else:
            # Drop the second clause; the leading space kept on the
            # remaining clause makes the joined text read naturally.
            splits.pop(1)
        return "".join(splits)
    return caption
def get_captions(img_file):
    """Generate a cleaned caption for the image stored at *img_file*."""
    global is_init, clip_model, preprocess, tokenizer, model
    if not is_init:
        print("clip_caption_generator :: get_captions :: Initializing")
        init()
    use_beam_search = False
    # Load from disk and convert to a PIL image for CLIP preprocessing.
    pil_image = PIL.Image.fromarray(io.imread(img_file))
    image = preprocess(pil_image).unsqueeze(0).to(DEVICE)
    with torch.no_grad():
        # CLIP image embedding -> learned prefix in GPT-2 embedding space.
        prefix = clip_model.encode_image(image).to(DEVICE, dtype=torch.float32)
        prefix_embed = model.clip_project(prefix).reshape(1, PREFIX_LENGTH, -1)
    if use_beam_search:
        raw_caption = generate_beam(model, tokenizer, embed=prefix_embed)[0]
    else:
        raw_caption = generate2(model, tokenizer, embed=prefix_embed)
    return clean_captions(raw_caption)
def get_captions_from_img_arr(image_arr):
    """Generate a cleaned caption for an already-loaded image object."""
    global is_init, clip_model, preprocess, tokenizer, model
    if not is_init:
        print("clip_caption_generator :: get_captions :: Initializing")
        init()
    use_beam_search = False
    image = preprocess(image_arr).unsqueeze(0).to(DEVICE)
    with torch.no_grad():
        # CLIP image embedding -> learned prefix in GPT-2 embedding space.
        prefix = clip_model.encode_image(image).to(DEVICE, dtype=torch.float32)
        prefix_embed = model.clip_project(prefix).reshape(1, PREFIX_LENGTH, -1)
    if use_beam_search:
        raw_caption = generate_beam(model, tokenizer, embed=prefix_embed)[0]
    else:
        raw_caption = generate2(model, tokenizer, embed=prefix_embed)
    return clean_captions(raw_caption)
if __name__ == '__main__':
    # Manual smoke test against a sample extracted keyframe.
    print(get_captions("/mydata/MSR_VTT/video7960_keyframe_205.jpg"))
| [
"az2z7@mail.umkc.edu"
] | az2z7@mail.umkc.edu |
189638b913ac8e4f95628be830208ded60454bf1 | 994e5b7156a8c1429238facc1463ad1846f1a89a | /models/official/nlp/xlnet/xlnet_config.py | 95ab092442ef4f4b96e61d91ed391051469e8441 | [
"Apache-2.0"
] | permissive | TrellixVulnTeam/Felect_M46O | f0c2a9a6c48695705e0b68c92c3a414bacfaa599 | 6d8b80e216c40233d2c1b9e51fe6f605a3b5ef4b | refs/heads/main | 2023-04-22T11:33:59.448117 | 2021-05-06T13:01:12 | 2021-05-06T13:01:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,317 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions used in XLNet model."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import json
import os
import tensorflow as tf
def create_run_config(is_training, is_finetune, flags):
  """Build a RunConfig from command-line flags.

  Finetuning uses only the common subset of options; pretraining
  (is_finetune=False) additionally forwards the memory/bidirectional-data
  options.
  """
  kwargs = {
      'is_training': is_training,
      'use_tpu': flags.use_tpu,
      'dropout': flags.dropout,
      'dropout_att': flags.dropout_att,
      'init_method': flags.init_method,
      'init_range': flags.init_range,
      'init_std': flags.init_std,
      'clamp_len': flags.clamp_len,
  }
  if not is_finetune:
    kwargs.update(
        mem_len=flags.mem_len,
        reuse_len=flags.reuse_len,
        bi_data=flags.bi_data,
        clamp_len=flags.clamp_len,
        same_length=flags.same_length)
  return RunConfig(**kwargs)
# TODO(hongkuny): refactor XLNetConfig and RunConfig.
class XLNetConfig(object):
  """Checkpoint-level hyperparameters for an XLNet model.

  These values are tied to a trained checkpoint and must match between
  pretraining and finetuning:

    n_layer: int, the number of layers.
    d_model: int, the hidden size.
    n_head: int, the number of attention heads.
    d_head: int, the dimension size of each attention head.
    d_inner: int, the hidden size in feed-forward layers.
    ff_activation: str, "relu" or "gelu".
    untie_r: bool, whether to untie the biases in attention.
    n_token: int, the vocab size.
  """

  def __init__(self, FLAGS=None, json_path=None, args_dict=None):
    """Constructs an XLNetConfig from flags, a json file, or a dict.

    At least one of FLAGS, json_path, args_dict must be provided.
    """
    assert FLAGS is not None or json_path is not None or args_dict is not None
    self.keys = [
        'n_layer', 'd_model', 'n_head', 'd_head', 'd_inner', 'ff_activation',
        'untie_r', 'n_token'
    ]
    if FLAGS is not None:
      self.init_from_flags(FLAGS)
    if json_path is not None:
      self.init_from_json(json_path)
    if args_dict is not None:
      self.init_from_dict(args_dict)

  def init_from_dict(self, args_dict):
    """Copies every known hyperparameter out of a plain dict."""
    for key in self.keys:
      setattr(self, key, args_dict[key])

  def init_from_flags(self, flags):
    """Copies every known hyperparameter off a FLAGS-like object."""
    self.init_from_dict({key: getattr(flags, key) for key in self.keys})

  def init_from_json(self, json_path):
    """Loads hyperparameters from a json file (read via tf.io.gfile)."""
    with tf.io.gfile.GFile(json_path) as f:
      self.init_from_dict(json.load(f))

  def to_json(self, json_path):
    """Saves XLNetConfig to a json file, creating the directory if needed."""
    json_data = {key: getattr(self, key) for key in self.keys}
    json_dir = os.path.dirname(json_path)
    if not tf.io.gfile.exists(json_dir):
      tf.io.gfile.makedirs(json_dir)
    with tf.io.gfile.GFile(json_path, 'w') as f:
      json.dump(json_data, f, indent=4, sort_keys=True)
class RunConfig(object):
  """Run-level hyperparameters for XLNet.

  Unlike XLNetConfig these may differ between pretraining and finetuning
  (and from run to run), so they are stored separately.
  """

  def __init__(self,
               is_training,
               use_tpu,
               dropout,
               dropout_att,
               init_method='normal',
               init_range=0.1,
               init_std=0.02,
               mem_len=None,
               reuse_len=None,
               bi_data=False,
               clamp_len=-1,
               same_length=False,
               use_cls_mask=True):
    """Initializes RunConfig.

    Args:
      is_training: bool, whether in training mode.
      use_tpu: bool, whether TPUs are used.
      dropout: float, dropout rate.
      dropout_att: float, dropout rate on attention probabilities.
      init_method: str, "normal" or "uniform" initialization scheme.
      init_range: float, parameters are drawn from
        [-init_range, init_range]. Only effective when init="uniform".
      init_std: float, stddev of the normal initializer. Only effective
        when init="normal".
      mem_len: int, the number of tokens to cache.
      reuse_len: int, the number of tokens in the current batch to be
        cached and reused in the future.
      bi_data: bool, whether to use the bidirectional input pipeline
        (usually True during pretraining, False during finetuning).
      clamp_len: int, clamp all relative distances larger than clamp_len;
        -1 means no clamping.
      same_length: bool, whether to use the same attention length for
        each token.
      use_cls_mask: bool, whether to introduce cls mask.
    """
    self.is_training = is_training
    self.use_tpu = use_tpu
    self.dropout = dropout
    self.dropout_att = dropout_att
    self.init_method = init_method
    self.init_range = init_range
    self.init_std = init_std
    self.mem_len = mem_len
    self.reuse_len = reuse_len
    self.bi_data = bi_data
    self.clamp_len = clamp_len
    self.same_length = same_length
    self.use_cls_mask = use_cls_mask
| [
"noreply@github.com"
] | noreply@github.com |
c53f03b21f9f7008df76eca8a8135aa0f563011b | b6afc1dbf97e05409fa4ab39529255abcf023473 | /mysite/aaa.py | 78cdca01dcd060bf341fceca7510fade9447fdf4 | [] | no_license | yhnn123/my-first-blog | acde2fc4447e9e15b010a44c96108f28fda18268 | 52e3a581773e922d27e94f9fe9209d41eb65d7ac | refs/heads/master | 2021-01-10T01:16:32.672273 | 2015-10-28T15:21:06 | 2015-10-28T15:21:06 | 43,590,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # encoding='utf-8'
a = 'sonja'
if a == 'soonja':
print("a")
elif a == 'sonja':
print("b")
else:
print("c")
def hi(name):
if a == 'soonja':
print("a")
elif a == 'sonja':
print("b")
else:
print("c")
hi('sonja')
def hi(name):
print('Hi ' + name + '!')
girls = ['aaa', 'bbb', 'ccc', 'ddd']
for name in girls:
hi(name)
print('next girl') | [
"yhnn123@gmail.com"
] | yhnn123@gmail.com |
0e364ee751aa1da342a06e4810db079e8ef1c9eb | 1d08546fb80047b5b39c95f5a6a2829276ad1694 | /sync_m3u8_downloader.py | 7a87f36e90ea5d6f9ce11e4c6bfda7eb016d596d | [] | no_license | hyzsj0106/m3u8_downloader | aecb97c7935ccfbb9e0ee47bd8353c530f60a1e4 | 8c832ec9099ba9acaca94e0078ae9566eed43769 | refs/heads/main | 2023-03-03T23:19:31.890515 | 2021-02-22T08:21:02 | 2021-02-22T08:21:02 | 341,113,917 | 0 | 0 | null | 2021-02-22T08:21:02 | 2021-02-22T07:24:53 | null | UTF-8 | Python | false | false | 7,113 | py | import random, os, sys, shutil
import time, string, logging
from urllib.parse import urljoin
import subprocess
import platform
sys.path.append('..')
from downloader_req import fetch
from parsel_url import ParseUrl
from concurrent.futures import ProcessPoolExecutor
prefix = '总有妖怪想害朕'
workers = 12
urls_str = '''
第01集$http://iqiyi.cdn9-okzy.com/20210209/22159_7d0fb636/index.m3u8
第02集$http://iqiyi.cdn9-okzy.com/20210209/22160_203248b8/index.m3u8
第03集$http://iqiyi.cdn9-okzy.com/20210209/22158_806c82a6/index.m3u8
第04集$http://iqiyi.cdn9-okzy.com/20210209/22162_b1bcb173/index.m3u8
第05集$http://iqiyi.cdn9-okzy.com/20210209/22161_24a477d6/index.m3u8
'''
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s: %(message)s')
class DownloadM3u8:
def __init__(self):
self.m3u8_video_list = []
self.workers = workers
self.final_url = ''
# 合并 video 片段
def merge(self, video_name, short_name):
os.chdir(os.path.dirname(__file__) + f'/{short_name}/')
try:
if platform.system() == 'Windows':
merge_cmd = f'copy /b *.ts {video_name}.ts'
else:
merge_cmd = f'cat *.ts > {video_name}.ts'
logging.info('正在合并视频片段: %s : %s', video_name, merge_cmd)
devNull = open(os.devnull, 'w')
subprocess.call(merge_cmd, shell=True, stdout=devNull)
logging.info('合并视频片段-%s-成功: %s', short_name, video_name)
except Exception as e:
logging.error('合并视频片段-%s-失败详情: %s', short_name, e)
# 移动文件
logging.info('尝试移动合成文件: %s', video_name)
from_path = os.path.dirname(__file__) + f'/{short_name}/' + video_name + '.ts'
if not os.path.exists(from_path):
logging.info('移动合成文件不存在: %s', video_name)
return
try:
to_path = os.path.dirname(__file__)
shutil.copy(from_path, to_path)
logging.info('视频移动成功')
except Exception as e:
logging.error('视频移动失败: %s', e)
# 合并成功后删除源video片段
def del_video_part(self, short_name):
dir_path = os.path.dirname(__file__) + f'/{short_name}'
os.chdir(os.path.dirname(__file__))
try:
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
except Exception as e:
logging.error('文件夹删除失败! %s', e)
# 下载失败重试
def retry(self, v_url, binary=True):
counter = 0
while counter < 3:
counter += 1
status, final_url, html = fetch(v_url, binary=binary, timeout=10)
logging.info('video_url: %s 正在执行重新下载, 状态码: %s 重试第 %s 次', v_url, status, counter)
if status == 200 and html:
# 处理 video 下载
return status, final_url, html
if binary:
html = b''
else:
html = ''
return 0, v_url, html
# 处理 video 下载
def download_video(self, video_name, part_url, index, short_name, video_list_length):
# 计算进度
percent = int(index / video_list_length * 100)
# 拼接视频片段url
video_url = urljoin(self.final_url, part_url)
part_name = short_name + str(index).zfill(4) + '.ts'
logging.info('主任务: %s 正在执行子任务: %s 当前进度 %s %% ', video_name, part_name, percent)
# 文件保存全路径
file_path = os.path.dirname(__file__) + f'/{short_name}/' + part_name
status, _, content_part = fetch(video_url, binary=True)
if status != 200 or not content_part:
logging.info('主任务: %s 正在执行子任务: %s 状态码: %s 即将重试', video_name, part_name, status)
status, _, content_part = self.retry(video_url, binary=True)
logging.info('主任务: %s 正在执行子任务: %s 状态码: %s 重试成功,继续下载', video_name, part_name, status)
# 创建视频文件夹
dir_path = os.path.dirname(__file__) + '/' + short_name
if not os.path.exists(dir_path):
os.mkdir(dir_path)
try:
with open(file_path, 'wb')as f:
f.write(content_part)
except Exception as e:
logging.error('写入文件失败 %s', e)
# 正确的m3u8链接
def final_m3u8_url(self, url):
status, _, response = fetch(url)
if '#EXTM3U' not in response:
logging.error('非m3u8视频格式文件')
return False
res_list = response.strip().split('\n')
if ('.asp' not in response or '.asp' not in response) and len(res_list) <= 3:
return urljoin(url, res_list[-1])
else:
return url
# 解析返回的m3u8链接,获取ts段落
def get_v_list(self, final_url):
status, _, response = fetch(final_url)
m3u8_v_list = []
for index, part_url in enumerate(response.strip().split('\n')):
if not part_url: continue
if '.asp' in part_url or '.ts' in part_url:
# 未经修改的index.m3u8源视频片段
part_url = part_url.strip('\r')
m3u8_v_list.append(part_url)
logging.info('m3u8_v_list : %s', m3u8_v_list)
return m3u8_v_list
    def start(self):
        """Entry point: resolve each video URL, download its segments in
        parallel worker processes, then merge them and clean up."""
        # Startup notice.
        logging.info('The program started successfully!')
        # Classify the configured URL(s) and get back a {video_name: url} map.
        # NOTE(review): ParseUrl, prefix and urls_str are module-level names
        # defined elsewhere in this file — confirm they are set before start().
        item = ParseUrl.urls_type(prefix, urls_str.strip())
        logging.info('The item: %s', item)
        # Download each video in turn.
        for video_name, url in item.items():
            # Resolve to the real media playlist URL (shared with the workers
            # via self.final_url, which download_video reads).
            self.final_url = self.final_m3u8_url(url)
            logging.info('final_url : %s', self.final_url)
            m3u8_video_list = self.get_v_list(self.final_url)
            if m3u8_video_list:
                video_list_length = len(m3u8_video_list)
                # Random 3-letter prefix names this video's segment directory.
                short_name = ''.join(random.sample(string.ascii_letters, 3))
                # Fan the segments out to a pool of worker processes; the
                # `with` block waits for all submissions to finish.
                with ProcessPoolExecutor(max_workers=self.workers) as executor:
                    # One task per segment.
                    for index, part_url in enumerate(m3u8_video_list):
                        executor.submit(self.download_video, video_name, part_url, index, short_name, video_list_length)
                logging.info('正在合并 %s 片段', video_name)
                self.merge(video_name, short_name)
                logging.info('正在删除 %s 片段', video_name)
                self.del_video_part(short_name)
                logging.info('%s 视频处理完成~', video_name)
            else:
                logging.error('m3u8.m3u8_video_list 是空的!')
if __name__ == '__main__':
    # Time the whole run in wall-clock seconds.
    start = time.time()
    downloader = DownloadM3u8()
    downloader.start()
    time_cost = time.time() - start
    print(time_cost)
| [
"hyzsj0106@outlook.com"
] | hyzsj0106@outlook.com |
9bc5c6832087d414ceb41dd501185016b6ad0f74 | a2e587dec2538977cb653757de748d39f1baf7cf | /strinstrip.py | bf8b66ffc2456005d852903f267568f61491d855 | [] | no_license | byyasar/python-ders | 2b177f9af75f763036346396aca348a310f4444c | 9e4918da568b52d06fefdc11c427cce4d6198594 | refs/heads/master | 2023-04-28T14:56:43.412872 | 2021-05-17T08:42:55 | 2021-05-17T08:42:55 | 340,045,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | cumle = " bilişim teknolojilerine giriş "
# strip() with no argument removes leading and trailing whitespace.
# print(cumle.strip())
# With " bil", the characters <space>, 'b', 'i', 'l' are stripped from both
# ends (strip() takes a *set of characters*, not a prefix/suffix string).
print(cumle.strip(" bil"))
# If none of the given characters appear at either end of the string,
# nothing is removed.
# print(cumle.strip("prg"))
# cumle2 = 'python çok kullanışlı'
# print(cumle2.strip("pyt")) | [
"45915186+byyasar@users.noreply.github.com"
] | 45915186+byyasar@users.noreply.github.com |
34162f273f83ff44ba9b5751d76399be86ce110d | ed63a0e28568da84a937e17049f49b4b51d32db8 | /chap5 exer2 nums.py | daaa739a7ad0915a92d2f74146b481a2b08aaf63 | [] | no_license | Ramzi331/learning_python | e1612f8d312b0383df8d655236cc22decdd50651 | 74e0188779a8f909f83beecfa50987be2799794f | refs/heads/main | 2023-03-31T18:13:48.799027 | 2021-04-09T10:29:13 | 2021-04-09T10:29:13 | 351,750,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | total = 0
count = 0
numlist = []
# Read numbers until the user types "done", then print summary statistics.
while True:
    num = input("inter a number: ")
    if num == ("done"):
        # BUGFIX: guard the summary — min()/max() on an empty list raised
        # ValueError when "done" was entered before any number.
        if numlist:
            print("total: ", total, "\ncount: ", count, "\nmin: ", min(numlist), "\nmax :", max(numlist), "\nDone!")
        else:
            print("no numbers were entered")
        quit()
    else:
        try:
            num = float(num)
        # BUGFIX: catch only ValueError — the old bare `except:` also
        # swallowed KeyboardInterrupt and SystemExit.
        except ValueError:
            print("numbers only please!")
            continue
    total += num
    count += 1
numlist.append(num) | [
"noreply@github.com"
] | noreply@github.com |
5e08adeaa3bd2f143bd74229189142d41b1c447c | ec93efe07bcda655ee65782090b16c1f0c2c3153 | /embedvis.py | 98d6aedde8c5e7e65d212d4cfba1a77cc1e831e7 | [] | no_license | BaileyDalton007/mnist-cnn | 2a90e58aff8a1951849859ffaa462ba27fd94967 | 6bd224a5a38d9ef51750f2435d9f2b6af8b55d9a | refs/heads/main | 2023-09-01T03:27:40.572503 | 2021-11-01T22:53:25 | 2021-11-01T22:53:25 | 403,068,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorboard.plugins import projector
# Load the Fashion-MNIST test split; column 0 is the label, the remaining
# 784 columns are the 28x28 pixel values.
# NOTE(review): relative Windows-style path — only works from the repo root.
test_df = pd.read_csv(r'data\fashion-mnist_test.csv')
test_data = np.array(test_df, dtype='float32')
# Number of samples embedded; 1600 = 40*40 fills the square sprite exactly.
embed_count = 1600
xTest = test_data[:embed_count, 1:] / 255
yTest = test_data[:embed_count, 0]
# NOTE(review): absolute, machine-specific log directory.
logdir = r'C:\Users\Bailey\Desktop\Projects\OSUSARC\mnist-cnn\logs\embed'
summary_writer = tf.summary.FileWriter(logdir)
# The embedding tensor the projector will visualize.
embedding_var = tf.Variable(xTest, name='fmnist_embedding')
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.metadata_path = os.path.join(logdir, 'metadata.tsv')
embedding.sprite.image_path = os.path.join(logdir, 'sprite.png')
embedding.sprite.single_image_dim.extend([28,28])
# NOTE(review): newer TensorBoard versions expect a logdir string here
# instead of a FileWriter — confirm against the installed version.
projector.visualize_embeddings(summary_writer, config)
# Save a checkpoint so TensorBoard can read the embedding variable.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(logdir, 'model.ckpt'))
rows = 28
cols = 28
label = ['t_shirt', 'trouser', 'pullover', 'dress', 'coat',
         'sandal', 'shirt', 'sneaker', 'bag', 'ankle_boot']
# Build the sprite image: a sqrt(N) x sqrt(N) grid of 28x28 thumbnails.
sprite_dim = int(np.sqrt(xTest.shape[0]))
sprite_image = np.ones((cols * sprite_dim, rows * sprite_dim))
index = 0
labels = []
for i in range(sprite_dim):
    for j in range(sprite_dim):
        labels.append(label[int(yTest[index])])
        # Invert the pixel values (* -1 + 1) so digits show dark-on-light.
        sprite_image[
            i * cols: (i + 1) * cols,
            j * rows: (j + 1) * rows
        ] = xTest[index].reshape(28, 28) * -1 + 1
        index += 1
# Metadata file column 1 is index and column 2 is label.
# NOTE(review): the loop variable `label` below shadows the class-name list
# defined above; harmless here because the list is no longer used.
with open(embedding.metadata_path, 'w') as meta:
    meta.write('Index\tLabel\n')
    for index, label in enumerate(labels):
        meta.write('{}\t{}\n'.format(index, label))
plt.imsave(embedding.sprite.image_path, sprite_image, cmap='gray')
plt.imshow(sprite_image, cmap='gray')
plt.show() | [
"baileydalton007@gmail.com"
] | baileydalton007@gmail.com |
c3d2893240ca2e78ab668db51e08f86f89dbb6b2 | 45a15bbaf9f5ecc41d64ce2331cf9687999ce0c5 | /Sift.py | b53aa5410a0b57be8fbcaa70ea6d3f3f092d4bd9 | [] | no_license | KKDT123/Image-stitching-based-on-sift | 446b3d870cb8a770efa86685618e38eb797a6135 | 571a6991070729fc406d80ef608bf3b9f1788e63 | refs/heads/master | 2022-04-23T16:28:41.542958 | 2020-04-29T15:04:07 | 2020-04-29T15:04:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,314 | py | import func # func是我自己编写的函数库
import numpy as np
import math
import matplotlib.pyplot as plt
import copy
import cv2
from PIL import Image
class KEYPOINTS:
    """Plain mutable container for one SIFT keypoint.

    All fields start at 0 and are filled in by the detector:
    position (x, y), pyramid octave/layer, scales (sig, sigo),
    response value, support radius and dominant orientation.
    """

    def __init__(self):
        # position and pyramid coordinates
        self.x = 0
        self.y = 0
        self.layer = 0
        self.o = 0
        # scale and response
        self.sig = 0
        self.sigo = 0
        self.val = 0
        # orientation support
        self.r = 0
        self.dir = 0
class SIFT:
    """
    SIFT feature extractor.

    Given an input image and initialization parameters, builds the
    Gaussian and difference-of-Gaussian (DoG) pyramids, from which the
    DoG images, the keypoints and their feature descriptors are obtained.
    """
    def __init__(self, img, n=3, sigma=1.52, BinNum=36):
        """
        Initializer.
        :param img: image to process (grayscale or color ndarray)
        :param n: number of layers per octave usable for keypoint detection
        :param sigma: initial scale of the Gaussian kernel
        :param BinNum: number of bins of the orientation histogram
        """
        self.ori = img
        self.Img = img
        if len(img.shape) >= 3:
            # Collapse color channels to a single grayscale plane.
            self.Img = img.mean(axis=-1)
        self.n, self.sigma = n, sigma
        self.BinNum = BinNum
        self.T = 0.04
        self.gam = 10
        # Contrast threshold used to pre-filter candidate extrema.
        self.the = 0.5 * self.T / (self.n *255)
        self.s = n + 3 # layers per octave
        self.o = int(math.log2(min(img.shape[0], img.shape[1]))) - 3 # number of octaves
        self.Gaussian_Filter = self.Get_All_Gaussian() # one Gaussian kernel per layer
        self.G, self.DoG = self.Get_GDoG() # all Gaussian and DoG images
        self.KeyPoints = np.array(self.Cal_KeyPoints()) # keypoint positions and orientations
        self.descriptors = self.Cal_Descriptors() # keypoint descriptors
    def Get_All_Gaussian(self):
        """
        Build the one-dimensional Gaussian kernels, one per layer.
        :return: a list (a list because the lengths differ); each entry is one kernel
        """
        k = np.math.pow(2, 1 / self.n)
        Gaussian_F = [func.OneD_Gaussian(self.sigma * k ** i) for i in range(self.s)]
        return Gaussian_F
    def Get_GDoG(self):
        """
        Build the Gaussian pyramid and the DoG images.
        :return: (Gaussian pyramid, DoG pyramid)
        """
        img0 = self.Img
        Gau, DoG = [], []
        for i in range(self.o): # o octaves in total
            G = [func.TwoD_Convolve(img0, self.Gaussian_Filter[j]) for j in range(self.s)]
            Gau.append(G)
            # The third-from-last image, downsampled by 2, seeds the next octave.
            img0 = G[-3][1::2, 1::2]
            DoG.append([G[j + 1] - G[j] for j in range(self.s - 1)])
        return Gau, DoG
    def Is_Local_Extrema(self, onow, snow, x, y, max_step=5, border=5):
        """
        Refine an extremum's position by iterative quadratic interpolation
        and reject low-contrast / edge-like candidates.
        :param onow: current octave
        :param snow: current layer
        :param x,y: position
        :param max_step: maximum number of refinement iterations
        :param border: margin used to reject points near the image edge
        :return: (KEYPOINTS instance or None, refined x, refined y)
        """
        key_p = KEYPOINTS()
        h = 1.0
        d1, d22, d3 = 0.5 * h, h, 0.25 * h
        img, i = self.DoG[onow][snow], 0
        while i < max_step:
            # If the solution drifts out of bounds, give up on this candidate.
            if snow < 1 or snow > self.n or y < border or y >= img.shape[1] - border or \
                    x < border or x >= img.shape[0] - border:
                return None, None, None
            # The current DoG layer and its two scale neighbours.
            img, pre, nex = self.DoG[onow][snow], self.DoG[onow][snow - 1], self.DoG[onow][snow + 1]
            # First derivatives (gradient) by central differences.
            dg = np.array([(img[x][y + 1] - img[x][y - 1]) * d1, (img[x + 1][y] - img[x - 1][y]) * d1,
                           (nex[x][y] - pre[x][y]) * d1])
            # Hessian matrix.
            d2 = img[x][y] * 2
            dxx = (img[x][y + 1] + img[x][y - 1] - d2) * d22
            dyy = (img[x + 1][y] + img[x - 1][y] - d2) * d22
            dss = (nex[x][y] + pre[x][y] - d2) * d22
            dxy = (img[x + 1][y + 1] - img[x + 1][y - 1] - img[x - 1][y + 1] + img[x - 1][y - 1]) * d3
            dxs = (nex[x][y + 1] - nex[x][y - 1] - pre[x][y + 1] + pre[x][y - 1]) * 0.25 * d3
            dys = (nex[x + 1][y] - nex[x - 1][y] - pre[x + 1][y] + pre[x - 1][y]) * 0.25 * d3
            H = np.array([[dxx, dxy, dxs],
                          [dxy, dyy, dys],
                          [dxs, dys, dss]])
            # Newton-style offset: X = -H^-1 * gradient.
            X = - np.linalg.pinv(H) @ dg
            # If every component is below 0.5 the solution is stable; stop.
            if np.abs(X).max() < 0.5:
                break
            # Update the estimate and go into the next iteration.
            y, x, snow = int(np.round(y + X[0])), int(np.round(x + X[1])), int(np.round(snow + X[2]))
            i += 1
        if i >= max_step:
            return None, x, y
        # NOTE(review): `x < max_step` below looks like it was meant to be
        # `x < border` (as in the in-loop check) — confirm before changing.
        if snow < 1 or snow > self.n or y < border or y >= img.shape[1] - border or \
                x < max_step or x >= img.shape[0] - border:
            return None, None, None
        # Reject responses that are too weak (low contrast).
        ans = img[x][y] * h + 0.5 * (dg @ X)
        if np.abs(ans) * self.n < 0.04:
            return None, x, y
        key_p.val = np.abs(ans) * self.n
        # Use the trace and determinant of the Hessian to bound the ratio
        # of principal curvatures (edge-response elimination).
        tr, det = dxx + dyy, dxx * dyy - dxy * dxy
        if det <= 0 or tr * tr >= 12.1 * det:
            return None, x, y
        # Accept the keypoint; coordinates are scaled back to octave 0.
        key_p.x, key_p.y = (x + X[1]) * (1 << onow), (y + X[0]) * (1 << onow)
        key_p.o = onow
        key_p.layer = snow
        key_p.sigo = self.sigma * np.power(2.0, (snow + X[2]) / self.n)
        key_p.r = int(np.round(3 * 1.52 * key_p.sigo)) # support radius
        return key_p, x, y
    def Get_MainDir(self, kp, x, y):
        '''
        Compute the dominant orientation histogram of a keypoint.
        :param kp: keypoint information
        :param x: x coordinate (after refinement)
        :param y: y coordinate
        :return: (histogram maximum, smoothed 36-bin histogram)
        '''
        signow = 1.52 * kp.sigo
        exp_scale = -1.0 / (2.0 * signow * signow)
        img = self.G[kp.o][kp.layer]
        # Per-pixel gradient components and Gaussian weights of the voters.
        DX, DY, W = [], [], []
        # Initial (empty) histogram.
        ans = [0] * self.BinNum
        # Pixel range contributing to the gradient histogram.
        k = 0
        for i in range(-kp.r, kp.r + 1):
            xx = x + i
            if xx <= 0 or xx >= img.shape[0] - 1:
                continue
            for j in range(-kp.r, kp.r + 1):
                yy = y + j
                if yy <= 0 or yy >= img.shape[1] - 1:
                    continue
                dx = img[xx][yy + 1] - img[xx][yy - 1]
                dy = img[xx - 1][yy] - img[xx + 1][yy]
                DX.append(dx)
                DY.append(dy)
                W.append((i * i + j * j) * exp_scale)
        # Orientation, magnitude and Gaussian weight of every voter.
        W = np.exp(np.array(W))
        DX, DY = np.array(DX), np.array(DY)
        Ori = np.arctan2(DY, DX) * 180 / np.pi
        Mag = (np.array(DY) ** 2 + np.array(DX) ** 2) ** 0.5
        # Accumulate each voter into its histogram bin.
        for k in range(Ori.shape[0]):
            bin = int(np.round((self.BinNum / 360.0) * Ori[k]))
            if bin >= self.BinNum:
                bin -= self.BinNum
            elif bin < 0:
                bin += self.BinNum
            ans[bin] += W[k] * Mag[k]
        # Pad the histogram circularly for Gaussian smoothing.
        temp = [ans[self.BinNum - 1], ans[self.BinNum - 2], ans[0], ans[1]]
        ans.insert(0, temp[0])
        ans.insert(0, temp[1])
        ans.insert(len(ans), temp[2])
        ans.insert(len(ans), temp[3])
        # Smooth with a 5-tap [1 4 6 4 1]/16 kernel.
        hist = []
        for i in range(self.BinNum):
            hist.append(
                (ans[i] + ans[i + 4]) * (1.0 / 16.0) + (ans[i + 1] + ans[i + 3]) * (4.0 / 16.0) +
                ans[i + 2] * (6.0 / 16.0))
        return max(hist), hist
    def Cal_KeyPoints(self):
        """
        Detect keypoints over all octaves/layers.
        :return: list of KEYPOINTS (one per accepted orientation)
        """
        boder = 1 # edge margin
        key_points = []
        for oo in range(self.o):
            for ss in range(1, self.s - 2):
                img_now, img_back, img_nex = self.DoG[oo][ss], self.DoG[oo][ss - 1], self.DoG[oo][ss + 1]
                for i in range(boder,img_now.shape[0]-boder):
                    for j in range(boder,img_now.shape[1]-boder):
                        val = img_now[i][j]
                        # Contrast threshold plus a first 3x3x3 extremum test.
                        if np.abs(val) > self.the and \
                                ((val < 0 and val <= img_now[i - 1:i + 2, j - 1:j + 2].min() and \
                                  val <= img_back[i - 1:i + 2, j - 1:j + 2].min() and val <= img_nex[i - 1:i + 2, j - 1:j + 2].min()) \
                                 or \
                                 (val > 0 and val >= img_now[i - 1:i + 2, j - 1:j + 2].max() and \
                                  val >= img_back[i - 1:i + 2, j - 1:j + 2].max() and val >= img_nex[i - 1:i + 2,j - 1:j + 2].max())):
                            # Refine the candidate and validate it.
                            kp, x, y = self.Is_Local_Extrema(oo, ss, i, j)
                            if kp is None:
                                continue
                            # Dominant-orientation peak and its histogram.
                            max_D, hist = self.Get_MainDir(kp, x, y)
                            oth_D = max_D * 0.8 # peaks above 80% of the max become extra orientations
                            for k in range(self.BinNum):
                                # Parabolic interpolation around each peak bin.
                                L = (k - 1) % self.BinNum
                                R = (k + 1) % self.BinNum
                                if hist[k] > hist[L] and hist[k] > hist[R] and hist[k] >= oth_D:
                                    bin = k + 0.5 * (hist[L] - hist[R]) / (hist[L] - 2 * hist[k] + hist[R])
                                    if bin < 0:
                                        bin = self.BinNum + bin
                                    elif bin >= self.BinNum:
                                        bin = bin - self.BinNum
                                    kp.dir = (360.0 / self.BinNum) * bin
                                    key_points.append(copy.deepcopy(kp))
        return key_points
    def calcSIFTDescriptor(self, dex, d=4, n=8):
        """Build the d*d*n (default 128-dim) descriptor of keypoint *dex*."""
        kpt = self.KeyPoints[dex]
        scale = 1.0 / (1 << kpt.o) # scale factor back into this octave
        scl = kpt.sigo # scale of the keypoint within its octave
        pt = [int(np.round(kpt.y * scale)), int(np.round(kpt.x * scale))] # rounded coordinates
        img = self.G[kpt.o][kpt.layer] # pyramid image the point lives in
        rows, cols = img.shape[0], img.shape[1]
        ori = kpt.dir
        # Initial parameters.
        cos_t = np.cos(ori * (np.pi / 180)) # cosine of the dominant orientation
        sin_t = np.sin(ori * (np.pi / 180)) # sine of the dominant orientation
        bins_per_rad = n / 360.0
        exp_scale = -1.0 / (d * d * 0.5) # Gaussian weighting parameter
        hist_width = 3 * scl # side length of one sub-region
        R = int(np.round(hist_width * 1.4142135623730951 * (d + 1) * 0.5)) # sampling radius
        cos_t /= hist_width
        sin_t /= hist_width
        dst, X, Y, YBin, XBin, W = [], [], [], [], [], []
        hist = [0.0] * ((d + 2) * (d + 2) * (n + 2))
        # Visit every sample inside the rotated circular window.
        v = d // 2 - 0.5
        for i in range(-R, R + 1):
            for j in range(-R, R + 1):
                yrot = j * sin_t + i * cos_t
                xrot = j * cos_t - i * sin_t
                ybin = yrot + v
                xbin = xrot + v
                y = pt[1] + i
                x = pt[0] + j
                if d > ybin > -1 < xbin < d and 0 < y < rows - 1 and 0 < x < cols - 1:
                    X.append(img[y, x + 1] - img[y, x - 1])
                    Y.append(img[y - 1, x] - img[y + 1, x])
                    YBin.append(ybin)
                    XBin.append(xbin)
                    W.append((xrot * xrot + yrot * yrot) * exp_scale)
        # Orientation, magnitude and weight of every sample.
        length = len(W)
        Y, X = np.array(Y), np.array(X)
        Ori = np.arctan2(Y, X) * 180 / np.pi
        Mag = (X ** 2 + Y ** 2) ** 0.5
        W = np.exp(np.array(W))
        # Distribute each sample into the 3D histogram.
        for k in range(length):
            ybin, xbin, obin = YBin[k], XBin[k], (Ori[k] - ori) * bins_per_rad
            y0, x0, o0 = int(ybin), int(xbin), int(obin)
            ybin -= y0
            xbin -= x0
            obin -= o0
            mag = Mag[k] * W[k]
            # NOTE(review): this wraps the orientation bin unconditionally
            # (adds n when negative, else subtracts n); the usual form only
            # subtracts when o0 >= n — confirm intended behavior.
            o0 = o0 + (n if o0 < 0 else -n)
            # Trilinear interpolation over (row, column, orientation).
            v_r1 = mag * ybin
            v_r0 = mag - v_r1
            v_rc11 = v_r1 * xbin
            v_rc10 = v_r1 - v_rc11
            v_rc01 = v_r0 * xbin
            v_rc00 = v_r0 - v_rc01
            v_rco111 = v_rc11 * obin
            v_rco110 = v_rc11 - v_rco111
            v_rco101 = v_rc10 * obin
            v_rco100 = v_rc10 - v_rco101
            v_rco011 = v_rc01 * obin
            v_rco010 = v_rc01 - v_rco011
            v_rco001 = v_rc00 * obin
            v_rco000 = v_rc00 - v_rco001
            idx = ((y0 + 1) * (d + 2) + x0 + 1) * (n + 2) + o0
            hist[idx] += v_rco000
            hist[idx + 1] += v_rco001
            hist[idx + (n + 2)] += v_rco010
            hist[idx + (n + 3)] += v_rco011
            hist[idx + (d + 2) * (n + 2)] += v_rco100
            hist[idx + (d + 2) * (n + 2) + 1] += v_rco101
            hist[idx + (d + 3) * (n + 2)] += v_rco110
            hist[idx + (d + 3) * (n + 2) + 1] += v_rco111
        # Fold the circular orientation padding back and flatten the result.
        for i in range(d):
            for j in range(d):
                idx = ((i + 1) * (d + 2) + (j + 1)) * (n + 2)
                hist[idx] += hist[idx + n]
                hist[idx + 1] += hist[idx + n + 1]
                for k in range(n):
                    dst.append(hist[idx + k])
        # Normalize and clip against the 0.2 threshold, then rescale to 0..255.
        dst = np.array(dst[:d*d*n])
        thr = np.sqrt((dst**2).sum()) * 0.2
        for i in range(dst.shape[0]):
            dst[i] = min(dst[i], thr)
        nrm2 = np.sqrt((dst**2).sum())
        nb = 512 / max(nrm2, 1.19209290E-07)
        for i in range(dst.shape[0]):
            dst[i] = min(max(dst[i] * nb, 0), 255)
        return dst
    def Cal_Descriptors(self):
        # Descriptor matrix: one row per keypoint.
        descriptors = []
        for i in range(len(self.KeyPoints)):
            descriptors.append(self.calcSIFTDescriptor(i))
        return np.array(descriptors,dtype='float32')
    def Get_inArray(self):
        # Return (N,2) float32 coordinates plus descriptors (plain arrays).
        kp = np.array([[X.y, X.x] for X in self.KeyPoints], dtype='float32')
        return kp, self.descriptors
    def Get_inCV(self):
        # Return OpenCV KeyPoint objects plus descriptors, for cv2 matchers.
        kp = np.array([cv2.KeyPoint(X.y, X.x, X.r, X.dir, X.val, X.o) for X in self.KeyPoints])
        return kp, self.descriptors
    def Show_d(self):
        # Plot up to 100 random keypoints (position + orientation arrow)
        # over the original image; channels are swapped for BGR->RGB display.
        imgnow = copy.deepcopy(self.ori)
        if len(imgnow.shape) > 2:
            imgnow[:, :, 0] = self.ori[:, :, 2]
            imgnow[:, :, 2] = self.ori[:, :, 0]
        plt.imshow(imgnow)
        plt.axis('off')
        su = self.KeyPoints.shape[0]
        dex = np.random.choice(a=su, size=min(100, su), replace=False, p=None)
        for x in self.KeyPoints[dex]:
            dx = 2 * x.r * np.cos(x.dir / 180 * np.pi)
            dy = 2 * x.r * np.sin(x.dir / 180 * np.pi)
            plt.arrow(x.y, x.x, dy, dx, head_width=x.r, head_length=x.r, fc='blue', ec='blue')
            plt.scatter(x.y, x.x, s=2, c='r')
        plt.show()
| [
"1050743651@qq.com"
] | 1050743651@qq.com |
49a897a64a6e54e1d849ff60e53984509d0e9230 | 715a3d93f97f88d8935db34fecb0a595bcca1b82 | /random_codes/conway's_game_of_life/tableConstructor.py | aea2c1981fd8363819922f20efb5be6bea6873d9 | [] | no_license | juliaokmenezes/the-codes-dark-side | 023854e304adbbbfa09116fc97050d95bdbd6e26 | dbcf15e3c58046e8ae49e07d261726e1cfab250c | refs/heads/main | 2023-08-27T22:53:17.535179 | 2021-10-31T19:53:07 | 2021-10-31T19:53:07 | 421,542,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py |
#parametros: quantY, quantX, posCelula
#quantY = nº de linhas
#quantX = numero de casas por linha
#posCelula = array de arrays, onde cada item representa as coordenadas de uma celula viva
def setScenario(quantY, quantX, posCelula):
casa = [1, 1] #[x,y]
#o primeiro loop, vai construir a primeira linha, o seguno a segunda e por ai vai
while casa[1] <= quantY:
linha = ''
while casa[0] <= quantX:
if casa in posCelula:
caractere = 'O'
else:
caractere = '-'
linha += caractere
casa[0] += 1
print(linha)
casa[0], casa[1] = 1, casa[1] + 1
retorno = {
'posCelula': posCelula,
}
return retorno
| [
"yohanhxh@gmail.com"
] | yohanhxh@gmail.com |
afffb5c6b0d291c1cc0f2e6fa4487ef0d5ef15a4 | 043e2f54fcdac26eff8608e0e624170da24934d6 | /src/projects/migrations/0014_auto_20200206_0933.py | 8d6bad9d6da081c29f9d3fa2a6a6982b69a972e9 | [] | no_license | russianmax/RentSpot | a83a534490b340ff2e67b96eaf433526a701bad3 | c66eaf57cd01892ac54449bb8a702c32707b6a2e | refs/heads/master | 2023-07-25T21:01:45.011324 | 2021-08-27T13:03:32 | 2021-08-27T13:03:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # Generated by Django 3.0.2 on 2020-02-06 09:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0013_listing_images_listingid'),
]
operations = [
migrations.AddField(
model_name='listing_database',
name='image',
field=models.ImageField(default='default.jpg', upload_to='house_preview'),
),
migrations.DeleteModel(
name='Listing_Images',
),
]
| [
"maksimk2@winlabs.com"
] | maksimk2@winlabs.com |
3ccbf8883c86965571f090c36bced556f00efdd1 | f60ec2c12c6d56be853bec9c222b8ea91b170130 | /apps/pig/src/pig/models.py | a38ff955d4c0be321ef26bdb2d085598b63d858f | [
"Apache-2.0"
] | permissive | jackerxff/hue | b33911f62129cc949096dd48b3fdcf0584bbba69 | 2418050cafd75aab043900c28a867f5c13bc1c0e | refs/heads/master | 2020-12-29T02:54:39.947205 | 2013-04-05T21:25:07 | 2013-04-05T21:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,071 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import json
except ImportError:
import simplejson as json
import posixpath
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from desktop.lib.exceptions_renderable import PopupException
from hadoop.fs.hadoopfs import Hdfs
from oozie.models import Workflow
class Document(models.Model):
  """Base model for user-owned documents with a simple edit-permission check."""
  owner = models.ForeignKey(
      User, db_index=True,
      verbose_name=_t('Owner'),
      help_text=_t('User who can modify the job.'))
  is_design = models.BooleanField(
      default=True, db_index=True,
      verbose_name=_t('Is a user document, not a document submission.'),
      help_text=_t('If the document is not a submitted job but a real query, script, workflow.'))

  def is_editable(self, user):
    # Superusers may edit anything; otherwise only the owner may.
    return user.is_superuser or self.owner == user

  def can_edit_or_exception(self, user, exception_class=PopupException):
    # Guard-clause form: raise for non-editors, otherwise report success.
    if not self.is_editable(user):
      raise exception_class(_('Only superusers and %s are allowed to modify this document.') % user)
    return True
class PigScript(Document):
  """A Pig script stored as a JSON blob in the ``data`` text column."""
  _ATTRIBUTES = ['script', 'name', 'properties', 'job_id', 'parameters', 'resources']

  data = models.TextField(default=json.dumps({
      'script': '',
      'name': '',
      'properties': [],
      'job_id': None,
      'parameters': [],
      'resources': []
  }))

  def update_from_dict(self, attrs):
    # Merge the known attributes over the stored JSON, skipping keys that
    # are absent or explicitly None, then re-serialize.
    merged = self.dict
    merged.update({key: attrs[key] for key in PigScript._ATTRIBUTES if attrs.get(key) is not None})
    self.data = json.dumps(merged)

  @property
  def dict(self):
    # Deserialized view of the JSON-encoded ``data`` column.
    return json.loads(self.data)
class Submission(models.Model):
  # Links a Pig script to the Oozie workflow it was submitted as.
  script = models.ForeignKey(PigScript)
  workflow = models.ForeignKey(Workflow)
def create_or_update_script(id, name, script, user, parameters, resources, is_design=True):
  """Fetch the script with *id* and update it, enforcing edit permission.

  If the script does not exist, or *user* may not edit it, a new script
  owned by *user* is created instead. Returns the saved-in-memory
  PigScript (the caller is responsible for persisting it).
  """
  try:
    pig_script = PigScript.objects.get(id=id)
    pig_script.can_edit_or_exception(user)
  # BUGFIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
  # `except Exception` keeps the deliberate fall-back-to-create behavior
  # (missing id, unparsable id, or permission failure) without that hazard.
  except Exception:
    pig_script = PigScript.objects.create(owner=user, is_design=is_design)

  pig_script.update_from_dict({
      'name': name,
      'script': script,
      'parameters': parameters,
      'resources': resources
  })

  return pig_script
def get_scripts(user, max_count=200):
  """Return up to *max_count* of *user*'s scripts, newest first, as plain dicts."""
  results = []
  for script in PigScript.objects.filter(owner=user).order_by('-id')[:max_count]:
    # Decode the JSON payload once per row and flatten it for the client.
    payload = script.dict
    results.append({
      'id': script.id,
      'name': payload['name'],
      'script': payload['script'],
      'parameters': payload['parameters'],
      'resources': payload['resources'],
      'isDesign': script.is_design,
    })
  return results
def get_workflow_output(oozie_workflow, fs):
  """Return the workflow's output path from its configuration.

  The path comes from the 'workflowRoot' configuration key; a path that
  no longer exists on *fs* is reported as None.
  """
  # TODO: guess from the STORE or parameters
  output = oozie_workflow.conf_dict.get('workflowRoot')
  # Drop truthy paths that are gone from the filesystem.
  if output and not fs.exists(output):
    output = None
  return output
def hdfs_link(url):
  """Map an HDFS URL to the matching filebrowser view link.

  Falsy URLs and URLs with no path component are returned unchanged.
  """
  if not url:
    return url
  path = Hdfs.urlsplit(url)[2]
  if not path:
    return url
  # Absolute paths map to the plain view; others are home-relative.
  if path.startswith(posixpath.sep):
    return "/filebrowser/view" + path
  return "/filebrowser/home_relative_view/" + path
| [
"romain@cloudera.com"
] | romain@cloudera.com |
fbb2e4583fb58c848d33e8cc0e27639a1969d7a3 | 4045d51fa86f78876ab8b9edf367e5e2d98743a2 | /pyzscaler/zia/config.py | 711cba3351cf53853e481ff3dcb75bc42636188f | [
"MIT"
] | permissive | TrendingTechnology/pyZscaler | 861f67503ec5549651b15735310eb88af3d38e52 | 07ca55ce9c166cf7cf07f6bc3aa3da375d3f6d96 | refs/heads/main | 2023-08-01T08:32:16.515308 | 2021-09-21T23:41:00 | 2021-09-21T23:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from restfly.endpoint import APIEndpoint
class ActivationAPI(APIEndpoint):
    """Endpoints for querying and activating ZIA configuration changes."""

    def status(self):
        """
        Returns the activation status for a configuration change.

        Returns:
            :obj:`str`
                Configuration status.

        Examples:
            >>> config_status = zia.config.status()

        """
        response = self._get("status")
        return response.status

    def activate(self):
        """
        Activates configuration changes.

        Returns:
            :obj:`str`
                Configuration status.

        Examples:
            >>> config_activate = zia.config.activate()

        """
        response = self._post("status/activate")
        return response.status
| [
"me@mitchkelly.com.au"
] | me@mitchkelly.com.au |
9d14284a90e6506b27414715f3d2e4972a67bb2b | cb1d2c5c8bce3a54c0d282182efc0b5c2811891f | /Code.py | a25e455a56f2b495c70424c84b14d4208117865e | [] | no_license | surajky/weighted-tf-idf | 8c4cd38bbbca4ba7c7e97d570e77b28326331ab3 | ffa79da10d7d30c5ccbc868133d29bd2824459dc | refs/heads/master | 2021-04-09T15:43:15.147470 | 2018-03-18T16:31:28 | 2018-03-18T16:31:28 | 125,744,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | import math
"""Frequency of words in each Document"""
documents= [[1,0,5,5,3],
[0,1,2,5,5],
[1,3,0,3,4],
[3,2,1,4,5],
[4,0,1,5,0]]
query=[4,2,1,3,0]
"""TF"""
temp_documents=documents
temp_query=query
for i in range(0,5):
sum=0
for j in range(0,5):
sum=sum+temp_documents[i][j]
for k in range(0,5):
temp_documents[i][k]=temp_documents[i][k]/sum
"""for query"""
sum=0
for i in range(0,5):
sum=sum+temp_query[i]
for i in range(0,5):
temp_query[i]=temp_query[i]/sum
"""idf"""
idf=[0,0,0,0,0]
count=0
for i in range(0,5):
count=0
for j in range(0,5):
if(temp_documents[j][i]!=0):
count=count+1
idf[i]=math.log2(5/count)
"""tf*idf"""
for i in range(0,5):
for j in range(0,5):
temp_documents[i][j]=(temp_documents[i][j]*idf[j])
for i in range(0,5):
temp_query[i]=temp_query[i]*idf[i]
rank=[0,0,0,0,0]
"""distance"""
sum2=0
for i in range(0,5):
sum=0
sum2=0
for j in range(0,5):
sum=sum+temp_documents[i][j]*temp_query[j]
sum2=sum2+(temp_documents[i][j]-temp_query[j])**2
sum2=math.sqrt(sum2)
sum=sum/sum2
rank[i]=sum
temp_rank=[0,0,0,0,0]
for i in range(0,5):
temp_rank[i]=rank[i]
list.sort(temp_rank,reverse=True)
print ("Rank of pages according to similarity of query to Documents: ")
for i in range(0,5):
for j in range(0,5):
if(temp_rank[i]==rank[j]):
print('Doc ' + str(j+1))
| [
"noreply@github.com"
] | noreply@github.com |
04c39588a75c7d1646fb96aeb656bbb9548a976f | c1b56d50c68bf32e900349cbab4bfd043a79a237 | /Pythagorean Triplet.py | 231f1b5449311249ea7648796d95434b151ff9d6 | [] | no_license | divanshu79/GeeksForGeeks-solutions | c7a5f0be04e8376e72f933c35fb2d09641fe7130 | caf77aad9c53d5d05c87318806097d750864a6e3 | refs/heads/master | 2020-03-25T07:56:14.997786 | 2018-08-05T06:37:22 | 2018-08-05T06:37:22 | 143,589,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | from collections import defaultdict
for _ in range(int(input())):
n = int(input())
arr = list(map(int, input().split()))
def_dict = defaultdict(int)
sq_list = []
for i in arr:
def_dict[i*i] = 1
sq_list.append(i*i)
sum_list = []
flag = 0
for i in range(n-1):
for j in range(i+1, n):
if def_dict[sq_list[i] + sq_list[j]] == 1:
flag = 1
print(arr[i], arr[j])
break
if flag == 1:
break
if flag == 1:
print('Yes')
else:
print('No') | [
"noreply@github.com"
] | noreply@github.com |
29eaf7dca764f8db0e109f82e350645c5ee1f812 | c741f04141784a2571d2d27d95e0d994e4584ab1 | /learning/py3/连接mysql/PyMySQL/test3.py | f72ccb6eb48887eb51cf2b269456a0e175b90e48 | [] | no_license | haodonghui/python | bbdece136620bc6f787b4942d6e1760ed808afd4 | 365062ba54297c81093b7f378742e76d438658b7 | refs/heads/master | 2022-02-03T23:52:37.288503 | 2022-01-27T05:23:25 | 2022-01-27T05:23:25 | 191,729,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from pythonmysql3 import DB
if __name__ == '__main__':
with DB(host='59.110.228.110', port=3306, database='test_tea_uc_0', user='test_tea_uc_0',
passwd='L~+SJ*F^kon[t+10l6') as db:
db.execute('select * from uc_user limit 0,10')
print(db)
for i in db:
print(i)
| [
"haodonghui@yestae.com"
] | haodonghui@yestae.com |
9486d6ebacf93445e04c3afaaa25288af94b06d0 | 1313fb47b394b48d28830d09bb91aade3db3523c | /style_transfer2.py | 01687f90ebee5e21a76cf4ffaabe980c1f6a7ea3 | [] | no_license | mbalali/CNN | 68c5e7787ae4958b16e304901afa0d7d5df77a1a | f8551a105236174a7205146e104c757c72425345 | refs/heads/main | 2023-02-25T01:55:51.277523 | 2021-02-01T16:11:05 | 2021-02-01T16:11:05 | 335,006,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,261 | py | # https://deeplearningcourses.com/c/advanced-computer-vision
# https://www.udemy.com/advanced-computer-vision
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
# In this script, we will focus on generating an image
# with the same style as the input image.
# But NOT the same content.
# It should capture only the essence of the style.
from keras.models import Model, Sequential
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.applications.vgg16 import VGG16
from style_transfer1 import VGG16_AvgPool, unpreprocess, scale_img
# from skimage.transform import resize
from scipy.optimize import fmin_l_bfgs_b
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import keras.backend as K
def gram_matrix(img):
# input is (H, W, C) (C = # feature maps)
# we first need to convert it to (C, H*W)
X = K.batch_flatten(K.permute_dimensions(img, (2, 0, 1)))
# now, calculate the gram matrix
# gram = XX^T / N
# the constant is not important since we'll be weighting these
G = K.dot(X, K.transpose(X)) / img.get_shape().num_elements()
return G
def style_loss(y, t):
return K.mean(K.square(gram_matrix(y) - gram_matrix(t)))
# let's generalize this and put it into a function
def minimize(fn, epochs, batch_shape):
t0 = datetime.now()
losses = []
x = np.random.randn(np.prod(batch_shape))
for i in range(epochs):
x, l, _ = fmin_l_bfgs_b(
func=fn,
x0=x,
maxfun=20
)
x = np.clip(x, -127, 127)
print("iter=%s, loss=%s" % (i, l))
losses.append(l)
print("duration:", datetime.now() - t0)
plt.plot(losses)
plt.show()
newimg = x.reshape(*batch_shape)
final_img = unpreprocess(newimg)
return final_img[0]
if __name__ == '__main__':
# try these, or pick your own!
path = 'styles/starrynight.jpg'
# path = 'styles/flowercarrier.jpg'
# path = 'styles/monalisa.jpg'
# path = 'styles/lesdemoisellesdavignon.jpg'
# load the data
img = image.load_img(path)
# convert image to array and preprocess for vgg
x = image.img_to_array(img)
# look at the image
# plt.imshow(x)
# plt.show()
# make it (1, H, W, C)
x = np.expand_dims(x, axis=0)
# preprocess into VGG expected format
x = preprocess_input(x)
# we'll use this throughout the rest of the script
batch_shape = x.shape
shape = x.shape[1:]
# let's take the first convolution at each block of convolutions
# to be our target outputs
# remember that you can print out the model summary if you want
vgg = VGG16_AvgPool(shape)
# Note: need to select output at index 1, since outputs at
# index 0 correspond to the original vgg with maxpool
symbolic_conv_outputs = [
layer.get_output_at(1) for layer in vgg.layers \
if layer.name.endswith('conv1')
]
# pick the earlier layers for
# a more "localized" representation
# this is opposed to the content model
# where the later layers represent a more "global" structure
# symbolic_conv_outputs = symbolic_conv_outputs[:2]
# make a big model that outputs multiple layers' outputs
multi_output_model = Model(vgg.input, symbolic_conv_outputs)
# calculate the targets that are output at each layer
style_layers_outputs = [K.variable(y) for y in multi_output_model.predict(x)]
# calculate the total style loss
loss = 0
for symbolic, actual in zip(symbolic_conv_outputs, style_layers_outputs):
# gram_matrix() expects a (H, W, C) as input
loss += style_loss(symbolic[0], actual[0])
grads = K.gradients(loss, multi_output_model.input)
# just like theano.function
get_loss_and_grads = K.function(
inputs=[multi_output_model.input],
outputs=[loss] + grads
)
def get_loss_and_grads_wrapper(x_vec):
l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)])
return l.astype(np.float64), g.flatten().astype(np.float64)
final_img = minimize(get_loss_and_grads_wrapper, 10, batch_shape)
plt.imshow(scale_img(final_img))
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
dda99e7ed45e747f0f612816bdf88ea51861dc51 | fd4dbf34dd2197d7429c6da99ae54cf0f2529934 | /jaden/jaden-pairbot/src/pairbot/pb_control/catkin_generated/generate_cached_setup.py | 1d72b28bc20771c5932ad7ccbe58eb5c50660419 | [] | no_license | PAIRLABS/InfoExchange | ed80a92f3b0c7528d93172284b6f218368f57b98 | 905c20312bdf8b93f6615cb9bb75562006061315 | refs/heads/master | 2022-02-17T14:47:20.551203 | 2019-08-30T01:48:12 | 2019-08-30T01:48:12 | 153,047,928 | 0 | 4 | null | 2019-08-30T06:34:02 | 2018-10-15T03:26:39 | Makefile | UTF-8 | Python | false | false | 1,355 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/somal/catkin_ws/devel;/opt/ros/indigo".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
# Generate the cached environment for this devel space and write it out as
# an executable shell script (setup_cached.sh).
code = generate_environment_script('/home/somal/catkin_ws/src/gazebo_ros_demos/pb_control/devel/env.sh')
output_filename = '/home/somal/catkin_ws/src/gazebo_ros_demos/pb_control/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Mark the generated script as executable for its owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"jadenlin0106@nctu.edu.tw"
] | jadenlin0106@nctu.edu.tw |
55b12e55ecbc1f8f8ad159318d84990b6c4a6862 | 09978275a3edf0ac65e271692e383efde69c2f1c | /upload_to_s3.py | c069aecc01f2375f690180e2a2a43f681ced7d9f | [] | no_license | tekurioptions/Test | e0d17a1b89e0e7c9beccb600daab9f836d5e7d4b | 90b2558598ef9637a038ea8a01590f848a2eb5a0 | refs/heads/master | 2021-01-19T15:23:40.690317 | 2020-02-03T07:34:28 | 2020-02-03T07:34:28 | 100,965,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | from datetime import date, timedelta, datetime
from airflow import DAG
from airflow.operators import DummyOperator, PythonOperator
import boto3
s3 = boto3.resource('s3')
def upload_file_to_S3(filename, key, bucket_name):
    """Upload the local file *filename* to S3 bucket *bucket_name* under *key*."""
    bucket = s3.Bucket(bucket_name)
    bucket.upload_file(filename, key)
# Default task arguments shared by every task in this DAG.
DAG_DEFAULT_ARGS = {
    'owner': 'Nagendra',
    'depends_on_past': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=1)
}
# Run-once DAG that uploads one local CSV file to S3.
dag = DAG('upload_to_s3', description='Simple S3 Test',
          start_date=datetime(2019, 4, 4),
          schedule_interval='@once',
          default_args=DAG_DEFAULT_ARGS, catchup=False)
# Placeholder first task; the upload task is chained after it below.
start_task = DummyOperator(task_id='dummy_task', retries=3, dag=dag)
upload_to_S3_task = PythonOperator(
    task_id='upload_to_S3',
    python_callable=upload_file_to_S3,
    op_kwargs={
        'filename': '/root/airflow-files/input/data.csv',
        'key': 'my_S3_data.csv',
        'bucket_name': 'nagdeep',
    },
    dag=dag)
start_task >> upload_to_S3_task | [
"noreply@github.com"
] | noreply@github.com |
bc2ec15906048fc42b645664a4552aa614fffaec | 4cbe0eef8694a7f5443e6d276577d3ca08d15456 | /cpt1/noneLenDemo.py | a713e854c6074bac6033c4576a506fd818583169 | [] | no_license | GSIL-Monitor/PythonLearning | 2bf313e366e395df1d27164fe79e16e948094583 | 3f20f9cdff1cef368baa6a2374e6b2cbe3871aa4 | refs/heads/master | 2020-04-19T09:11:45.169704 | 2018-11-28T09:55:01 | 2018-11-28T09:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | s=None
s1 = ''
s2 = ' '
# len() works on strings; strip() removes surrounding whitespace before
# the length is taken.
print(len(s1))
print(len(s2))
print(len(s2.strip()))
# print(len(s))
# Chained assignment binds all three names to the same None object.
t1 = t2 = t3 = None
print(t1, t2, t3)
| [
"249398363@qq.com"
] | 249398363@qq.com |
ace0305e6fa31b4f5385cfb572cd9b92098c8708 | e76a77c1338fde64dcde1376b06b247033d023f1 | /week9/movies/media.py | 01537f0e7c312b1a293eac692dd7dd1ab9e07712 | [] | no_license | keven/python-class | 88fa01d40562b8316e2648aa55ed76ddf34c382b | 40e8fe293d83192e808ceeb33735555c8559ee9b | refs/heads/master | 2021-08-22T20:27:53.739298 | 2017-12-01T06:19:52 | 2017-12-01T06:19:52 | 106,586,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import webbrowser
class Movie():
    """Simple record of a movie's display metadata.

    Attributes: title, storyline, poster_image_url, trailer_youtube_url.
    """

    def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
        """Store the title, storyline, poster URL and trailer URL."""
        self.title, self.storyline = movie_title, movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube

    # def show_trailer(self):
    #     webbrowser.open(self.trailer_youtube_url)
| [
"kevenlin@kevens-mbp.lan"
] | kevenlin@kevens-mbp.lan |
e1261563d4276984a49fdc653c96e81c17f4a920 | 8fb4541f884be2c83ce76d5e2deb3d4d13ef4740 | /raisingcustomexception.py | 9be4ba1354f84ce6bde06f3e2069e5aeabf2cf47 | [] | no_license | jrajpal5-singularity/python_basics | b3bbf84af1e3aa69489986b703dc6c84e6b7ed24 | 580d9c4667ef0f2e276a6d46e2899e1ed52b5b7f | refs/heads/master | 2023-07-16T18:39:06.832280 | 2021-09-10T11:59:24 | 2021-09-10T11:59:24 | 405,064,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | class CoffeeTooHotException(Exception):
    def __init__(self, msg):
        """Store *msg* as the exception message via the Exception base class."""
        super().__init__(msg)
class CoffeeTooColdException(Exception):
    """Raised when the coffee temperature is below the drinkable range."""

    def __init__(self, msg):
        # Delegate message storage to the Exception base class.
        super().__init__(msg)
class CoffeeCup:
    """A cup of coffee whose drinkability depends on its temperature."""

    def __init__(self, temperature):
        # Private (name-mangled) temperature in degrees.
        self.__temperature = temperature

    def drink_coffee(self):
        """Report drinkability; raise if the coffee is too hot or too cold."""
        temp = self.__temperature
        if temp > 85:
            print('coffee too hot')
            raise CoffeeTooHotException('coffee temp.:' + str(temp))
        if temp < 65:
            print('coffee too cold')
            raise CoffeeTooColdException('coffee temp.:' + str(temp))
        print('coffee ok to drink')
# Demo: 100 degrees is above the 85-degree limit, so this prints
# 'coffee too hot' and raises CoffeeTooHotException.
cup = CoffeeCup(100)
cup.drink_coffee()
| [
"jrajpal5@gmail.com"
] | jrajpal5@gmail.com |
cc5d314df9df7ab8de8aebd5c5e453529818409f | 2d4e0dd8d1d6d06ed2ba80ad23be92ceec6bb4f5 | /learning_scripts/sum.py | d65e3f4ed61e47d9af0999cb01a43e16aef1d824 | [] | no_license | ashwinids/python | 573f50defde3aae994c2b32e62b7292be8d08e68 | a5891c37496f75f1af6adefa7b45d7914c92a689 | refs/heads/master | 2021-01-16T00:09:30.440296 | 2017-08-10T21:46:06 | 2017-08-10T21:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | #!/usr/bin/env python
def sum(a,b):
    """Print the sum of *a* and *b* (nothing is returned).

    NOTE(review): this shadows Python's built-in sum(); the name is kept
    so the existing callers below keep working.
    """
    total = a + b
    print(total)
# Read two floats interactively and print their sum.
# NOTE(review): raw_input() is Python 2 only (input() on Python 3).
a = float(raw_input("a:"))
b = float(raw_input("b:"))
sum(a,b)
| [
"asiddappa@apple.com"
] | asiddappa@apple.com |
aeb415f66f3579ca75d34caf48ab0a0ed47746c9 | c5bd5dd4f340b38f92f559909941428f242aaacd | /project1/wsgi.py | e7233a00b97621c13de83d6fdd168a57563a2206 | [] | no_license | nadhiya05/General-form-element | 5f00454a92de5a566ddbc56d1c8fe03bcab8d5ae | e87368bb751486beec7712daa6455fb405a40e99 | refs/heads/main | 2023-03-10T01:46:32.956855 | 2021-02-25T11:26:31 | 2021-02-25T11:26:31 | 339,750,571 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for project1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before creating the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project1.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
1d1c6159d39366e7b2130cca2ed83d36fab067c6 | c96c79bb7ca3e71d609eab20ed8d68cff8ee7fe7 | /DataStructurePrograms/bankingCashCounter.py | 0049a83ecb8431b52e5fdb75741a8707cd5863a8 | [] | no_license | NikhilDusane222/Python | 25c9eb50bcd5e0e8679ece41d97129b9100e9a91 | 0183c4211a28bbddb6792978cf55da89a682f67a | refs/heads/master | 2021-05-18T13:07:07.059428 | 2020-04-12T17:23:57 | 2020-04-12T17:23:57 | 251,254,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | #Class Queue
class Queue:
    """Interactive bank cash counter: deposit, withdraw and show a balance."""

    def __init__(self):
        # Running account balance for this session.
        self.balance = 0
        print("Welcome to the Bank Cash Counter..")
        print("This is a Banking portal")

    def enqueue_deposit(self):
        """Prompt for an amount and add it to the balance."""
        amount = int(input("Enter amount to be Deposited: "))
        self.balance = self.balance + amount
        print("\nAmount Deposited:", amount)

    def dequeue_withdraw(self):
        """Prompt for an amount and subtract it when funds are sufficient."""
        amount = int(input("Enter amount to be Withdrawn: "))
        if amount > self.balance:
            print("\nInsufficient balance ")
        else:
            self.balance = self.balance - amount
            print("\nYou Withdrew:", amount)

    def queue_display(self):
        """Print the current balance."""
        print("\nNet Available Balance=", self.balance)

    def queue_exit(self):
        """Terminate the program."""
        exit()
#Main function
if __name__ == '__main__':
    q = Queue()
    try:
        # Simple menu loop; runs until the user picks option 4 (exit).
        while True:
            print("Please Enter the option that you want to make a transaction:")
            #Choice for Deposite and Withdrawn amount
            choiceNo = int(input(
                " 1. Deposite Amount to the account \n 2. Withdraw Amount from the account \n "
                "3. Display the amount \n 4. Cancel Transaction \n"))
            if choiceNo == 1:
                q.enqueue_deposit()
            elif choiceNo == 2:
                q.dequeue_withdraw()
            elif choiceNo == 3:
                q.queue_display()
            elif choiceNo == 4:
                q.queue_exit()
            else:
                print("Invalid Choice...!! Press the Correct choice")
    except ValueError:
        # Non-numeric input makes int() raise; end with a friendly message.
        print("Invalid Choice...!! Press the Correct choice")
| [
"you@example.com"
] | you@example.com |
9e978584667f84859833c2ca627d42924dc03934 | a52dbccf7ad8ad0089161cea27a6affdaeb078ac | /application/admin.py | 960ee963788f709bfe5dcab70dd0cd9f90aff31e | [] | no_license | Ganeshrawat/django_Elibrary | ea28b3231d808aa394e60be8e3da136043d5c793 | b1b77bb98c6a9cc66855d52a3cda4be1b2c2757e | refs/heads/main | 2023-03-14T21:11:17.989918 | 2021-03-24T03:16:20 | 2021-03-24T03:16:20 | 350,924,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from django.contrib import admin
from .models import library
# Register your models here.
admin.site.register(library)
| [
"noreply@github.com"
] | noreply@github.com |
50f59c198771c869cd56e7fc4b30392e3a12eca6 | d4f18631298b98ea6115a6458b625aec44de76ca | /util.py | ec2f5b4e288729cb49e99a1d3fa951ca84faa311 | [] | no_license | sarkaaa/drbd-manager | a48a6fca72b9445b78d8521906a70292e3356e11 | f56d303993f5c24d025b7271866ecfa30b42400f | refs/heads/master | 2020-06-27T05:02:41.808145 | 2011-07-21T21:48:55 | 2011-07-21T21:48:55 | 97,047,504 | 0 | 0 | null | 2017-07-12T20:05:41 | 2017-07-12T20:05:41 | null | UTF-8 | Python | false | false | 3,035 | py | #!/usr/bin/env python
import os, sys, time, socket, traceback, subprocess
log_f = os.fdopen(os.dup(sys.stdout.fileno()), "aw")
pid = None
def reopenlog(log_file):
    """Redirect logging to *log_file*, or back to stdout when it is falsy.

    Closes the previous log handle first.
    NOTE(review): mode "aw" is Python 2 only; Python 3's open() rejects it.
    """
    global log_f
    if log_f:
        log_f.close()
    if log_file:
        log_f = open(log_file, "aw")
    else:
        # Duplicate stdout so closing log_f later never closes sys.stdout.
        log_f = os.fdopen(os.dup(sys.stdout.fileno()), "aw")
def log(txt):
    """Append one line '<UTC timestamp> [<pid>] <txt>' to the log and flush.

    The process id is looked up once and cached in the module global.
    (Python 2 print-to-file syntax.)
    """
    global log_f, pid
    if not pid:
        pid = os.getpid()
    t = time.strftime("%Y%m%dT%H:%M:%SZ", time.gmtime())
    print >>log_f, "%s [%d] %s" % (t, pid, txt)
    log_f.flush()
class CommandError(Exception):
    """Raised when an external command exits non-zero.

    code:   the process exit status.
    output: the captured stdout/stderr lines.
    """
    def __init__(self, code, output):
        self.code = code
        self.output = output
    def __str__(self):
        return "CommandError(%s, %s)" % (self.code, self.output)
# [run task cmd] executes [cmd], throwing a CommandError if exits with
# a non-zero exit code.
def run(cmd, task='unknown'):
    """Run *cmd* (argv list), log the outcome and return its output lines.

    Raises CommandError when the command exits non-zero.
    (Python 2 code: '<>' is the old inequality operator.)
    """
    # stderr is merged into stdout so the caller sees a single stream.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    result = p.stdout.readlines()
    retval = p.wait ()
    if retval <> 0:
        log("%s: %s exitted with code %d: %s" % (task, repr(cmd), retval, repr(result)))
        raise(CommandError(retval, result))
    log("%s: %s" % (task, " ".join(cmd)))
    return result
def run_as_root(cmd, task='unknown'):
    """Run *cmd* via run(), prefixing it with sudo when not already root."""
    if os.geteuid() <> 0:
        cmd = [ "sudo" ] + cmd
    return run(cmd, task)
import tempfile
def make_sparse_file(size):
    """Create a sparse temporary file of *size* bytes and return its path.

    dd with count=0 and a seek just extends the file without writing data.
    """
    handle, path = tempfile.mkstemp(suffix='.md')
    os.fdopen(handle).close()
    run(["dd", "if=/dev/zero", "of=%s" % path, "bs=1", "count=0", "seek=%Ld" % size])
    return path
def block_device_sector_size(disk):
    """Return the logical sector size of *disk* in bytes (via blockdev)."""
    return int(run_as_root(["blockdev", "--getss", disk])[0].strip())
def block_device_sectors(disk):
    """Return the size of *disk* in 512-byte sectors (Python 2 long)."""
    return long(run_as_root(["blockdev", "--getsize", disk])[0].strip())
import re
def list_all_ipv4_addresses():
    """Return every IPv4 address reported by ifconfig, loopback included."""
    matches = (re.match('^\s*inet addr:(\S+) ', line)
               for line in run(["/sbin/ifconfig"]))
    return [m.group(1) for m in matches if m]
def replication_ip():
    """Return the first non-loopback IPv4 address of this host."""
    # XXX we need to define storage, replication IPs officially somehow
    non_loopback = [a for a in list_all_ipv4_addresses() if a != "127.0.0.1"]
    return non_loopback[0]
def used_ports(ip):
    """Return the ports (as strings) of TCP endpoints bound to *ip*.

    Parses `netstat -an` output and collects the port part of every TCP
    local endpoint whose address matches *ip*.
    """
    ports = []
    for line in run(["/bin/netstat", "-an"]):
        match = re.match('^tcp\s+\S+\s+\S+\s+(\S+)\s+', line)
        if not match:
            continue
        parts = match.group(1).split(':')
        if parts[0] == ip:
            ports.append(parts[1])
    return ports
def replication_port(ip):
    """Returns a port number which is currently free. Note someone else
    may come along and allocate this one for us, so we have to be prepared
    to retry."""
    free_port = 7789
    used = used_ports(ip)
    while True:
        # Fix: used_ports() returns port numbers as *strings*; the original
        # compared the int candidate against that list, so the membership
        # test could never succeed and the first candidate was always
        # returned even when it was taken.
        if str(free_port) not in used:
            return free_port
        free_port = free_port + 1
def read_file(filename):
    """Return the lines of *filename* as a list.

    Fix: the original opened ``filaname`` (typo), so every call raised
    NameError. The handle is always closed via try/finally.
    """
    f = open(filename, "r")
    try:
        return f.readlines()
    finally:
        f.close()
| [
"dave.scott@eu.citrix.com"
] | dave.scott@eu.citrix.com |
f94e3678559823245c66e0d7d213c4425c930af9 | 46abc544bb046574fdb36b61161ff53ec813eeb7 | /main.py | c9fe398212449bfc6f9d5b959b28b00022a3f603 | [] | no_license | Wayeet/API-Fritzconnect | 6ce90cf0de839e854322ac91f71638d11570cc1a | 2e2d2e320bbddecc539a6bee493f5dbc0f7913b1 | refs/heads/main | 2023-06-20T08:45:34.802422 | 2021-07-24T13:06:57 | 2021-07-24T13:06:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | import api
# Script entry point: start the API service implemented in api.py.
if __name__ == "__main__":
    api.run_api()
| [
"admin@orc-industries.de"
] | admin@orc-industries.de |
c49f65caddf0e7266d20aa73511179daeb6c0f53 | 808f60d31bf42e8e4ad1da5e38a9b2672475697f | /bin/rule_based_speaker/overtakeController.py | 8bf0ee6dc33d9865950ef3679f12d7474566d6d2 | [] | no_license | MLCS-Yonsei/Speaker | 14a0e070a8bc46cf7492bd4d4203048c376c05ef | d74aa8c218de71c3eccfc669ec4c8e145e7a9d73 | refs/heads/master | 2022-11-24T23:51:43.216388 | 2018-11-14T07:01:21 | 2018-11-14T07:01:21 | 146,603,099 | 0 | 0 | null | 2022-11-22T01:13:09 | 2018-08-29T13:22:02 | Python | UTF-8 | Python | false | false | 4,291 | py | import subprocess
import multiprocessing as mp
from threading import Thread
from multiprocessing import Pool
from queue import Empty
import time
import datetime
import os
import signal
import sqlite3
import redis
class overtakeChecker(mp.Process):
    """Background process that watches one simulator's race data in Redis
    and records overtake / overtaken events back into Redis."""

    def __init__(self,que,r,target_ip):
        # [common] basic setup
        super(overtakeChecker,self).__init__()
        self.event = mp.Event()
        self.queue = que
        self.r = r
        self.target_ip = target_ip
        self.channels = self.r.pubsub()
        self.channels.subscribe(self.target_ip)
        # Variables
        # r0_t0: this simulator's rank on the previous iteration (0 = unset).
        self.r0_t0 = 0
        # c: participant index of the car involved in the last rank change.
        self.c = False
        self.status = False

    def get_rank(self, data):
        """Return the race position of every participant, in list order."""
        ranks = [info['mRacePosition'] for info in data["participants"]["mParticipantInfo"]]
        return ranks

    def get_sim_name(self, target_ip, gamedata):
        """Map *target_ip* to a simulator name via the config DB, then
        return that participant's index in the game data (False if absent).

        NOTE(review): a participant at index 0 is indistinguishable from
        the False "not found" return for truthiness checks — confirm callers.
        """
        participants = gamedata['participants']['mParticipantInfo']
        # DB for config
        conn = sqlite3.connect("./config/db/test.db")
        cur = conn.cursor()
        # Getting Simulator info
        cur.execute("select * from simulators")
        _sims = cur.fetchall()
        # Close the connection
        conn.close()
        target_name = False
        for sim in _sims:
            if sim[0] == target_ip:
                target_name = sim[1]
        if target_name:
            for i, p in enumerate(participants):
                if p['mName'] == target_name:
                    return i
        return False

    def run(self):
        """Poll Redis for this simulator's telemetry and publish rank changes."""
        while True:
            # time.sleep(0.1)
            message = self.r.hget(self.target_ip,'msg')
            # self.r.hdel(self.target_ip,'msg')
            if message:
                # NOTE(review): eval() on data read from Redis executes
                # arbitrary expressions — consider ast.literal_eval/json.
                data = eval(message)
                gamedata = data['gamedata']
                current_time = data['current_time']
                # Codes
                if "participants" in gamedata:
                    sim_index = self.get_sim_name(self.target_ip,gamedata)
                    lap_length = gamedata["eventInformation"]["mTrackLength"] # lap length
                    lap_completed = gamedata["participants"]["mParticipantInfo"][sim_index]["mLapsCompleted"]
                    lap_distance = gamedata["participants"]["mParticipantInfo"][sim_index]["mCurrentLapDistance"] + lap_length * lap_completed
                    # Ignore the first few metres after the start line.
                    if lap_distance > 10:
                        ranks = self.get_rank(gamedata)
                        if len(ranks) > 1:
                            r0_t1 = ranks[sim_index]
                            if self.r0_t0 != 0:
                                if self.r0_t0 > r0_t1:
                                    # Overtaked
                                    print(self.target_ip,'추월')
                                    self.c = ranks.index(r0_t1 + 1)
                                    self.status = True
                                elif self.r0_t0 < r0_t1:
                                    # Overtaken
                                    print(self.target_ip,'추월당함')
                                    self.c = ranks.index(r0_t1 - 1)
                                    self.status = False
                                else:
                                    self.c = False
                                # NOTE(review): index 0 is falsy, so an event
                                # involving participant 0 is never reported.
                                if self.c:
                                    c_name = gamedata["participants"]["mParticipantInfo"][self.c]["mName"]
                                    current_time = str(datetime.datetime.now())
                                    result = {}
                                    result['current_time'] = current_time
                                    result['target_ip'] = self.target_ip
                                    result['flag'] = 'overtake'
                                    result['data'] = {
                                        'status': self.status,
                                        'rank': r0_t1
                                    }
                                    self.r.hdel(self.target_ip,'msg')
                                    self.r.hset(self.target_ip, 'results', result)
                            self.r0_t0 = r0_t1

    def stop(self):
        """Signal the stop event and wait for the process to finish."""
        self.event.set()
        self.join()
| [
"hwanmooy@gmail.com"
] | hwanmooy@gmail.com |
cc45a6551fb363d98c64fb79d9975c2773d2c81d | 2744da2550b68314c0e45b115e60f302619e4d12 | /hefen_python/result/result_statistic.py | 2cc4baa0c52d00b02442d6f1ca5266d48496b653 | [] | no_license | zhongwuzw/home_broadband-line | 41177030ca08a216eb4737a2113459ab02d40cc6 | 99c0805c62984366f4b18ab029887bd01fe2d788 | refs/heads/master | 2020-05-21T16:45:45.664073 | 2017-12-04T08:58:11 | 2017-12-04T08:58:11 | 60,708,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,589 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import pymysql.cursors
# 命令行参数顺序为:日期、能力、小时,如 python result_statistics.py http 20170404 2017040415
# or python result_statistics.py http 20170404
def insertListToMysqlDB(year_month, day, num, kind, isDay, org = "", projectID = "", appID = ""):
    """Insert one statistics row into test.receive_stastics.

    Opens a fresh MySQL connection per call and always closes it.
    isDay: 1 for a daily figure, 0 for an hourly one.
    """
    statisticsDatabase = pymysql.connect(host='192.168.92.111', port=3306, user='root', password='gbase',
                                         db='test',
                                         charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
    try:
        with statisticsDatabase.cursor() as cursor:
            # Parameterised INSERT. NOTE(review): id is always the literal
            # string "0" — presumably the column auto-increments; confirm schema.
            sql = "INSERT INTO `receive_stastics` (`id`, `year_month`, `day`, `org`, `projectID`,`appID`, `num`, `kind`, `isDay`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
            cursor.execute(sql,("0",year_month,day,org,projectID,appID,num,kind,isDay))
        statisticsDatabase.commit()
    finally:
        statisticsDatabase.close()
if len(sys.argv) < 3 :
print "参数个数不够"
exit()
kind = sys.argv[1]
year_month = sys.argv[2]
isDay = 1
hour = ""
hour_format_1 = ""
hour_format_2 = ""
hour_format_3 = ""
if len(sys.argv) == 4:
hour = sys.argv[3]
hour_format_1 = '| grep ' + hour + ' '
hour_format_2 = '| grep -o "\/[0-9][0-9][0-9][0-9]\/' + hour + '" '
hour_format_3 = '| grep -o "' + hour + '.*Default"'
hour_format_4 = '| grep -o "\/[0-9a-zA-Z]*\/' + hour + '"'
isDay = 0
else:
hour_format_1 = '| grep ' + year_month + ' '
hour_format_2 = '| grep -o "\/[0-9][0-9][0-9][0-9]\/' + year_month + '" '
hour_format_3 = '| grep -o "' + year_month + '.*Default"'
hour_format_4 = '| grep -o "\/[0-9a-zA-Z]*\/' + year_month + '"'
if kind == "all":
# 自然维 - 按照PrjCode统计
nature_prjcode_list = os.popen('cat /opt/ots/log/' + year_month + 'unzip.log |grep "summary.csv"' + hour_format_1 + '|grep -o "public\/[0-9][0-9][0-9][0-9][0-9][0-9]\/"|grep -o "[0-9][0-9][0-9][0-9][0-9][0-9]"| sort|uniq -c').readlines()
# 自然维 - 按照APPID统计
nature_appid_list = os.popen('cat /opt/ots/log/' + year_month + 'unzip.log |grep "summary.csv"|grep mnt' + hour_format_2 + '|grep -o "\/[0-9][0-9][0-9][0-9]\/"|grep -o "[0-9][0-9][0-9][0-9]"|sort|uniq -c').readlines()
# 非自然维 - 按照orgKey统计
nonnature_orgkey_list = os.popen('cat /home/ots_8181/log/' + year_month + 'unzip.log |grep "summary.csv"' + hour_format_3 + '|grep -o "\/CMCC.*\/" |sort|uniq -c').readlines()
# 非自然维 - 按照APPID统计
nonnature_appid_list = os.popen('cat /home/ots_8181/log/' + year_month + 'unzip.log |grep "summary.csv"' + hour_format_4 + '| grep -o "\/[0-9a-zA-Z]*\/"|grep -o "[0-9a-zA-Z]*"|sort|uniq -c').readlines()
else:
# 自然维 - 按照PrjCode统计
nature_prjcode_list = os.popen('cat /opt/ots/log/' + year_month + 'unzip.log |grep "summary.csv"' + hour_format_1 + '|grep "' + kind + '"|grep -o "public\/[0-9][0-9][0-9][0-9][0-9][0-9]\/"|grep -o "[0-9][0-9][0-9][0-9][0-9][0-9]"| sort|uniq -c').readlines()
# 自然维 - 按照APPID统计
nature_appid_list = os.popen('cat /opt/ots/log/' + year_month + 'unzip.log |grep "summary.csv"|grep mnt|grep "\/' + kind + '\/"' + hour_format_2 + '|grep -o "\/[0-9][0-9][0-9][0-9]\/"|grep -o "[0-9][0-9][0-9][0-9]"|sort|uniq -c').readlines()
# 非自然维 - 按照orgKey统计
nonnature_orgkey_list = os.popen('cat /home/ots_8181/log/' + year_month + 'unzip.log |grep "summary.csv"|grep "\/' + kind + '\/"' + hour_format_3 + '|grep -o "\/CMCC.*\/" |sort|uniq -c').readlines()
# 非自然维 - 按照APPID统计
nonnature_appid_list = os.popen('cat /home/ots_8181/log/' + year_month + 'unzip.log |grep "summary.csv"|grep "\/' + kind + '\/"' + hour_format_4 + '| grep -o "\/[0-9a-zA-Z]*\/"|grep -o "[0-9a-zA-Z]*"|sort|uniq -c').readlines()
def handle_shell_result(list, dimension) :
    """Insert each 'count value' line of a shell `uniq -c` result into MySQL.

    dimension selects which column ("org", "projectID" or "appID") receives
    the value; the other two are stored as "". Uses the module-level
    year_month/hour/kind/isDay read from sys.argv.
    NOTE(review): the parameter name `list` shadows the built-in.
    """
    org = (dimension == "org")
    projectID = (dimension == "projectID")
    appID = (dimension == "appID")
    for item in list:
        split_list = item.split()
        if len(split_list) > 1:
            insertListToMysqlDB(year_month=year_month, day=hour, num=split_list[0], kind=kind, isDay=isDay, org=(split_list[1] if org else ""), projectID=(split_list[1] if projectID else ""), appID=(split_list[1] if appID else ""))
dimension = ("projectID", "appID", "appID", "org")
count = 0
for alist in (nature_prjcode_list, nature_appid_list, nonnature_appid_list, nonnature_orgkey_list):
handle_shell_result(alist, dimension[count])
count += 1 | [
"zhongwuzw@qq.com"
] | zhongwuzw@qq.com |
fe6e9af8a31baddb7805d28634bc057f5808ce14 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/hoework01/gettop10frommaoyam01_20200626132705.py | 8d63e58baefe42cbcb636035be65bad77d03b90f | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 4,579 | py | # 使用requests,bs4库,爬取猫眼电影top10的电影名称、电影类型、上映时间,并以utf-8的字符集保存到csv文件中
import requests
from bs4 import BeautifulSoup as bs
maoyanUrl = "https://maoyan.com/board/4";
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593100662316.1593100664951.15; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; mojo-session-id={"id":"435818e6a726415f46defffa27f7abc6","time":1593100221937}; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100665; mojo-trace-id=17; _lxsdk_s=172ec2bff67-0c2-e9f-c64%7C%7C24__mta=251934006.1593072991075.1593100690175.1593100868002.17; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100868; _lxsdk_s=172ee2f4a3e-1c2-3a1-5a4%7C%7C1__mta=251934006.1593072991075.1593133988033.1593140260525.19; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; 
__mta=251934006.1593072991075.1593134712257.1593134712989.9; mojo-session-id={"id":"b78cc9fcb57a627220ec165f84d9d5a9","time":1593140260318}; mojo-trace-id=1; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593140260; _lxsdk_s=172ee8f28d1-560-08-4aa%7C%7C3',
# 'Host' : 'http://www.baidu.com',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
def get_urls(url, headers):
    """Collect the detail-page URLs of the top movies on the board page.

    Fix: the original ignored its *headers* parameter and always used the
    module-level `header` dict instead.
    """
    response = requests.get(url, headers=headers)
    bs_info = bs(response.text, "html.parser")
    import re
    films_url = []
    for tag in bs_info.find_all('div',):
        for tag_p in tag.find_all('a', href=re.compile('/films/')):
            # Collect the link to each top-10 movie's detail page.
            films_url.append(url + tag_p.get('href'))
    urls = set(films_url)
    return urls
import pandas
# 获取详情页
def get_page_info(self,urls,header):
    """Fetch the detail-page content for every URL in *urls*.

    NOTE(review): get_page_content is not defined anywhere in this file
    (get_page_brief looks like the intended callee), so calling this
    raises NameError; the leading 'self' parameter also looks like a
    leftover from a class method.
    """
    films_content = []
    for url in urls:
        content = get_page_content(self,url,header)
        films_content.append(content)
    return films_content
# 获取单个电影的详情信息
def get_page_brief(url,header):
    """Fetch one film's detail page and return [name, genres, release time]."""
    import re
    response = requests.get(url, headers=header)
    bs_info = bs(response.text,'html.parser')
    # print(response.text)
    banner = bs_info.find('div', attrs={'class': 'banner'})
    # Chinese title followed by the English title.
    film_name = banner.find('h1').text + " " + banner.find('div', attrs={'class': 'ename ellipsis'}).text
    genre_links = banner.find_all('a', attrs={'target': '_blank'})
    film_type = "".join(link.text for link in genre_links)
    info_items = banner.find_all('li')
    online_time = info_items[-1].text
    return [film_name, film_type, online_time]
def save_movies(movies):
    """Persist the scraped movie records to ./top as CSV.

    Fix: this module imports ``pandas`` (not ``pd``), so the original
    ``pd.DataFrame`` raised NameError at call time.
    """
    movies_data = pandas.DataFrame(data=movies)
    movies_data.to_csv('./top')
def main():
    """Scrape one film's brief info, save it as CSV, then print it."""
    #urls = get_urls(maoyanUrl,header)
    #contents = get_page_info(self,urls,header)
    #print(urls)
    page_1 = 'https://maoyan.com/films/1375'
    brief = get_page_brief(page_1, header)
    # Fix: the original passed an undefined name `movies` here, which
    # raised NameError; `brief` is the data that was just scraped.
    save_movies(brief)
    print(brief)
if __name__ == '__main__':
main() | [
"31039587+ydbB@users.noreply.github.com"
] | 31039587+ydbB@users.noreply.github.com |
fb9d2de4608618a90483dce7880ec25859319581 | eb4070d3dda38df8b6d4118343db59d559e58df6 | /week-1/Examples/plot_bostonjuly2012temps.py | 7106e6e834e9c292ae22013b1fc5392a53e0f201 | [] | no_license | RaviTezu/MITx-6.00.2x | df767115085e4f28cfaac20ec90c18453517ed5a | 6effafa89e15e1d59c9302c4a3c9f6ce96da0faa | refs/heads/master | 2021-01-10T16:15:03.999778 | 2016-04-20T11:40:46 | 2016-04-20T11:40:46 | 53,061,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | from __future__ import print_function
import os
import pylab
# It is assumed that the 'julyTemps.txt' file is present along the side of this script and this script is
# executed at the root.
PWD = os.getcwd()
FILE_NAME = 'julyTemps.txt'
FILE = PWD + '/' + FILE_NAME
HIGH = []
LOW = []
def load_file(inFile=FILE):
    """Open the July temperatures data file and return the file object.

    The caller is responsible for closing the returned handle.
    """
    return open(inFile, 'r')
def read_data(fd=load_file()):
    """Parse temperature rows from *fd* into the module lists HIGH/LOW.

    Skips header/short lines (fewer than 3 fields, or a non-numeric first
    field). NOTE(review): the default argument opens julyTemps.txt once at
    import time, so the file must exist when this module is imported.
    """
    for line in fd.readlines():
        fields = line.split()
        if len(fields) < 3 or not fields[0].isdigit():
            pass
        else:
            HIGH.append(fields[1])
            LOW.append(fields[2])
def calculate_diff(high=HIGH, low=LOW):
    """Return the per-day temperature range (high minus low) as ints."""
    return [int(hi) - int(lo) for hi, lo in zip(high, low)]
def plotting(diff_temps):
    """Plot the day-by-day temperature ranges and show the figure."""
    length = len(diff_temps)
    print(length)
    pylab.figure(1)
    pylab.title('Day by Day Ranges in Temperature in Boston in July 2012')
    pylab.xlabel('Days')
    pylab.ylabel('Temperature Ranges')
    pylab.plot(range(1, length + 1), diff_temps)
    pylab.show()
if __name__ == "__main__":
    # Fill HIGH/LOW from the data file, then plot the daily high-low ranges.
    read_data()
    plotting(calculate_diff())
| [
"ravi-teja@live.com"
] | ravi-teja@live.com |
bcfb6d795ad0b60febd1d19dad0b815dc763f820 | 521d68b39681253ad39e7254968a8482f28ba391 | /apis/setOrder.py | da57f268899240cf1cbef06e4de171588c2984dc | [] | no_license | unChae/python_ats | 14cca2a07a5629cd2def9e3d428866d9da6a1414 | 0d3d8e9c5ccfc807076dfb24a6084e325b6e17ec | refs/heads/master | 2023-07-21T23:49:35.256075 | 2021-03-27T06:38:17 | 2021-03-27T06:38:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | import os
# Standard library
import hashlib
import sys  # required by the sys.path.append() calls below (was missing)
import uuid
from urllib.parse import urlencode

# Third-party
import jwt
# call .env
from dotenv import load_dotenv
sys.path.append("/root/ats")
from create_log import createLogs
#response
sys.path.append("/root/ats/flask")
from make_response import cus_respones
load_dotenv(verbose=True)
access_key = os.getenv('ACCESS_KEY')
secret_key = os.getenv('SECRET_KEY')
server_url = os.getenv('URL')
#bid 매수 ask 매도
# query = {
# 'market': 'KRW-BTT',
# 'side': 'bid',
# 'volume': '1',
# 'price': '5000.0',
# 'ord_type': 'price',
# }
# query_string = urlencode(query).encode()
# m = hashlib.sha512()
# m.update(query_string)
# query_hash = m.hexdigest()
# payload = {
# 'access_key': access_key,
# 'nonce': str(uuid.uuid4()),
# 'query_hash': query_hash,
# 'query_hash_alg': 'SHA512',
# }
# jwt_token = jwt.encode(payload, secret_key)
# authorize_token = 'Bearer {}'.format(jwt_token)
# headers = {"Authorization": authorize_token}
# res = requests.post(server_url + "/v1/orders", params=query, headers=headers)
# print(res.json())
#매수
def buy_order(coin, price):
    """Place a market-price buy (bid) order for *coin* on Upbit.

    price: the total amount to spend ('ord_type': 'price' = market buy).
    Returns the wrapped HTTP response via cus_respones().
    """
    query = {
        'market': coin,
        'side': 'bid',
        'price': price,
        'ord_type': 'price',
    }
    # Upbit requires a SHA512 hash of the query string inside the JWT payload.
    query_string = urlencode(query).encode()
    m = hashlib.sha512()
    m.update(query_string)
    query_hash = m.hexdigest()
    payload = {
        'access_key': access_key,
        'nonce': str(uuid.uuid4()),
        'query_hash': query_hash,
        'query_hash_alg': 'SHA512',
    }
    jwt_token = jwt.encode(payload, secret_key)
    authorize_token = 'Bearer {}'.format(jwt_token)
    headers = {"Authorization": authorize_token}
    res = requests.post(server_url + "/v1/orders", params=query, headers=headers)
    # Fix: the original referenced an undefined name `order` here, which
    # raised NameError after every successful request.
    createLogs(1, "bid by " + coin)
    return cus_respones(200, "ok", res)
#매도
def sell_order(coin, volume):
    """Place a market sell (ask) order for *volume* of *coin* on Upbit.

    Returns the wrapped HTTP response via cus_respones() (the return
    statement follows on the next line of the file).
    """
    query = {
        'market': coin,
        'side': 'ask',
        'volume': volume,
        'ord_type': 'market',
    }
    # Upbit requires a SHA512 hash of the query string inside the JWT payload.
    query_string = urlencode(query).encode()
    m = hashlib.sha512()
    m.update(query_string)
    query_hash = m.hexdigest()
    payload = {
        'access_key': access_key,
        'nonce': str(uuid.uuid4()),
        'query_hash': query_hash,
        'query_hash_alg': 'SHA512',
    }
    jwt_token = jwt.encode(payload, secret_key)
    authorize_token = 'Bearer {}'.format(jwt_token)
    headers = {"Authorization": authorize_token}
    res = requests.post(server_url + "/v1/orders", params=query, headers=headers)
    # Fix: the original referenced an undefined name `order` here, which
    # raised NameError after every successful request.
    createLogs(1, "ask by " + coin)
return cus_respones(200, "ok", res) | [
"darrun45@gmail.com"
] | darrun45@gmail.com |
d21a1e0fda886e68b04b7b6fb2aae7d62a280eea | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit1046.py | 63eee4eecc3a2149468ba16560b7bb2f0123e5f6 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,157 | py | # qubit number=5
# total number=51
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf for boolean function *f* over *n* qubits.

    For every n-bit input where f(bits) == "1": X gates map that basis
    state onto |1...1>, a multi-controlled phase of pi flips its sign,
    and the X gates are then undone.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Flip the 0-bits so this basis state becomes |1...1>.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            # Undo the X gates to restore the computational basis.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble a Grover-style search circuit over ``n`` qubits.

    The gate sequence (and its ``# number=...`` tags) is auto-generated;
    the exact ordering is significant and must not be rearranged.

    :param n: number of data qubits (a 5th ancilla-style qubit is touched
        when n == 5, see input_qubit[4] below)
    :param f: oracle-defining boolean function passed to build_oracle
    :return: the measured QuantumCircuit
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial Hadamard layer plus generated single/two-qubit gates.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[0]) # number=38
    prog.cz(input_qubit[1],input_qubit[0]) # number=39
    prog.h(input_qubit[0]) # number=40
    prog.cx(input_qubit[1],input_qubit[0]) # number=45
    prog.z(input_qubit[1]) # number=46
    prog.h(input_qubit[0]) # number=48
    prog.cz(input_qubit[1],input_qubit[0]) # number=49
    prog.h(input_qubit[0]) # number=50
    prog.h(input_qubit[0]) # number=32
    prog.cz(input_qubit[1],input_qubit[0]) # number=33
    prog.h(input_qubit[0]) # number=34
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)
    # Standard Grover iteration count: floor(sqrt(2^n) * pi / 4).
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        # NOTE: the comprehension below rebinds ``i`` only inside its own
        # scope (Python 3), so the outer loop variable is unaffected.
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[3],input_qubit[0]) # number=41
        prog.z(input_qubit[3]) # number=42
        prog.cx(input_qubit[3],input_qubit[0]) # number=43
        prog.cx(input_qubit[1],input_qubit[3]) # number=44
        prog.x(input_qubit[0]) # number=9
        prog.x(input_qubit[1]) # number=10
        prog.x(input_qubit[2]) # number=11
        prog.cx(input_qubit[0],input_qubit[3]) # number=35
        prog.x(input_qubit[3]) # number=36
        prog.cx(input_qubit[0],input_qubit[3]) # number=37
        if n>=2:
            # Multi-controlled phase: acts as the diffusion reflection.
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.x(input_qubit[0]) # number=25
        prog.cx(input_qubit[1],input_qubit[0]) # number=26
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.x(input_qubit[1]) # number=22
        prog.x(input_qubit[1]) # number=23
    # circuit end
    # Measure every data qubit into its classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # The oracle marks exactly the all-zeros bitstring.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5, f)
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 7924
    # Simulate and collect measurement counts.
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)
    # Use a context manager so the results file is closed even if one of
    # the print calls raises (the original leaked the handle on error).
    with open("../data/startQiskit1046.csv", "w") as writefile:
        print(info, file=writefile)
        print("results end", file=writefile)
        print(circuit1.depth(), file=writefile)
        print(circuit1, file=writefile)
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
f8240bf750062c3d98ef0e95fe1b2b65a774ae4b | 6d82e1deb77d7beee2605e7bac4f009783c9aac3 | /Alferov_Aleksandr_dz_3/task_3_1.py | 15a908c796513bee6012eb09f4e2cc4a09bfc000 | [] | no_license | selfish-skunk/gb_homework | 5df452cf9aab03fa97297d5ea5ebd3601d97a806 | 1dfa0b7937beb3e6af474cb4416005106c9fe35a | refs/heads/main | 2023-06-03T19:14:45.600448 | 2021-06-25T16:30:37 | 2021-06-25T16:30:37 | 369,659,381 | 0 | 0 | null | 2021-05-23T00:17:16 | 2021-05-21T21:38:15 | null | UTF-8 | Python | false | false | 625 | py | # 1. Написать функцию num_translate(), переводящую числительные от 0 до 10 c английского на русский язык.
# ヽ(ˇヘˇ)ノ
def num_translate(num, dict):
    """Print and return the translation of the English numeral ``num``.

    :param num: English numeral, e.g. 'zero' .. 'ten'
    :param dict: mapping of English numerals to their translations
        (NOTE(review): the name shadows the built-in ``dict``; kept
        unchanged for backward compatibility with keyword callers)
    :return: the translation, or None when ``num`` is not in the mapping
        (the original only printed and implicitly returned None, which
        made the "translate" function unusable as an expression)
    """
    translation = dict.get(num)
    print(translation)
    return translation
# English -> Russian numeral dictionary covering 0..10.
word_book = {
    'zero': 'ноль',
    'one': 'один',
    'two': 'два',
    'three': 'три',
    'four': 'четыре',
    'five': 'пять',
    'six': 'шесть',
    'seven': 'семь',
    'eight': 'восемь',
    'nine': 'девять',
    'ten': 'десять'
}
# Read an English numeral from the user and print its translation
# (unknown words print "None" because dict.get returns None).
digit = input('Ввод пользователя: ')
num_translate(digit, word_book)
"selfish.skunk@gmail.com"
] | selfish.skunk@gmail.com |
1ec2324207c0632e45795f7b7729bc5d37c4bc78 | a657f94afbefe55ec883bcc351fdba9136d8638b | /euler.py | 4ef9d006c8a91b3c3b12e449e20eb85c69b26c2b | [] | no_license | ertosns/euler | 1ee3bb188b77d514c800b802b5a16322effaed7e | 26dc468cd31f5b504cf35da5d97598eb36f1d73a | refs/heads/master | 2021-08-09T02:25:40.941503 | 2017-09-17T22:30:30 | 2017-09-17T22:30:30 | 103,866,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | '''
Author ertosns
Date 23/8/2017
euler challenge solutions.
'''
import math
def palindrome(int_p):
    """Return True if the decimal representation of ``int_p`` is a palindrome.

    :param int_p: integer to test
    :return: bool

    Fix: the original used ``range(l/2)``; under Python 3 ``l/2`` is a
    float and range() raises TypeError. Floor division works on both
    Python 2 and 3.
    """
    str_p = str(int_p)
    l = len(str_p)
    # Compare mirrored character pairs; the middle character (odd length)
    # needs no check.
    for i in range(l // 2):
        if str_p[i] != str_p[l - i - 1]:
            return False
    return True
#todo enhance
def prime(n):
    """Trial-division primality test (6k +/- 1 optimisation).

    :param n: integer to test
    :return: True if n is prime, else False

    Fixes two bugs in the original:
    * the composite check used ``and`` instead of ``or``, so a number was
      only rejected when i-2, i and i+2 ALL divided it — 25, 49, 121, ...
      were reported prime;
    * n == 1 fell through the ``n <= 3`` branch and was reported prime.
    """
    if n < 2:
        return False
    if n <= 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 is of the form 6k - 1 or 6k + 1; test those
    # candidates only. i*i <= n avoids recomputing math.sqrt each pass.
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True
def get_nth_prime(n):
    """Return the n-th number accepted by ``prime`` (1-based count)."""
    found = 0
    candidate = 0
    # Walk the naturals, counting primes until the n-th one appears.
    while found < n:
        candidate += 1
        if prime(candidate):
            found += 1
    return candidate
#sieve of erathostenes
#Complexity: O(Nlog(N)log(N))
#memorty: O(N)
#optimization:use segmanted algorithms for better memory performance for large enough number.
#sieve of erathostenes
#Complexity: O(Nlog(N)log(N))
#memorty: O(N)
#optimization:use segmanted algorithms for better memory performance for large enough number.
def eratosthenes(N):
    """Return a sorted list of all primes <= N.

    Fixes the original's index bookkeeping, which mapped sieve slot i to
    the number i+1: it returned 1 as a prime, could return N+1, and left
    composites whose smallest factor exceeded sqrt(N)'s loop range
    unmarked (e.g. eratosthenes(8) returned [1, 2, 3, 5, 7, 9]).

    :param N: inclusive upper bound
    :return: list of primes in increasing order
    """
    if N < 2:
        return []
    # sieve[i] is True while i is still a prime candidate.
    sieve = [True] * (N + 1)
    sieve[0] = sieve[1] = False
    p = 2
    while p * p <= N:
        if sieve[p]:
            # Start at p*p: smaller multiples were struck by smaller primes.
            for multiple in range(p * p, N + 1, p):
                sieve[multiple] = False
        p += 1
    return [num for num, is_prime in enumerate(sieve) if is_prime]
def prime_sieve(N, algo):
    """Dispatch to the given sieve implementation.

    :param N: inclusive upper bound forwarded to the sieve
    :param algo: callable sieve implementation, e.g. ``eratosthenes``
    :return: whatever ``algo(N)`` returns (a list of primes)
    """
    return algo(N)
#fermat algorithm
#fermat algorithm
def fermat_fact(N):
    """Fermat factorization: return all [q+p, q-p] pairs with
    (q+p)(q-p) == N and q+p != N.

    Fixes three defects in the original:
    * perfect squares crashed with ZeroDivisionError (q*q - N == 0 made
      ``p2/p`` divide by zero) — the square test is now ``p*p == p2``;
    * even N returned a flat ``[2, N/2]`` (a float under Python 3) while
      every caller, e.g. has_ndig_factors, expects a list of pairs — it
      now returns ``[[2, N // 2]]``;
    * the float-division equality test is replaced by exact integer math.

    :param N: integer to factor
    :return: list of [larger, smaller] factor pairs (empty for N < 1)
    """
    fact = []
    if N < 1:
        return fact
    if N % 2 == 0:
        # Fermat's method only applies to odd numbers; use the trivial split.
        return [[2, N // 2]]
    q = int(math.ceil(math.sqrt(N)))
    while q < N:
        p2 = q * q - N
        p = int(math.sqrt(p2))
        # Exact perfect-square check (p may legitimately be 0 when N is a
        # perfect square); skip the trivial N * 1 factorization.
        if p * p == p2 and (q + p) != N:
            fact.append([q + p, q - p])
        q += 1
    return fact
#assumes N>1, return prime factors for N
#assumes N>1, return prime factors for N
def trial_division(N):
    """Return the primes from the sieve that divide N evenly."""
    return [p for p in prime_sieve(N, eratosthenes) if N % p == 0]
def largest_prime_factor(N):
    """Return the largest prime factor of N (returns 2 for N <= 1, as the
    original did).

    Fix: the original divided with ``n = n/d``, which under Python 3
    turns ``n`` into a float and corrupts the modulo arithmetic for large
    inputs; floor division keeps everything integral. The sqrt bound is
    also hoisted out of the loop since N never changes.
    """
    n = N
    d = 2
    limit = math.sqrt(N)
    while n > 1 and d <= limit:
        if n % d == 0:
            # Strip this factor completely before moving on.
            n //= d
        else:
            # After 2, test odd candidates only.
            d = 3 if d == 2 else d + 2
    # Any remainder > 1 has all prime factors above sqrt(N) and is itself
    # prime; otherwise the last divisor tried is the answer.
    return n if n > 1 else d
def factorize(N, algo):
    """Dispatch to the given factorization implementation.

    :param N: integer to factor
    :param algo: callable factorizer, e.g. ``fermat_fact`` or ``trial_division``
    :return: whatever ``algo(N)`` returns
    """
    return algo(N)
def has_ndig_factors(N, w):
    """Return True if some Fermat factor pair of N consists of two
    w-digit numbers (each strictly between 10**(w-1) - 1 and 10**w)."""
    upper = math.pow(10, w)
    lower = math.pow(10, w - 1) - 1
    return any(
        lower < pair[0] < upper and lower < pair[1] < upper
        for pair in factorize(N, fermat_fact)
    )
#--problem panel--#
#find the sum of all numbers below N multiple of 3,5.
#find the sum of all numbers below N multiple of 3,5.
def euler1():
    """Read t test cases; for each n print the sum of natural numbers
    below n divisible by 3 or 5.

    Fixes the original logic, which added i*5 only when i was divisible
    by 3 — i.e. it summed multiples of 3 plus multiples of 15, dropping
    5, 10, 20, ... and double-counting 15, 30, ... (euler1 printed 18
    instead of 23 for n = 10). Also ported from Python 2 raw_input().
    """
    t = int(input())
    for _ in range(t):
        n = int(input())
        total = sum(i for i in range(3, n) if i % 3 == 0 or i % 5 == 0)
        print(total)
#sum even fabonacci under N
def euler2():
t = int(raw_input())
for a0 in xrange(t):
n=long(raw_input())
if n<2:
print 0
elif n==2:
print 2
else:
i=1
j=2
sum=0
while i<n:
if i%2==0:
sum+=i
tmp=j
j=i+j
i=tmp
print sum
#max prime factor under N
#max prime factor under N
def euler3():
    """For each test case print the largest prime factor of n.

    Ported from Python 2 (raw_input / xrange / print statement) to
    Python 3; delegates to largest_prime_factor.
    """
    t = int(input())
    for _ in range(t):
        n = int(input())
        print(largest_prime_factor(n))
#search max palindrome number under N as product of 3-digits
#search max palindrome number under N as product of 3-digits
def euler4():
    """For each test case print the largest palindrome <= N expressible
    as a product of two 3-digit numbers (or -1 if none).

    Ported from Python 2 (raw_input / xrange / print statement) to
    Python 3; the search strategy is unchanged.
    """
    t = int(input())
    for _ in range(t):
        N = int(input())
        palind = -1
        p_range = 999
        for p0 in range(p_range, 99, -1):
            # p_range shrinks each outer pass, so mirrored (p0, p1)
            # pairs are skipped; products are symmetric, so no result
            # is lost.
            for p1 in range(p_range, 99, -1):
                tota = p0 * p1
                if tota > N:
                    continue
                if palindrome(tota) and tota > palind:
                    palind = tota
            p_range -= 1
        print(palind)
| [
"ertosnsbot@yahoo.com"
] | ertosnsbot@yahoo.com |
d178dcc2efeab0bd3fdac83c2c00a2998ac26b5e | 0cb970785a746a30f9b44b3e5234157818688197 | /Dpython/datatypes/listdatatype/dictionary.py | fdca2679ba3ae6247d7957ec1b3a8f77e78d2354 | [] | no_license | charan2108/pythonprojectsNew | 4255bbb81b6cf0d47c51c131ed93a0bb331a669c | b2f273d44937ec576daa0235d0d0326ff5149bf8 | refs/heads/main | 2023-05-03T16:55:33.242693 | 2021-05-26T11:18:17 | 2021-05-26T11:18:17 | 371,001,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | d = {101:'q', 102:'w', 103:'r'}
# Dictionaries preserve insertion order (Python 3.7+).
print(d)
print(type(d))
# Replace the value stored under the existing key 101.
d[101] = 'alfa'
print(d)
e = {}
# NOTE(review): the next two assignments add entries to ``d``, yet ``e``
# is printed below, so this always shows an empty dict — possibly the
# author intended to populate ``e`` here. Confirm before "fixing".
d['a']='apple'
d['b']='gold'
print(e)
| [
"sumacharan.adabala@gmail.com"
] | sumacharan.adabala@gmail.com |
7e26c46bb3f2d77d8229684f5f891b3a9cbdec8c | e84a2084649d3aa176270461cce2584aaf3e6e7f | /forum/forum/urls.py | 532d22c07f55be1180d215f1186da8ff4b5dc265 | [] | no_license | yeawin/webcms | 6a2a568c875369997f4653401f0d2bb4037c6699 | 081f1ebd200a906045a085170176be7455dffa32 | refs/heads/master | 2021-01-01T17:53:22.459795 | 2017-09-08T02:18:12 | 2017-09-08T02:18:12 | 98,188,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | """forum URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from machina.app import board
# URL routes: the Django admin plus the django-machina forum board
# (``board.urls``) mounted under /forum/.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^forum/', include(board.urls)),
]
| [
"ywliu@xmu.edu.cn"
] | ywliu@xmu.edu.cn |
71535013b5db40125494eabf74abe9bf04feaa38 | 7b7b0e3c1db646fcd58ed7bf032b37f826aa5dc5 | /monte_carlo.py | 1e6d559cd67e18301326aba1308b57af827750e7 | [] | no_license | tvvasquezg/RL | fcc3806d0fb88e7d5bcc3e8cf7afa0b7cb413de8 | 505887aa75548a717f369f08ff4babebbdc271dc | refs/heads/master | 2021-01-06T08:18:53.708775 | 2020-02-17T19:06:11 | 2020-02-17T19:06:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,422 | py | import numpy as np
from time import sleep
import matplotlib.pyplot as plt
# Grid-world layout: a 6x9 maze with a vertical wall in column 1.
rows = 6
cols = 9
start = (2,0)  # agent spawn cell as (row, col)
goal = (0,8)   # terminal cell
blocks = [(1,1),(2,1),(3,1),(4,1),(5,1)]  # impassable wall cells
BEHAVIORS = ('U','D','L','R')  # up / down / left / right actions
class Environment:
    """Deterministic grid-world maze built from the module-level layout
    constants (rows/cols/start/goal/blocks)."""
    # Row/column displacement for each of the four moves.
    _DELTAS = {'U': (-1, 0), 'D': (1, 0), 'L': (0, -1), 'R': (0, 1)}
    def __init__(self):
        self.rows = rows
        self.cols = cols
        self.start = start
        self.goal = goal
        self.blocks = blocks
        self.state = self.start
    def next_state(self, behavior):
        """Apply one move; stay put on walls or out-of-bounds moves.
        Returns the (possibly unchanged) new state."""
        d_row, d_col = self._DELTAS.get(behavior, (0, 0))
        row, col = self.state
        row += d_row
        col += d_col
        inside = 0 <= row < self.rows and 0 <= col < self.cols
        if inside and (row, col) not in blocks:
            self.state = (row, col)
        return self.state
    def give_reward(self):
        """+1 at the goal, a small step penalty everywhere else."""
        return 1 if self.state == self.goal else -.01
    def reset(self):
        """Put the agent back at the (current) start cell."""
        self.state = self.start
    def choose_initial_state_behavior(self):
        """Exploring starts: sample a random non-wall cell and a random
        first action. Also rebinds ``self.start`` to the sampled cell,
        matching the original's side effect."""
        row = np.random.choice(self.rows)
        col = np.random.choice(self.cols)
        while (row, col) in self.blocks:
            row = np.random.choice(self.rows)
            col = np.random.choice(self.cols)
        move = np.random.choice(BEHAVIORS)
        self.state = (row, col)
        self.start = self.state
        return (row, col), move
class MCAgent:
    """Every-visit Monte-Carlo control agent with a tabular Q function
    over all (state, behavior) pairs of the grid world."""
    def __init__(self,env,learning_rate,gamma):
        """Build a zero-initialised Q table for every grid cell/action.

        :param env: Environment instance (used for grid dimensions)
        :param learning_rate: step size for the incremental return update
        :param gamma: discount factor
        """
        self.lr = learning_rate
        self.gamma = gamma
        self.Q = {}
        #self.returns = {}
        self.env = env
        self.agent_state = self.env.state
        self.accumulated_reward = 0
        for r in range(self.env.rows):
            for c in range(self.env.cols):
                self.Q[(r,c)] = {}
                #self.returns[(r,c)] = {}
                for b in BEHAVIORS:
                    self.Q[(r,c)][b] = 0#1:avg G,number
                    #self.returns[(r,c)][b] =
    def get_best_action(self,s):
        """Return the greedy (behavior, Q-value) pair for state s.
        Ties resolve to the first-listed behavior in BEHAVIORS."""
        best_behavior = None
        best_value = float('-inf')
        for b in BEHAVIORS:
            if self.Q[s][b]>best_value:
                best_value = self.Q[s][b]
                best_behavior = b
        return best_behavior,best_value
    def explore(self,behavior,eps=0.5):
        """Epsilon-greedy: with probability eps return a uniformly random
        behavior, otherwise the supplied one."""
        p = np.random.random()
        if p<eps:
            return np.random.choice(BEHAVIORS)
        else:
            return behavior
    def learn(self,episode,eps):
        """Update Q from a finished episode of (state, behavior, reward)
        triples, iterated in reverse while accumulating the return G.

        NOTE(review): Q[s][b] is updated toward G *before* G absorbs this
        step's reward, so each value tracks the return from the following
        step onward — looks like an off-by-one versus the standard MC
        update (G = r + gamma*G first). Confirm whether this is intended.
        ``eps`` is accepted but unused here.
        """
        G = 0
        first = True
        for i,(s,b,r) in enumerate(reversed(episode)):
            #if i==1:
                #print('last b-state value',s,self.Q[s][b])
                #first = False
            v_s = self.Q[s][b]
            self.Q[s][b] += self.lr*(G - v_s)
            G = r + self.gamma*G
            #self.
        # nextState = self.env.next_state(b)
        # self.agent_state = nextState
        # reward = self.env.give_reward()
        # self.accumulated_reward += reward
        # if self.env.state != self.env.goal:
        #     nextBehavior,nextQValue = self.get_best_action(nextState)
        #     nextBehavior = self.explore(nextBehavior,eps)
        #     self.Q[s][b] = self.Q[s][b] + self.lr*(reward +self.gamma*nextQValue - self.Q[s][b])
        # else:
        #     self.Q[s][b] = self.Q[s][b] + self.lr*(reward - self.Q[s][b])
    def reset(self):
        """Clear the per-episode accumulated reward (Q is kept)."""
        self.accumulated_reward = 0
        #self.agent_state
        #if s == self.env.goal:
            #print('s',s,'goal!!!!')
        #print('b',b)
        #sleep(.1)
        #print('s',s)
        #print('tupla',(reward +self.gamma*nextQValue - self.Q[s][b]))
# --- experiment hyper-parameters and module-level state ---
env = Environment()
lr = 0.001  # Monte-Carlo update step size
a = MCAgent(env,lr,0.9)  # discount factor gamma = 0.9
print('lr:',lr)
num_episodes = 5000
eps = 0.2  # initial exploration rate; decayed below via the divisor t
t = 1
episodes = []  # length of each greedy test episode
accumulated_rewards = []  # accumulated training reward per episode
def test(env, agent, initial_state=(2, 3), eps=0.01):
    """Run one near-greedy episode from ``initial_state`` and return the
    list of states visited (excluding the initial state).

    Bug fix: the original ignored its ``agent`` parameter and operated on
    the module-level global ``a``; it now uses the parameter it was given
    (existing callers all pass ``a``, so behavior is unchanged for them).
    Dead locals (last_state/current_state, the pre-loop reward) were
    removed.

    Side effects: rebinds env.state and env.start to ``initial_state``,
    resets the agent's accumulated reward and tracks its current state.
    """
    s = initial_state
    env.state = s
    env.start = s
    agent.reset()
    agent.agent_state = s
    episode = []
    while env.state != env.goal:
        # Greedy action with a small exploration probability.
        b, _ = agent.get_best_action(s)
        b = agent.explore(b, eps)
        env.next_state(b)
        r = env.give_reward()
        agent.accumulated_reward += r
        s = env.state
        agent.agent_state = s
        episode.append(s)
    return episode
# --- Monte-Carlo training loop with exploring starts ---
for it in range(num_episodes):
    env.reset()
    a.reset()
    #print('it',it)
    # Exploring starts: random non-wall cell and random first behavior.
    first_state,first_behavior = env.choose_initial_state_behavior()
    s = env.start
    episode = [first_state]
    a.agent_state = first_state
    #print('s',first_state,'b',first_behavior)
    r = env.give_reward()
    # Episode trace of (state, behavior, reward) triples fed to learn().
    s_b_r_list = [(first_state,first_behavior,0)]
    it_ep = 0
    while env.state != env.goal:
        #print('it of ep',it_ep)
        #it_ep +=1
        # Epsilon-greedy rollout; eps/t decays as t grows every 50 episodes.
        b,v = a.get_best_action(s)
        b = a.explore(b,eps/t)
        #a.learn(s,b,eps)
        #print('s',s,'b',b)
        env.next_state(b)
        r = env.give_reward()
        s_b_r_list.append((s,b,r))
        a.accumulated_reward += r
        s_prime = env.state
        s = s_prime
        a.agent_state = s
        episode.append(s)
    #print('(0,7)',a.Q[(0,7)])
    #print('(1,7)',a.Q[(1,7)])
    #print('(1,8)',a.Q[(1,8)])
    #print('it',it)
    # Evaluate the current policy greedily from the fixed start (2, 3).
    episode = test(env,a)
    #if len(episode)<=30:
    # Learn from the exploring-starts trace collected above.
    a.learn(s_b_r_list,eps/t)
    episodes.append(len(episode))
    accumulated_rewards.append(a.accumulated_reward)
    if it%50 == 0 and it != 0:
        # Decay exploration and log progress every 50 episodes.
        t +=.1
        #print('length of episode',len(episode))
        print(it,'len test episode --->',len(episode))
        if len(episode)<=30:
            print(episode)
        #np.mean(episodes[it:])
        print('accumulated reward',a.accumulated_reward)
        print('eps',eps/t)
        #sleep(1)
# --- post-training report and diagnostics plots ---
try:
    # Length of the last greedy test episode.
    print(episodes[-1])
except IndexError:
    # Narrowed from a bare ``except:``, which also swallowed unrelated
    # errors such as NameError; only an empty episodes list is expected.
    pass
plt.plot(list(range(num_episodes)), episodes)
plt.title('longitud de episodios')
plt.show()
plt.plot(list(range(num_episodes)), accumulated_rewards)
plt.title('refuerzo acumulado')
plt.show()
print('fin')
# Final greedy rollout from the default start state.
test(env, a)
| [
"maacostaro@unal.edu.co"
] | maacostaro@unal.edu.co |
89938fbcb47e0b7757adcf91ed9a35f11cc37eeb | a27e43d263375f1ea42d496e18af01f5ad46990e | /modules/initialize.py | d7767bbf8a118b8f1b6dc24808d627c54abdcc1f | [] | no_license | Klim314/Quetzalcoatl | 74565556a26d548f28118137e81866f7dc7a4e7a | 0d78183235207bc9c44c7c099722f5a7203e1d9c | refs/heads/master | 2016-08-06T08:57:19.802511 | 2015-06-24T08:29:53 | 2015-06-24T08:29:53 | 36,220,505 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | #!/usr/bin/env python3
"""
initialize.py
loads the pubdip.ini file
returns a dictionary containing all terms
"""
def execute(target):
    """Parse a simple ``key=value`` ini-style file into a dict.

    Lines whose first non-blank character is '#' are comments. Blank
    lines and lines without an '=' are skipped — the original crashed on
    them with an IndexError. Splitting now uses ``maxsplit=1`` so values
    that themselves contain '=' (URLs, base64, ...) are no longer
    silently truncated at the second '='.

    :param target: path of the configuration file to read
    :return: dict mapping each raw key (text before the first '=') to its
        whitespace-stripped value
    """
    res = dict()
    with open(target) as f:
        for line in f:
            stripped = line.strip()
            # Skip blanks, comments and malformed lines.
            if not stripped or stripped.startswith('#') or '=' not in stripped:
                continue
            key, value = line.split('=', 1)
            res[key] = value.strip()
    return res
if __name__ == "__main__":
path = "../pubdip.ini"
print(execute(path))
| [
"klim314@gmail.com"
] | klim314@gmail.com |
fa6ee29c104959fe81ede565ddd6ba55d07a6502 | d55f0a4e5c00c2ee075250bf8da6bc13e3a83cf5 | /django_project/chumthewaters/admin.py | fc2c2fa6b254e993e2de97539089beec5d4f35b1 | [] | no_license | dtsiedel/ChumTheWaters | 00278b61ec461ac292ec16932aaad2192e39dac2 | 8d7495bb156217ad61445d0368ee76d34ac26ffd | refs/heads/master | 2020-04-11T03:51:15.627461 | 2016-12-14T16:40:51 | 2016-12-14T16:40:51 | 68,022,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from django.contrib import admin
from .views import hello
# Register your models here.
# NOTE(review): ``hello`` is imported from .views and appears to be a
# view, while admin.site.register() expects a Model subclass — confirm
# this registration is intentional.
admin.site.register(hello)
| [
"dts.siedel@gmail.com"
] | dts.siedel@gmail.com |
7cd02ffb1d2c83ef5702de777444badd6d5579f9 | d0bc8b73f567968b0998230b11677d6237d0f6a5 | /sdk/python/kfp/v2/components/types/type_utils_test.py | 9a315867c558fe745d1fad3879fdfbf95a5ed129 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] | permissive | Stevedev7/pipelines | 74a1e2c114c80bb71a29192eeba8d41d730f98b3 | 6a730f961803f505909c08897ba81ae543a3e8eb | refs/heads/master | 2023-09-05T16:02:54.675847 | 2021-11-08T18:35:09 | 2021-11-08T18:35:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,553 | py | # Copyright 2020 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from typing import Any, Dict, List, Union
from absl.testing import parameterized
from kfp.components import structures
from kfp.pipeline_spec import pipeline_spec_pb2 as pb
from kfp.v2.components.types import artifact_types, type_utils
from kfp.v2.components.types.type_utils import InconsistentTypeException
# Type names/specs that must be classified as KFP parameters.
_PARAMETER_TYPES = [
    'String',
    'str',
    'Integer',
    'int',
    'Float',
    'Double',
    'bool',
    'Boolean',
    'Dict',
    'List',
    'JsonObject',
    'JsonArray',
    {
        'JsonObject': {
            'data_type': 'proto:tfx.components.trainer.TrainArgs'
        }
    },
]
# Artifact type names with a dedicated system schema.
_KNOWN_ARTIFACT_TYPES = ['Model', 'Dataset', 'Schema', 'Metrics']
# Names (or None) that should NOT be treated as parameters either.
_UNKNOWN_ARTIFACT_TYPES = [None, 'Arbtrary Model', 'dummy']
class _ArbitraryClass:
    """Placeholder that is not an Artifact subclass; the schema tests
    expect it to fall back to the generic system.Artifact schema."""
    pass
class _VertexDummy(artifact_types.Artifact):
    """Fake first-party (google.*) artifact type with a non-default
    version, used to exercise schema_title/schema_version resolution."""
    TYPE_NAME = 'google.VertexDummy'
    VERSION = '0.0.2'
    def __init__(self):
        super().__init__(uri='uri', name='name', metadata={'dummy': '123'})
class TypeUtilsTest(parameterized.TestCase):
    """Unit tests for kfp.v2.components.types.type_utils: parameter vs
    artifact classification, schema resolution, parameter-type mapping
    and type-compatibility checks."""
    def test_is_parameter_type(self):
        # Parameter specs are parameters; artifact names (known or not) are not.
        for type_name in _PARAMETER_TYPES:
            self.assertTrue(type_utils.is_parameter_type(type_name))
        for type_name in _KNOWN_ARTIFACT_TYPES + _UNKNOWN_ARTIFACT_TYPES:
            self.assertFalse(type_utils.is_parameter_type(type_name))
    # Each case supplies either a type-name string or an artifact class and
    # the ArtifactTypeSchema it must resolve to.
    @parameterized.parameters(
        {
            'artifact_class_or_type_name':
                'Model',
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Model', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                artifact_types.Model,
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Model', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                'Dataset',
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Dataset', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                artifact_types.Dataset,
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Dataset', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                'Metrics',
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Metrics', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                artifact_types.Metrics,
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Metrics', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                'ClassificationMetrics',
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.ClassificationMetrics',
                    schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                artifact_types.ClassificationMetrics,
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.ClassificationMetrics',
                    schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                'SlicedClassificationMetrics',
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.SlicedClassificationMetrics',
                    schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                artifact_types.SlicedClassificationMetrics,
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.SlicedClassificationMetrics',
                    schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                'arbitrary name',
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Artifact', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                _ArbitraryClass,
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Artifact', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                artifact_types.HTML,
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.HTML', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                artifact_types.Markdown,
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Markdown', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                'some-google-type',
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='system.Artifact', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                'google.VertexModel',
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='google.VertexModel', schema_version='0.0.1')
        },
        {
            'artifact_class_or_type_name':
                _VertexDummy,
            'expected_result':
                pb.ArtifactTypeSchema(
                    schema_title='google.VertexDummy', schema_version='0.0.2')
        },
    )
    def test_get_artifact_type_schema(self, artifact_class_or_type_name,
                                      expected_result):
        self.assertEqual(
            expected_result,
            type_utils.get_artifact_type_schema(artifact_class_or_type_name))
    # Mapping of type-name strings / Python types / typing generics to the
    # pipeline-spec ParameterType enum.
    @parameterized.parameters(
        {
            'given_type': 'Int',
            'expected_type': pb.ParameterType.NUMBER_INTEGER,
        },
        {
            'given_type': 'Integer',
            'expected_type': pb.ParameterType.NUMBER_INTEGER,
        },
        {
            'given_type': int,
            'expected_type': pb.ParameterType.NUMBER_INTEGER,
        },
        {
            'given_type': 'Double',
            'expected_type': pb.ParameterType.NUMBER_DOUBLE,
        },
        {
            'given_type': 'Float',
            'expected_type': pb.ParameterType.NUMBER_DOUBLE,
        },
        {
            'given_type': float,
            'expected_type': pb.ParameterType.NUMBER_DOUBLE,
        },
        {
            'given_type': 'String',
            'expected_type': pb.ParameterType.STRING,
        },
        {
            'given_type': 'Text',
            'expected_type': pb.ParameterType.STRING,
        },
        {
            'given_type': str,
            'expected_type': pb.ParameterType.STRING,
        },
        {
            'given_type': 'Boolean',
            'expected_type': pb.ParameterType.BOOLEAN,
        },
        {
            'given_type': bool,
            'expected_type': pb.ParameterType.BOOLEAN,
        },
        {
            'given_type': 'Dict',
            'expected_type': pb.ParameterType.STRUCT,
        },
        {
            'given_type': dict,
            'expected_type': pb.ParameterType.STRUCT,
        },
        {
            'given_type': 'List',
            'expected_type': pb.ParameterType.LIST,
        },
        {
            'given_type': list,
            'expected_type': pb.ParameterType.LIST,
        },
        {
            'given_type': Dict[str, int],
            'expected_type': pb.ParameterType.STRUCT,
        },
        {
            'given_type': List[Any],
            'expected_type': pb.ParameterType.LIST,
        },
        {
            'given_type': {
                'JsonObject': {
                    'data_type': 'proto:tfx.components.trainer.TrainArgs'
                }
            },
            'expected_type': pb.ParameterType.STRUCT,
        },
    )
    def test_get_parameter_type(self, given_type, expected_type):
        self.assertEqual(expected_type,
                         type_utils.get_parameter_type(given_type))
        # Test get parameter by Python type.
        self.assertEqual(pb.ParameterType.NUMBER_INTEGER,
                         type_utils.get_parameter_type(int))
    def test_get_parameter_type_invalid(self):
        # None has no type name attribute, so schema lookup must raise.
        with self.assertRaises(AttributeError):
            type_utils.get_parameter_type_schema(None)
    def test_get_input_artifact_type_schema(self):
        input_specs = [
            structures.InputSpec(name='input1', type='String'),
            structures.InputSpec(name='input2', type='Model'),
            structures.InputSpec(name='input3', type=None),
        ]
        # input not found.
        # NOTE(review): the assertEqual calls placed inside these
        # assertRaises blocks are unreachable — the preceding line raises
        # first. They never verify the message text; consider moving them
        # after the with-block using cm.exception.
        with self.assertRaises(AssertionError) as cm:
            type_utils.get_input_artifact_type_schema('input0', input_specs)
            self.assertEqual('Input not found.', str(cm))
        # input found, but it doesn't map to an artifact type.
        with self.assertRaises(AssertionError) as cm:
            type_utils.get_input_artifact_type_schema('input1', input_specs)
            self.assertEqual('Input is not an artifact type.', str(cm))
        # input found, and a matching artifact type schema returned.
        self.assertEqual(
            'system.Model',
            type_utils.get_input_artifact_type_schema('input2',
                                                      input_specs).schema_title)
        # input found, and the default artifact type schema returned.
        self.assertEqual(
            'system.Artifact',
            type_utils.get_input_artifact_type_schema('input3',
                                                      input_specs).schema_title)
    # Compatible pairs must pass; incompatible ones must raise
    # InconsistentTypeException.
    @parameterized.parameters(
        {
            'given_type': 'String',
            'expected_type': 'String',
            'is_compatible': True,
        },
        {
            'given_type': 'String',
            'expected_type': 'Integer',
            'is_compatible': False,
        },
        {
            'given_type': {
                'type_a': {
                    'property': 'property_b',
                }
            },
            'expected_type': {
                'type_a': {
                    'property': 'property_b',
                }
            },
            'is_compatible': True,
        },
        {
            'given_type': {
                'type_a': {
                    'property': 'property_b',
                }
            },
            'expected_type': {
                'type_a': {
                    'property': 'property_c',
                }
            },
            'is_compatible': False,
        },
        {
            'given_type': 'Artifact',
            'expected_type': 'Model',
            'is_compatible': True,
        },
        {
            'given_type': 'Metrics',
            'expected_type': 'Artifact',
            'is_compatible': True,
        },
    )
    def test_verify_type_compatibility(
        self,
        given_type: Union[str, dict],
        expected_type: Union[str, dict],
        is_compatible: bool,
    ):
        if is_compatible:
            self.assertTrue(
                type_utils.verify_type_compatibility(
                    given_type=given_type,
                    expected_type=expected_type,
                    error_message_prefix='',
                ))
        else:
            with self.assertRaises(InconsistentTypeException):
                type_utils.verify_type_compatibility(
                    given_type=given_type,
                    expected_type=expected_type,
                    error_message_prefix='',
                )
)
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
417b5863206b5507b692d235dc15a4afa388178b | 556270814f52cb72af0c5cfa69a91bc00ddbdfdf | /codeUp problemset 100/1073_16726262(AC).py | ac4339547925dfee24b238460eaa41355920c857 | [] | no_license | jihyoung-lee/Coding-Test-With-Python | fd6a14dd0f33a224821292f5ec9e35452ed5587b | 4d2ff83c08980198144066e80c01c55346fc6a23 | refs/heads/main | 2023-04-15T19:22:18.842949 | 2021-04-29T13:28:21 | 2021-04-29T13:28:21 | 326,173,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | a = input().split()
for x in a:
if(x!="0"):
print(x)
else:
break
| [
"67559886+jihyoung-lee@users.noreply.github.com"
] | 67559886+jihyoung-lee@users.noreply.github.com |
f954f43e8cdc73c171ae692a1d158d1f11b505f0 | d032dacfe02d05985b3a5c9efc469e06dd3d90e1 | /livechat/routing.py | 637bfc29c7baa80075d782c7b01dfb3e99ba83d5 | [
"MIT"
] | permissive | AnuragGupta806/Persevarance-Hack36 | 51f4767bb98ccb08668ce99afe2b169a9e146856 | 34a005664558593adff36e03d0a457febed3608a | refs/heads/main | 2023-04-03T03:03:26.162086 | 2021-04-17T11:43:42 | 2021-04-17T11:43:42 | 356,320,370 | 0 | 1 | MIT | 2021-04-17T11:43:43 | 2021-04-09T15:39:58 | CSS | UTF-8 | Python | false | false | 156 | py | from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_name>\w+)/', consumers.ChatConsumer),
]
| [
"54318429+AnuragGupta806@users.noreply.github.com"
] | 54318429+AnuragGupta806@users.noreply.github.com |
6076f1c22b57304f12ad5e41a44755ef30bd6a44 | 206aa8bf20087f47b040777154e4525fbdf05e0c | /Assignment_2/multiagent/keyboardAgents.py | 68303807fe284b692708afb2e9c0b0da7bc02211 | [] | no_license | heromanba/UC-Berkeley-CS188-Assignments | 0f7340553beb523f551bfa7445da2520b72de164 | fe2e1dc05272477bc03039ec0ab8a31bdd89dabe | refs/heads/master | 2021-06-28T18:44:01.615408 | 2020-09-18T07:14:42 | 2020-09-18T07:14:42 | 136,872,884 | 4 | 12 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | py | from game import Agent
from game import Directions
import random
class KeyboardAgent(Agent):
    """
    An agent controlled by the keyboard (WASD cluster or arrow keys).
    When no key maps to a legal move it keeps its previous direction,
    and falls back to a random legal action as a last resort.
    """
    # NOTE: Arrow keys also work.
    WEST_KEY = 'a'
    EAST_KEY = 'd'
    NORTH_KEY = 'w'
    SOUTH_KEY = 's'
    STOP_KEY = 'q'
    def __init__( self, index = 0 ):
        # lastMove lets the agent keep drifting in the previous direction
        # while no (legal) key is held.
        self.lastMove = Directions.STOP
        self.index = index
        self.keys = []
    def getAction( self, state):
        """Pick a legal action from the currently pressed/queued keys.

        Priority: key-mapped move > previous direction (if still legal)
        > explicit STOP key > random legal action.
        """
        from graphicsUtils import keys_waiting
        from graphicsUtils import keys_pressed
        keys = list(keys_waiting()) + list(keys_pressed())
        if keys != []:
            # Only overwrite the remembered keys when something is pressed,
            # so held directions persist between frames.
            self.keys = keys
        legal = state.getLegalActions(self.index)
        move = self.getMove(legal)
        if move == Directions.STOP:
            # Try to move in the same direction as before
            if self.lastMove in legal:
                move = self.lastMove
        if (self.STOP_KEY in self.keys) and Directions.STOP in legal: move = Directions.STOP
        if move not in legal:
            move = random.choice(legal)
        self.lastMove = move
        return move
    def getMove(self, legal):
        """Map pressed keys (letters or arrow names) to a legal direction.
        Later checks win, so when several keys are held the priority is
        South > North > East > West."""
        move = Directions.STOP
        if (self.WEST_KEY in self.keys or 'Left' in self.keys) and Directions.WEST in legal: move = Directions.WEST
        if (self.EAST_KEY in self.keys or 'Right' in self.keys) and Directions.EAST in legal: move = Directions.EAST
        if (self.NORTH_KEY in self.keys or 'Up' in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
        if (self.SOUTH_KEY in self.keys or 'Down' in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
        return move
class KeyboardAgent2(KeyboardAgent):
    """
    A second keyboard-controlled agent using the IJKL key cluster
    (no arrow-key aliases, unlike KeyboardAgent).
    """
    # NOTE: Arrow keys also work.
    WEST_KEY = 'j'
    EAST_KEY = "l"
    NORTH_KEY = 'i'
    SOUTH_KEY = 'k'
    STOP_KEY = 'u'
    def getMove(self, legal):
        """Map the currently held keys to a legal direction; the last
        matching binding wins (South > North > East > West)."""
        move = Directions.STOP
        bindings = (
            (self.WEST_KEY, Directions.WEST),
            (self.EAST_KEY, Directions.EAST),
            (self.NORTH_KEY, Directions.NORTH),
            (self.SOUTH_KEY, Directions.SOUTH),
        )
        for key, direction in bindings:
            if key in self.keys and direction in legal:
                move = direction
        return move
| [
"heromanba24@gmail.com"
] | heromanba24@gmail.com |
341a49376980097248fe356f153251bf6dcac09a | 1439bf1f4127331306aa9abc2153170073b3f171 | /src/skill/urls.py | fc9c9e60caed7dbc9f82fcc4e2ba6ee6c45e9a9b | [
"MIT"
] | permissive | xgerinx/skillsitev2 | 6c6ac7bfdfc19dea46ea8a57498a12d1a9120a1b | 860d1c1214de125346c0accc4ec4b8953297231b | refs/heads/master | 2023-08-25T05:57:04.704007 | 2021-10-26T09:41:20 | 2021-10-26T09:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,808 | py | from django.urls import path, re_path, include
from django.views.generic import TemplateView
from .Controllers.Home import home
from .Controllers.Auth import auth
from .Controllers.Admin import admin
from .Controllers.Payment import payment
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from .views import (
HomeAPIView,
PostDetailAPIView,
OrderCreateAPIView,
UserCreateAPIView,
ReviewsAPIView,
LeadersAPIView,
SignupView,
SigninView,
PostCreateAPIView,
send_verification_link,
)
from .serializers import CustomJWTSerializer
# app_name = 'skill-api'
# Route table: API endpoints, JWT token handling, auth flows and a
# catch-all slug route for post detail pages (must stay last, since the
# slug regex matches almost any single path segment).
urlpatterns = [
    path('', home.index),
    path('api/home/', HomeAPIView.as_view(), name='api-home'),
    path('locale/', home.locale),
    # path('order/', payment.create),
    # path('api/orders/', OrderCreateAPIView.as_view()),
    # path('api/reviews/', ReviewsAPIView.as_view(), name='list-create'),
    # path('api/leaders/', LeadersAPIView.as_view(), name='leaders'),
    # path('api/login/', include('rest_social_auth.urls_jwt_pair')),
    # path('api-auth/', include('rest_framework.urls')),
    # JWT obtain/refresh endpoints (simplejwt).
    path('api/token/', TokenObtainPairView.as_view()),
    path('api/token/refresh/', TokenRefreshView.as_view()),
    # /auth/token/login/ - token
    # path('api/token/', jwt_views.TokenObtainPairView.as_view(), name='token_obtain_pair'),
    # path('api/token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),
    # path('auth/', include('djoser.urls.jwt')),
    path('api/register/', auth.RegisterAPIView.as_view(), name='api_register'),
    path('api/login/', auth.LoginAPIView.as_view(), name='api_login'),
    # path('signin/', auth.signin, name='signin'),
    # path('signin/', include('social_django.urls', namespace='social')),
    # path('signup/', auth.signup, name='order-create'),
    # for testing purposes only
    # path('test-signin/', SigninView.as_view(), name='test-signin'),
    # path('test-signup/', SignupView.as_view(), name='test-signup'),
    # path('test-post-create/', PostCreateAPIView.as_view(), name='test-post-create'),
    # path('signup/', UserCreateAPIView.as_view()),
    path('logout/', auth.exit, name='logout'),
    path('forgot/', auth.forgot, name='forgot'),
    # E-mail verification: token-bearing link plus (re)send endpoint.
    path('verify/email/<token>/', auth.verify_email, name='verify-email'),
    path('get/verification/link/<email>/', send_verification_link, name='send-verification-link'),
    path('react/', TemplateView.as_view(template_name='react.html')),
    path('close/', TemplateView.as_view(template_name='close.html')),
    # path('administrator/', admin.admin, name='admin'),
    # path('react/payment/', TemplateView.as_view(template_name='react.html')),
    re_path(r'^(?P<slug>[\w-]+)/$', PostDetailAPIView.as_view(), name='detail'),
]
| [
"xi.forwork.ix@gmail.com"
] | xi.forwork.ix@gmail.com |
34a80c8dab37022c77f53a2aea2077a2f51aa81b | a0e33f22ed416429e5ed003896d410ab0e82d3eb | /polymodels/managers.py | a08e4ba298a2da0d63b9bcbbeaadcc69656423fd | [
"MIT"
] | permissive | fusionbox/django-polymodels | 37982506c6ea58ae85f44da676cd990b4babc6fd | 0e6caf3932b2d8337d15f9755983c94743317e12 | refs/heads/master | 2020-12-25T10:59:02.520899 | 2016-01-22T00:13:22 | 2016-01-22T00:13:22 | 50,145,841 | 0 | 0 | null | 2016-01-22T00:13:14 | 2016-01-22T00:13:14 | null | UTF-8 | Python | false | false | 2,968 | py | from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
class PolymorphicQuerySet(models.query.QuerySet):
def select_subclasses(self, *models):
self.type_cast = True
relateds = set()
accessors = self.model.subclass_accessors
if models:
subclasses = set()
for model in models:
if not issubclass(model, self.model):
raise TypeError(
"%r is not a subclass of %r" % (model, self.model)
)
subclasses.update(model.subclass_accessors)
# Collect all `select_related` required lookups
for subclass in subclasses:
# Avoid collecting ourself and proxy subclasses
related = accessors[subclass][2]
if related:
relateds.add(related)
queryset = self.filter(
**self.model.content_type_lookup(*tuple(subclasses))
)
else:
# Collect all `select_related` required relateds
for accessor in accessors.values():
# Avoid collecting ourself and proxy subclasses
related = accessor[2]
if accessor[2]:
relateds.add(related)
queryset = self
if relateds:
queryset = queryset.select_related(*relateds)
return queryset
def exclude_subclasses(self):
return self.filter(**self.model.content_type_lookup())
def _clone(self, *args, **kwargs):
kwargs.update(type_cast=getattr(self, 'type_cast', False))
return super(PolymorphicQuerySet, self)._clone(*args, **kwargs)
def iterator(self):
iterator = super(PolymorphicQuerySet, self).iterator()
if getattr(self, 'type_cast', False):
for obj in iterator:
yield obj.type_cast()
else:
# yield from iterator
for obj in iterator:
yield obj
class PolymorphicManager(models.Manager.from_queryset(PolymorphicQuerySet)):
use_for_related_fields = True
def contribute_to_class(self, model, name):
# Avoid circular reference
from .models import BasePolymorphicModel
if not issubclass(model, BasePolymorphicModel):
raise ImproperlyConfigured(
'`%s` can only be used on '
'`BasePolymorphicModel` subclasses.' % self.__class__.__name__
)
return super(PolymorphicManager, self).contribute_to_class(model, name)
def get_queryset(self):
queryset = super(PolymorphicManager, self).get_queryset()
model = self.model
opts = model._meta
if opts.proxy:
# Select only associated model and its subclasses.
queryset = queryset.filter(**self.model.subclasses_lookup())
return queryset
| [
"charette.s@gmail.com"
] | charette.s@gmail.com |
0d7feabbe57c28d92e23066062f6adeaf3d18808 | 3e42ce946e90a35b5b915f22f3d2c8f5fefc4287 | /substitutionEnc.py | 465225403fbe15abb5db63d3db6e011339cf42d4 | [] | no_license | M-Smith-contact/reticulated.python | 0c5c2f788fe1f283072704d92fefca115c9c21e0 | 3d2f29b309d75b611c9fb9483c07547145c3beef | refs/heads/master | 2021-01-17T13:10:44.026308 | 2016-07-16T19:50:34 | 2016-07-16T19:50:34 | 59,521,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | def removeChar(string,idx):
return string[:idx] + string[idx+1:]
def keyGen():
    """Return a random substitution key: a permutation of the 26 lowercase letters.

    Replaces the original index-juggling loop (random index + manual character
    removal) with ``random.sample``, which produces an unbiased shuffle in one
    call.
    """
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    return "".join(random.sample(alphabet, len(alphabet)))
def substitutionEncrypt(plainText, key):
    """Encrypt *plainText* with a substitution *key*.

    Each character found in the reference alphabet (lowercase letters plus
    space) is replaced by the key character at the same index.

    Bug fixes versus the original:
    - characters not in the alphabet made ``find`` return -1, silently mapping
      them to the LAST key character; they now pass through unchanged;
    - keys produced by ``keyGen`` have only 26 characters while the alphabet
      has 27 (it includes a space), so encrypting a space raised IndexError;
      indices beyond the key length now pass the character through as well.
    """
    alphabet = "abcdefghijklmnopqrstuvwxyz "
    plainText = plainText.lower()
    cipherChars = []
    for ch in plainText:
        idx = alphabet.find(ch)
        if 0 <= idx < len(key):
            cipherChars.append(key[idx])
        else:
            # Unknown character (or key too short for it): keep it as-is.
            cipherChars.append(ch)
    return "".join(cipherChars)
def encryptMessage():
    """Prompt for a message, encrypt it with a freshly generated key and print both.

    Bug fix: the original called ``scramble2Encrypt``, which is not defined
    anywhere in this module; it now uses ``keyGen``/``substitutionEncrypt``,
    matching the script driver at the bottom of the file.
    """
    msg = input('Enter a message to encrypt: ')
    key = keyGen()
    cipherText = substitutionEncrypt(msg, key)
    print('The key is: ', key)
    print('The encrypted message is: ', cipherText)
import random
# Demo driver: build a random key, encrypt one line read from stdin and
# print the key followed by the ciphertext.
key = keyGen()
msg = input('Enter a message to encrypt: ')
cipherStr = substitutionEncrypt(msg,key)
print(key)
print(cipherStr)
| [
"noreply@github.com"
] | noreply@github.com |
14ef0266760231bb28a25df8a15f3f4a3d809e6e | 224017c6fdfd172f10b5eba3edf557a672b39133 | /acticationGraph/deriveSigmoid.py | 9c4a42d93d13f2f76f79c03ece2ea6740b0973c6 | [] | no_license | sleep4725/Ai.kim | 08296012b5d07ac8fc38ada54f88abb7f15a3290 | 8920ef26ad74a1cd1a78c45e05cca91c1f3e449a | refs/heads/master | 2020-05-29T15:26:46.125311 | 2019-07-09T23:46:22 | 2019-07-09T23:46:22 | 189,220,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | import numpy as np
import matplotlib.pyplot as plt
"""
하이퍼볼릭 탄젠트 함수
시그모이드 함수의 대체제로 사용할 수 있는 활성화 함수
"""
class DeriveSigmoid:
    """Plot the derivative of the logistic sigmoid over x in [-10, 10)."""

    def __init__(self):
        # x-axis sample points, step 0.1 (200 samples).
        self.xCoordinate = np.arange(-10.0, 10.0, 0.1)

    def derivativeSigmoidFunction(self):
        """Render sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).

        Improvement: the original built three Python lists with ``map``/``zip``
        element by element; the computation is now vectorised with numpy,
        which is both faster and clearer. The plotted values are identical.
        """
        sig = 1.0 / (1.0 + np.exp(-self.xCoordinate))
        yCoordinate = sig * (1.0 - sig)
        plt.plot(self.xCoordinate, yCoordinate)
        plt.xlabel("x-coordinate")
        plt.ylabel("y-coordinate")
        # Limit the y axis (the derivative peaks at 0.25).
        plt.ylim(-0.1, 0.3)
        plt.show()
def main():
    """Entry point: render the sigmoid-derivative plot."""
    plotter = DeriveSigmoid()
    plotter.derivativeSigmoidFunction()


if __name__ == "__main__":
    main()
"sleep4725@naver.com"
] | sleep4725@naver.com |
91a5b6e81692b41a2ffffebed1fa5a58a9cc4ca7 | 2097293065bb28452b221a5f635bac63c69a3e80 | /pizza.py | 60599550eb351267a25b0b28131179907e104ba8 | [
"MIT"
] | permissive | kafkoders/hashcode-pizza | eb2ca3944f62c9c21853b8d0dc2cd34a984984bf | 513452f35299885f396a49113264523a0a6cceae | refs/heads/master | 2020-04-23T22:16:53.542636 | 2019-02-24T19:46:38 | 2019-02-24T19:46:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,483 | py | import pandas as pd
import numpy as np
import math
# Base names (without the '.in' extension) of the input files to solve.
input_files = ['d_big']
def create_pizza_dataset(file_):
    """Parse ``<file_>.in`` and return the pizza grid plus its metadata.

    Returns (pizza_, rows, cols, min_ingredients, max_cells, less_ingredient)
    where pizza_ is a DataFrame of 1 (mushroom, 'M') / 0 (tomato, 'T') cells
    and the header fields are returned as strings, exactly as split from the
    first line.

    Bug fix: the original computed ``len(pizza_[pizza_.values == 0])``; with a
    2-D boolean mask that returns a same-shaped (NaN-filled) frame, so len()
    was always the row count and both ingredient totals were equal — making
    ``less_ingredient`` always 'mushrooms'. Cells are now counted directly on
    the mask.
    """
    with open(file_ + '.in') as input_:
        header = input_.readline()
        # Header fields are kept as raw strings (max_cells keeps its newline),
        # matching the original behaviour; callers int() them as needed.
        rows, cols, min_ingredients, max_cells = header.split(' ')
        elements_ = [np.array(list(line.rstrip())) for line in input_]
    df = pd.DataFrame(elements_)
    pizza_ = df.replace(['M', 'T'], [1, 0])
    total_tomatoes = int((pizza_.values == 0).sum())
    total_mushrooms = int((pizza_.values == 1).sum())
    less_ingredient = 'tomatoes' if total_tomatoes < total_mushrooms else 'mushrooms'
    return pizza_, rows, cols, min_ingredients, max_cells, less_ingredient
def maximize_cuts(max_, min_ingredients_=None):
    """Enumerate [height, width] slice shapes with area between 2*min and max_.

    Shapes are produced largest-area first; both orientations of each factor
    pair are included, without duplicates.

    Generalisation: the original read the module-level global
    ``min_ingredients`` directly. It is now an optional parameter
    (``min_ingredients_``) that defaults to the global, so existing
    single-argument calls keep working unchanged.
    """
    if min_ingredients_ is None:
        min_ingredients_ = int(min_ingredients)  # module-level global fallback
    possible_cuts = list()
    # Areas from max_ down to 2*min (a valid slice needs min of each topping).
    for j in range(max_, (int(min_ingredients_) * 2) - 1, -1):
        for i in range(j, 0, -1):
            if (j % i) == 0:
                item_x = [int(j / i), i]
                item_y = [i, int(j / i)]
                if item_x not in possible_cuts:
                    possible_cuts.append(item_x)
                if item_y not in possible_cuts:
                    possible_cuts.append(item_y)
    return possible_cuts
class pizzaSlice:
    """A candidate slice: a list of [row, col] cells plus a scarcity score.

    ``value_`` counts how many cells of the scarcer ingredient (module-level
    ``less_ingredient``) the slice contains; cells are looked up in the
    module-level ``pizza_`` DataFrame (1 = mushroom, 0 = tomato).
    """
    slice_ = None
    value_ = 0

    def __init__(self, slice_):
        self.slice_ = slice_
        self.value_ = self.calc_value()

    def calc_value(self):
        """Return the count of the scarcer ingredient inside this slice."""
        mushrooms = sum(1 for row, col in self.slice_
                        if pizza_.at[row, col] == 1)
        tomatoes = sum(1 for row, col in self.slice_
                       if pizza_.at[row, col] == 0)
        return tomatoes if less_ingredient == 'tomatoes' else mushrooms
def matches_condition(pizza_, pizza_slices):
    """Choose a slice from *pizza_slices* meeting the ingredient minimum.

    Returns the chosen pizzaSlice's cell list, or None when the list is empty
    or no candidate has at least ``min_ingredients`` (module-level global) of
    BOTH ingredients.

    NOTE(review): among valid candidates this keeps the one with the LOWEST
    ``value_`` seen so far, and only while its cell count keeps growing —
    confirm that mixing a min-value with a max-cells criterion is intended.
    """
    if not pizza_slices:
        return None
    else:
        min_slice = None
        max_cells = 0
        for pizza_slice in pizza_slices:
            tomatoes = 0
            mushrooms = 0
            # Cell encoding in pizza_: 1 = mushroom, 0 = tomato, 5 = used.
            for cell_slice in pizza_slice.slice_:
                if pizza_.at[cell_slice[0], cell_slice[1]] == 1:
                    mushrooms += 1
                elif pizza_.at[cell_slice[0], cell_slice[1]] == 0:
                    tomatoes += 1
            # min_ingredients is a module-level global read here.
            if mushrooms >= int(min_ingredients) and tomatoes >= int(min_ingredients):
                if min_slice is None:
                    min_slice = pizza_slice
                if min_slice.value_ > pizza_slice.value_ and max_cells < len(pizza_slice.slice_):
                    max_cells = len(pizza_slice.slice_)
                    min_slice = pizza_slice
        if min_slice is not None:
            return min_slice.slice_
        else:
            return None
def check_cuts(x, y, min_, max_, cuts_):
    """Return pizzaSlice candidates for every cut shape anchored at (x, y).

    A shape is kept only when all of its cells fit inside the module-level
    ``pizza_`` grid and none of them is already used (value 5).
    ``min_`` and ``max_`` are accepted for interface compatibility but unused.
    """
    candidates = []
    n_rows = pizza_.shape[0]
    n_cols = pizza_.shape[1]
    for height, width in cuts_:
        cells = []
        fits = True
        for di in range(height):
            for dj in range(width):
                row, col = x + di, y + dj
                if row < n_rows and col < n_cols and pizza_.at[row, col] != 5:
                    cells.append([row, col])
                else:
                    fits = False
        if fits:
            candidates.append(pizzaSlice(cells))
    return candidates
if __name__ == '__main__':
    for file_ in input_files:
        # Module-level globals consumed by the helper functions above.
        pizza_, rows, cols, min_ingredients, max_cells, less_ingredient = create_pizza_dataset(file_)
        good_slices = list()
        possible_cuts = maximize_cuts(int(max_cells))
        # Greedy scan: at every not-yet-used cell, try every cut shape and
        # commit the first acceptable slice.
        for row_ in range(pizza_.shape[0]):
            for col_ in range(pizza_.shape[1]):
                if pizza_.at[row_, col_] != 5:
                    slices_ = check_cuts(row_, col_, int(min_ingredients), int(max_cells), possible_cuts)
                    slice_ = matches_condition(pizza_, slices_)
                    if slice_ is not None:
                        col_final = len(slice_)
                        # Record the slice as [r1, r2, c1, c2] (top-left and
                        # bottom-right corners).
                        good_slices.append([row_, slice_[col_final - 1][0], col_, slice_[col_final - 1][1]])
                        # Mark the carved cells as used (sentinel value 5).
                        for element in slice_:
                            pizza_.at[element[0], element[1]] = 5
        # Hash Code output format: slice count, then "r1 c1 r2 c2" per slice.
        with open(file_ + '.out', 'w') as f_:
            f_.write(str(len(good_slices)) + "\n")
            for value_ in good_slices:
                f_.write(str(value_[0]) + " " + str(value_[2]) + " " + str(value_[1]) + " " + str(value_[3]) + "\n")
"alvarob96@usal.es"
] | alvarob96@usal.es |
c0058983bc604c49b5110d37318a073b7ff43ecb | ba2e82c79ecf6998fa3626912d47c02f4315c1df | /acgan/main.py | fc4a10e7f94deda89a167b42e33f0d47be8ec216 | [] | no_license | danmar3/AssociativeGAN | 660513cc8dbb5a4e144234b83c0eb42ba58d2d85 | be17cc0737198a67796bce7fe657da22a2618361 | refs/heads/master | 2023-04-02T06:18:01.546140 | 2020-08-28T06:27:36 | 2020-08-28T06:27:36 | 167,607,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,549 | py | import os
import tqdm
import json
import datetime
import numpy as np
import tensorflow as tf
import twodlearn as tdl
import matplotlib.pyplot as plt
from . import data
from . import params as acgan_params
from .utils import eager_function
from .train import run_training
from .model import msg_gan
from .model.gmm_gan import GmmGan
from .model.wacgan import WacGan, WacGanV2, WacGanDev
from .model.bigmmgan import BiGmmGan
import functools
from sklearn.manifold import TSNE
def normalize_image(image):
    """Linearly rescale *image* so its values span [0, 1]."""
    lo = image.min()
    hi = image.max()
    return (image - lo) / (hi - lo)
def compat_params(params):
    """Ensure *params* has a 'data' section (mutates in place, returns it)."""
    params.setdefault('data', dict())
    return params
class ExperimentGMM(object):
    """Training/visualization harness for the GMM-GAN model.

    Wires a TF1 session, dataset pipeline, model and the three trainers
    (generator, discriminator, encoder), and provides checkpointing plus
    matplotlib-based inspection helpers. Subclasses override ``name``,
    ``Model`` and parts of the training loop.
    """
    name = 'gmmgan'
    Model = GmmGan
    @classmethod
    def restore_session(cls, session_path, dataset_name, indicator=None):
        """Rebuild an experiment from a saved session folder and restore weights."""
        with open(os.path.join(session_path, 'params.json'), "r") as file_h:
            params = json.load(file_h)
        # remove n_start steps if loading a session
        if 'n_start' in params['run']:
            print('\n\n ---> resetting n_start to zero \n\n')
            params['run']['n_start'] = 0
        experiment = cls(dataset_name=dataset_name, params=params,
                         indicator=indicator)
        experiment.restore(session_path)
        return experiment
    def _init_params(self, params, dataset_name):
        # Fall back to the packaged defaults for this dataset/model pair and
        # persist the effective parameters next to the checkpoints.
        if params is None:
            params = acgan_params.PARAMS[dataset_name][self.name]
        self.params = params
        self.params['indicator'] = self.indicator
        if 'global' in self.params:
            msg_gan.set_global(self.params['global'])
        filename = os.path.join(self.output_dir, 'params.json')
        with open(filename, 'w') as file_h:
            json.dump(self.params, file_h)
    def _init_trainer(self, xreal):
        # Build the three trainers and eagerly initialize their variables
        # inside the current (interactive) session.
        trainer = tdl.core.SimpleNamespace(
            gen=self.model.generator_trainer(
                **self.params['generator_trainer']),
            dis=self.model.discriminator_trainer(
                xreal=xreal, **self.params['discriminator_trainer']),
            enc=self.model.encoder_trainer(
                xreal=xreal, **self.params['encoder_trainer']))
        tdl.core.variables_initializer(trainer.gen.variables).run()
        tdl.core.variables_initializer(trainer.dis.variables).run()
        tdl.core.variables_initializer(trainer.enc.variables).run()
        return trainer
    def __init__(self, dataset_name='celeb_a', params=None, indicator=None):
        self.indicator = indicator
        # Reuse the default TF1 session when one exists; otherwise open an
        # InteractiveSession so later .run()/.eval() calls work implicitly.
        self.session = (tf.compat.v1.get_default_session()
                        if tf.compat.v1.get_default_session() is not None
                        else tf.compat.v1.InteractiveSession())
        # init output_dir (timestamped per run)
        now = datetime.datetime.now()
        self.output_dir = 'tmp/{}/session_{}{:02d}{:02d}_{:02d}{:02d}_{}'\
            ''.format(self.name,
                      now.year, now.month, now.day, now.hour, now.minute,
                      self.indicator
                      )
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        # init model
        self._init_params(params, dataset_name)
        dataset = data.load(
            name=dataset_name,
            batch_size=self.params['generator_trainer']['batch_size'],
            **self.params['data'])
        self.model = self.Model(**self.params['model'])
        # NOTE: `iter` shadows the Python builtin here.
        iter = tf.compat.v1.data.make_one_shot_iterator(dataset)
        xreal = iter.get_next()
        self.trainer = self._init_trainer(xreal)
        # saver over every model variable, for save()/restore()
        self.saver = tf.compat.v1.train.Saver(
            tdl.core.get_variables(self.model))
    def restore(self, pathname=None):
        '''Restore the weight values stored at pathname.
        Args:
            pathname: path where the variables checkpoints are stored.
                It could point to a session folder, a checkpoints folder or
                a specific checkpoint file. By default chooses the most recent
                checkpoint.
        '''
        def get_latest(pathname, filter=None):
            # Most recent entry (by lexicographic sort, i.e. timestamp in the
            # name) among folders first, then checkpoint files.
            if filter is None:
                filter = lambda x: True
            folders = [folder for folder in os.listdir(pathname)
                       if os.path.isdir(os.path.join(pathname, folder))
                       and filter(folder)]
            if folders:
                return os.path.join(pathname, sorted(folders)[-1])
            files = [fi.split('.')[0] for fi in os.listdir(pathname)
                     if filter(fi)]
            if files:
                return os.path.join(pathname, sorted(files)[-1] + '.ckpt')
            else:
                raise ValueError('could not find any saved checkpoint in {}'
                                 ''.format(pathname))
        if pathname is None:
            pathname = self.output_dir
        if os.path.isdir(pathname):
            # Drill down: session folder -> checkpoints folder -> vars_* file.
            if any('session' in folder for folder in os.listdir(pathname)):
                pathname = get_latest(pathname, lambda x: 'session' in x)
            if 'checkpoints' in os.listdir(pathname):
                pathname = os.path.join(pathname, 'checkpoints')
            pathname = get_latest(pathname, lambda x: 'vars_' in x)
        print('-------------- Restoring: {} ------------------'
              ''.format(pathname))
        self.saver.restore(self.session, pathname)
    def run(self, n_steps=100, **kwargs):
        """Run the alternating training loop for *n_steps* trials.

        Each trial optionally trains the encoder and the GMM embedding
        (after ``n_start`` trials), then the GAN itself. Returns False when
        run_training aborts early, True otherwise.
        """
        if not kwargs:
            kwargs = self.params['run']
        if 'homogenize' not in kwargs:
            kwargs['homogenize'] = False
        if 'reset_embedding' not in kwargs:
            kwargs['reset_embedding'] = False
        if 'n_start' not in kwargs:
            kwargs['n_start'] = 0
        if not hasattr(self, '_global_steps'):
            self._global_steps = 0
        def train_gan():
            # With homogenize, the mixture logits are zeroed (uniform
            # components) during GAN training and restored afterwards.
            if kwargs['homogenize']:
                logits_h = self.model.embedding.logits
                _logits = logits_h.value().eval()
                logits = np.zeros(logits_h.shape.as_list(),
                                  dtype=logits_h.dtype.as_numpy_dtype)
                self._set_logits(logits=logits)
            if not run_training(
                    dis=self.trainer.dis, gen=self.trainer.gen,
                    n_steps=kwargs['gan_steps'], n_logging=10,
                    ind=self.indicator):
                return False
            if kwargs['homogenize']:
                self._set_logits(logits=_logits)
            return True
        def train_encoder():
            for i in tqdm.tqdm(range(kwargs['encoder_steps'])):
                self.session.run(self.trainer.enc.step['encoder'])
        def train_embedding():
            _n_steps = kwargs['embedding_steps']
            reset = False
            # Periodically re-initialize the embedding and train longer.
            if kwargs['reset_embedding'] is not False:
                if self._global_steps % kwargs['reset_embedding'] == 0:
                    print('--> Resetting embedding.')
                    _n_steps = kwargs['reset_embedding']*_n_steps
                    reset = True
            def get_dict(sparsity):
                # Feed the sparsity factor only when the loss exposes it.
                feed_dict = None
                if isinstance(self.trainer.enc.loss['embedding'],
                              tdl.core.SimpleNamespace):
                    feed_dict = {self.trainer.enc.loss['embedding'].sparsity:
                                 sparsity}
                return feed_dict
            if reset:
                self.session.run(self.model.embedding.init_op)
                feed_dict = get_dict(sparsity=0.0)
                for i in tqdm.tqdm(range(_n_steps)):
                    self.session.run(self.trainer.enc.step['embedding'],
                                     feed_dict=feed_dict)
                for i in tqdm.tqdm(range(_n_steps)):
                    self.session.run(self.trainer.enc.step['linear_disc'])
            else:
                for i in tqdm.tqdm(range(_n_steps)):
                    self.session.run(self.trainer.enc.step['linear_disc'])
                feed_dict = get_dict(sparsity=1.0)
                for i in tqdm.tqdm(range(_n_steps)):
                    self.session.run(self.trainer.enc.step['embedding'],
                                     feed_dict=feed_dict)
        # warm up trainin the gan
        if self._global_steps == 0:
            if not train_gan():
                return False
        # train for n_steps trials
        for trial in tqdm.tqdm(range(n_steps)):
            if self._global_steps >= kwargs['n_start']:
                train_encoder()
                train_embedding()
            if not train_gan():
                return False
            self._global_steps = self._global_steps + 1
        return True
    @eager_function
    def reconstruct(self, x_seed):
        """Encode *x_seed* and decode a sample of the posterior back to images."""
        encoded = self.model.encoder(x_seed)
        xrecon = self.model.generator(encoded.sample())
        return xrecon
    @eager_function
    def _set_logits(self, logits):
        # Assign raw values to the embedding's mixture logits variable.
        logits_h = self.model.embedding.logits
        logits_op = logits_h.assign(logits)
        return logits_op
    def set_component(self, component):
        """Peak the mixture logits so sampling comes from one component."""
        logits_h = self.model.embedding.logits
        logits = np.array(
            [30.0 if i == component else 0.0
             for i in range(logits_h.shape[-1].value)],
            dtype=logits_h.dtype.as_numpy_dtype)
        self._set_logits(logits=logits)
    def visualize_clusters(self, ax=None):
        '''visualize samples from gmm clusters.

        Rows show the components with the largest and smallest logits; the
        original logits are restored afterwards.
        '''
        if ax is None:
            _, ax = plt.subplots(10, 10, figsize=(15, 15))
        _logits = self.model.embedding.logits.value().eval()
        n_components = _logits.shape[0]
        max_components = ax.shape[0]
        n_images = ax.shape[1]
        # comp_list = np.random.choice(
        #     n_components, max_components, replace=False)
        comp_sorted = np.argsort(_logits)[::-1]
        comp_list = np.concatenate([
            comp_sorted[:max_components//2],
            comp_sorted[-(max_components-max_components//2):]])
        assert len(comp_list) == max_components
        for component_idx, component in enumerate(comp_list):
            self.set_component(component)
            xsim = self.session.run(self.trainer.gen.xsim)
            # Keep sampling batches until one row of images is filled.
            while xsim.shape[0] < n_images:
                xsim = np.concatenate(
                    [xsim, self.session.run(self.trainer.gen.xsim)],
                    axis=0)
            for img_idx in range(n_images):
                image = normalize_image(xsim[img_idx, ...])
                ax[component_idx, img_idx].imshow(
                    np.squeeze(image),
                    interpolation='nearest')
                ax[component_idx, img_idx].axis('off')
        self._set_logits(logits=_logits)
    def visualize_reconstruction(self, ax=None):
        '''visualize the reconstruction of real images.

        First row: real samples; remaining rows: independent reconstructions
        (each uses a fresh posterior sample).
        '''
        if ax is None:
            fig, ax = plt.subplots(5, 8, figsize=(18, 10))
        n_real = ax.shape[1]
        xreal = self.trainer.dis.xreal.eval()
        while xreal.shape[0] < n_real:
            xreal = np.concatenate(
                [xreal, self.trainer.dis.xreal.eval()],
                axis=0)
        for i in range(ax.shape[1]):
            ax[0, i].imshow(np.squeeze(normalize_image(xreal[i, ...])),
                            interpolation='nearest')
            ax[0, i].axis('off')
        for j in range(1, ax.shape[0]):
            xrecon = self.reconstruct(x_seed=xreal)
            for i in range(ax.shape[1]):
                ax[j, i].imshow(np.squeeze(normalize_image(xrecon[i, ...])),
                                interpolation='nearest')
                ax[j, i].axis('off')
    def visualize_imgs(self, ax=None):
        """Fill *ax* with generator samples (one image per axis cell)."""
        if ax is None:
            fig, ax = plt.subplots(4, 4, figsize=(20, 20))
        # dis.sim_pyramid[-1].eval()
        n_elements = functools.reduce(lambda x, y: x*y, ax.shape, 1)
        ax = np.reshape(ax, n_elements)
        xsim = self.session.run(self.trainer.gen.xsim)
        while xsim.shape[0] < n_elements:
            xsim = np.concatenate(
                [xsim, self.session.run(self.trainer.gen.xsim)],
                axis=0)
        for i in range(n_elements):
            # Map generator output from [-1, 1] to [0, 1] before normalizing.
            image = (xsim[i][:, :, :]+1)*0.5
            ax[i].imshow(np.squeeze(normalize_image(image)),
                         interpolation='nearest')
            ax[i].axis('off')
    def visualize_manifold(self, ax=None):
        """Scatter a 2-D t-SNE projection of the mixture component means."""
        if ax is None:
            fig, ax = plt.subplots(1, 1)
        Z = self.session.run(self.model.embedding.components.loc)
        Z_embedded = TSNE(n_components=2).fit_transform(Z)
        ax.scatter(Z_embedded[:, 0], Z_embedded[:, 1])
    def visualize(self, save=False, filename=None, visualize_manifold=False):
        """Compose the full diagnostic figure (samples, clusters, probs, recon)."""
        if filename is None:
            folder = os.path.join(self.output_dir, 'images')
            if not os.path.exists(folder):
                os.makedirs(folder)
            now = datetime.datetime.now()
            filename = '{}/generated_{}{:02d}{:02d}_{:02d}{:02d}.pdf'.format(
                folder, now.year, now.month, now.day, now.hour, now.minute)
        fig = plt.figure(figsize=(13, 3*13))
        gs = fig.add_gridspec(30, 10)
        def reserve_ax(start, scale, shape):
            # Carve a shape[0] x shape[1] grid of subplots out of the gridspec,
            # each cell spanning `scale` gridspec rows/columns.
            if isinstance(scale, int):
                scale = (scale, scale)
            ax = np.array(
                [fig.add_subplot(gs[
                    (start + i*scale[0]):(start + i*scale[0]+scale[0]),
                    j*scale[1]:j*scale[1]+scale[1]])
                 for i in range(shape[0]) for j in range(shape[1])])
            return np.reshape(ax, shape)
        ax = reserve_ax(start=0, scale=2, shape=(5, 5))
        self.visualize_imgs(ax=ax)
        ax = reserve_ax(start=10, scale=1, shape=(10, 10))
        self.visualize_clusters(ax=ax)
        ax = reserve_ax(start=20, scale=(2, 10), shape=(1, 1))
        ax1 = fig.add_subplot(gs[20:22, 0:8])
        ax2 = fig.add_subplot(gs[20:22, 8:10])
        probs = self.model.embedding.dist.cat.probs.eval()
        ax1.bar(x=range(probs.shape[0]), height=probs)
        if visualize_manifold:
            self.visualize_manifold(ax=ax2)
        ax = reserve_ax(start=22, scale=(1, 1), shape=(8, 10))
        self.visualize_reconstruction(ax=ax)
        if save:
            plt.savefig(filename)
            plt.close()
    def save(self, folder=None):
        '''save model params'''
        if folder is None:
            folder = os.path.join(self.output_dir, 'checkpoints')
        if not os.path.exists(folder):
            os.makedirs(folder)
        now = datetime.datetime.now()
        filename = '{}/vars_{}{:02d}{:02d}_{:02d}{:02d}.ckpt'.format(
            folder,
            now.year, now.month, now.day, now.hour, now.minute)
        self.saver.save(self.session, filename)
class ExperimentWACGAN(ExperimentGMM):
    """ExperimentGMM variant backed by the WacGan model."""
    name = 'wacgan'
    Model = WacGan
class ExperimentWACGAN_V2(ExperimentGMM):
    """ExperimentGMM variant backed by the WacGanV2 model."""
    name = 'wacganV2'
    Model = WacGanV2
class ExperimentWACGAN_Dev(ExperimentGMM):
    """WacGanDev experiment: separate encoder/embedding trainers and optims."""
    name = 'wacganDev'
    Model = WacGanDev
    def _init_trainer(self, xreal):
        # Unlike the base class, encoder and embedding have their own
        # trainer namespaces here.
        # NOTE(review): the embedding trainer is fed
        # params['encoder_trainer'] (not 'embedding_trainer') — confirm this
        # is intentional for this model.
        trainer = tdl.core.SimpleNamespace(
            gen=self.model.generator_trainer(
                **self.params['generator_trainer']),
            dis=self.model.discriminator_trainer(
                xreal=xreal, **self.params['discriminator_trainer']),
            encoder=self.model.encoder_trainer(
                xreal=xreal, **self.params['encoder_trainer']),
            embedding=self.model.embedding_trainer(
                xreal=xreal, **self.params['encoder_trainer']))
        tdl.core.variables_initializer(trainer.gen.variables).run()
        tdl.core.variables_initializer(trainer.dis.variables).run()
        tdl.core.variables_initializer(trainer.encoder.variables).run()
        tdl.core.variables_initializer(trainer.embedding.variables).run()
        return trainer
    def _train_encoder(self, encoder_steps):
        # Delegate to the encoder optimizer helper.
        self.trainer.encoder.optim.run(n_steps=encoder_steps)
    def _train_embedding(self, embedding_steps, reset_embedding):
        """Train the GMM and linear-discriminant parts of the embedding."""
        _n_steps = embedding_steps
        reset = False
        # Periodically re-initialize the embedding and train longer.
        if reset_embedding is not False:
            if self._global_steps % reset_embedding == 0:
                print('--> Resetting embedding.')
                _n_steps = reset_embedding*_n_steps
                reset = True
        def get_dict(sparsity):
            # Feed the sparsity factor only when the loss exposes a tensor.
            feed_dict = None
            sparsity_h = self.trainer.embedding.gmm['estimator']\
                .loss.model.sparsity
            if isinstance(sparsity_h, tf.Tensor):
                feed_dict = {sparsity_h: sparsity}
            return feed_dict
        if reset:
            self.session.run(self.model.embedding.init_op)
            feed_dict = get_dict(sparsity=0.0)
            self.trainer.embedding.gmm['optim'].run(
                n_steps=_n_steps, feed_dict=feed_dict)
            self.trainer.embedding.linear['optim'].run(n_steps=_n_steps)
        else:
            self.trainer.embedding.linear['optim'].run(n_steps=_n_steps)
            feed_dict = get_dict(sparsity=1.0)
            self.trainer.embedding.gmm['optim'].run(
                n_steps=_n_steps, feed_dict=feed_dict)
    def run(self, n_steps=100, **kwargs):
        """Alternating training loop using this class's encoder/embedding steps."""
        if not kwargs:
            kwargs = self.params['run']
        if 'homogenize' not in kwargs:
            kwargs['homogenize'] = False
        if 'reset_embedding' not in kwargs:
            kwargs['reset_embedding'] = False
        if 'n_start' not in kwargs:
            kwargs['n_start'] = 0
        if not hasattr(self, '_global_steps'):
            self._global_steps = 0
        def train_gan():
            # With homogenize, mixture logits are zeroed for GAN training and
            # restored afterwards.
            if kwargs['homogenize']:
                logits_h = self.model.embedding.logits
                _logits = logits_h.value().eval()
                logits = np.zeros(logits_h.shape.as_list(),
                                  dtype=logits_h.dtype.as_numpy_dtype)
                self._set_logits(logits=logits)
            if not run_training(
                    dis=self.trainer.dis, gen=self.trainer.gen,
                    n_steps=kwargs['gan_steps'], n_logging=10,
                    ind=self.indicator):
                return False
            if kwargs['homogenize']:
                self._set_logits(logits=_logits)
            return True
        # warm up trainin the gan
        if self._global_steps == 0:
            if not train_gan():
                return False
        # train for n_steps trials
        for trial in tqdm.tqdm(range(n_steps)):
            if self._global_steps >= kwargs['n_start']:
                self._train_encoder(kwargs['encoder_steps'])
                self._train_embedding(
                    embedding_steps=kwargs['embedding_steps'],
                    reset_embedding=kwargs['reset_embedding'])
            if not train_gan():
                return False
            self._global_steps = self._global_steps + 1
        return True
class ExperimentBiGmmGan(ExperimentWACGAN_Dev):
    """BiGmmGan experiment: single embedding optimizer, pyramid reconstruction."""
    name = 'bigmmgan'
    Model = BiGmmGan
    def _init_trainer(self, xreal):
        # Here the embedding trainer gets its own params['embedding_trainer']
        # section (unlike ExperimentWACGAN_Dev).
        trainer = tdl.core.SimpleNamespace(
            gen=self.model.generator_trainer(
                **self.params['generator_trainer']),
            dis=self.model.discriminator_trainer(
                xreal=xreal, **self.params['discriminator_trainer']),
            encoder=self.model.encoder_trainer(
                xreal=xreal, **self.params['encoder_trainer']),
            embedding=self.model.embedding_trainer(
                xreal=xreal, **self.params['embedding_trainer']))
        tdl.core.variables_initializer(trainer.gen.variables).run()
        tdl.core.variables_initializer(trainer.dis.variables).run()
        tdl.core.variables_initializer(trainer.encoder.variables).run()
        tdl.core.variables_initializer(trainer.embedding.variables).run()
        return trainer
    def _train_embedding(self, embedding_steps, reset_embedding):
        """Train the embedding with a single optimizer (no GMM/linear split)."""
        _n_steps = embedding_steps
        reset = False
        if reset_embedding is not False:
            if self._global_steps % reset_embedding == 0:
                print('--> Resetting embedding.')
                _n_steps = reset_embedding*_n_steps
                reset = True
        if reset:
            self.session.run(self.model.embedding.init_op)
        self.trainer.embedding.optim.run(n_steps=_n_steps)
    @eager_function
    def reconstruct(self, x_seed):
        # The generator returns an image pyramid here; keep only the
        # highest-resolution output.
        encoded = self.model.encoder(x_seed)
        xrecon = self.model.generator(encoded.sample())
        return xrecon[-1]
| [
"daniellml55@gmail.com"
] | daniellml55@gmail.com |
2d576cb462f12622bac6af5d76918c08f5fb9835 | 95875e8ec01a0717e355f341870a3c3e01d78e1a | /zabbix/base.py | cdf0bc9cef89e0fed35f086da64712a7bff950e3 | [] | no_license | nnoori/zabbix-python | 8faff36d8d6b29af0a49f86a7f87f913194a68f9 | f2f70ef1a60ab7e9914a4cdfd3d75785ee46145a | refs/heads/master | 2021-01-10T21:00:36.142580 | 2012-11-18T17:21:35 | 2012-11-18T17:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,703 | py | # -*- coding: utf-8 -*-
from zabbix.session import Session
from zabbix.exceptions import NoneUniqueResultException
class _BaseObject(object):
_zabbix_format = None
@classmethod
def _call(cls, method, data):
return Session._get_instance().call(method, data)
@classmethod
def _get_parameters(cls):
return cls._allowed_parameters
@classmethod
def _get_prefix(cls):
return cls._prefix
def __init__(self):
self._zabbix_format = dict()
pass
def __str__(self):
return repr(self._zabbix_format)
def __repr__(self):
return repr(self._zabbix_format)
def _build(self, json_string):
self._zabbix_format = json_string
def __setattr__(self, name, value):
if (name in dir(self)):
super(_BaseObject, self).__setattr__(name, value)
return
else:
raise Exception("%s is not an alllowed parameter" % name)
class ReadableObject(_BaseObject):
    """Mixin providing read access (get / find / count) via the Zabbix API."""

    def __init__(self):
        super(ReadableObject, self).__init__()

    @classmethod
    def get(cls, search_criteria):
        """Return the single object matching *search_criteria*, or None.

        Raises NoneUniqueResultException when more than one result matches.
        """
        search_criteria._zabbix_format["output"] = "extend"
        # BUG FIX: the base class defines _get_prefix(); the original called
        # a nonexistent get_prefix() and raised AttributeError at runtime.
        call_result = cls._call("%s.get" % cls._get_prefix(),
                                search_criteria._zabbix_format)
        if len(call_result) == 0:
            return None
        if len(call_result) > 1:
            raise NoneUniqueResultException("Found more than one result")
        result = cls()
        result._build(call_result[0])
        return result

    @classmethod
    def find(cls, search_criteria):
        """Return a (possibly empty) list of all matching objects."""
        search_criteria._zabbix_format["output"] = "extend"
        call_results = cls._call("%s.get" % cls._get_prefix(),
                                 search_criteria._zabbix_format)
        results = []
        for call_result in call_results:
            result = cls()
            result._build(call_result)
            results.append(result)
        return results

    @classmethod
    def count(cls, search_criteria):
        """Return the number of objects matching *search_criteria*."""
        # BUG FIX: search criteria keep their state in _zabbix_format; the
        # original wrote to a nonexistent _internal attribute (which the
        # base __setattr__ would have rejected anyway).
        search_criteria._zabbix_format["countOutput"] = True
        return int(cls._call("%s.get" % cls._get_prefix(),
                             search_criteria._zabbix_format))
class WriteableObject(_BaseObject):
    """Mixin providing create/update persistence via the Zabbix API."""

    def __init__(self):
        super(WriteableObject, self).__init__()

    def save(self):
        """Update the object when it has an identifier, create it otherwise.

        NOTE(review): ``self.identifier`` is assumed to be declared by
        concrete subclasses; the base class does not define it.
        """
        # BUG FIX: the base class defines _get_prefix(); the original called
        # a nonexistent get_prefix() and raised AttributeError at runtime.
        if self.identifier:
            self._call("%s.update" % self._get_prefix(), self._zabbix_format)
        else:
            self._call("%s.create" % self._get_prefix(), self._zabbix_format)
class DeleteableObject(_BaseObject):
    """Mixin intended to provide deletion via the Zabbix API."""
    def __init__(self):
        super(DeleteableObject, self).__init__()
    def delete(self):
        # TODO: not implemented yet -- currently does nothing silently.
        pass
class SearchBase(_BaseObject):
    """Base class for search-criteria objects passed to ReadableObject calls."""
    def __init__(self):
        super(SearchBase, self).__init__()
| [
"henrik@hlyh.dk"
] | henrik@hlyh.dk |
06e6e65949af3c2ab5cd6467727da6b5208a7aa2 | 4aab8dad4da56e6b64163ef656a0616a08c28672 | /app/config.py | ddad97cec50d3ac58e12837e84ef4afdb4569676 | [] | no_license | govorovsky/api_disqus | 51b3df13c13a2a1b15f896d7675fdcbe730d9915 | cc9f371164b6969320bbf48222921385bc5eba48 | refs/heads/master | 2020-06-01T09:39:39.187252 | 2014-05-31T17:33:44 | 2014-05-31T17:33:44 | 17,840,672 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | host = 'localhost'
user = 'root'
password = 'qazxsw12'
forum_db = 'mydb'
| [
"govorovskij@gmail.com"
] | govorovskij@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.