id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6630052 | <reponame>autra/mapproxy<filename>mapproxy/source/wms.py
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Retrieve maps/information from WMS servers.
"""
import sys
from mapproxy.request.base import split_mime_type
from mapproxy.cache.legend import Legend, legend_identifier
from mapproxy.image import make_transparent, ImageSource, SubImageSource, bbox_position_in_image
from mapproxy.image.merge import concat_legends
from mapproxy.image.transform import ImageTransformer
from mapproxy.layer import MapExtent, DefaultMapExtent, BlankImage, LegendQuery, MapQuery, MapLayer
from mapproxy.source import InfoSource, SourceError, LegendSource
from mapproxy.client.http import HTTPClientError
from mapproxy.util.py import reraise_exception
import logging
log = logging.getLogger('mapproxy.source.wms')
class WMSSource(MapLayer):
    """Map layer that retrieves images from a remote WMS server.

    Adds coverage and resolution filtering, optional transparent-color
    post-processing, SRS/format negotiation, reprojection of unsupported
    SRS and request-merging support on top of a plain WMS client.
    """
    # a single GetMap request may cover a whole meta tile
    supports_meta_tiles = True
    def __init__(self, client, image_opts=None, coverage=None, res_range=None,
                 transparent_color=None, transparent_color_tolerance=None,
                 supported_srs=None, supported_formats=None, fwd_req_params=None,
                 error_handler=None):
        """
        :param client: WMS client used to issue GetMap requests
        :param image_opts: image creation options (format, transparency, ...)
        :param coverage: geographic area this source provides data for
        :param res_range: resolution range this source provides data for
        :param transparent_color: color that is converted to transparency
            in each response image
        :param transparent_color_tolerance: allowed deviation when matching
            ``transparent_color``
        :param supported_srs: SRS supported by the remote server; queries in
            other SRS are transformed
        :param supported_formats: image formats supported by the remote server
        :param fwd_req_params: request parameters that are forwarded verbatim
        :param error_handler: optional handler that converts HTTP errors into
            placeholder responses
        """
        MapLayer.__init__(self, image_opts=image_opts)
        self.client = client
        # NOTE(review): _get_transformed() calls self.supported_srs.best_srs(),
        # so callers are expected to pass a SupportedSRS-like object rather
        # than a plain list -- confirm against configuration code.
        self.supported_srs = supported_srs or []
        self.supported_formats = supported_formats or []
        self.fwd_req_params = fwd_req_params or set()
        self.transparent_color = transparent_color
        self.transparent_color_tolerance = transparent_color_tolerance
        if self.transparent_color:
            # removing a color implies the result needs an alpha channel
            self.image_opts.transparent = True
        self.coverage = coverage
        self.res_range = res_range
        if self.coverage:
            self.extent = MapExtent(self.coverage.bbox, self.coverage.srs)
        else:
            self.extent = DefaultMapExtent()
        self.error_handler = error_handler
    def is_opaque(self, query):
        """
        Returns true if we are sure that the image is not transparent.
        """
        # outside the resolution range: a blank (transparent) tile is returned
        if self.res_range and not self.res_range.contains(query.bbox, query.size,
                                                          query.srs):
            return False
        if self.image_opts.transparent:
            return False
        # partially transparent due to layer opacity
        if self.opacity is not None and (0.0 < self.opacity < 0.99):
            return False
        if not self.coverage:
            # not transparent and no coverage
            return True
        if self.coverage.contains(query.bbox, query.srs):
            # not transparent and completely inside coverage
            return True
        # query overlaps the coverage boundary: result may contain
        # transparent areas outside the coverage
        return False
    def get_map(self, query):
        """Return an ImageSource for ``query``.

        Raises :class:`BlankImage` when the query is outside the configured
        resolution range or coverage, and :class:`SourceError` when the
        remote request fails and no error handler produced a substitute.
        """
        if self.res_range and not self.res_range.contains(query.bbox, query.size,
                                                          query.srs):
            raise BlankImage()
        if self.coverage and not self.coverage.intersects(query.bbox, query.srs):
            raise BlankImage()
        try:
            resp = self._get_map(query)
            if self.transparent_color:
                resp = make_transparent(resp, self.transparent_color,
                                        self.transparent_color_tolerance)
            resp.opacity = self.opacity
            return resp
        except HTTPClientError as e:
            # give the configured error handler a chance to return a
            # placeholder image for this response code
            if self.error_handler:
                resp = self.error_handler.handle(e.response_code, query)
                if resp:
                    return resp
            log.warning('could not retrieve WMS map: %s', e.full_msg or e)
            reraise_exception(SourceError(e.args[0]), sys.exc_info())
    def _get_map(self, query):
        """Issue the actual request, negotiating format and SRS first."""
        format = self.image_opts.format
        if not format:
            format = query.format
        # fall back to the first server-supported format
        if self.supported_formats and format not in self.supported_formats:
            format = self.supported_formats[0]
        if self.supported_srs:
            # srs can be equal while still having a different srs_code (EPSG:3857/900913), make sure to use a supported srs_code
            request_srs = None
            for srs in self.supported_srs:
                if query.srs == srs:
                    request_srs = srs
                    break
            if request_srs is None:
                # server does not speak the query SRS: request in a supported
                # SRS and reproject the result
                return self._get_transformed(query, format)
            if query.srs.srs_code != request_srs.srs_code:
                query.srs = request_srs
        if self.extent and not self.extent.contains(MapExtent(query.bbox, query.srs)):
            # only request the part of the bbox that is inside our extent
            return self._get_sub_query(query, format)
        resp = self.client.retrieve(query, format)
        return ImageSource(resp, size=query.size, image_opts=self.image_opts)
    def _get_sub_query(self, query, format):
        """Request only the intersection with our extent and place the
        result at the right offset within a blank image."""
        size, offset, bbox = bbox_position_in_image(query.bbox, query.size, self.extent.bbox_for(query.srs))
        if size[0] == 0 or size[1] == 0:
            raise BlankImage()
        src_query = MapQuery(bbox, size, query.srs, format, dimensions=query.dimensions)
        resp = self.client.retrieve(src_query, format)
        return SubImageSource(resp, size=query.size, offset=offset, image_opts=self.image_opts)
    def _get_transformed(self, query, format):
        """Request in a supported SRS and reproject to the query SRS."""
        dst_srs = query.srs
        src_srs = self.supported_srs.best_srs(dst_srs)
        dst_bbox = query.bbox
        src_bbox = dst_srs.transform_bbox_to(src_srs, dst_bbox)
        src_width, src_height = src_bbox[2]-src_bbox[0], src_bbox[3]-src_bbox[1]
        ratio = src_width/src_height
        dst_size = query.size
        xres, yres = src_width/dst_size[0], src_height/dst_size[1]
        # request at the finer of the two axis resolutions to avoid
        # resampling artifacts after the transformation
        if xres < yres:
            src_size = dst_size[0], int(dst_size[0]/ratio + 0.5)
        else:
            src_size = int(dst_size[1]*ratio +0.5), dst_size[1]
        src_query = MapQuery(src_bbox, src_size, src_srs, format, dimensions=query.dimensions)
        if self.coverage and not self.coverage.contains(src_bbox, src_srs):
            img = self._get_sub_query(src_query, format)
        else:
            resp = self.client.retrieve(src_query, format)
            img = ImageSource(resp, size=src_size, image_opts=self.image_opts)
        img = ImageTransformer(src_srs, dst_srs).transform(img, src_bbox,
                                                           query.size, dst_bbox, self.image_opts)
        img.format = format
        return img
    def _is_compatible(self, other, query):
        """Return True when ``other`` can be merged into a single request."""
        if not isinstance(other, WMSSource):
            return False
        # merging would lose the per-layer opacity
        if self.opacity is not None or other.opacity is not None:
            return False
        if self.supported_srs != other.supported_srs:
            return False
        if self.supported_formats != other.supported_formats:
            return False
        if self.transparent_color != other.transparent_color:
            return False
        if self.transparent_color_tolerance != other.transparent_color_tolerance:
            return False
        if self.coverage != other.coverage:
            return False
        # forwarded request parameters must resolve to identical values
        if (query.dimensions_for_params(self.fwd_req_params) !=
                query.dimensions_for_params(other.fwd_req_params)):
            return False
        return True
    def combined_layer(self, other, query):
        """Return a new WMSSource that combines this layer with ``other``
        into a single request, or ``None`` when they cannot be merged."""
        if not self._is_compatible(other, query):
            return None
        client = self.client.combined_client(other.client, query)
        if not client:
            return None
        return WMSSource(client, image_opts=self.image_opts,
                         transparent_color=self.transparent_color,
                         transparent_color_tolerance=self.transparent_color_tolerance,
                         supported_srs=self.supported_srs,
                         supported_formats=self.supported_formats,
                         res_range=None, # layer outside res_range should already be filtered out
                         coverage=self.coverage,
                         fwd_req_params=self.fwd_req_params,
                         )
class WMSInfoSource(InfoSource):
    """FeatureInfo source backed by a WMS GetFeatureInfo client."""
    def __init__(self, client, fi_transformer=None, coverage=None):
        self.client = client
        self.fi_transformer = fi_transformer
        self.coverage = coverage
    def get_info(self, query):
        """Return the (optionally transformed) feature info document for
        ``query``, or ``None`` when the queried coordinate lies outside
        the configured coverage."""
        outside = (self.coverage
                   and not self.coverage.contains(query.coord, query.srs))
        if outside:
            return None
        doc = self.client.get_info(query)
        return self.fi_transformer(doc) if self.fi_transformer else doc
class WMSLegendSource(LegendSource):
    """Legend source that concatenates the legend graphics of one or more
    WMS clients and caches the combined result."""
    def __init__(self, clients, legend_cache, static=False):
        self.clients = clients
        # cache key derived from all client identifiers
        self.identifier = legend_identifier([c.identifier for c in self.clients])
        self._cache = legend_cache
        self._size = None
        self.static = static
    @property
    def size(self):
        # lazily computed pixel size of the combined legend image
        if not self._size:
            legend = self.get_legend(LegendQuery(format='image/png', scale=None))
            # TODO image size without as_image?
            self._size = legend.as_image().size
        return self._size
    def get_legend(self, query):
        """Return the (possibly cached) combined legend image source."""
        if self.static:
            # prevent caching of static legends for different scales
            legend = Legend(id=self.identifier, scale=None)
        else:
            legend = Legend(id=self.identifier, scale=query.scale)
        if not self._cache.load(legend):
            # cache miss: fetch each client's legend and concatenate them
            legends = []
            error_occured = False
            for client in self.clients:
                try:
                    legends.append(client.get_legend(query))
                except HTTPClientError as e:
                    error_occured = True
                    log.error(e.args[0])
                except SourceError as e:
                    error_occured = True
                    # TODO errors?
                    log.error(e.args[0])
            format = split_mime_type(query.format)[1]
            legend = Legend(source=concat_legends(legends, format=format),
                            id=self.identifier, scale=query.scale)
            # only store complete legends; partial results (a client failed)
            # are rebuilt on the next request
            if not error_occured:
                self._cache.store(legend)
        return legend.source
| StarcoderdataPython |
229067 | """
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup
import subprocess
from v8 import *
import argparse
import sys
import os
def install_v8(path):
    """Run ``python setup.py install`` inside *path*.

    :param path: directory containing the bundled PyV8 build to install

    The caller's working directory is restored afterwards.
    """
    old_dir = os.getcwd()
    os.chdir(path)
    try:
        subprocess.Popen(["python", "setup.py", "install"]).wait()
    finally:
        # BUGFIX: restore the working directory even when spawning the child
        # process raises; the original left the process chdir'ed into `path`.
        os.chdir(old_dir)
# Command line interface: exactly one target platform flag may be given so the
# matching bundled PyV8 build can be installed before setup() runs below.
parser = argparse.ArgumentParser("Fridafy Installer")
group = parser.add_mutually_exclusive_group()
group.add_argument("--osx", action="store_true", default=False, dest="osx", help="Install Fridafy for OSX")
group.add_argument("--linux-x86", action="store_true", default=False, dest="nix32", help="Install Fridafy for Linux 32bit")
group.add_argument("--linux-x86-64", action="store_true", default=False, dest="nix64", help="Install Fridafy for Linux 64bit")
group.add_argument("--win-x86", action="store_true", default=False, dest="win32", help="Install Fridafy for Windows 32bit")
group.add_argument("--win-x86-64", action="store_true", default=False, dest="win64", help="Install Fridafy for Windows 64bit")
args = parser.parse_args()
# bitwise OR over the booleans: true when at least one platform flag was set
if not (args.osx | args.nix32 | args.nix64 | args.win32 | args.win64):
    print("Error: You must select at least an architecture!")
else:
    # the PYV8_* paths come from the star import of the bundled ``v8`` module
    if args.osx:
        install_v8(PYV8_OSX)
    elif args.nix32:
        install_v8(PYV8_LINUX_32)
    elif args.nix64:
        install_v8(PYV8_LINUX_64)
    elif args.win32:
        install_v8(PYV8_WINDOWS_32)
    elif args.win64:
        install_v8(PYV8_WINDOWS_64)
# rewrite argv so the setuptools invocation below always runs the "install"
# command, independent of the platform flag that was consumed above
sys.argv = [sys.argv[0]]
sys.argv.append("install")
# Standard setuptools metadata; runs with argv forced to ["<prog>", "install"]
# by the block above, so importing this file performs the installation.
setup(
    name="Fridafy",
    version="0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Simplified JS Engine for frida instrumentation tool",
    license="MIT",
    keywords="",
    url="https://github.com/dzonerzy/Fridafy",
    packages=["fridafy"],
    # console entry point: the ``fridafy`` command maps to fridafy.fridafy:main
    entry_points={
        'console_scripts': [
            'fridafy=fridafy.fridafy:main'
        ]
    },
    classifiers=[
        "Topic :: Mobile"
    ],
    install_requires=[
        'frida',
    ],
)
| StarcoderdataPython |
330363 | <filename>isolateparser/resultparser/__init__.py
from .breseq_folder_parser import BreseqFolderParser, IsolateTableColumns, get_sample_name
| StarcoderdataPython |
9735516 | '''
Created on Aug 13, 2020
@author: <NAME>
'''
import os
import torch
import torch.optim as optim
import torch.nn as nn
import torch.cuda as cuda
import functools
import argparse
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torchvision.utils import save_image
import data.dataset_handler as dataset_handler
import utils.utils as utils
import nets.generators as generators
class UnetGenerator(nn.Module):
    """U-Net generator assembled recursively from skip-connection blocks.

    The original implementation is https://github.com/phillipi/pix2pix
    Copyright (c) 2016, <NAME> and <NAME>
    """
    def __init__(self, input_nc, output_nc, num_downs=6, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Build the generator from the innermost block outwards.

        :param input_nc: number of channels in the input images
        :param output_nc: number of channels in the output images
        :param num_downs: number of 2x downsamplings; with ``num_downs`` == 6
            a 64x64 image is reduced to 1x1 at the bottleneck
        :param ngf: number of filters in the last conv layer
        :param norm_layer: normalization layer type
        :param use_dropout: enable dropout in the intermediate blocks
        """
        super(UnetGenerator, self).__init__()
        # bottleneck first
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None,
                                        submodule=None, norm_layer=norm_layer,
                                        innermost=True)
        # intermediate layers keep ngf * 8 filters
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None,
                                            submodule=block, norm_layer=norm_layer,
                                            use_dropout=use_dropout)
        # then taper the filter count back down to ngf
        for factor in (4, 2, 1):
            block = UnetSkipConnectionBlock(ngf * factor, ngf * factor * 2,
                                            input_nc=None, submodule=block,
                                            norm_layer=norm_layer)
        # outermost layer maps the raw input image to output_nc channels
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc,
                                             submodule=block, outermost=True,
                                             norm_layer=norm_layer)
    def forward(self, input):
        """Standard forward pass through the assembled U-Net."""
        return self.model(input)
    def get_net_params(self):
        """Print the name and shape of every trainable parameter."""
        for name, params in self.named_parameters():
            print(name, params.size())
class UnetSkipConnectionBlock(nn.Module):
    """One encoder/decoder level of a U-Net, with a skip connection.

        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|
    """
    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct one U-Net level.

        :param outer_nc: number of filters in the outer conv layer
        :param inner_nc: number of filters in the inner conv layer
        :param input_nc: number of channels in input images/features
            (defaults to ``outer_nc``)
        :param submodule: previously built inner levels, if any
        :param outermost: this level takes the raw image and emits the output
        :param innermost: this level is the bottleneck (no submodule)
        :param norm_layer: normalization layer type
        :param use_dropout: append dropout (intermediate levels only)
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # convs feeding InstanceNorm need their own bias, since InstanceNorm
        # carries no affine parameters by default
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        down_conv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                              stride=2, padding=1, bias=use_bias)
        down_act = nn.LeakyReLU(0.2, True)
        down_norm = norm_layer(inner_nc)
        up_act = nn.ReLU(True)
        up_norm = norm_layer(outer_nc)
        if outermost:
            # raw image in, tanh-activated output, no normalization
            up_conv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                         kernel_size=4, stride=2,
                                         padding=1)
            stages = [down_conv, submodule, up_act, up_conv, nn.Tanh()]
        elif innermost:
            # bottleneck: no submodule, upsample straight back
            up_conv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                         kernel_size=4, stride=2,
                                         padding=1, bias=use_bias)
            stages = [down_act, down_conv, up_act, up_conv, up_norm]
        else:
            # the doubled input channels account for the skip concatenation
            # performed by the submodule below this level
            up_conv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                         kernel_size=4, stride=2,
                                         padding=1, bias=use_bias)
            stages = [down_act, down_conv, down_norm, submodule,
                      up_act, up_conv, up_norm]
            if use_dropout:
                stages.append(nn.Dropout(0.5))
        self.model = nn.Sequential(*stages)
    def forward(self, x):
        out = self.model(x)
        if self.outermost:
            return out
        # skip connection: concatenate the input with this level's output
        return torch.cat([x, out], 1)
class NLayerDiscriminator(nn.Module):
    """PatchGAN-style discriminator over (input, output) image pairs.

    The forward pass expects the generator's input and output concatenated
    along the channel axis and yields a sigmoid real/fake prediction map.
    """
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Construct the discriminator.

        :param input_nc: number of channels per input image (doubled
            internally for the concatenated pair)
        :param ndf: number of filters in the first conv layer
        :param n_layers: number of intermediate conv layers
        :param norm_layer: normalization layer type
        """
        super(NLayerDiscriminator, self).__init__()
        # convs feeding InstanceNorm need their own bias, since InstanceNorm
        # carries no affine parameters by default
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kernel, pad = 4, 1
        layers = [
            nn.Conv2d(input_nc * 2, ndf, kernel_size=kernel, stride=2, padding=pad),
            nn.LeakyReLU(0.2, True),
        ]
        # gradually increase the number of filters (capped at ndf * 8)
        mult = 1
        for depth in range(1, n_layers):
            prev, mult = mult, min(2 ** depth, 8)
            layers += [
                nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kernel,
                          stride=2, padding=pad, bias=use_bias),
                norm_layer(ndf * mult),
                nn.LeakyReLU(0.2, True),
            ]
        prev, mult = mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kernel,
                      stride=2, padding=pad, bias=use_bias),
            norm_layer(ndf * mult),
            nn.LeakyReLU(0.2, True),
            # collapse to a one-channel prediction map
            nn.Conv2d(ndf * mult, 1, kernel_size=kernel, stride=1, padding=0),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)
    def forward(self, input):
        """Return the prediction map for a channel-concatenated pair."""
        return self.model(input)
    def get_net_params(self):
        """Print the name and shape of every trainable parameter."""
        for name, params in self.named_parameters():
            print(name, params.size())
def weights_init_normal(m):
    """Init hook for ``nn.Module.apply``: N(0, 0.02) weights for conv layers,
    N(1, 0.02) weights and zero bias for BatchNorm2d layers."""
    name = type(m).__name__
    if "Conv" in name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm2d" in name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)
class pix2pix():
    '''
    Refinement GAN for frame interpolation.

    A pretrained interpolation network produces an intermediate frame from
    two neighbouring frames; a U-Net generator refines that frame while a
    PatchGAN discriminator judges (interpolated, refined) pairs against
    (interpolated, ground-truth) pairs.
    '''
    def __init__(self, opt):
        """
        :param opt: parsed command line options (see ``initParameters``)
        """
        self.net_G = UnetGenerator(opt.channels, opt.channels)
        self.net_D = NLayerDiscriminator(opt.channels)
        self.net_interpolate = generators.GeneratorWithCondition_NoNoise_V7(opt)
        self.opt = opt
        self.data_loader = None
        self.data_path = dataset_handler.Dataset.D_UCF101_BODY_TRAIN.value
        # Loss functions: adversarial (BCE) + L1 reconstruction
        self.adv_loss = nn.BCELoss()
        self.l1_loss = nn.L1Loss()
        if cuda.is_available() and opt.isCudaUsed:
            self.net_G = self.net_G.cuda()
            self.net_D = self.net_D.cuda()
            self.adv_loss = self.adv_loss.cuda()
            self.l1_loss = self.l1_loss.cuda()
        # Initialize weights
        self.net_G.apply(weights_init_normal)
        self.net_D.apply(weights_init_normal)
        # Optimizers
        self.optimizer_G = optim.Adam(self.net_G.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
        self.optimizer_D = optim.Adam(self.net_D.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
    def load_training_dataset(self):
        """Load the training DataLoader once and return it (cached)."""
        if self.data_loader is not None:
            return self.data_loader
        print("Loading training dataset: %s" % self.data_path)
        dataset = dataset_handler.ImageDatasetLoader(self.opt).loadImageDatasetForNoNoise(self.data_path)
        self.data_loader = DataLoader(dataset=dataset, batch_size=self.opt.batch_size, shuffle=True, num_workers=0)
        print("Done.")
        return self.data_loader
    def load_interpolation_network(self):
        """Load the pretrained interpolation network from its checkpoint.

        NOTE(review): the checkpoint path is hard coded to a local
        development machine -- consider making it a command line option.
        """
        path = "D:/pyProject/GANs/implementations/my_approach/models_test/cpt/gennet_gen_images54_400.pt"
        self._load_model(self.net_interpolate, path)
    def train(self, opt, progress=0):
        """Run the adversarial training loop.

        :param opt: options namespace, printed for logging (``self.opt``
            drives the actual training)
        :param progress: number of epochs already completed; training
            resumes from this epoch (0 starts from scratch)
        :return: the trained generator and discriminator networks
        """
        print(opt)
        print(self.net_G)
        print(self.net_D)
        print(self.net_interpolate)
        self.load_interpolation_network()
        dataloader = self.load_training_dataset()
        if progress <= 0:
            no_epochs = self.opt.n_epochs
            progress = 0
        else:
            no_epochs = self.opt.n_epochs - progress
        os.makedirs(self.opt.path, exist_ok=True)
        self._write_to_file()
        # ----------
        #  Training
        # ----------
        for t_epoch in range(no_epochs):
            # BUGFIX: the original loop referenced the undefined names
            # ``current_progress`` and ``dataloader`` (NameError at runtime);
            # the resume offset is ``progress`` and the loader comes from
            # load_training_dataset(). The per-epoch body also duplicated
            # _train_one_epoch(), so delegate to it instead.
            epoch = t_epoch + progress
            self._train_one_epoch(epoch, dataloader)
        return self.net_G, self.net_D
    def _train_one_epoch(self, epoch, dataloader):
        """Train on every batch of ``dataloader``, then checkpoint and log."""
        temp_log = ""
        for i, imgs in enumerate(dataloader):
            temp_log = self._train_one_batch(imgs, epoch, i, dataloader.__len__())
        self.save_models(epoch)
        self._write_to_file(temp_log)
    def _cal_generator_loss(self, input, output, ground_truth, valid_label):
        """Weighted sum of the adversarial loss (fool D) and the L1
        reconstruction loss against the ground truth frame."""
        adv_loss = self.adv_loss(self.net_D(torch.cat((input, output), 1)), valid_label)
        g_loss = self.opt.adv_lambda * adv_loss
        g_loss = g_loss + self.opt.l1_lambda * self.l1_loss(output, ground_truth)
        return adv_loss, g_loss
    def _cal_discriminator_loss(self, ground_truth_distingue, fake_distingue, valid_label, fake_label):
        """Average of D's loss on real (ground truth) and fake pairs."""
        real_loss = self.adv_loss(ground_truth_distingue, valid_label)
        fake_loss = self.adv_loss(fake_distingue, fake_label)
        return real_loss, fake_loss, (real_loss + fake_loss) / 2
    def _train_one_batch(self, imgs, epoch, batch, total_batch):
        """Run one G step and one D step on a batch of frame triplets.

        ``imgs`` holds [previous, middle, next] frames per sample; the
        interpolation network predicts the middle frame, G refines it.
        :return: a log line describing this batch
        """
        self.net_G.train()
        self.net_D.train()
        self.net_interpolate.eval()
        Tensor = torch.FloatTensor if not (self.opt.isCudaUsed and torch.cuda.is_available()) else torch.cuda.FloatTensor # @UndefinedVariable
        pres = imgs[:, 0]
        lats = imgs[:, 2]
        mids = imgs[:, 1]
        # Adversarial ground truths (label smoothing: 0.95 / 0.1)
        valid = Variable(Tensor(mids.shape[0], 1, 1, 1).fill_(0.95), requires_grad=False)
        fake = Variable(Tensor(mids.shape[0], 1, 1, 1).fill_(0.1), requires_grad=False)
        # Configure input
        if (self.opt.isCudaUsed):
            inputPres = pres.to('cuda')
            inputLats = lats.to('cuda')
            expectedOutput = mids.to('cuda')
        else:
            inputPres = pres
            inputLats = lats
            expectedOutput = mids
        # -----------------
        #  Train Generator
        # -----------------
        self.optimizer_G.zero_grad()  # set G's gradient to zero
        imgs_inter = self.net_interpolate(inputPres.data, inputLats.data)  # interpolated frame from the frozen network
        imgs_ref = self.net_G(imgs_inter)  # refined frame
        # Loss measures G's ability to fool D and to match the ground truth
        adv_loss, g_loss = self._cal_generator_loss(imgs_inter, imgs_ref, expectedOutput, valid)
        g_loss.backward()
        self.optimizer_G.step()  # update G's weights
        # ---------------------
        #  Train Discriminator
        # ---------------------
        self.optimizer_D.zero_grad()  # set D's gradient to zero
        # detach() keeps D's gradients out of G and the interpolation net
        gt_distingue = self.net_D(torch.cat((imgs_inter.detach(), expectedOutput), 1))
        fake_distingue = self.net_D(torch.cat((imgs_inter.detach(), imgs_ref.detach()), 1))
        real_loss, fake_loss, d_loss = self._cal_discriminator_loss(gt_distingue, fake_distingue, valid, fake)
        d_loss.backward()
        self.optimizer_D.step()  # update D's weights
        # Show progress: PSNR before (interpolated) and after (refined) G
        psnr1 = utils.cal_psnr_tensor(imgs_inter.data[0].cpu(), expectedOutput.data[0].cpu())
        psnr2 = utils.cal_psnr_tensor(imgs_ref.data[0].cpu(), expectedOutput.data[0].cpu())
        temp_log = ("V4: [Epoch %d] [Batch %d/%d] [D loss: %f] [G loss: %f] [psnr1: %f] [psnr_ref: %f]"
                    % (epoch, batch, total_batch, d_loss.item(), g_loss.item(), psnr1, psnr2))
        if (batch % 100 == 0):
            print(temp_log)
        # Save sample refined frames every opt.sample_interval batches
        batches_done = epoch * total_batch + batch
        if batches_done % self.opt.sample_interval == 0:
            save_image(imgs_ref.data[:25], self.opt.path + "/l_%d.png" % batches_done, nrow=5, normalize=True)
            print("Saved l_%d.png" % batches_done)
            print(temp_log)
        return temp_log
    def save_models(self, epoch, output_path=None):
        '''
        Save model into file which contains state's information
        :param epoch: last epochth train
        :param output_path: saved directory path (defaults to
            ``opt.default_model_path``)
        '''
        outpath = self.opt.default_model_path if output_path is None else output_path
        os.makedirs(outpath, exist_ok=True)
        state_gen = {'epoch': epoch + 1,
                     'state_dict': self.net_G.state_dict(),
                     'optimizer': self.optimizer_G.state_dict(),
                     }
        state_dis = {'epoch': epoch + 1,
                     'state_dict': self.net_D.state_dict(),
                     'optimizer': self.optimizer_D.state_dict(),
                     }
        torch.save(state_gen, outpath + "/pix2pix_gen_" + self.opt.path + ".pt")
        torch.save(state_dis, outpath + "/pix2pix_dis_" + self.opt.path + ".pt")
        # additionally keep a per-epoch generator checkpoint
        os.makedirs("%s/cpt" % (outpath), exist_ok=True)
        torch.save(state_gen, "%s/cpt/pix2pix_gen_%s_%d.pt" % (outpath, self.opt.path, epoch))
        return
    def _load_model(self, model, path):
        '''
        Load network's state data (respecting to saved information)
        :param model: network whose weights are restored in place
        :param path: checkpoint file path
        :return: the epoch the checkpoint was saved at
        '''
        checkpoint = torch.load(path)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if self.opt.isCudaUsed: model = model.cuda()
        optimizer = optim.Adam(model.parameters(), lr=self.opt.lr, betas=(self.opt.b1, self.opt.b2))
        optimizer.load_state_dict(checkpoint['optimizer'])
        if not self.opt.isCudaUsed:
            return start_epoch
        # copy optimizer tensors into GPU manually
        for state in optimizer.state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.cuda()
        return start_epoch
    def _write_to_file(self, content=None):
        """Append ``content`` to the run's readme, or (with no argument)
        write the initial header describing options, networks and dataset."""
        path = self.opt.path + "/readme.txt"
        if content is not None:
            utils.write_to_existed_text_file(path, "\n" + content)
            return
        s = ""
        s += str(self.opt)
        s += "\n"
        s += str(self.net_G)
        s += "\n"
        s += str(self.net_D)
        s += "\n"
        s += ("Dataset: " + self.data_path)
        utils.write_to_text_file(path, s)
def initParameters():
    """Build and parse the command line options for pix2pix training.

    :return: ``argparse.Namespace`` with all training hyper-parameters
    """
    def str2bool(value):
        # BUGFIX: the original used ``type=bool``, which treats every
        # non-empty string (including "False") as True, so the flag could
        # never be turned off from the command line. Parse common truthy
        # spellings explicitly instead; the default stays True.
        return str(value).lower() in ("1", "true", "t", "yes", "y")
    parser = argparse.ArgumentParser()
    parser.add_argument("--n_epochs", type=int, default=400, help="number of epochs of training")
    parser.add_argument("--batch_size", type=int, default=32, help="size of the batches")
    parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
    parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
    parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
    parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
    parser.add_argument("--crop_size", type=int, default=64, help="size of cropping area from image")
    parser.add_argument("--channels", type=int, default=3, help="number of image channels")
    parser.add_argument("--sample_interval", type=int, default=500, help="number of batches between image sampling")
    parser.add_argument("--test_interval", type=int, default=50, help="number of epochs between testing while training")
    parser.add_argument("--patch_size", type=int, default=3, help="the number of frames in a patch")
    parser.add_argument("--path", type=str, default="pix2pix_try", help="output folder")
    parser.add_argument("--default_model_path", type=str, default="models_test", help="the default path of saved models")
    parser.add_argument("--adv_lambda", type=float, default=1.0, help="the default weight of adv Loss")
    parser.add_argument("--l1_lambda", type=float, default=100.0, help="the default weight of L1 Loss")
    parser.add_argument("--isCudaUsed", type=str2bool, default=True, help="run with GPU or CPU (default)")
    parser.add_argument("--gen_load", type=str, default=None, help="loaded generator for training")
    parser.add_argument("--dis_load", type=str, default=None, help="loaded discriminator for training")
    return parser.parse_args()
def main():
    """Entry point: parse CLI options and launch pix2pix training."""
    print("From train_pix2pix module...")
    options = initParameters()
    trainer = pix2pix(options)
    trainer.train(options)
main()
| StarcoderdataPython |
8071814 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.validators import RegexValidator
gtin_re = r'^[0-9]{6,8}$|^[0-9]{10}$|^[0-9]{12}$|^[0-9]{13}$|^[0-9]{14,}$'
# Django validator: raises ``ValidationError`` with the message below when a
# value does not fully match ``gtin_re``.
validate_gtin = \
    RegexValidator(gtin_re, 'Please enter a valid GTIN/ISBN/EAN/UPC code.')
| StarcoderdataPython |
1664826 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import os
from typing import Any, Callable, Dict, List, Optional, Union
import uuid
import airflow
from airflow.exceptions import AirflowException
if airflow.__version__ > "2.0":
from airflow.hooks.base import BaseHook
else:
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import great_expectations as ge
from great_expectations.checkpoint import LegacyCheckpoint
from great_expectations.checkpoint.types.checkpoint_result import CheckpointResult
from great_expectations.data_context.types.base import (
DataContextConfig,
GCSStoreBackendDefaults,
)
from great_expectations.data_context import BaseDataContext
class GreatExpectationsOperator(BaseOperator):
"""
An operator to leverage Great Expectations as a task in your Airflow DAG.
Current list of expectations types:
https://docs.greatexpectations.io/en/latest/reference/glossary_of_expectations.html
How to create expectations files:
https://docs.greatexpectations.io/en/latest/guides/tutorials/how_to_create_expectations.html
:param run_name: Identifies the validation run (defaults to timestamp if not specified)
:type run_name: Optional[str]
:param data_context_root_dir: Path of the great_expectations directory
:type data_context_root_dir: Optional[str]
:param data_context: A great_expectations `DataContext` object
:type data_context: Optional[BaseDataContext]
:param expectation_suite_name: The name of the Expectation Suite to use for validation
:type expectation_suite_name: Optional[str]
:param batch_kwargs: The batch_kwargs to use for validation
:type batch_kwargs: Optional[dict]
:param assets_to_validate: A list of dictionaries of batch_kwargs + Expectation Suites to use for validation
:type assets_to_validate: Optional[list[dict]]
:param checkpoint_name: A Checkpoint name to use for validation
:type checkpoint_name: Optional[str]
:param validation_operator_name: name of a Great Expectations validation operator, defaults to action_list_operator
:type validation_operator_name: Optional[str]
:param fail_task_on_validation_failure: Fail the Airflow task if the Great Expectation validation fails
:type fail_task_on_validation_failure: Optiopnal[bool]
:param validation_failure_callback: Called when the Great Expectations validation fails
:type validation_failure_callback: Callable[[CheckpointResult], None]
:param **kwargs: kwargs
:type **kwargs: Optional[dict]
"""
ui_color = "#AFEEEE"
ui_fgcolor = "#000000"
template_fields = (
"checkpoint_name",
"batch_kwargs",
"assets_to_validate",
"data_context_root_dir",
)
    @apply_defaults
    def __init__(
        self,
        *,
        run_name: Optional[str] = None,
        data_context_root_dir: Optional[Union[str, bytes, os.PathLike]] = None,
        data_context: Optional[BaseDataContext] = None,
        expectation_suite_name: Optional[str] = None,
        batch_kwargs: Optional[Dict] = None,
        assets_to_validate: Optional[List[Dict]] = None,
        checkpoint_name: Optional[str] = None,
        validation_operator_name: Optional[str] = None,
        fail_task_on_validation_failure: Optional[bool] = True,
        validation_failure_callback: Optional[
            Callable[[CheckpointResult], None]
        ] = None,
        **kwargs
    ):
        """Validate the mutually exclusive configuration options and store them.

        See the class docstring for the meaning of each parameter.
        """
        super().__init__(**kwargs)
        self.run_name: Optional[str] = run_name
        # Check that only one of the arguments is passed to set a data context (or none)
        if data_context_root_dir and data_context:
            raise ValueError(
                "Only one of data_context_root_dir or data_context can be specified."
            )
        self.data_context_root_dir: Optional[str] = data_context_root_dir
        self.data_context: Optional[BaseDataContext] = data_context
        # Check that only the correct args to validate are passed
        # this doesn't cover the case where only one of expectation_suite_name or batch_kwargs is specified
        # along with one of the others, but I'm ok with just giving precedence to the correct one
        # Exactly one of the three validation configurations must be truthy:
        #   1. expectation_suite_name together with batch_kwargs
        #   2. assets_to_validate
        #   3. checkpoint_name
        if (
            sum(
                bool(x)
                for x in [
                    (expectation_suite_name and batch_kwargs),
                    assets_to_validate,
                    checkpoint_name,
                ]
            )
            != 1
        ):
            raise ValueError(
                "Exactly one of expectation_suite_name + batch_kwargs, "
                "assets_to_validate, or checkpoint_name is required to run validation."
            )
        self.expectation_suite_name: Optional[str] = expectation_suite_name
        self.batch_kwargs: Optional[Dict] = batch_kwargs
        self.assets_to_validate: Optional[List[Dict]] = assets_to_validate
        self.checkpoint_name: Optional[str] = checkpoint_name
        self.validation_operator_name: Optional[str] = validation_operator_name
        # whether a failed validation should also fail the Airflow task
        self.fail_task_on_validation_failure = fail_task_on_validation_failure
        # optional hook invoked with the CheckpointResult on validation failure
        self.validation_failure_callback = validation_failure_callback
def create_data_context(self) -> BaseDataContext:
"""Create and return the :class:`~ge.data_context.DataContext` to be used
during validation.
Subclasses should override this to provide custom logic around creating a
`DataContext`. This is called at task execution time, which defers connecting
to the meta database and allows for the use of templated variables.
"""
if self.data_context_root_dir:
return ge.data_context.DataContext(
context_root_dir=self.data_context_root_dir
)
else:
return ge.data_context.DataContext()
def execute(self, context: Any) -> CheckpointResult:
self.log.info("Ensuring data context exists...")
if not self.data_context:
self.log.info("Data context does not exist, creating now.")
self.data_context: Optional[BaseDataContext] = self.create_data_context()
self.log.info("Running validation with Great Expectations...")
batches_to_validate = []
if self.batch_kwargs and self.expectation_suite_name:
batch = {
"batch_kwargs": self.batch_kwargs,
"expectation_suite_names": [self.expectation_suite_name],
}
batches_to_validate.append(batch)
elif self.checkpoint_name:
checkpoint = self.data_context.get_checkpoint(self.checkpoint_name)
for batch in checkpoint.batches:
batch_kwargs = batch["batch_kwargs"]
for suite_name in batch["expectation_suite_names"]:
batch = {
"batch_kwargs": batch_kwargs,
"expectation_suite_names": [suite_name],
}
batches_to_validate.append(batch)
elif self.assets_to_validate:
for asset in self.assets_to_validate:
batch = {
"batch_kwargs": asset["batch_kwargs"],
"expectation_suite_names": [asset["expectation_suite_name"]],
}
batches_to_validate.append(batch)
result = LegacyCheckpoint(
name="_temp_checkpoint",
data_context=self.data_context,
validation_operator_name=self.validation_operator_name,
batches=batches_to_validate,
).run(run_name=self.run_name)
self.handle_result(result)
return result
def handle_result(self, result: CheckpointResult) -> None:
"""Handle the given validation result.
If the validation failed, this method will:
- call :attr:`~validation_failure_callback`, if set
- raise an :exc:`airflow.exceptions.AirflowException`, if
:attr:`~fail_task_on_validation_failure` is `True`, otherwise, log a warning
message
If the validation succeeded, this method will simply log an info message.
:param result: The validation result
:type result: CheckpointResult
"""
if not result["success"]:
if self.validation_failure_callback:
self.validation_failure_callback(result)
if self.fail_task_on_validation_failure:
raise AirflowException("Validation with Great Expectations failed.")
else:
self.log.warning(
"Validation with Great Expectations failed. "
"Continuing DAG execution because "
"fail_task_on_validation_failure is set to False."
)
else:
self.log.info("Validation with Great Expectations successful.")
class GreatExpectationsBigQueryOperator(GreatExpectationsOperator):
    """
    An operator that allows you to use Great Expectations to validate data Expectations
    against a BigQuery table or the result of a SQL query.
    The Expectations need to be stored in a JSON file sitting in an accessible GCS
    bucket. The validation results are output to GCS in both JSON and HTML formats.
    :param gcp_project: The GCP project of the bucket holding the Great Expectations
        artifacts.
    :type gcp_project: str
    :param gcs_bucket: GCS bucket holding the Great Expectations artifacts.
    :type gcs_bucket: str
    :param gcs_expectations_prefix: GCS prefix where the Expectations file can be
        found. For example, "ge/expectations".
    :type gcs_expectations_prefix: str
    :param gcs_validations_prefix: GCS prefix where the validation output files should
        be saved. For example, "ge/expectations".
    :type gcs_validations_prefix: str
    :param gcs_datadocs_prefix: GCS prefix where the validation datadocs files should
        be saved. For example, "ge/expectations".
    :type gcs_datadocs_prefix: str
    :param query: The SQL query that defines the set of data to be validated. If the
        query parameter is filled in then the `table` parameter cannot be.
    :type query: Optional[str]
    :param table: The name of the BigQuery table with the data to be validated. If the
        table parameter is filled in then the `query` parameter cannot be.
    :type table: Optional[str]
    :param bq_dataset_name: The name of the BigQuery data set where any temp tables
        will be created that are needed as part of the GE validation process.
    :type bq_dataset_name: str
    :param bigquery_conn_id: ID of the connection with the credentials info needed to
        connect to BigQuery.
    :type bigquery_conn_id: str
    """
    # Colors used for this operator's box in the Airflow UI.
    ui_color = "#AFEEEE"
    ui_fgcolor = "#000000"
    # Extend the parent's templated fields so these too accept Jinja templates.
    template_fields = GreatExpectationsOperator.template_fields + (
        "bq_dataset_name",
        "gcp_project",
        "gcs_bucket",
    )
    @apply_defaults
    def __init__(
        self,
        *,
        gcp_project: str,
        gcs_bucket: str,
        gcs_expectations_prefix: str,
        gcs_validations_prefix: str,
        gcs_datadocs_prefix: str,
        query: Optional[str] = None,
        table: Optional[str] = None,
        bq_dataset_name: str,
        bigquery_conn_id: str = "bigquery_default",
        **kwargs
    ):
        # All attributes must be assigned BEFORE super().__init__ below,
        # because get_batch_kwargs() (invoked in the super() call) reads them.
        self.query: Optional[str] = query
        self.table: Optional[str] = table
        self.bigquery_conn_id = bigquery_conn_id
        self.bq_dataset_name = bq_dataset_name
        self.gcp_project = gcp_project
        self.gcs_bucket = gcs_bucket
        self.gcs_expectations_prefix = gcs_expectations_prefix
        self.gcs_validations_prefix = gcs_validations_prefix
        self.gcs_datadocs_prefix = gcs_datadocs_prefix
        super().__init__(batch_kwargs=self.get_batch_kwargs(), **kwargs)
    def create_data_context(self) -> BaseDataContext:
        """Create and return the `DataContext` with a BigQuery `DataSource`."""
        # Get the credentials information for the BigQuery data source from the BigQuery
        # Airflow connection
        conn = BaseHook.get_connection(self.bigquery_conn_id)
        connection_json = conn.extra_dejson
        # NOTE(review): .get() returns None when the key-path extra is missing,
        # which would make the URL concatenation below raise TypeError — confirm
        # the connection is always configured with a key path.
        credentials_path = connection_json.get("extra__google_cloud_platform__key_path")
        data_context_config = DataContextConfig(
            config_version=2,
            datasources={
                "bq_datasource": {
                    # SQLAlchemy-style bigquery:// URL pointing at the dataset,
                    # authenticated via the service-account key file.
                    "credentials": {
                        "url": "bigquery://"
                        + self.gcp_project
                        + "/"
                        + self.bq_dataset_name
                        + "?credentials_path="
                        + credentials_path
                    },
                    "class_name": "SqlAlchemyDatasource",
                    "module_name": "great_expectations.datasource",
                    "data_asset_type": {
                        "module_name": "great_expectations.dataset",
                        "class_name": "SqlAlchemyDataset",
                    },
                }
            },
            # All GE stores (expectations, validations, data docs) live in GCS.
            store_backend_defaults=GCSStoreBackendDefaults(
                default_bucket_name=self.gcs_bucket,
                default_project_name=self.gcp_project,
                validations_store_prefix=self.gcs_validations_prefix,
                expectations_store_prefix=self.gcs_expectations_prefix,
                data_docs_prefix=self.gcs_datadocs_prefix,
            ),
        )
        return BaseDataContext(project_config=data_context_config)
    def get_batch_kwargs(self) -> Dict:
        """Build the batch_kwargs dict from either `query` or `table`.

        :raises ValueError: if both `query` and `table` were supplied.
        """
        # Tell GE where to fetch the batch of data to be validated.
        batch_kwargs = {
            "datasource": "bq_datasource",
        }
        # Check that only one of the arguments is passed to set a data context (or none)
        if self.query and self.table:
            raise ValueError("Only one of query or table can be specified.")
        if self.query:
            batch_kwargs["query"] = self.query
            batch_kwargs["data_asset_name"] = self.bq_dataset_name
            # Query results are staged in a date-stamped, randomly-suffixed temp table.
            batch_kwargs["bigquery_temp_table"] = self.get_temp_table_name(
                "ge_" + datetime.datetime.now().strftime("%Y%m%d") + "_", 10
            )
        elif self.table:
            batch_kwargs["table"] = self.table
            batch_kwargs["data_asset_name"] = self.bq_dataset_name
        self.log.info("batch_kwargs: " + str(batch_kwargs))
        return batch_kwargs
    def get_temp_table_name(
        self, desired_prefix: str, desired_length_of_random_portion: int
    ) -> str:
        """Return `desired_prefix` plus a random hex suffix of the given length."""
        random_string = str(uuid.uuid4().hex)
        random_portion_of_name = random_string[:desired_length_of_random_portion]
        full_name = desired_prefix + random_portion_of_name
        self.log.info("Generated name for temporary table: %s", full_name)
        return full_name
| StarcoderdataPython |
""" Usage of RichMenu Manager """
from richmenu import RichMenu, RichMenuManager
# Setup RichMenuManager
# Replace the placeholder with a real LINE Messaging API channel access token.
channel_access_token = "YOUR_CHANNEL_ACCESS_TOKEN"
rmm = RichMenuManager(channel_access_token)
# Setup RichMenu to register
rm = RichMenu(name="Test menu", chat_bar_text="Open this menu")
# add_area(x, y, width, height, action_type, action_data): four tappable
# quadrants on a 2500x1686 menu image.
rm.add_area(0, 0, 1250, 843, "message", "テキストメッセージ")
rm.add_area(1250, 0, 1250, 843, "uri", "http://imoutobot.com")
rm.add_area(0, 843, 1250, 843, "postback", "data1=from_richmenu&data2=as_postback")
# Postback with a list payload: [postback data, display text shown in chat].
rm.add_area(1250, 843, 1250, 843, "postback", ["data3=from_richmenu_with&data4=message_text", "ポストバックのメッセージ"])
# Register
# Uploads the menu definition plus its background image; returns the new menu id.
res = rmm.register(rm, "/path/to/menu.png")
richmenu_id = res["richMenuId"]
print("Registered as " + richmenu_id)
# Apply to user
user_id = "LINE_MID_TO_APPLY"
rmm.apply(user_id, richmenu_id)
# Check
res = rmm.get_applied_menu(user_id)
print(user_id + ":" + res["richMenuId"])
# # Others
# res = rmm.get_list()
# rmm.download_image(richmenu_id, "/path/to/downloaded_image.png")
# res = rmm.detach(user_id)
# res = rmm.remove(richmenu_id)
# rmm.remove_all()
# res = rmm.get_list()
| StarcoderdataPython |
#!/usr/bin/env python3
"""
Equivalent kubectl commands are much less readable:
k get ds -n kube-system kube-flannel-ds -oyaml
k get ds -n kube-system kube-flannel-ds -o=custom-columns=IFACE:.spec.template.spec.containers[0].args
# Not sure of the next one:
kubectl patch ds -n kube-system kube-flannel-ds -p '{"spec":{"template":{"spec":{"containers":[{"name":"kube-flannel","args":"new args"}]}}}}'
"""
import sys
import yaml
# Patch the flannel DaemonSet manifest in place so its first container is
# started with --iface=<name> taken from the command line.
iface_name = sys.argv[1]

# The manifest contains multiple YAML documents; load them all.
with open("kube-flannel.yml") as f:
    eph_yaml = list(yaml.load_all(f, Loader=yaml.FullLoader))

# Locate the DaemonSet document and the args of its first container.
ds = [x for x in eph_yaml if x["kind"] == "DaemonSet"][0]
container_args = ds["spec"]["template"]["spec"]["containers"][0]["args"]

iface_arg = "--iface={}".format(iface_name)
# Make it idempotent: only append the flag when it is not already present.
if iface_arg not in container_args:
    container_args.append(iface_arg)

# Write every document back (dump_all writes to f and returns None; the
# original bound that None to an unused variable, removed here).
with open("kube-flannel.yml", "w") as f:
    yaml.dump_all(eph_yaml, f, default_flow_style=False)
| StarcoderdataPython |
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 3.0.0
# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_configure_idrac_eventing
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
from ansible_collections.dellemc.openmanage.tests.unit.compat.mock import MagicMock, patch, Mock, PropertyMock
from pytest import importorskip
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")
class TestConfigureEventing(FakeAnsibleModule):
    """Unit tests for the dellemc_configure_idrac_eventing module.

    Fixtures mock out the OMSDK iDRAC connection, file-share manager and
    config manager; each test drives run_idrac_eventing_config (or main)
    against a canned config_mgr response.

    NOTE: two dict values below were anonymized in the source to a bare
    ``<PASSWORD>`` token, which is a syntax error; they are restored to
    ``None`` to match the equivalent sibling test cases.
    """
    module = dellemc_configure_idrac_eventing
    @pytest.fixture
    def idrac_configure_eventing_mock(self, mocker):
        """Mocked iDRAC object exposing file_share_manager and config_mgr."""
        omsdk_mock = MagicMock()
        idrac_obj = MagicMock()
        omsdk_mock.file_share_manager = idrac_obj
        omsdk_mock.config_mgr = idrac_obj
        type(idrac_obj).create_share_obj = Mock(return_value="Status")
        type(idrac_obj).set_liason_share = Mock(return_value="Status")
        return idrac_obj
    @pytest.fixture
    def idrac_file_manager_config_eventing_mock(self, mocker):
        """Patch the module-level file_share_manager (fall back to a bare mock)."""
        try:
            file_manager_obj = mocker.patch(
                'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_configure_idrac_eventing.file_share_manager')
        except AttributeError:
            file_manager_obj = MagicMock()
        obj = MagicMock()
        file_manager_obj.create_share_obj.return_value = obj
        return file_manager_obj
    @pytest.fixture
    def is_changes_applicable_eventing_mock(self, mocker):
        """Patch the module-level config_mgr used for check-mode evaluation."""
        try:
            changes_applicable_obj = mocker.patch(
                'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_configure_idrac_eventing.config_mgr')
        except AttributeError:
            changes_applicable_obj = MagicMock()
        obj = MagicMock()
        changes_applicable_obj.is_change_applicable.return_value = obj
        return changes_applicable_obj
    @pytest.fixture
    def idrac_connection_configure_eventing_mock(self, mocker, idrac_configure_eventing_mock):
        """Patch iDRACConnection so entering the context yields the mock iDRAC."""
        idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.'
                                             'dellemc_configure_idrac_eventing.iDRACConnection',
                                             return_value=idrac_configure_eventing_mock)
        idrac_conn_class_mock.return_value.__enter__.return_value = idrac_configure_eventing_mock
        return idrac_configure_eventing_mock
    def test_main_configure_eventing_success_case01(self, idrac_connection_configure_eventing_mock, idrac_default_args,
                                                    mocker, idrac_file_manager_config_eventing_mock):
        """main() reports the standardized success message."""
        idrac_default_args.update({"share_name": "sharename", 'share_password': None, "destination_number": 1,
                                   "destination": "1.1.1.1", 'share_mnt': None, 'share_user': None})
        message = {'msg': 'Successfully configured the idrac eventing settings.',
                   'eventing_status': {"Id": "JID_12345123456", "JobState": "Completed"},
                   'changed': True}
        mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.'
                     'dellemc_configure_idrac_eventing.run_idrac_eventing_config', return_value=message)
        result = self._run_module(idrac_default_args)
        assert result["msg"] == "Successfully configured the iDRAC eventing settings."
    def test_run_idrac_eventing_config_success_case01(self, idrac_connection_configure_eventing_mock,
                                                      idrac_file_manager_config_eventing_mock, idrac_default_args,
                                                      is_changes_applicable_eventing_mock):
        """Check mode exits with 'Changes found to commit!'."""
        idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
                                   "share_password": "<PASSWORD>", "destination_number": 1, "destination": "1.1.1.1",
                                   "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
                                   "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
                                   "enable_alerts": "Enabled", "authentication": "Enabled",
                                   "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
                                   "password": "<PASSWORD>"})
        message = {"changes_applicable": True, "message": "Changes found to commit!"}
        idrac_connection_configure_eventing_mock.config_mgr.is_change_applicable.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
        with pytest.raises(Exception) as ex:
            self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
        assert "Changes found to commit!" == ex.value.args[0]
    def test_run_idrac_eventing_config_success_case02(self, idrac_connection_configure_eventing_mock,
                                                      idrac_file_manager_config_eventing_mock, idrac_default_args):
        """Applied changes are surfaced in the returned message."""
        idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
                                   "share_password": "<PASSWORD>", "destination_number": 1, "destination": "1.1.1.1",
                                   "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
                                   "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
                                   "enable_alerts": "Enabled", "authentication": "Enabled",
                                   "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
                                   "password": "<PASSWORD>"})
        message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True,
                   "Status": "Success"}
        idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
        assert result['message'] == 'changes found to commit!'
    def test_run_idrac_eventing_config_success_case03(self, idrac_connection_configure_eventing_mock,
                                                      idrac_file_manager_config_eventing_mock, idrac_default_args):
        """'No changes found to commit!' result is passed through unchanged."""
        idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
                                   "share_password": "<PASSWORD>", "destination_number": 1,
                                   "destination": "1.1.1.1", "snmp_v3_username": "snmpuser",
                                   "snmp_trap_state": "Enabled", "alert_number": 4, "email_alert_state": "Enabled",
                                   "address": "abc@xyz", "custom_message": "test", "enable_alerts": "Enabled",
                                   "authentication": "Enabled", "smtp_ip_address": "192.168.0.1", "smtp_port": 443,
                                   "username": "uname", "password": "<PASSWORD>"})
        message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False,
                   "Status": "Success"}
        idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
        assert result["Message"] == 'No changes found to commit!'
    def test_run_idrac_eventing_config_success_case04(self, idrac_connection_configure_eventing_mock,
                                                      idrac_default_args, idrac_file_manager_config_eventing_mock):
        """'No changes were applied' success result is passed through."""
        idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
                                   "share_password": "<PASSWORD>", "destination_number": 1, "destination": "1.1.1.1",
                                   "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
                                   "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
                                   "enable_alerts": "Enabled", "authentication": "Enabled",
                                   "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
                                   "password": "<PASSWORD>"})
        message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
                   "Status": "Success"}
        idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
        assert result['Message'] == 'No changes were applied'
    def test_run_idrac_eventing_config_success_case05(self, idrac_connection_configure_eventing_mock,
                                                      idrac_file_manager_config_eventing_mock, idrac_default_args):
        """All-None parameters still produce a clean 'No changes were applied'."""
        idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
                                   "share_password": "<PASSWORD>", "destination_number": None, "destination": None,
                                   "snmp_v3_username": None, "snmp_trap_state": None, "alert_number": None,
                                   "email_alert_state": None, "address": None, "custom_message": None,
                                   "enable_alerts": None, "authentication": None,
                                   "smtp_ip_address": None, "smtp_port": None, "username": None,
                                   "password": None})  # restored from a bare anonymization token (was a SyntaxError)
        message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
                   "Status": "Success"}
        obj = MagicMock()
        idrac_connection_configure_eventing_mock.config_mgr = obj
        type(obj).configure_snmp_trap_destination = PropertyMock(return_value=message)
        type(obj).configure_email_alerts = PropertyMock(return_value=message)
        type(obj).configure_idrac_alerts = PropertyMock(return_value=message)
        type(obj).configure_smtp_server_settings = PropertyMock(return_value=message)
        idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
        assert result['Message'] == 'No changes were applied'
    def test_run_idrac_eventing_config_failed_case01(self, idrac_connection_configure_eventing_mock,
                                                     idrac_file_manager_config_eventing_mock, idrac_default_args):
        """A failed liaison-share setup raises with the embedded Data message."""
        idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
                                   "share_password": "<PASSWORD>", "destination_number": 1, "destination": "1.1.1.1",
                                   "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
                                   "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
                                   "enable_alerts": "Enabled", "authentication": "Enabled",
                                   "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
                                   "password": "<PASSWORD>"})
        message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}}
        idrac_connection_configure_eventing_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
        idrac_connection_configure_eventing_mock.config_mgr.set_liason_share.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        with pytest.raises(Exception) as ex:
            self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
        assert ex.value.args[0] == 'status failed in checking Data'
    def test_run_idrac_eventing_config_failed_case02(self, idrac_connection_configure_eventing_mock,
                                                     idrac_default_args, idrac_file_manager_config_eventing_mock):
        """A 'failed' apply_changes status still returns its Message."""
        idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
                                   "share_password": "<PASSWORD>", "destination_number": 1, "destination": "1.1.1.1",
                                   "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
                                   "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
                                   "enable_alerts": "Enabled", "authentication": "Enabled",
                                   "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
                                   "password": "<PASSWORD>"})
        message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
                   "Status": "failed"}
        idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        f_module.check_mode = False
        result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
        assert result['Message'] == 'No changes were applied'
    def test_run_idrac_eventing_config_failed_case03(self, idrac_connection_configure_eventing_mock,
                                                     idrac_default_args, idrac_file_manager_config_eventing_mock):
        """Liaison-share failure with a different Data message is re-raised."""
        idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
                                   "share_password": "<PASSWORD>", "destination_number": 1,
                                   "destination": "1.1.1.1", "snmp_v3_username": "snmpuser",
                                   "snmp_trap_state": "Enabled", "alert_number": 4, "email_alert_state": "Enabled",
                                   "address": "abc@xyz", "custom_message": "test", "enable_alerts": "Enabled",
                                   "authentication": "Enabled", "smtp_ip_address": "192.168.0.1",
                                   "smtp_port": 443, "username": "uname", "password": "<PASSWORD>"})
        message = {'Status': 'Failed', "Data": {'Message': "Failed to found changes"}}
        idrac_connection_configure_eventing_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
        idrac_connection_configure_eventing_mock.config_mgr.set_liason_share.return_value = message
        f_module = self.get_module_mock(params=idrac_default_args)
        with pytest.raises(Exception) as ex:
            self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
        assert ex.value.args[0] == 'Failed to found changes'
    @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError])
    def test_main_configure_eventing_exception_handling_case(self, exc_type, mocker, idrac_default_args,
                                                             idrac_connection_configure_eventing_mock,
                                                             idrac_file_manager_config_eventing_mock):
        """Any exception from the config helper results in a failed module run."""
        idrac_default_args.update({"share_name": "sharename", 'share_password': None,  # restored (was a SyntaxError)
                                   'share_mnt': None, 'share_user': None})
        mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.'
                     'dellemc_configure_idrac_eventing.run_idrac_eventing_config', side_effect=exc_type('test'))
        result = self._run_module_with_fail_json(idrac_default_args)
        assert 'msg' in result
        assert result['failed'] is True
| StarcoderdataPython |
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
def scan_callback(msg):
    """Cache the forward range reading from the laser scan, capped at 10 m."""
    global g_range_ahead
    g_range_ahead = min(msg.ranges[0], 10)
g_range_ahead = 1
driving_forward = False
# NOTE(review): the subscriber/publisher are created before init_node;
# rospy normally expects init_node first — confirm this works as intended.
scan_sub = rospy.Subscriber('scan', LaserScan, scan_callback)
cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
rospy.init_node('wander')
state_change_time = rospy.Time.now()
print "Now:", state_change_time
# 10 Hz control loop: alternate between driving forward and turning in place.
rate = rospy.Rate(10)
while not rospy.is_shutdown():
    if driving_forward:
        # Turn when an obstacle is close or the 30 s forward budget expires.
        if (g_range_ahead < 0.8 or rospy.Time.now() > state_change_time):
            driving_forward = False
            print "Starting to turn; range ahead:", g_range_ahead
            state_change_time = rospy.Time.now() + rospy.Duration(1)
    else:
        # Resume forward motion once the path is clear and 1 s has elapsed.
        if (g_range_ahead > 1.0 and rospy.Time.now() > state_change_time):
            driving_forward = True
            print "Starting forward"
            state_change_time = rospy.Time.now() + rospy.Duration(30)
    twist = Twist()
    if driving_forward:
        # Forward speed scales with the free distance ahead.
        twist.linear.x = g_range_ahead / 4
        twist.angular.z = 0.1
    else:
        twist.angular.z = 0.5
    print "lin %0.1f, ang %0.1f" % (twist.linear.x, twist.angular.z)
    cmd_vel_pub.publish(twist)
    try:
        rate.sleep()
    except rospy.exceptions.ROSInterruptException:
        break
print "done"
| StarcoderdataPython |
# hermes/hermes/email_engine/__init__.py
import ssl
import smtplib
from gaea.log import logger
from gaea.config import CONFIG
from hermes.constants import SENDER, TO
class EmailEngine:
    """Manage SMTP-over-SSL connections and send prepared email messages.

    Connections are opened lazily per sender address by :meth:`login` and
    cached until explicitly closed via :meth:`close`.
    """

    def __init__(self):
        # Connection cache, keyed by sender email address.
        # BUGFIX: this was a mutable CLASS attribute, so every EmailEngine
        # instance shared (and mutated) the same dict; it is now per-instance.
        # NOTE(review): confirm no caller accessed EmailEngine.open_connections
        # at class level.
        self.open_connections = {}

    def close(self, emails=None):
        """Close and forget cached connections for *emails* (all when None)."""
        if emails is None:
            emails = list(self.open_connections.keys())
        elif isinstance(emails, str):
            emails = [emails]
        for email in emails:
            connection = self.open_connections.get(email)
            if connection is None:
                logger.info(f"No opened connection for {email}")
                continue
            connection.close()
            logger.info(f"Closed connection for {email}")
            del self.open_connections[email]

    def get_email_client(self):
        """Return a new SMTP_SSL client for the configured mail server."""
        return smtplib.SMTP_SSL(
            host=CONFIG.EMAIL_SERVER_HOST,
            port=CONFIG.EMAIL_SERVER_PORT,
            context=ssl.SSLContext(),
        )

    def login(self, email_address):
        """Open and cache an authenticated connection for *email_address*.

        The password is read from CONFIG under the key
        ``<LOCALPART_UPPERCASED>_EMAIL_PASSWORD``. No-op when a connection
        for this address is already cached.

        :raises ValueError: when no password is configured for the address.
        """
        if email_address in self.open_connections:
            return
        password_var_name = "{name}_EMAIL_PASSWORD".format(
            name=email_address.split("@")[0].upper()
        )
        password = CONFIG.get(password_var_name)
        if password is None:
            logger.error(f"Trying to connect to an unknown email: {email_address}")
            raise ValueError(f"{email_address} is unknown")
        client = self.get_email_client()
        client.login(email_address, password)
        logger.info(f"Connected to {email_address}")
        self.open_connections[email_address] = client

    def send(self, message):
        """Send *message* via its sender's cached connection, logging in first if needed."""
        sender = message.email[SENDER]
        if sender not in self.open_connections:
            self.login(sender)
        client = self.open_connections[sender]
        client.send_message(message.email)
        logger.info(f"Email sent to {message.email[TO]}", email=message.email)
| StarcoderdataPython |
from .core import *
__version__ = '0.0.7'
| StarcoderdataPython |
def cat_mouse(x: str) -> str:
    """Return 'Escaped!' when *x* contains at least four dots, else 'Caught!'."""
    dots = x.count('.')
    if dots >= 4:
        return 'Escaped!'
    return 'Caught!'
| StarcoderdataPython |
from django import template
from django.core.exceptions import ViewDoesNotExist
from django.core.urlresolvers import NoReverseMatch
from django.template.defaulttags import url as django_url
# Tag library that this module's template tags are registered against.
register = template.Library()
class LocalSiteURLNode(template.Node):
    """Template node wrapping Django's URLNode to make {% url %} LocalSite-aware.

    When ``local_site_name`` is present in the template context, the reverse
    match is first attempted with that name injected as an extra argument,
    falling back to the plain match on failure.
    """
    def __init__(self, url_node):
        self.url_node = url_node
        # Back up the wrapped node's args/kwargs: render() mutates them
        # temporarily and must be able to restore the originals.
        self.args = list(url_node.args)
        self.kwargs = url_node.kwargs.copy()
    def render(self, context):
        # We're going to try two versions of the URL: one with the local
        # site name, and one without. Of course, we only try with the
        # name if that's provided in the context.
        #
        # We will be plugging in a set of arguments to url_node before
        # rendering, based on the backed up values in LocalSiteURLNode's
        # constructor.
        #
        # Since {% url %} can't mix positional and keyword arguments, we
        # must figure out whether we want to use args or kwargs.
        local_site_name = context.get('local_site_name', None)
        if local_site_name:
            local_site_var = template.Variable('local_site_name')
            if self.args:
                self.url_node.args = [local_site_var] + self.args
            else:
                self.url_node.kwargs['local_site_name'] = local_site_var
            try:
                return self.url_node.render(context)
            except (NoReverseMatch, ViewDoesNotExist):
                # We'll try it again without those arguments.
                pass
        # Restore the original arguments before the fallback render.
        self.url_node.args = list(self.args)
        self.url_node.kwargs = self.kwargs.copy()
        return self.url_node.render(context)
@register.tag
def url(parser, token):
    """LocalSite-aware drop-in replacement for Django's {% url %} tag."""
    wrapped_node = django_url(parser, token)
    return LocalSiteURLNode(wrapped_node)
| StarcoderdataPython |
import launch
from launch.actions import RegisterEventHandler
from launch.event_handlers.on_process_exit import OnProcessExit
from launch.events.process.process_exited import ProcessExited
from launch.launch_context import LaunchContext
from launch.actions import RegisterEventHandler
import launch_ros
class bcolors:
    """ANSI escape sequences for coloring/styling terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def generate_launch_description():
    """Launch the clock_server node alongside a compressed rosbag recording.

    An event handler shuts the whole launch down when either process exits
    with a non-zero status, so recording does not continue without its peers.
    """
    clock_server = launch_ros.actions.Node(
        package='ros2_com',
        executable='clock_server',
        name='clock_server'
    )
    # Record all topics, compressing each bag file with zstd.
    rosbag_node = launch.actions.ExecuteProcess(
        cmd=['ros2', 'bag', 'record', '-a', '--compression-mode','file',
        '--compression-format', 'zstd'],
        output='screen'
    )
    def shutdown_all(event:ProcessExited, context:LaunchContext):
        """On abnormal process exit, emit a Shutdown event for the whole launch."""
        if event.returncode != 0:
            print(f"{bcolors.FAIL}[ERROR] {event.action.name} node exited with status code {event.returncode}, shutting down recording nodes{bcolors.ENDC}")
            return launch.actions.EmitEvent(event=launch.events.Shutdown())
    event_hand = RegisterEventHandler(event_handler=OnProcessExit(on_exit=shutdown_all))
    return launch.LaunchDescription([
        clock_server,
        rosbag_node,
        event_hand
    ])
| StarcoderdataPython |
# example.py — math-examples-generator
import sys
import random
import argparse
# Fallback values used when the corresponding CLI option is omitted.
DEFAULT_DIV_AMOUNT = 10
DEFAULT_MUL_AMOUNT = 10
# Digit "rank" = number of digits in the operand (3 -> hundreds, 2 -> tens).
DEFAULT_FIRST_RANK = 3
DEFAULT_SECOND_RANK = 2
class Example:
    """One arithmetic drill: two random operands plus an operation.

    ``operation_char`` is ``'x'`` for multiplication or ``':'`` for division;
    division examples are constructed so they divide without remainder.
    A digit rank of n means the operand has exactly n digits.
    """

    def __init__(self, first_dig_rank, sec_dig_rank, operation_char='x'):
        self.first_dig_rank = first_dig_rank
        self.sec_dig_rank = sec_dig_rank
        self.operation_char = operation_char
        self.first_num, self.second_num = self.generate_nums()

    def generate_nums(self):
        """Draw the two operands according to the configured digit ranks."""
        sec_lo = 10 ** (self.sec_dig_rank - 1)
        sec_hi = 10 ** self.sec_dig_rank - 1
        sec_num = random.randint(sec_lo, sec_hi)
        first_lo = 10 ** (self.first_dig_rank - 1)
        first_hi = 10 ** self.first_dig_rank - 1
        if self.operation_char == 'x':
            first_num = random.randint(first_lo, first_hi)
        else:
            # Draw the quotient first so the division leaves no remainder.
            first_num = random.randint(first_lo, first_hi) * sec_num
        return (first_num, sec_num)

    def __repr__(self):
        return f'First num: {self.first_num}, Second num: {self.second_num}'

    def form_example_string(self):
        """Render the unsolved example, e.g. ``'123 x 45'``."""
        return f'{self.first_num} {self.operation_char} {self.second_num}'

    def form_example_string_solved(self):
        """Render the example together with its integer result."""
        op = self.operation_char
        result = self.first_num * self.second_num if op == 'x' else self.first_num / self.second_num
        return self.form_example_string() + f' = {int(result)}'
def init_args():
    """Parse and return the command-line options of the example generator."""
    arg_parser = argparse.ArgumentParser(
        description='Програма служить на генерування прикладів із математики на множення та ділення. Вона є помічником для батьків у справі вигадування таких прикладів.')
    arg_parser.add_argument(
        '-a', '--amount', type=int, metavar='',
        help='Кількість прикладів, по скільки для множення та для ділення.')
    arg_parser.add_argument(
        '-f', '--first', type=int, metavar='',
        help='Розряд першого числа. Сотні за замовчуванням.')
    arg_parser.add_argument(
        '-s', '--second', type=int, metavar='',
        help='Розряд другого числа. Десятки за замовчуванням.')
    return arg_parser.parse_args()
def _write_section(unsolved, solved, title, examples):
    """Write one titled, numbered block of examples to both output files."""
    unsolved.write(f'\t{title}:\n')
    solved.write(f'\t{title}:\n')
    for index, example in enumerate(examples, start=1):
        unsolved.write(f'\t\t{index}. {example.form_example_string()}\n')
        solved.write(f'\t\t{index}. {example.form_example_string_solved()}\n')


def main():
    """Generate the division and multiplication examples and write them to
    primery.txt (unsolved) and otvety.txt (solved), in the current directory.

    Reads the module-level INPUT_* globals set by the __main__ guard, falling
    back to the DEFAULT_* constants when they are None.
    """
    first_rank = INPUT_FIRST_RANK or DEFAULT_FIRST_RANK
    second_rank = INPUT_SECOND_RANK or DEFAULT_SECOND_RANK
    div_amount = INPUT_AMOUNT or DEFAULT_DIV_AMOUNT
    # BUGFIX: the multiplication list previously fell back to
    # DEFAULT_DIV_AMOUNT as well; use the dedicated constant.
    mul_amount = INPUT_AMOUNT or DEFAULT_MUL_AMOUNT
    divs = [Example(first_dig_rank=first_rank,
                    sec_dig_rank=second_rank,
                    operation_char=':')
            for _ in range(div_amount)]
    muls = [Example(first_dig_rank=first_rank,
                    sec_dig_rank=second_rank,
                    operation_char='x')
            for _ in range(mul_amount)]
    with open('primery.txt', 'w', encoding='utf-8') as unsolved, \
            open('otvety.txt', 'w', encoding='utf-8') as solved:
        unsolved.write('Приклади:\n\n')
        solved.write('Відповіді на приклади:\n\n')
        _write_section(unsolved, solved, 'Ділення', divs)
        unsolved.write('\n')
        solved.write('\n')
        _write_section(unsolved, solved, 'Множення', muls)
if __name__ == "__main__":
    # Parse CLI options once and expose them as module-level globals that
    # main() reads (they remain undefined when the module is merely imported).
    args = init_args()
    INPUT_AMOUNT = args.amount
    INPUT_FIRST_RANK = args.first
    INPUT_SECOND_RANK = args.second
    main()
| StarcoderdataPython |
# xapi_bridge/converter.py
"""Convert tracking log entries to xAPI statements."""
import logging
from xapi_bridge import exceptions, settings
from xapi_bridge.statements import base, course, problem, video, vertical_block, attachment
logger = logging.getLogger(__name__)
# Maps edX tracking-log event types to the xAPI statement class that converts
# them. Commented-out entries are known event types that are currently disabled.
TRACKING_EVENTS_TO_XAPI_STATEMENT_MAP = {
    # course enrollment
    'edx.course.enrollment.activated': course.CourseEnrollmentStatement,
    'edx.course.enrollment.deactivated': course.CourseUnenrollmentStatement,
    'edx.course.completed': course.CourseCompletionStatement,
    'edx.course.expell': course.CourseExpellStatement,
    # course completion
    #'edx.certificate.created': course.CourseCompletionStatement,
    # vertical block - composite kim completion
    'complete_vertical': vertical_block.VerticalBlockCompleteStatement,
    # problems
    'problem_check': problem.ProblemCheckStatement,
    'edx.attachment': attachment.AttachmentStatement,
    # 'edx.drag_and_drop_v2.item.dropped'
    # video ('video_check' is a synthetic type derived in to_xapi())
    #'ready_video': video.VideoStatement,
    #'load_video': video.VideoStatement,
    #'edx.video.loaded': video.VideoStatement,
    #'play_video': video.VideoPlayStatement,
    #'edx.video.played': video.VideoPlayStatement,
    'pause_video': video.VideoStatement,
    'video_check': video.VideoCheckStatement,
    'stop_video': video.VideoCompleteStatement,
    #'edx.video.stopped': video.VideoCompleteStatement,
    #'show_transcript': video.VideoTranscriptStatement,
    #'hide_transcript': video.VideoTranscriptStatement,
    #'edx.video.transcript.shown': video.VideoTranscriptStatement,
    #'edx.video.transcript.hidden': video.VideoTranscriptStatement,
    #'edx.video.closed_captions.shown': video.VideoTranscriptStatement,
    #'edx.video.closed_captions.hidden': video.VideoTranscriptStatement,
}
def to_xapi(evt):
    """Return tuple of xAPI statements or None if ignored or unhandled event type.

    Also returns None when the statement class signals a skipped conversion.
    """
    # strip Video XBlock prefixes for checking
    event_type = evt['event_type'].replace("xblock-video.", "")
    if event_type in settings.IGNORED_EVENT_TYPES:
        return  # deliberately ignored event
    # filter video_check from problem_check: a server-side problem_check whose
    # first answer carries 'watch_times' is really a video check event.
    event_source = evt['event_source']
    if event_type == 'problem_check' and event_source == 'server':
        event_data = evt['event']
        # dict views are not indexable in Python 3 (`.keys()[0]` raises
        # TypeError); take the first key via iter() instead.
        first_answer_key = next(iter(event_data['answers']))
        data = event_data['answers'][first_answer_key]
        if 'watch_times' in data:
            event_type = 'video_check'
    try:
        statement_class = TRACKING_EVENTS_TO_XAPI_STATEMENT_MAP[event_type]
    except KeyError:  # untracked event
        return
    try:
        statement = statement_class(evt)
        if hasattr(statement, 'version'):  # make sure it's a proper statement
            return (statement, )
        else:
            message = "Statement missing version."
            raise exceptions.XAPIBridgeStatementConversionError(event=evt, message=message)
    except exceptions.XAPIBridgeSkippedConversion as e:
        logger.debug("Skipping conversion of event with message {}. Event was {}".format(e.message, evt))
| StarcoderdataPython |
9731496 | <filename>locations/spiders/hm.py
import scrapy
from locations.items import GeojsonPointItem
import itertools
def chunks(l, n):
    """Yield successive slices of *l*, each at most *n* items long."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def partition(l, n):
    """Split *l* into a list of consecutive chunks of length at most *n*."""
    return [l[i:i + n] for i in range(0, len(l), n)]
def process_hours(opening_hours):
    """Convert raw store opening-hour strings into an OSM-style opening_hours value.

    Each input string looks like "Monday - Friday 10:00 AM - 7:00 PM" (day range)
    or "Sunday 12:00 PM - 6:00 PM" (single day). Returns entries joined with
    "; ", e.g. "Mo-Fr 10:00-19:00".
    """
    def _partition(seq, size):
        # Local copy of the module-level partition() so this function is
        # self-contained.
        return [seq[i:i + size] for i in range(0, len(seq), size)]

    ret_hours = []
    for hours_str in opening_hours:
        # Drop commas and duplicated AM/PM markers before tokenizing on spaces.
        split_hours = (
            hours_str.replace(",", "")
            .replace("AM AM", "")
            .replace("PM PM", "")
            .split(" ")
        )
        if split_hours[1] == "-":
            # "<day> - <day> <times...>" day-range form.
            range_start = split_hours[0]
            range_end = split_hours[2]
            times = _partition([x for x in split_hours[3:] if x != "-"], 2)
        else:
            # Single-day form: "<day> <times...>".
            range_start, range_end = split_hours[0], None
            times = _partition([x for x in split_hours[1:] if x != "-"], 2)
        # Group ("HH:MM", "AM"/"PM") pairs two at a time: one (start, end) per period.
        periods = _partition(times, 2)
        periods = [list(itertools.chain(*r)) for r in periods]
        period_list = []
        for start, start_period, end, end_period in periods:
            start_hour, start_minutes = [int(x) for x in start.split(":")]
            end_hour, end_minutes = [int(x) for x in end.split(":")]
            # Convert each endpoint to 24-hour time independently. The old code
            # shifted BOTH endpoints only when the *start* was PM, yielding
            # wrong hours for e.g. "10:00 AM - 7:00 PM", and mishandled 12 AM/PM.
            if start_period == "PM" and start_hour != 12:
                start_hour += 12
            elif start_period == "AM" and start_hour == 12:
                start_hour = 0
            if end_period == "PM" and end_hour != 12:
                end_hour += 12
            elif end_period == "AM" and end_hour == 12:
                end_hour = 0
            hours = (start_hour, start_minutes, end_hour, end_minutes)
            period_list.append("%02d:%02d-%02d:%02d" % hours)
        periods_str = ", ".join(period_list)
        # Day names are abbreviated to their first two letters (Mo, Tu, ...).
        if range_start and range_end:
            ret_hours.append(
                "{}-{} {}".format(range_start[:2], range_end[:2], periods_str)
            )
        elif range_start:
            ret_hours.append("{} {}".format(range_start[:2], periods_str))
    return "; ".join(ret_hours)
class HMSpider(scrapy.Spider):
    """Scrapes H&M store locations worldwide via the H&M store-locator REST API."""
    name = "hm-worldwide"
    item_attributes = {"brand": "H&M"}
    # {} placeholder is filled with an upper-case ISO country code.
    all_stores_uri = "https://hm.storelocator.hm.com/rest/storelocator/stores/1.0/locale/en_US/country/{}/"
    start_urls = ["http://www.hm.com/entrance.ahtml"]
    def parse(self, response):
        # The entrance page links per-country storefronts; extract the country
        # code from each link's query string (first "...=CC&..." parameter).
        country_urls = response.css(".column li a::attr('href')").extract()
        country_codes = {x.split("=")[1].split("&")[0].upper() for x in country_urls}
        for country_code in country_codes:
            yield scrapy.Request(
                url=self.all_stores_uri.format(country_code),
                callback=self.parse_country,
            )
    def parse_country(self, response):
        # The API returns XML; each <storeComplete> element is one store.
        stores = response.css("storeComplete")
        for store in stores:
            point = {
                "lat": store.xpath("latitude/text()").extract_first(),
                "lon": store.xpath("longitude/text()").extract_first(),
                "name": store.xpath("name/text()").extract_first(),
                "addr_full": store.xpath("address/addressLine/text()").extract_first(),
                "city": store.xpath("city/text()").extract_first(),
                "country": store.xpath("country/text()").extract_first(),
                "phone": store.xpath("phone/text()").extract_first(),
                "opening_hours": process_hours(
                    store.xpath("openingHours/openingHour/text()").extract()
                ),
                "ref": store.xpath("storeId/text()").extract_first(),
            }
            if "/country/US" in response.url:
                # US entries carry a region and end the last address line with
                # the ZIP code; pull both out.
                point["state"] = store.xpath("region/name/text()").extract_first()
                point["postcode"] = (
                    store.xpath("address/addressLine/text()")
                    .extract()[-1]
                    .split(" ")[-1]
                )
            yield GeojsonPointItem(**point)
| StarcoderdataPython |
6513482 | <filename>tests/dispatch/__init__.py<gh_stars>100-1000
"""Unit-tests for the dispatch project
"""
| StarcoderdataPython |
6520001 | import os, sys
import unittest
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..'))
from test_environment_device import WuTest
from configuration import *
class TestDiscovery(unittest.TestCase):
    """Exercises the WuKong discovery API against the test device environment."""
    def setUp(self):
        # Fresh test environment per test; flags are WuTest(False, False)
        # positional options (NOTE(review): semantics defined in
        # test_environment_device — confirm meaning before changing).
        self.test = WuTest(False, False)
    def test_basic_discoveryAPI(self):
        # After discovery, each known WuClass ID should be hosted by 3 objects.
        self.test.discovery()
        res = self.test.countWuObjectByWuClassID(1901) # Binary_TestSensor
        self.assertEqual(res, 3)
        res = self.test.countWuObjectByWuClassID(1902) # Integer_TestSensor
        self.assertEqual(res, 3)
        res = self.test.countWuObjectByWuClassID(2005) # Sound
        self.assertEqual(res, 3)
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
314550 | from kivy.uix.textinput import TextInput
class MaskaredText(TextInput):
    """Kivy TextInput that filters typed characters by an allowed-symbol set
    and/or an input mask ('#' accepts a digit, any other char is auto-inserted).
    """
    # Characters accepted when use_symbol is enabled.
    symbol = "0123456789/"
    use_symbol = True
    # Mask template: '#' means "digit here"; literal chars are inserted for the user.
    mask = "##/##/####"
    use_mask = True
    def insert_text(self, string, from_undo=False):
        # Called by kivy for every insertion; we rewrite or reject `string`.
        text_size = len(self.text)
        mask_size = len(self.mask)
        if self.use_symbol:
            """Used to filter the text by the symbol that you specify."""
            # Reject any character not in the allowed symbol set.
            if string in self.symbol:
                pass
            else:
                return super(MaskaredText, self).insert_text("", from_undo=from_undo)
        if self.use_mask:
            """Used to filter the text by the mask that you specify."""
            # Mask already filled: refuse further input.
            if text_size == mask_size:
                return super(MaskaredText, self).insert_text("", from_undo=from_undo)
            try:
                if self.mask[text_size] == "#":
                    # Position expects a digit; int() raises for anything else.
                    int(string)
                else:
                    # Literal mask character: prepend it when the user typed
                    # something else, then insert.
                    if string != self.mask[text_size]:
                        string = self.mask[text_size] + string
                    return super(MaskaredText, self).insert_text(string, from_undo=from_undo)
            except Exception as e:
                # Non-digit where the mask expects one: drop the input.
                print("except", str(e))
                return super(MaskaredText, self).insert_text("", from_undo=from_undo)
        return super(MaskaredText, self).insert_text(string, from_undo=from_undo)
if __name__ == "__main__":
    # Demo app: a MaskaredText widget plus controls to edit the symbol set
    # and mask at runtime. The kv layout string is runtime data — do not edit.
    from kivy.app import App
    from kivy.lang import Builder
    class MainApp(App):
        def build(self):
            return Builder.load_string("""
BoxLayout:
    orientation: "vertical"
    MaskaredText:
        font_size: sp(50)
        multiline: True
        mask: mask.text
        symbol: symbol.text
        use_symbol: use_symbol.active
        use_mask: use_mask.active
    BoxLayout:
        Label:
            text: "Symbol"
            size_hint_x: .3
        CheckBox:
            id: use_symbol
            active: True
            size_hint_x: .2
        TextInput:
            id: symbol
            size_hint_x: .5
            text: "0123456789/"
            hint_text: "Your Symbol"
    BoxLayout:
        Label:
            text: "Mask"
            size_hint_x: .3
        CheckBox:
            id: use_mask
            active: True
            size_hint_x: .2
        TextInput:
            id: mask
            size_hint_x: .5
            text: "##/##/####"
            hint_text: "Your Mask"
        """)
MainApp().run() | StarcoderdataPython |
8163830 | <reponame>euan-forrester/photo-recommender-poc
import flickrapi
import logging
import requests
from django.core.cache import cache
from django.conf import settings
class FlickrApiWrapper:
    """
    Wraps around the flickrapi package: adds in retries to calls that fail, and external cacheing via memcached
    """
    def __init__(self, flickr_api_key, flickr_api_secret, memcached_location, memcached_ttl, max_retries):
        # Configure Django's cache framework standalone so the flickrapi
        # package can use it as its response cache.
        settings.configure(CACHES = {
            'default': {
                'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
                'LOCATION': memcached_location,
                'KEY_FUNCTION': FlickrApiWrapper._make_memcached_key,
                'TIMEOUT': memcached_ttl,
            }
        })
        self.flickr = flickrapi.FlickrAPI(flickr_api_key, flickr_api_secret, format='parsed-json', cache=True)
        self.flickr.cache = cache
        self.max_retries = max_retries
    def get_person_info(self, user_id):
        # Fetch a user's profile, retrying transient API failures.
        lambda_to_call = lambda: self.flickr.people.getInfo(user_id=user_id)
        person_info = self._call_with_retries(lambda_to_call)
        logging.info("Just called get_person_info for user %s" % (user_id))
        return person_info
    def get_favorites(self, user_id, max_per_call, max_to_get):
        """Page through a user's favorites until empty or max_to_get collected."""
        got_all_favorites = False
        current_page = 1
        favorites = []
        while not got_all_favorites and len(favorites) < max_to_get:
            favorites_subset = self._get_favorites_page(user_id, max_per_call, current_page)
            if len(favorites_subset['photos']['photo']) > 0: # We can't just check if the number we got back == the number we requested, because frequently we can get back < the number we requested but there's still more available. This is likely due to not having permission to be able to view all of the ones we requested
                favorites.extend(favorites_subset['photos']['photo'])
            else:
                got_all_favorites = True
            current_page += 1
        favorites_up_to_max = favorites[0:max_to_get]
        logging.info("Returning %d favorites which took %d calls" % (len(favorites_up_to_max), current_page - 1))
        return favorites_up_to_max
    def _get_favorites_page(self, user_id, max_per_call, page_number):
        # Fetch one page of favorites, with image URLs, retrying failures.
        lambda_to_call = lambda: self.flickr.favorites.getList(user_id=user_id, extras='url_l,url_m', per_page=max_per_call, page=page_number)
        favorites = self._call_with_retries(lambda_to_call)
        logging.info("Just called get_favorites_page for page %d with max_per_call %d and returning %d faves" % (page_number, max_per_call, len(favorites['photos']['photo'])))
        return favorites
    @staticmethod
    def _make_memcached_key(key, key_prefix, version):
        # Similar to the default key function, except that we translate the key first. The FlickrAPI package
        # uses objects as keys, then calls repr() on it to translate it into a string. This means the string will have
        # spaces in the name, but memcached won't accept spaces in the key names, so we have to replace those
        translated_key = repr(key).replace(' ', '$')
        return '%s:%s:%s' % (key_prefix, version, translated_key)
    def _call_with_retries(self, lambda_to_call):
        """Invoke the lambda, retrying up to self.max_retries on known
        transient errors; re-raises the last error when all attempts fail."""
        num_retries = 0
        result = None
        success = False
        error = None
        while (num_retries < self.max_retries) and not success:
            try:
                result = lambda_to_call()
                success = True
            except flickrapi.exceptions.FlickrError as e:
                # You get random 502s when making lots of calls to this API, which apparently indicate rate limiting:
                # https://www.flickr.com/groups/51035612836@N01/discuss/72157646430151464/
                # Sleeping between calls didn't seem to always solve it, but retrying does
                # There doesn't seem to be a way to determine that this happened from the exception object other than to test
                # the string against "do_request: Status code 502 received"
                logging.debug("Got FlickrError %s" % (e))
                error = e
            except requests.exceptions.ConnectionError as e:
                logging.debug("Got ConnectionError %s" % (e))
                # Sometimes we see a random "Remote end closed connection without response" error
                error = e
            num_retries += 1
        if not success:
            raise error
return result | StarcoderdataPython |
263246 | import os
import dialogflow_v2beta1 as dialogflow
from google.api_core.exceptions import InvalidArgument
import random
import json
import packages.covid_endpoint
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = './configfiles/private_key.json'
class DialogFlowWrapper:
    """Wrapper around the Dialogflow detect-intent API for the COVID-19 bot.

    Routes each detected intent to a dedicated response_* handler and keeps a
    table of per-state helpline numbers loaded from training/numbers.json.
    """
    def __init__(self):
        self.DIALOGFLOW_PROJECT_ID = 'covid-19bot-sbsl'
        self.DIALOGFLOW_LANGUAGE_CODE = 'en'
        # lower-cased state name -> list of helpline number collections.
        self.helplines = self.populate_helpline_numbers()
        self.covidwrapper = packages.covid_endpoint.CovidWrapper()
    def populate_helpline_numbers(self):
        """Load training/numbers.json and index helplines by lower-cased state."""
        folder_name = os.path.join(os.getcwd(), 'training')
        file_name = 'numbers.json'
        data = {}
        with open(os.path.join(folder_name, file_name), 'r') as file_pointer:
            data = json.load(file_pointer)
        helplines = {}
        for key, value in data.items():
            key = key.lower()
            if key not in helplines:
                helplines[key] = [value]
            else:
                helplines[key].append(value)
        return helplines
    def process_input(self, text_to_be_analyzed, chat_id, first_name):
        """Send the user's text to Dialogflow and dispatch on the detected intent.

        chat_id doubles as the Dialogflow session id so conversation context is
        kept per chat; first_name personalizes welcome/fallback replies.
        """
        SESSION_ID = chat_id
        session_client = dialogflow.SessionsClient()
        session = session_client.session_path(
            self.DIALOGFLOW_PROJECT_ID, SESSION_ID)
        text_input = dialogflow.types.TextInput(
            text=text_to_be_analyzed, language_code=self.DIALOGFLOW_LANGUAGE_CODE)
        query_input = dialogflow.types.QueryInput(text=text_input)
        # Knowledge base used for FAQ answers (id is the base64-looking suffix).
        knowledge_base_path = dialogflow.knowledge_bases_client \
            .KnowledgeBasesClient \
            .knowledge_base_path(self.DIALOGFLOW_PROJECT_ID, 'MTY5NjYxNzQ3MDEwOTUyMjMyOTY')
        query_params = dialogflow.types.QueryParameters(
            knowledge_base_names=[knowledge_base_path])
        try:
            response = session_client.detect_intent(
                session=session, query_input=query_input)
        except InvalidArgument:
            # NOTE(review): if detect_intent raises, `response` is unbound and
            # the next line raises NameError — consider returning/raising here.
            pass
        detected_intent = response.query_result.intent.display_name
        reply = ""
        # Dispatch on the intent's display name configured in Dialogflow.
        if detected_intent == 'city_handler':
            reply = self.response_city_handler(response)
        elif detected_intent == 'state_handler':
            reply = self.response_state_handler(response)
        elif detected_intent == 'Knowledge.KnowledgeBase.MTY5NjYxNzQ3MDEwOTUyMjMyOTY':
            reply = self.response_faq(response)
        elif detected_intent == 'Default Welcome Intent':
            reply = self.response_welcome(response, first_name)
        elif detected_intent == 'Default Fallback Intent':
            reply = self.response_fallback(response, first_name)
        elif detected_intent == 'country_handler':
            reply = self.response_country_handler(response)
        elif detected_intent == 'helpline':
            reply = self.response_helpline(response)
        return reply
    def response_helpline(self, response):
        """Return helpline number(s) for the geo-state Dialogflow extracted."""
        state_name = response.query_result.parameters.fields['geo-state'].string_value
        if state_name == "":
            # No state recognized: let Dialogflow's own prompt ask for one.
            return response.query_result.fulfillment_text
        else:
            state_name = state_name.lower()
            num_lst = []
            if state_name in self.helplines:
                reply = ''
                # Flatten the nested number collections for this state.
                for item in self.helplines[state_name]:
                    for numbers in item:
                        num_lst.append(numbers)
                if len(num_lst) == 1:
                    reply = num_lst[0]
                else:
                    reply = f'You can call on any of the below numbers\n'
                    for numbers in num_lst:
                        reply += f'{numbers}\n'
                return reply
            else:
                return "Cannot find State. Make sure that you spelled the name correctly"
    def response_city_handler(self, response):
        """Return COVID statistics for the geo-city Dialogflow extracted."""
        city_name = response.query_result.parameters.fields['geo-city'].string_value
        if city_name == "":
            return response.query_result.fulfillment_text
        reply = ""
        confirmed_cases, active_cases, recovered, deaths = self.covidwrapper.get_district_data(
            city_name)
        reply += f'The details for {city_name}\nConfirmed Cases: {confirmed_cases}\nActive Cases: {active_cases}\nRecovered: {recovered}\nDeaths: {deaths}\n'
        return reply
    def response_state_handler(self, response):
        """Return COVID statistics for the geo-state Dialogflow extracted."""
        state_name = response.query_result.parameters.fields['geo-state'].string_value
        if state_name == "":
            return response.query_result.fulfillment_text
        reply = ""
        confirmed_cases, active_cases, recovered, deaths = self.covidwrapper.get_state_data(
            state_name)
        reply += f'The details for {state_name}\nConfirmed Cases: {confirmed_cases}\nActive Cases: {active_cases}\nRecovered: {recovered}\nDeaths: {deaths}\n'
        return reply
    def response_country_handler(self, response):
        """Return nationwide COVID statistics (always India, per the endpoint)."""
        country_name = response.query_result.parameters.fields['geo-country'].string_value
        if country_name == "":
            return response.query_result.fulfillment_text
        reply = ""
        confirmed_cases, active_cases, recovered, deaths = self.covidwrapper.get_country_data()
        reply += f'The details for India\nConfirmed Cases: {confirmed_cases}\nActive Cases: {active_cases}\nRecovered: {recovered}\nDeaths: {deaths}\n'
        return reply
    def response_faq(self, response):
        # Knowledge-base answers come back pre-rendered by Dialogflow.
        return response.query_result.fulfillment_text
    def prefix_reply(self, responses):
        """Returns a random string from a given set of responses"""
        pos = random.randint(0, len(responses) - 1)
        return responses[pos]
    def response_welcome(self, response, first_name):
        # Random emoticon on one line, random greeting + name on the next.
        emoticons = ['😌', '🙂']
        responses = ['Hi ', 'Hello ', 'Whats up ', 'How are you ']
        reply = self.prefix_reply(responses) + first_name
        return "\n".join([self.prefix_reply(emoticons), reply])
    def response_fallback(self, response, first_name):
        # Random "confused" emoticon followed by a fixed apology phrase.
        phrase = "Wait there!! I am still learning"
        responses = ['🙄', '🤔', '😬', '😐']
        reply = self.prefix_reply(responses) + "\n" + phrase
        return reply
if __name__ == "__main__":
    # Simple console smoke test for the wrapper.
    d = DialogFlowWrapper()
    ipt = input()
    # process_input() requires (text, chat_id, first_name); the previous call
    # omitted first_name and raised TypeError.
    d.process_input(ipt, 'ciah', 'console-user')
| StarcoderdataPython |
4938092 | # V1 : DEV
# V2
class Solution:
    """Weighted random index picker ("Random Pick with Weight") using a
    prefix-sum table plus a hand-rolled binary search (V2 variant).

    Relies on the module-level `random` import further down this file.
    """
    def __init__(self, w):
        """
        :type w: List[int]
        """
        # preSum[i] = w[0] + ... + w[i]; the last entry is the total weight.
        self.preSum = [0] * len(w)
        self.preSum[0] = w[0]
        for i in range(1, len(w)):
            self.preSum[i] = self.preSum[i - 1] + w[i]
    def pickIndex(self):
        """
        :rtype: int
        """
        # Draw uniformly from [0, total) so index i wins with probability
        # w[i] / total, then binary-search for the owning bucket.
        total = self.preSum[-1]
        rand = random.randint(0, total - 1)
        # Invariant: answer is in (left, right]; loop keeps left+1 < right.
        left, right = 0, len(self.preSum) - 1
        while left + 1 < right:
            mid = (left + right) // 2
            if rand >= self.preSum[mid]:
                left = mid
            else:
                right = mid
        # Decide between the two remaining candidates.
        if rand < self.preSum[left]:
            return left
        return right
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
# V3
# Time: ctor: O(n)
# pickIndex: O(logn)
# Space: O(n)
import random
import bisect
class Solution(object):
    """Weighted random index picker backed by a prefix-sum table and bisect."""
    def __init__(self, w):
        """
        :type w: List[int]
        """
        # Build the running cumulative sums of the weights.
        sums = []
        running_total = 0
        for weight in w:
            running_total += weight
            sums.append(running_total)
        self.__prefix_sum = sums
    def pickIndex(self):
        """
        :rtype: int
        """
        # Draw uniformly from [0, total) and locate the owning bucket: index i
        # wins whenever the draw falls in [prefix[i-1], prefix[i]).
        draw = random.randint(0, self.__prefix_sum[-1] - 1)
        return bisect.bisect_right(self.__prefix_sum, draw)
| StarcoderdataPython |
3267440 | import pickle
def read_dataset(eval_path):
    """Read the (difficult_word, pos_tag) columns from a tab-separated file.

    Columns 1 and 2 of each line are collected; returns two parallel lists.
    """
    difficult_words = []
    pos_tags = []
    with open(eval_path, 'r', encoding='utf-8') as reader:
        for line in reader:
            columns = line.strip().split('\t')
            difficult_words.append(columns[1])
            pos_tags.append(columns[2])
    return difficult_words, pos_tags
def save_res(res, output_path):
    """Append the candidate words as one space-separated line to output_path."""
    line = ' '.join(res) + '\n'
    with open(output_path, 'a', encoding='utf-8') as sink:
        sink.write(line)
def main():
    """Emit HowNet-based simplification candidates for each difficult word.

    Reads the annotation dataset, looks each difficult word up in the pickled
    HowNet candidate table, keeps candidates no longer than the original word,
    and appends one line per word (or 'NULL') to the output file.
    """
    EVAL_PATH = './dataset/annotation_data.csv'
    OUTPUT_PATH = './data/hownet_output.csv'
    eval_path = EVAL_PATH
    output_path = OUTPUT_PATH
    difficult_words, pos_tags = read_dataset(eval_path)
    with open('./hownet/word_candidates_decoded.pkl', 'rb') as fp:
        word_candidates = pickle.load(fp)
    for difficult_word, pos_tag in zip(difficult_words, pos_tags):
        try:
            # Collect candidates across all POS buckets for the word.
            res = word_candidates[difficult_word].get('noun')
            res.extend(word_candidates[difficult_word].get('verb'))
            res.extend(word_candidates[difficult_word].get('adj'))
            res.extend(word_candidates[difficult_word].get('adv'))
            # Keep only candidates no longer than the original word.
            res = [word for word in res if len(word) <= len(difficult_word)]
        except (KeyError, TypeError, AttributeError):
            # Word missing from the table, or a POS bucket absent (.get() -> None
            # breaks extend): fall back to the NULL marker. The previous bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit.
            res = ['NULL']
        if len(res) == 0:
            res.append('NULL')
        save_res(res, output_path)
if __name__ == '__main__':
main() | StarcoderdataPython |
5009739 | import pytest
@pytest.fixture(scope="module")
def smtp_connection():
    # NOTE(review): this fixture only triggers the import side effect and
    # yields/returns nothing — presumably a stub; confirm intended behavior.
    from uaa_bot.notifier import smtplib
| StarcoderdataPython |
3420398 | import numpy.testing as npt
import pytest
from scipy.special import logsumexp
import numpy as np
from ..lemm import LEMM_Parameters, GLEMM_Parameters, GLEMM_Parameters_Untied
class Test_Parameters:
    """Unit tests for the LEMM/GLEMM parameter container classes."""
    def test_lemm_parameters(self):
        # M mixture components, m-dimensional latent, n-dimensional output.
        M = 10
        m = 4
        n = 2
        # Normalized log mixture weights.
        logp = np.random.standard_normal((M,))
        logp -= logsumexp(logp, keepdims=True)
        V = np.random.randn(m, n)
        TH = LEMM_Parameters(V, M, logp)
        assert TH.M == M
        assert TH.m == m
        assert TH.n == n
        assert not TH.gaussian
        # Passing logp=None must auto-fill a valid (normalized) distribution.
        TH2 = LEMM_Parameters(V, M, None)
        assert TH2.logp.shape == (M,)
        npt.assert_allclose(np.sum(np.exp(TH2.logp)), 1.0)
        # Mismatched M vs. logp length must be rejected.
        with pytest.raises(ValueError):
            LEMM_Parameters(V, M-1, logp)
    def test_glemm_parameters(self):
        M = 10
        m = 4
        n = 2
        V = np.random.randn(m, n)
        # Each covariance type with a minimal valid covariance value.
        covars = [
            ('spherical', 1.0),
            ('diagonal', np.ones(n)),
            ('full', np.eye(n)),
        ]
        for cv_type, covar in covars:
            GLEMM_Parameters(V, M, None, cv_type, covar)
        X = np.random.randn(20, n)
        # Tied-covariance parameters: widen then narrow the covariance type and
        # check the derived attributes' shapes at each step.
        TH = GLEMM_Parameters(V, M, None, 'spherical', 1.0)
        TH.relax_type('diagonal')
        assert TH.covar_type == 'diagonal'
        assert TH.cv_chol.shape == (n,)
        assert TH.calc_XX(X).shape == (n,)
        TH.relax_type('full')
        assert TH.covar_type == 'full'
        assert TH.cv_invchol.shape == (n, n)
        assert TH.calc_XX(X).shape == (n, n)
        TH.restrict_type('diagonal')
        assert TH.covar_type == 'diagonal'
        TH.restrict_type('spherical')
        assert TH.covar_type == 'spherical'
        assert np.shape(TH.calc_XX(X)) == ()
        TH.relax_type('full')
        TH.restrict_type('spherical')
        TH.untie()
        # Untied parameters: one covariance per component (leading M axis).
        covar = np.random.standard_exponential(size=(M,))
        TH = GLEMM_Parameters_Untied(V, M, None, 'spherical', covar)
        TH.relax_type('diagonal')
        assert TH.covar_type == 'diagonal'
        assert TH.cv_chol.shape == (M, n)
        TH.relax_type('full')
        assert TH.covar_type == 'full'
        assert TH.cv_invchol.shape == (M, n, n)
        TH.restrict_type('diagonal')
        assert TH.covar_type == 'diagonal'
        TH.restrict_type('spherical')
        assert TH.covar_type == 'spherical'
        TH.relax_type('full')
        TH.restrict_type('spherical')
| StarcoderdataPython |
6602521 | <gh_stars>1-10
'''
Settings for Tornado app go here
'''
import os
from tornado.httpserver import HTTPServer
from tornado.options import define, options
from tornado.web import Application,StaticFileHandler
from tornado.ioloop import IOLoop
from ri5c.views import GraphView
define('port', default=8000, help='port to listen on')
def main():
    """Construct and serve the tornado application."""
    cwd = os.getcwd() # static files
    # Port from the environment; NOTE(review): the module-level
    # `define('port', ...)` option appears unused here — PORT env wins.
    port = int(os.environ.get("PORT", 8000))
    path = os.path.join(cwd, "paper") # Path to dl file
    app = Application([
        (r'/', GraphView),
        # Static files, repeat for other file names
        (r'/(.*\.js)', StaticFileHandler, {"path": cwd} ),
        (r'/download/(barojas_v193\.pdf)', StaticFileHandler, {'path': path} ), # Static serving file
    ])
    http_server = HTTPServer(app)
    http_server.listen(port)
    print('RI5C is listening on port:%i' % port)
    # Blocks forever serving requests.
    IOLoop.current().start()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1907299 | import os
import pathlib
from flask_babelex import lazy_gettext
from psycopg2.errorcodes import CHECK_VIOLATION, UNIQUE_VIOLATION
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.base import NO_CHANGE, object_state
def strings_are_equal_ignoring_case(string1: str, string2: str) -> bool:
    """Return True when both strings are non-empty and equal ignoring case.

    Uses casefold() for Unicode-aware caseless comparison. The previous
    version returned the short-circuited operand (e.g. '' or None) instead of
    an actual bool for falsy inputs; bool() normalizes the result while
    preserving truthiness for all callers.
    """
    return bool(string1 and string2 and string1.casefold() == string2.casefold())
def get_event_category_name(category):
    # Localization key is the category enum member name prefixed with "Event_".
    return lazy_gettext("Event_" + category.name)
def get_localized_enum_name(enum):
    # Key pattern: "<EnumClassName>.<member_name>".
    return lazy_gettext(enum.__class__.__name__ + "." + enum.name)
def get_localized_scope(scope: str) -> str:
    # Key pattern: "Scope_<scope>". lazy_gettext returns a lazy proxy,
    # evaluated in the request's locale.
    loc_key = "Scope_" + scope
    return lazy_gettext(loc_key)
def get_location_str(location) -> str:
    """Format a location as "street, postal city", falling back sensibly.

    Returns "" for a missing location, "street, postalCode city" when a real
    street is present, otherwise "postalCode city" (stripped), or "".
    """
    if not location:
        return ""
    street = location.street
    if street and not street.isspace():
        return f"{street}, {location.postalCode} {location.city}"
    if location.postalCode or location.city:
        return f"{location.postalCode} {location.city}".strip()
    return ""
def get_place_str(place) -> str:
    """Format a place as "name, location", falling back to the bare name."""
    if not place:
        return ""
    if not place.location:
        return place.name
    return f"{place.name}, {get_location_str(place.location)}"
def make_dir(path):
    """Create *path* (including parents) while a zeroed umask is in effect,
    so the directory gets the widest permissions the filesystem allows."""
    try:
        saved_umask = os.umask(0)
        pathlib.Path(path).mkdir(parents=True, exist_ok=True)
    finally:
        # Always restore the process umask, even if mkdir failed.
        os.umask(saved_umask)
def clear_files_in_dir(path):
    """Remove every regular file and symlink directly under *path*.

    Subdirectories (and their contents) are left untouched.
    """
    with os.scandir(path) as dir_entries:
        for dir_entry in dir_entries:
            if dir_entry.is_file() or dir_entry.is_symlink():
                os.remove(dir_entry.path)
def split_by_crlf(s):
    """Split *s* on any line boundary and return only the non-empty lines."""
    lines = []
    for line in s.splitlines():
        if line:
            lines.append(line)
    return lines
def make_integrity_error(
    pgcode: str, message: str = "", statement: str = None
) -> IntegrityError:
    """Build a SQLAlchemy IntegrityError carrying a fake psycopg2 error.

    Useful in tests: the wrapped object mimics psycopg2's error interface
    (pgcode/message attributes) without needing a real database failure.
    """
    class Psycog2Error(object):
        def __init__(self, pgcode, message):
            self.pgcode = pgcode
            self.message = message
    orig = Psycog2Error(pgcode, message)
    return IntegrityError(statement, list(), orig)
def make_check_violation(message: str = None, statement: str = "") -> IntegrityError:
    # Convenience wrapper: IntegrityError with the CHECK constraint pgcode.
    return make_integrity_error(CHECK_VIOLATION, message, statement)
def make_unique_violation(message: str = None, statement: str = "") -> IntegrityError:
    # Convenience wrapper: IntegrityError with the UNIQUE constraint pgcode.
    return make_integrity_error(UNIQUE_VIOLATION, message, statement)
def get_pending_changes(
    instance, include_collections=True, passive=None, include_keys=None
) -> dict:
    """Collect uncommitted attribute changes on a SQLAlchemy ORM instance.

    Returns {attribute_key: [new_value, old_value]} for every modified
    attribute; empty dict when the instance has no pending modifications.
    NOTE(review): the `passive` parameter is accepted but never used —
    history is always read with NO_CHANGE; confirm whether that is intended.
    """
    result = {}
    state = object_state(instance)
    if not state.modified:  # pragma: no cover
        return result
    dict_ = state.dict
    for attr in state.manager.attributes:
        # Skip attributes filtered out by include_keys, collection attributes
        # when include_collections is False, and attributes without history.
        if (
            (include_keys and attr.key not in include_keys)
            or (not include_collections and hasattr(attr.impl, "get_collection"))
            or not hasattr(attr.impl, "get_history")
        ):  # pragma: no cover
            continue
        (added, unchanged, deleted) = attr.impl.get_history(
            state, dict_, passive=NO_CHANGE
        )
        if added or deleted:
            # Single-valued convention: first deleted entry is the old value,
            # first added entry is the new one (None when absent).
            old_value = deleted[0] if deleted else None
            new_value = added[0] if added else None
            result[attr.key] = [new_value, old_value]
    return result
| StarcoderdataPython |
369871 | #!/usr/bin/env python
from tinyrenderer.core import engine
import tinymath as tm
import tinyutils as tu
import numpy as np
from OpenGL.GL import *
if __name__ == '__main__' :
    # Demo: renders a lit mesh (Phong/Gouraud toggle) with an orbit camera and
    # a small cube gizmo marking the point-light position.
    app = engine.Application()
    cameraProjData = engine.CameraProjData()
    cameraProjData.projection = engine.CameraProjection.PERSPECTIVE
    cameraProjData.fov = 45.0
    cameraProjData.aspect = app.window().aspect
    cameraProjData.zNear = 0.1
    cameraProjData.zFar = 100.0
    camera = engine.OrbitCamera( 'orbit',
                                 [ 0.0, 0.0, 3.0 ],
                                 [ 0.0, 0.0, 0.0 ],
                                 engine.Axis.Z,
                                 cameraProjData,
                                 app.window().width,
                                 app.window().height )
    #### cameraSensitivity = 0.25
    #### cameraSpeed = 25.0
    #### cameraMaxDelta = 10.0
    #### camera = engine.FpsCamera( 'fps',
    ####                            [ 0.0, 0.0, 3.0 ],
    ####                            [ 0.0, 0.0, 0.0 ],
    ####                            engine.Axis.Z,
    ####                            cameraProjData,
    ####                            cameraSensitivity,
    ####                            cameraSpeed,
    ####                            cameraMaxDelta )
    box = engine.MeshBuilder.CreateBox( 3.0, 3.0, 3.0 )
    sphere = engine.MeshBuilder.CreateSphere( 1.5 )
    # Small cube used to visualize where the light is.
    gizmo = engine.MeshBuilder.CreateBox( 0.2, 0.2, 0.2 )
    gizmo.position = [ 0.0, 0.0, 2.0 ]
    # load the shader used for this example
    baseNamePhong = engine.ENGINE_EXAMPLES_PATH + 'lights/shaders/phong'
    shaderPhong = engine.ShaderManager.CreateShaderFromFiles( 'phong_shader',
                                                              baseNamePhong + '_vs.glsl',
                                                              baseNamePhong + '_fs.glsl' )
    baseNameGouraud = engine.ENGINE_EXAMPLES_PATH + 'lights/shaders/gouraud'
    shaderGouraud = engine.ShaderManager.CreateShaderFromFiles( 'gouraud_shader',
                                                                baseNameGouraud + '_vs.glsl',
                                                                baseNameGouraud + '_fs.glsl' )
    assert shaderPhong, 'Could not load phong shader for our tests :('
    assert shaderGouraud, 'Could not load gouraud shader for our tests :('
    # grab a simple shader to render the camera gizmo
    shaderGizmo = engine.ShaderManager.GetCachedShader( "basic3d_no_textures" )
    assert shaderGizmo, 'Could not grab the basic3d shader to render the light gizmo :('
    # select shader to use
    shaderLighting = shaderPhong
    mesh = sphere
    # mesh.position = [ 1.0, 1.0, 0.0 ]
    # mesh.scale = [ 0.5, 1.0, 1.5 ]
    moveLight = False
    mvParam = 0.0
    while( app.active() ) :
        tu.Clock.Tick()
        # Keys: ESC quits, S toggles Phong/Gouraud, P toggles light orbiting.
        if ( engine.InputManager.IsKeyDown( engine.Keys.KEY_ESCAPE ) ) :
            break
        elif ( engine.InputManager.CheckSingleKeyPress( engine.Keys.KEY_S ) ) :
            shaderLighting = shaderGouraud if ( shaderLighting.name == 'phong_shader' ) else shaderPhong
            print( 'Using shader: {}'.format( shaderLighting.name ) )
        elif ( engine.InputManager.CheckSingleKeyPress( engine.Keys.KEY_P ) ) :
            moveLight = not moveLight
            print( 'Light state: {}'.format( 'moving' if moveLight else 'fixed' ) )
        # Draw the world axes (x red, y green, z blue).
        engine.DebugDrawer.DrawLine( [ 0.0, 0.0, 0.0 ], [ 5.0, 0.0, 0.0 ], [ 1.0, 0.0, 0.0 ] )
        engine.DebugDrawer.DrawLine( [ 0.0, 0.0, 0.0 ], [ 0.0, 5.0, 0.0 ], [ 0.0, 1.0, 0.0 ] )
        engine.DebugDrawer.DrawLine( [ 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 5.0 ], [ 0.0, 0.0, 1.0 ] )
        app.begin()
        camera.update()
        if ( moveLight ) :
            # Orbit the light gizmo around the origin in the XY plane.
            mvParam += tu.Clock.GetAvgTimeStep()
            #### gizmo.position = [ 1.0 + np.sin( mvParam ) * 2.0, np.sin( mvParam / 2.0 ) * 1.0 ]
            gizmo.position = [ 10.0 * np.sin( mvParam ), 10.0 * np.cos( mvParam ), 0.0 ]
        # do our thing here ######################################################
        shaderLighting.bind()
        shaderLighting.setMat4( "u_modelMat", mesh.matModel() )
        shaderLighting.setMat4( "u_viewProjMat", np.dot(camera.matProj(), camera.matView() ) )
        # Normal matrix = transpose of the inverse model matrix.
        shaderLighting.setMat4( "u_normalMat", tm.inverse( mesh.matModel() ).transpose() )
        shaderLighting.setVec3( "u_objectColor", [ 1.0, 0.5, 0.31 ] )
        shaderLighting.setVec3( "u_lightColor", [ 1.0, 1.0, 1.0 ] )
        shaderLighting.setVec3( "u_lightPosition", gizmo.position )
        shaderLighting.setVec3( "u_viewerPosition", camera.position )
        mesh.render()
        shaderLighting.unbind()
        # Render the light gizmo with the plain (unlit) shader.
        shaderGizmo.bind()
        shaderGizmo.setMat4( 'u_tModel', gizmo.matModel() )
        shaderGizmo.setMat4( 'u_tView', camera.matView() )
        shaderGizmo.setMat4( 'u_tProj', camera.matProj() )
        shaderGizmo.setVec3( 'u_color', [ 1.0, 1.0, 1.0 ] )
        gizmo.render()
        shaderGizmo.unbind()
        ##########################################################################
        #### engine.DebugDrawer.DrawNormals( mesh, [ 0.0, 0.0, 1.0 ] )
        engine.DebugDrawer.Render( camera )
        app.end()
        tu.Clock.Tock()
| StarcoderdataPython |
1751619 | <gh_stars>0
'''
Manage projects for an instance of the Data Migration Service.
'''
from ... pyaz_utils import _call_az
from . import task
# Each command below forwards its arguments to the Azure CLI via _call_az with
# locals(); parameter names must therefore match the az CLI flag names exactly
# (do not rename them). `list` intentionally mirrors the CLI verb and shadows
# the builtin.
def create(location, name, resource_group, service_name, source_platform, target_platform, tags=None):
    '''
    Create a migration project which can contain multiple tasks.
    Required Parameters:
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - name -- The name of the Project.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the Service.
    - source_platform -- None
    - target_platform -- None
    Optional Parameters:
    - tags -- A space-delimited list of tags in "tag1[=value1]" format.
    '''
    return _call_az("az dms project create", locals())
def delete(name, resource_group, service_name, delete_running_tasks=None, yes=None):
    '''
    Delete a project.
    Required Parameters:
    - name -- The name of the Project.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the Service.
    Optional Parameters:
    - delete_running_tasks -- Delete the resource even if it contains running tasks.
    - yes -- Do not prompt for confirmation.
    '''
    return _call_az("az dms project delete", locals())
def list(resource_group, service_name):
    '''
    List the projects within an instance of DMS.
    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the Service.
    '''
    return _call_az("az dms project list", locals())
def show(name, resource_group, service_name):
    '''
    Show the details of a migration project.
    Required Parameters:
    - name -- The name of the Project.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the Service.
    '''
    return _call_az("az dms project show", locals())
def check_name(name, resource_group, service_name):
    '''
    Check if a given project name is available within a given instance of DMS as well as the name's validity.
    Required Parameters:
    - name -- The name of the Project.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the Service.
    '''
    return _call_az("az dms project check-name", locals())
| StarcoderdataPython |
3346727 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add an integer ``Grouping`` field to the ``creatureextratype`` model."""
    dependencies = [
        ('pfss', '0011_creaturegroup_allowedextratype'),
    ]
    operations = [
        migrations.AddField(
            model_name='creatureextratype',
            name='Grouping',
            # Existing rows are backfilled with 0; blank=True allows empty forms.
            field=models.IntegerField(default=0, blank=True),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
6613671 | from sklearn.datasets import make_regression
from matplotlib import pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
# Functions
def gram_schmidt(a):
    """Orthonormalize the vectors in *a* with classical Gram-Schmidt.

    Returns the list of orthonormal vectors built so far; stops early
    (after printing a notice) when a linearly dependent vector is found.
    """
    basis = []
    for k, vec in enumerate(a):
        # Orthogonalization: remove components along the basis found so far.
        residual = vec
        for b in basis:
            residual = residual - (b @ vec) * b
        # Dependence test: a numerically-zero residual means vec lies in span(basis).
        norm = np.sqrt(sum(residual**2))
        if norm <= 1e-10:
            print('Vectors are linearly dependent.')
            print('GS algorithm terminates at iteration ', k + 1)
            return basis
        # Normalization
        basis.append(residual / norm)
    print('Vectors are linearly independent.')
    return basis
def QR_factorization(A):
    """Return (Q, R) with A = Q @ R, via Gram-Schmidt on the columns of A."""
    q_rows = np.array(gram_schmidt(A.T))   # orthonormal rows = Q transposed
    return q_rows.T, q_rows @ A
def back_subst(R, b_tilde):
    """Solve R x = b_tilde for upper-triangular R by back substitution."""
    n = R.shape[0]
    x = np.zeros(n)
    for row in reversed(range(n)):
        # Subtract the contribution of the already-solved tail, divide by pivot.
        x[row] = (b_tilde[row] - R[row, row + 1:] @ x[row + 1:]) / R[row, row]
    return x
def solve_via_backsub(A, b):
    """Solve the least-squares problem min ||Ax - b|| via QR + back substitution."""
    Q, R = QR_factorization(A)
    return back_subst(R, Q.T @ b)
#########
# # Least squares problem
# A = np.array([[2,0],[-1,1],[0,2]])
# b = np.array([1,0,-1])
# x_hat = np.array([1/3, -1/3])
# r_hat = A @ x_hat - b
# print(np.linalg.norm(r_hat))
# x = np.array([1/2, -1/2]) #other value of x
# r = A @ x - b
# print(np.linalg.norm(r))
# print(np.linalg.inv(A.T @ A) @ A.T @ b)
# print(np.linalg.pinv(A) @ b)
# print((A.T @ A) @ x_hat - A.T @ b) #Check that normal equations hold
# # Principio da ortogonalidade
# z = np.array([-1.1,2.3])
# print(A @ z).T @ r_hat)
# z = np.array([5.3, -1.2])
# print((A @ z).T @ r_hat)
# # Resolvendo problemas de quadrados mínimos
# A = np.random.normal(size = (100,20))
# b = np.random.normal(size = 100)
# x1 = solve_via_backsub(A,b)
# x2 = np.linalg.inv(A.T @ A) @ (A.T @ b)
# x3 = np.linalg.pinv(A) @ b
# print(np.linalg.norm(x1-x2))
# print(np.linalg.norm(x2-x3))
# print(np.linalg.norm(x3-x1))
# Exemplo página 234
# Lamp illumination least-squares example (textbook, p. 234): choose lamp
# powers so the floor illumination is as uniform as possible.
n = 10 # number of lamps
# (x, y) positions of the lamps and height above the floor
lamps = np.array([[4.1 ,20.4, 4],
                  [14.1, 21.3, 3.5],
                  [22.6, 17.1,6],
                  [5.5 ,12.3, 4.0],
                  [12.2, 9.7, 4.0],
                  [15.3, 13.8, 6],
                  [21.3, 10.5, 5.5],
                  [3.9 ,3.3, 5.0],
                  [13.1, 4.3, 5.0],
                  [20.3,4.2, 4.5]])
N = 25 # grid size
m = N*N # number of pixels
# construct m x 2 matrix with coordinates of pixel centers
pixels = np.hstack([np.outer(np.arange(0.5,N,1),np.ones(N)).reshape(m,1), np.outer(np.ones(N),np.arange(0.5,N,1)).reshape(m,1)])
# The m x n matrix A maps lamp powers to pixel intensities.
# A[i,j] is inversely proportional to the squared distance of
# lamp j to pixel i.
A = np.zeros((m,n))
for i in range(m):
    for j in range(n):
        A[i,j] = 1.0 / (np.linalg.norm(np.hstack([pixels[i,:], 0]) - lamps[j,:])**2)
A = (m / np.sum(A)) * A # scale elements of A
# Least squares solution: target intensity 1 at every pixel.
x = solve_via_backsub(A, np.ones(m))
rms_ls = (sum((A @ x - 1)**2)/m)**0.5
print(rms_ls)
# NOTE(review): matplotlib.pyplot is already imported at the top of this file;
# this second import is redundant (harmless, but could be removed).
import matplotlib.pyplot as plt
plt.ion()
plt.hist(A @ x, bins = 25)
plt.show()
plt.pause(10)
# Intensity if all lamp powers are one (baseline for comparison)
rms_uniform = (sum((A @ np.ones(n) - 1)**2)/m)**0.5
print(rms_uniform)
plt.hist(A @ np.ones(n), bins = 25)
plt.show()
plt.pause(10)
| StarcoderdataPython |
3365671 | <reponame>flexiooss/hotballoon-shed<gh_stars>0
import shutil
from pathlib import Path
from cmd.Directories import Directories
from cmd.Tasks.Task import Task
from cmd.Tasks.Tasks import Tasks
from cmd.package.modules.Module import Module
from cmd.package.modules.ModulesHandler import ModulesHandler
class CleanSources(Task):
    """Task that removes a package's generated-sources directory and, unless
    restricted to the current module, does the same for every sub-module."""

    NAME = Tasks.CLEAN_SOURCES

    def process(self):
        print('CLEAN SOURCES: ' + self.package.name())
        generated = Path(self.cwd.as_posix() + ('/' + Directories.GENERATED))
        if generated.is_dir():
            shutil.rmtree(generated.as_posix())
            print('**** CLEAN : generated')
        if self.options.module_only is not True:
            self.__clean_modules()

    def __clean_modules(self):
        # Recursively run this task on every sub-module of the package.
        if not self.package.config().has_modules():
            return
        handler: ModulesHandler = ModulesHandler(self.package)
        module: Module
        for module in handler.modules:
            CleanSources(self.options, module.package, module.package.cwd).process()
| StarcoderdataPython |
11272841 | from .helper import PillowTestCase
from PIL import Image, PSDraw
import os
import sys
class TestPsDraw(PillowTestCase):
    """Exercises PIL.PSDraw: rendering an image plus vector primitives to a
    PostScript file and to stdout."""

    def _create_document(self, ps):
        """Draw a small one-page document (crossed lines, image, title)."""
        im = Image.open("Tests/images/hopper.ppm")
        title = "hopper"
        box = (1*72, 2*72, 7*72, 10*72)  # in points
        ps.begin_document(title)
        # draw diagonal lines in a cross
        ps.line((1*72, 2*72), (7*72, 10*72))
        ps.line((7*72, 2*72), (1*72, 10*72))
        # draw the image (75 dpi)
        ps.image(box, im, 75)
        ps.rectangle(box)
        # draw title
        ps.setfont("Courier", 36)
        ps.text((3*72, 4*72), title)
        ps.end_document()

    def test_draw_postscript(self):
        # Based on Pillow tutorial, but there is no textsize:
        # https://pillow.readthedocs.io/en/latest/handbook/tutorial.html#drawing-postscript
        # Arrange
        tempfile = self.tempfile('temp.ps')
        with open(tempfile, "wb") as fp:
            # Act
            ps = PSDraw.PSDraw(fp)
            self._create_document(ps)
        # Assert: a non-zero file was created
        self.assertTrue(os.path.isfile(tempfile))
        self.assertGreater(os.path.getsize(tempfile), 0)

    def test_stdout(self):
        # Temporarily redirect stdout (PSDraw with no fp writes to stdout).
        try:
            from cStringIO import StringIO
        except ImportError:
            from io import StringIO
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()
        try:
            ps = PSDraw.PSDraw()
            self._create_document(ps)
        finally:
            # BUG FIX: always restore stdout; previously a failure inside
            # _create_document left sys.stdout pointing at our buffer,
            # silently swallowing output of every later test.
            sys.stdout = old_stdout
        self.assertNotEqual(mystdout.getvalue(), "")
| StarcoderdataPython |
3373201 | <gh_stars>0
# https://www.hackerrank.com/challenges/alphabet-rangoli/problem
def print_rangoli(size):
    """Print an alphabet rangoli of the given *size*.

    Each row is '-'-joined letters centered to width ``4*size - 3``, e.g.
    for size 2::

        --b--
        b-a-b
        --b--
    """
    # Letters 'a' .. chr(97 + size - 1), in order.
    s = ""
    for i in range(size):
        s += chr(97 + i)
    for i in range(size - 1, -size, -1):
        # Descending half (last letter down to index abs(i)+1, exclusive)
        # followed by the ascending half starting at index abs(i).
        line = s[: abs(i) : -1] + s[abs(i) :]
        # BUG FIX: the original used an undefined global `n` for the field
        # width; it must be derived from the `size` parameter.
        print("-".join(line).center(4 * size - 3, "-"))
| StarcoderdataPython |
6563741 | <filename>src/dct/camera/stream.py
import cv2
import numpy as np
import threading
import time
import queue
import uuid
import logging
from dct.util.silverstone import DeepRacerCar
class StreamConsumer:
    """Receives frames from a stream through an internal FIFO queue."""

    def __init__(self):
        self.queue = queue.Queue()

    def notify(self, frame):
        """Enqueue a frame; ``None`` marks end-of-stream."""
        self.queue.put(frame)

    def frame_iterator(self):
        """Yield frames until a ``None`` sentinel arrives or no frame shows
        up for one second."""
        while True:
            try:
                frame = self.queue.get(timeout=1)
            except queue.Empty:
                # No new frame within 1 second: stop iterating.
                return
            if frame is None:
                return
            yield frame
class BaseStream:
    """Base class which serves as a blueprint for frame providing objects."""

    def __init__(self):
        self.identifier = uuid.uuid4()
        self.consumers = []
        logging.info("Creating source stream {}".format(self.identifier))

    def publish_frame(self, frame):
        """Forward *frame* to every subscribed consumer."""
        for subscriber in self.consumers:
            subscriber.notify(frame)

    def subscribe(self, consumer: StreamConsumer):
        self.consumers.append(consumer)

    def unsubscribe(self, consumer: StreamConsumer):
        # Unsubscribing an unknown consumer is a silent no-op.
        if consumer in self.consumers:
            self.consumers.remove(consumer)

    @property
    def fps(self):
        raise NotImplementedError

    @property
    def width(self):
        raise NotImplementedError

    @property
    def height(self):
        raise NotImplementedError
class DeepRacerMJPEGStream(BaseStream):
    """Frame source reading the MJPEG camera feed of a DeepRacer car.

    A daemon thread pulls the HTTP multipart stream, splits it into JPEG
    frames (SOI/EOI markers), decodes them with OpenCV, and publishes each
    frame to subscribed consumers. Reconnects with backoff on any failure
    or when the approximate framerate drops below *min_fps*.
    """

    def __init__(
        self, car: DeepRacerCar, width=480, height=360, quality=90, min_fps=10.0
    ):
        super().__init__()
        self.car = car
        self.videoThread = threading.Thread(target=self.process_frames)
        self.videoThread.daemon = True
        self.video_url = car.camera_feed(width=width, height=height, quality=quality)
        self.video_width = width
        self.video_height = height
        if quality <= 0 or quality > 100:
            raise ValueError("Video quality should be in range [1, 100]")
        self.quality = quality  # Minimum quality of the video stream, lower will use less data at the cost of lower video quality.
        self.min_fps = min_fps  # Minimum FPS required for broadcasting, if approx fps too low the stream will disconnect.
        self.framerate = None  # Stores current framerate approximation.

    def start(self):
        """Start the background frame-processing thread."""
        self.videoThread.start()
        logging.debug("Starting streaming thread for stream {}".format(self.identifier))

    def process_frames(self):
        """Forever: connect, split the MJPEG byte stream into frames, publish."""
        while True:
            try:
                logging.info(
                    "Attempting to connect to stream {}".format(self.identifier)
                )
                if not self.car.connected:
                    logging.info(
                        "Car '{}' not connected for input stream {}".format(
                            self.car.name, self.identifier
                        )
                    )
                    continue
                bytebuffer = bytes()
                response = self.car.session.get(self.video_url, stream=True, timeout=6)
                response.raise_for_status()
                chunk_size = 10 * 1024 * 1024
                start_frame = time.time()
                framerate_counter = 0
                for chunk in response.iter_content(chunk_size=chunk_size):
                    bytebuffer += chunk
                    # JPEG start-of-image / end-of-image markers.
                    a = bytebuffer.find(b"\xff\xd8")
                    b = bytebuffer.find(b"\xff\xd9")
                    if a != -1 and b != -1:
                        framerate_counter += 1
                        frame_time = time.time()
                        jpg = bytebuffer[a : b + 2]
                        bytebuffer = bytebuffer[b + 2 :]
                        # FIX: np.frombuffer replaces np.fromstring, which is
                        # deprecated for binary input in NumPy; decoding
                        # behavior is identical.
                        frame = cv2.imdecode(
                            np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR
                        )
                        # Car will start "enqueing" frames if it cannot send them fast enough causing huge delays on the stream after a period of bad connection.
                        # Workaround: monitor framerate, if it drops try to reconnect.
                        if (frame_time - start_frame) > 1:
                            self.framerate = framerate_counter / (
                                frame_time - start_frame
                            )
                            framerate_counter = 0
                            start_frame = frame_time
                            logging.debug("FPS: {}".format(self.framerate))
                        # If no approximate framerate yet, don't broadcast the frames to prevent lag when low framerate occurs.
                        if self.framerate is None:
                            continue
                        elif self.framerate < self.min_fps:
                            logging.debug(
                                "Stopping because of low framerate: {}".format(
                                    self.framerate
                                )
                            )
                            self.framerate = None
                            break
                        self.publish_frame(frame)
            except Exception as e:
                logging.debug(e)
                pass
            finally:
                retry_rate = 5
                logging.debug(
                    "Finish stream for {}, retry in {} seconds...".format(
                        self.identifier, retry_rate
                    )
                )
                # Notify no frame is sent.
                self.publish_frame(None)
                # On failure try to reconnect to stream every 5 seconds.
                time.sleep(retry_rate)

    @property
    def width(self):
        return self.video_width

    @property
    def height(self):
        return self.video_height
| StarcoderdataPython |
12818056 | <reponame>LaudateCorpus1/squest<filename>service_catalog/templatetags/version.py
from django import template
register = template.Library()
@register.simple_tag
def app_version():
    """Template tag returning the running Squest version string
    (``SQUEST_VERSION`` from the Django settings, i.e. ``__version__``
    of the settings package)."""
    from django.conf import settings

    version = settings.SQUEST_VERSION
    return version
| StarcoderdataPython |
5018991 | <filename>ridurre/random_pruning.py
from typing import Callable, Optional, List
import numpy as np
from keras import models, layers
from ridurre import base_filter_pruning
class RandomFilterPruning(base_filter_pruning.BasePruning):
    """Filter-pruning strategy that picks the filters to remove uniformly at
    random — a useful baseline against smarter ranking strategies."""
    def __init__(self,
                 removal_factor: float,
                 model_compile_fn: Callable[[models.Model], None],
                 model_finetune_fn: Optional[Callable[[models.Model, int, int], None]],
                 nb_finetune_epochs: int,
                 maximum_prune_iterations: int = None,
                 maximum_pruning_percent: float = 0.9,
                 nb_trained_for_epochs: int = 0):
        # All behavior lives in the base class; this subclass only supplies
        # the per-layer filter selection below.
        super().__init__(pruning_factor=removal_factor,
                         model_compile_fn=model_compile_fn,
                         model_finetune_fn=model_finetune_fn,
                         nb_finetune_epochs=nb_finetune_epochs,
                         nb_trained_for_epochs=nb_trained_for_epochs,
                         maximum_prune_iterations=maximum_prune_iterations,
                         maximum_pruning_percent=maximum_pruning_percent)
    def run_pruning_for_conv2d_layer(self, pruning_factor: float, layer: layers.Conv2D, layer_weight_mtx) -> List[int]:
        """Return randomly chosen filter indices to prune from *layer*.

        Uses the global NumPy RNG; seed it externally for reproducibility.
        """
        _, _, _, nb_channels = layer_weight_mtx.shape
        # If there is only a single filter left, then do not prune it
        if nb_channels == 1:
            print("Layer {0} has only a single filter left. No pruning is applied.".format(layer.name))
            return []
        # Calculate how much filters should be removed
        _, nb_of_filters_to_remove = self._calculate_number_of_channels_to_keep(1.0 - pruning_factor, nb_channels)
        # Select prunable filters randomly
        filter_indices = np.arange(nb_channels)
        np.random.shuffle(filter_indices)
        filter_indices = list(filter_indices[:nb_of_filters_to_remove])
        return filter_indices
| StarcoderdataPython |
5002967 | <gh_stars>0
import os
from setuptools import setup

from blockchain_proofs import __version__


def _read_requirements(path='requirements.txt'):
    """Return requirement strings from *path*, skipping blanks and comments.

    BUG FIX: replaces ``pip.req.parse_requirements`` — pip's internal API was
    removed in pip 10, so importing it crashes the install on any modern pip.
    """
    with open(path) as req_file:
        return [line.strip() for line in req_file
                if line.strip() and not line.strip().startswith('#')]


requirements = _read_requirements()

with open('README.rst') as readme:
    long_description = readme.read()

setup(name='blockchain-proofs',
      version=__version__,
      description='Implements chainpoint v2 proof of existence approach',
      long_description=long_description,
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['blockchain_proofs'],
      keywords='blockchain proof receipt chainpoint validation',
      install_requires=requirements,
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'Programming Language :: Python :: 3'
      ]
      )
| StarcoderdataPython |
11348845 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 14:23:32 2021
@author: gualandi
"""
import numpy as np
from pyomo.environ import ConcreteModel, Var, Objective, Constraint, SolverFactory
from pyomo.environ import Binary, RangeSet, NonNegativeReals
def ParseData(filename):
    """Read a CSV file whose rows are ``f1,...,fk,label``.

    Returns ``(Xs, Ys)``: a list of float feature vectors and the list of
    integer labels.

    BUG FIX: the file handle was opened but never closed; a ``with`` block
    now closes it deterministically.
    """
    Xs = []
    Ys = []
    with open(filename, 'r', encoding="utf-8") as fh:
        for line in fh:
            row = line.replace('\n', '').split(',')
            Xs.append(list(map(float, row[:-1])))
            Ys.append(int(row[-1]))
    return Xs, Ys
def LinearClassifier(Xs, Ys):
    """Fit a separating hyperplane by minimizing the number of misclassified
    samples, as a big-M MILP solved with Gurobi via Pyomo.

    Returns ``(objective_value, [w_1..w_m, w_0])`` where w is the normal and
    w_0 the intercept, or ``None`` if the solve is not ok/optimal.
    """
    # Main Pyomo model
    model = ConcreteModel()
    # Parameters
    n = len(Xs)
    model.I = RangeSet(n)
    m = len(Xs[0])
    model.J = RangeSet(m)
    # Variables
    model.X = Var(model.J, bounds=(-float('inf'), float('inf')))  # hyperplane normal w
    model.X0 = Var(bounds=(-float('inf'), float('inf')))  # intercept w_0
    model.W = Var(model.I, within=NonNegativeReals)  # per-sample margin slack
    model.U = Var(model.I, within=Binary)  # 1 iff sample i is misclassified
    # Objective Function
    # model.obj = Objective(expr=sum(model.W[i] for i in model.I))
    model.obj = Objective(expr=sum(model.U[i] for i in model.I))
    # Constraints on the separation hyperplane
    def ConLabel(m, i):
        # Class 0 must lie on the >= side, class 1 on the <= side, each with
        # unit margin unless slack W[i] is used (indices are 1-based in Pyomo).
        if Ys[i-1] == 0:
            return sum(Xs[i-1][j-1]*m.X[j] for j in m.J) >= m.X0 + 1 - m.W[i]
        else:
            return sum(Xs[i-1][j-1]*m.X[j] for j in m.J) <= m.X0 - 1 + m.W[i]
    model.Label = Constraint(model.I, rule = ConLabel)
    # Big-M link: slack is only available when the indicator U[i] is on.
    model.Viol = Constraint(model.I,
                            rule = lambda m, i: m.W[i] <= 10000*m.U[i])
    # Solve the model
    sol = SolverFactory('gurobi').solve(model, tee=True)
    # Get a JSON representation of the solution
    sol_json = sol.json_repn()
    # Check solution status
    if sol_json['Solver'][0]['Status'] != 'ok':
        return None
    if sol_json['Solver'][0]['Termination condition'] != 'optimal':
        return None
    return model.obj(), [model.X[j]() for j in model.J] + [model.X0()]
def CheckSolution(Xs, Ys, A):
    """Report margin violations of hyperplane ``A = [w..., w0]`` on (Xs, Ys).

    Prints the violation count, the total margin violation, and its average.
    """
    v, u = 0, 0.0
    for i, xs in enumerate(Xs):
        ax = sum(x*a for x, a in zip(xs, A[:-1]))
        # Class 0 should satisfy ax >= w0; class 1 should satisfy ax <= w0.
        if ax < A[-1] and Ys[i] == 0:
            v += 1
            u += abs(-A[-1] + ax)
        if ax > A[-1] and Ys[i] == 1:
            v += 1
            u += abs(-A[-1] + ax)
    print("Violations: ", v, len(Xs))
    print("total:", u)
    # BUG FIX: guard the average against ZeroDivisionError when there are
    # no violations (the original crashed with v == 0).
    if v > 0:
        print("Avg. ", round(u/v, 3))
    else:
        print("Avg. ", 0.0)
def SplitTrainTestSet(Xs, Ys, t=0.3):
    """Randomly split (Xs, Ys) into train and test sets.

    *t* is the expected test fraction; the NumPy RNG is seeded with 13 so
    the split is reproducible. Returns (train_X, train_y, test_X, test_y).
    """
    train_x, train_y = [], []
    test_x, test_y = [], []
    np.random.seed(13)
    for features, label in zip(Xs, Ys):
        # One uniform draw per sample decides its destination bucket.
        if np.random.uniform(0, 1) > t:
            bucket_x, bucket_y = train_x, train_y
        else:
            bucket_x, bucket_y = test_x, test_y
        bucket_x.append(features)
        bucket_y.append(label)
    return train_x, train_y, test_x, test_y
def Accuracy(A, Bx, Bl):
    """Evaluate hyperplane ``A = [w..., w0]`` on (Bx, Bl).

    Returns ``(score_percent, v, n)`` where v counts the samples matching
    the original's two conditions and n = len(Bx).
    """
    w, w0 = A[:-1], A[-1]
    hits = 0
    for xs, y in zip(Bx, Bl):
        ax = sum(x*a for x, a in zip(xs, w))
        if (ax < w0 and y == 0) or (ax > w0 and y == 1):
            hits += 1
    total = len(Bx)
    return round((total - hits)/total*100, 3), hits, total
def Confusion(A, Bx, Bl):
    """Return the confusion counts ``(tp, fp, tn, fn)`` of hyperplane
    ``A = [w..., w0]`` on (Bx, Bl)."""
    tp = fp = tn = fn = 0
    w, w0 = A[:-1], A[-1]
    for xs, y in zip(Bx, Bl):
        ax = sum(x*a for x, a in zip(xs, w))
        if y == 0:
            if ax >= w0:
                tn += 1
            else:
                fn += 1
        else:
            # Note: ax == w0 with label 1 is counted nowhere (as originally).
            if ax < w0:
                tp += 1
            elif ax > w0:
                fp += 1
    return tp, fp, tn, fn
#-----------------------------------------------
# MAIN function
#-----------------------------------------------
if __name__ == "__main__":
    # Train on a split of the banknote data, then evaluate on held-out sets.
    Xs, Ys = ParseData('banknote_train.csv')
    Ax, Al, Bx, Bl = SplitTrainTestSet(Xs, Ys)
    fobj, A = LinearClassifier(Ax, Al)
    print('Accuracy Train set:', Accuracy(A, Ax, Al))
    print('Accuracy Test set:', Accuracy(A, Bx, Bl))
    Xs, Ys = ParseData('banknote_validate.csv')
    print('Accuracy Test set:', Accuracy(A, Xs, Ys))
    print('Confusion Matrix: ', Confusion(A, Xs, Ys))
    # One-off helper (deliberately disabled): writes the train/validate
    # splits back out to CSV for later reuse.
    if False:
        fh = open("banknote_train.csv", "w")
        for xs, y in zip(Ax, Al):
            fh.write(",".join(list(map(str,xs))+[str(y)])+"\n")
        fh.close()
        fh = open("banknote_validate.csv", "w")
        for xs, y in zip(Bx, Bl):
            fh.write(",".join(list(map(str,xs))+[str(y)])+"\n")
        fh.close()
8160793 | # -*- coding:utf-8 -*-
class FactoryMixin:
    """Mixin adding :meth:`as_factory`, which freezes constructor arguments
    into a zero-argument callable producing fresh instances."""

    @classmethod
    def as_factory(cls, *args, **kwargs):
        """Create a factory from a class reference (``cls``) and its arguments.

        Used by solvers that need to instantiate objects (solutions, other
        solvers) on demand.

        :param cls: Class reference.
        :param args: Positional constructor arguments.
        :param kwargs: Keyword constructor arguments.
        :return: A function that, when executed, creates the object itself.
        """
        def build():
            return cls(*args, **kwargs)
        return build
| StarcoderdataPython |
32668 | <filename>lintcode/0008-rotate-string.py
# Description
# 中文
# English
# Given a string(Given in the way of char array) and an offset, rotate the string by offset in place. (rotate from left to right)
# offset >= 0
# the length of str >= 0
# Have you met this question in a real interview?
# Example
# Example 1:
# Input: str="abcdefg", offset = 3
# Output: str = "efgabcd"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "efgabcd".
# Example 2:
# Input: str="abcdefg", offset = 0
# Output: str = "abcdefg"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "abcdefg".
# Example 3:
# Input: str="abcdefg", offset = 1
# Output: str = "gabcdef"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "gabcdef".
# Example 4:
# Input: str="abcdefg", offset =2
# Output: str = "fgabcde"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "fgabcde".
# Example 5:
# Input: str="abcdefg", offset = 10
# Output: str = "efgabcd"
# Explanation: Note that it is rotated in place, that is, after str is rotated, it becomes "efgabcd".
class Solution:
    """
    @param str: An array of char
    @param offset: An integer
    @return: nothing
    """
    def rotateString(self, s, offset):
        # Rotate the char array right by `offset` positions, in place.
        if not s:
            return
        k = offset % len(s)
        # Rotated sequence = last k chars followed by the rest; slice
        # assignment keeps the mutation in place (k == 0 is the identity).
        s[:] = s[-k:] + s[:-k]
8139230 | <reponame>0xflotus/Bashfuscator
from bashfuscator.core.mutators.command_obfuscator import CommandObfuscator
from bashfuscator.core.mutators.command_obfuscator import Stub
class CaseSwapper(CommandObfuscator):
    """Command obfuscator that inverts the case of every alphabetic character
    and restores it at runtime with bash's ``${VAR~~}`` case-flip expansion."""

    def __init__(self):
        super().__init__(
            name="Case Swapper",
            description="Flips the case of all alpha chars",
            sizeRating=1,
            timeRating=1,
            author="capnspacehook",
            reversible=True
        )
        case_swap_stub = Stub(
            name="bash case swap expansion",
            sizeRating=1,
            timeRating=1,
            binariesUsed=[],
            fileWrite=False,
            escapeQuotes=True,
            stub='''? ?VAR1='CMD'* *END0* *:printf:^ ^%s^ ^"${VAR1~~}"* *END0* *'''
        )
        self.stubs = [case_swap_stub]

    def mutate(self, userCmd):
        # Swap the case now; the stub flips it back when the payload runs.
        return self.deobStub.genStub(userCmd.swapcase())
| StarcoderdataPython |
1748763 | <reponame>picam360/picam360tegra
import sys
from Adafruit_MotorHAT import Adafruit_MotorHAT

# Two-motor rover driven by line commands on stdin; each handled command is
# acknowledged with "<command> done" on stdout.
driver = Adafruit_MotorHAT(i2c_bus=1)
motor1 = driver.getMotor(1)
motor2 = driver.getMotor(2)
# The HAT's FORWARD/BACKWARD constants are wired in reverse on this rover.
dir_forward = Adafruit_MotorHAT.BACKWARD
dir_backward = Adafruit_MotorHAT.FORWARD
duty = 40                   # duty cycle in percent
speed = int(duty*255/100)   # scaled to the HAT's 0-255 speed range


def _drive(speed1, direction1, speed2, direction2):
    """Apply speed and direction to both motors (motor1 then motor2)."""
    motor1.setSpeed(speed1)
    motor1.run(direction1)
    motor2.setSpeed(speed2)
    motor2.run(direction2)


# Dispatch table replacing the original duplicated if/elif blocks:
# command -> (speed1, dir1, speed2, dir2).
_COMMANDS = {
    "init": (0, dir_forward, 0, dir_forward),
    "move_forward": (speed, dir_forward, speed, dir_forward),
    "move_backward": (speed, dir_backward, speed, dir_backward),
    "turn_left": (speed, dir_backward, speed, dir_forward),
    "turn_right": (speed, dir_forward, speed, dir_backward),
    "stop": (0, dir_forward, 0, dir_forward),
}

for line in sys.stdin:
    line = line.strip()
    if line == "exit":
        break
    elif line in _COMMANDS:
        _drive(*_COMMANDS[line])
        print(line + ' done')
    else:
        # Unknown commands are echoed back unchanged.
        print(line)
    sys.stdout.flush()
print('finished')
3467207 | <filename>conanfile.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import os
class LibRdKafkaConan(ConanFile):
    """Conan recipe (legacy v1 API)."""
    # NOTE(review): the metadata below says librdkafka, yet source() and
    # configure_cmake() download and configure google/benchmark — confirm
    # which library this recipe is actually meant to package.
    name = "librdkafka"
    version = "0.11.6"
    description = "The Apache Kafka C/C++ client library"
    url = "https://github.com/raulbocanegra/librdkafka"
    homepage = "https://github.com/edenhill/librdkafka"
    license = "https://github.com/raulbocanegra/librdkafka"
    exports = ["LICENSE.md"]
    exports_sources = ["CMakeLists.txt"]
    generators = "cmake"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "with_plugins": [True, False],
        "with_sasl": [True, False],
        "with_ssl":[True, False],
        "with_zlib": [True, False],
        "with_zstd": [True, False],
        "without_optimization": [True, False],
        "without_win32config": [True, False]
    }
    # NOTE(review): default_options references enable_testing/
    # enable_exceptions/enable_lto/enable_gtest_tests, none of which are
    # declared in `options` above, and the declared with_*/without_* options
    # get no defaults — confirm against Conan's option validation.
    default_options = "shared=False", "fPIC=True", "enable_testing=False", "enable_exceptions=True", "enable_lto=False", "enable_gtest_tests=True"
    source_subfolder = "source_subfolder"
    build_subfolder = "build_subfolder"
    def config_options(self):
        # On Windows fPIC is meaningless; the recipe also forces static builds.
        if self.settings.os == 'Windows':
            del self.options.fPIC
            self.options.shared = False
        if self.options.enable_testing == False:
            self.options.enable_gtest_tests = False
    def source(self):
        # Downloads google/benchmark (see NOTE above) and renames the
        # extracted folder to the conventional source subfolder.
        source_url = "https://github.com/google/benchmark"
        tools.get("{0}/archive/v{1}.zip".format(source_url, self.version))
        extracted_dir = "benchmark-" + self.version
        os.rename(extracted_dir, self.source_subfolder)
    def configure_cmake(self):
        """Create and configure the CMake helper with benchmark's flags."""
        cmake = CMake(self)
        cmake.definitions['BENCHMARK_ENABLE_TESTING'] = "ON" if self.options.enable_testing else "OFF"
        cmake.definitions['BENCHMARK_ENABLE_GTEST_TESTS'] = "ON" if self.options.enable_gtest_tests and self.options.enable_testing else "OFF"
        cmake.definitions['BENCHMARK_BUILD_32_BITS'] = "ON" if self.settings.arch == "x86" and self.settings.compiler != "Visual Studio" else "OFF"
        cmake.configure(build_folder=self.build_subfolder)
        return cmake
    def build_requirements(self):
        self.build_requires("gtest/1.8.0@bincrafters/stable")
    def build(self):
        cmake = self.configure_cmake()
        cmake.build()
    def package(self):
        self.copy(pattern="LICENSE", dst="license", src=self.source_subfolder)
        cmake = self.configure_cmake()
        cmake.install()
    def package_info(self):
        self.cpp_info.libs = tools.collect_libs(self)
        # Platform-specific system libraries consumers must link against.
        if self.settings.os == "Windows":
            self.cpp_info.libs.append("Shlwapi")
        elif self.settings.os == "Linux":
            self.cpp_info.libs.append("pthread")
| StarcoderdataPython |
6663751 | <reponame>hydroshare/hsmodels<gh_stars>0
import uuid
from typing import List
from pydantic import AnyUrl, BaseModel, Field, root_validator, validator
from rdflib.term import Identifier as RDFIdentifier
from hsmodels.namespaces import CITOTERMS, DC, DCTERMS, HSRESOURCE, HSTERMS, ORE, RDF
from hsmodels.schemas.rdf.fields import (
AwardInfoInRDF,
ContributorInRDF,
CoverageInRDF,
CreatorInRDF,
DateInRDF,
DescriptionInRDF,
ExtendedMetadataInRDF,
IdentifierInRDF,
PublisherInRDF,
RelationInRDF,
RightsInRDF,
SourceInRDF,
)
from hsmodels.schemas.rdf.root_validators import (
parse_coverages,
parse_rdf_dates,
parse_rdf_extended_metadata,
rdf_parse_description,
rdf_parse_rdf_subject,
)
from hsmodels.schemas.rdf.validators import (
coverages_constraint,
coverages_spatial_constraint,
dates_constraint,
language_constraint,
parse_rdf_sources,
rdf_parse_identifier,
sort_creators,
)
def hs_uid():
    """Generate a fresh HSRESOURCE term from a random UUID hex string."""
    random_name = uuid.uuid4().hex
    return getattr(HSRESOURCE, random_name)
class FileMap(BaseModel):
    """OAI-ORE Aggregation listing the file URLs contained in a resource."""
    rdf_subject: RDFIdentifier = Field(default_factory=hs_uid)
    rdf_type: AnyUrl = Field(rdf_predicate=RDF.type, const=True, default=ORE.Aggregation)
    is_documented_by: AnyUrl = Field(rdf_predicate=CITOTERMS.isDocumentedBy)
    files: List[AnyUrl] = Field(rdf_predicate=ORE.aggregates, default=[])
    title: str = Field(rdf_predicate=DC.title)
    is_described_by: AnyUrl = Field(rdf_predicate=ORE.isDescribedBy)
class ResourceMap(BaseModel):
    """OAI-ORE ResourceMap that describes a :class:`FileMap` aggregation."""
    rdf_subject: RDFIdentifier = Field(default_factory=hs_uid)
    rdf_type: AnyUrl = Field(rdf_predicate=RDF.type, const=True, default=ORE.ResourceMap)
    describes: FileMap = Field(rdf_predicate=ORE.describes)
    identifier: str = Field(rdf_predicate=DC.identifier, default=None)
    # modified: datetime = Field(rdf_predicate=DCTERMS.modified)
    creator: str = Field(rdf_predicate=DC.creator, default=None)
class ResourceMetadataInRDF(BaseModel):
    """Dublin Core / HSTERMS metadata of a HydroShare composite resource,
    with root/field validators that normalize parsed RDF input."""
    rdf_subject: RDFIdentifier = Field(default_factory=hs_uid)
    _parse_rdf_subject = root_validator(pre=True, allow_reuse=True)(rdf_parse_rdf_subject)
    rdf_type: AnyUrl = Field(rdf_predicate=RDF.type, const=True, default=HSTERMS.CompositeResource)
    label: str = Field(default="Composite Resource", const=True)
    title: str = Field(rdf_predicate=DC.title)
    description: DescriptionInRDF = Field(rdf_predicate=DC.description, default_factory=DescriptionInRDF)
    language: str = Field(rdf_predicate=DC.language, default='eng')
    subjects: List[str] = Field(rdf_predicate=DC.subject, default=[])
    dc_type: AnyUrl = Field(rdf_predicate=DC.type, default=HSTERMS.CompositeResource, const=True)
    # NOTE(review): `cont=True` below looks like a typo for `const=True` —
    # confirm the intended Field constraint.
    identifier: IdentifierInRDF = Field(rdf_predicate=DC.identifier, cont=True)
    creators: List[CreatorInRDF] = Field(rdf_predicate=DC.creator, default=[])
    contributors: List[ContributorInRDF] = Field(rdf_predicate=DC.contributor, default=[])
    sources: List[SourceInRDF] = Field(rdf_predicate=DC.source, default=[])
    relations: List[RelationInRDF] = Field(rdf_predicate=DC.relation, default=[])
    extended_metadata: List[ExtendedMetadataInRDF] = Field(rdf_predicate=HSTERMS.extendedMetadata, default=[])
    rights: RightsInRDF = Field(rdf_predicate=DC.rights, default=None)
    dates: List[DateInRDF] = Field(rdf_predicate=DC.date, default=[])
    awards: List[AwardInfoInRDF] = Field(rdf_predicate=HSTERMS.awardInfo, default=[])
    coverages: List[CoverageInRDF] = Field(rdf_predicate=DC.coverage, default=[])
    publisher: PublisherInRDF = Field(rdf_predicate=DC.publisher, default=None)
    citation: str = Field(rdf_predicate=DCTERMS.bibliographicCitation)
    # Pre-validators normalize parsed RDF into the shapes declared above;
    # the remaining validators enforce model constraints.
    _parse_coverages = root_validator(pre=True, allow_reuse=True)(parse_coverages)
    _parse_extended_metadata = root_validator(pre=True, allow_reuse=True)(parse_rdf_extended_metadata)
    _parse_rdf_dates = root_validator(pre=True, allow_reuse=True)(parse_rdf_dates)
    _parse_description = root_validator(pre=True, allow_reuse=True)(rdf_parse_description)
    _parse_identifier = validator("identifier", pre=True, allow_reuse=True)(rdf_parse_identifier)
    _parse_rdf_sources = validator("sources", pre=True, allow_reuse=True)(parse_rdf_sources)
    _language_constraint = validator('language', allow_reuse=True)(language_constraint)
    _dates_constraint = validator('dates', allow_reuse=True)(dates_constraint)
    _coverages_constraint = validator('coverages', allow_reuse=True)(coverages_constraint)
    _coverages_spatial_constraint = validator('coverages', allow_reuse=True)(coverages_spatial_constraint)
    _sort_creators = validator("creators", pre=True)(sort_creators)
| StarcoderdataPython |
8045266 | #! /usr/bin/python3 -i
# coding=utf-8
import os
# Locations of the converter scripts shipped alongside this package.
PACKAGE_DIR=os.path.abspath(os.path.dirname(__file__))
CHAPAS2UD=os.path.join(PACKAGE_DIR,"chapas2ud")  # CaboCha/ChaPAS -> CoNLL-U converter (run as a subprocess below)
UNIDIC2IPADIC=os.path.join(PACKAGE_DIR,"unidic2ipadic")  # awk script run with `awk -f` to map UniDic output to IPADIC format
import numpy
from spacy.language import Language
from spacy.symbols import LANG,NORM,LEMMA,POS,TAG,DEP,HEAD,ENT_IOB,ENT_TYPE
from spacy.tokens import Doc,Span,Token
from spacy.util import get_lang_class
class ChaPASLanguage(Language):
    """spaCy Language whose tokenizer delegates Japanese analysis to the
    ChaPAS/CaboCha/MeCab toolchain (see :class:`ChaPASTokenizer`)."""
    lang="ja"
    max_length=10**6
    def __init__(self,UniDic):
        self.Defaults.lex_attr_getters[LANG]=lambda _text:"ja"
        # spaCy v2 exposes Defaults.create_vocab() and a `pipeline` list;
        # newer spaCy moved vocab creation to spacy.vocab.create_vocab and
        # renamed the pipeline attributes — support both via try/except.
        try:
            self.vocab=self.Defaults.create_vocab()
            self.pipeline=[]
        except:
            from spacy.vocab import create_vocab
            self.vocab=create_vocab("ja",self.Defaults)
            self._components=[]
            self._disabled=set()
        self.tokenizer=ChaPASTokenizer(self.vocab,UniDic)
        self._meta={
            "author":"<NAME>",
            "description":"derived from ChaPAS-CaboCha-MeCab",
            "lang":"ja_ChaPAS_CaboCha_MeCab",
            "license":"MIT",
            "name":"ChaPAS_CaboCha_MeCab",
            "pipeline":"Tokenizer, POS-Tagger, Parser",
            "spacy_version":">=2.2.2"
        }
        self._path=None
class ChaPASTokenizer(object):
    """Custom spaCy tokenizer: parses Japanese text with ChaPAS/CaboCha
    (optionally through UniDic or the Web-Chamame service) and builds a
    fully tagged and parsed Doc from the resulting CoNLL-U output."""
    # Serialization is a deliberate no-op: the tokenizer is rebuilt from
    # its constructor arguments instead of being pickled.
    to_disk=lambda self,*args,**kwargs:None
    from_disk=lambda self,*args,**kwargs:None
    to_bytes=lambda self,*args,**kwargs:None
    from_bytes=lambda self,*args,**kwargs:None
    def __init__(self,vocab,UniDic):
        import subprocess
        self.UniDic=UniDic
        if UniDic:
            # Map the UniDic era names to Web-Chamame's form-field keys.
            d={ "gendai":"dic1", "spoken":"dic2", "qkana":"dic3", "kindai":"dic4", "kinsei":"dic5", "kyogen":"dic6", "wakan":"dic7", "wabun":"dic8", "manyo":"dic9" }
            self.dictkey=d[UniDic]
            self.model=self.ChamameWeb2ChaPASUD
            # Prefer a locally installed unidic2ud dictionary when available,
            # avoiding the network round-trip to Web-Chamame.
            try:
                import unidic2ud
                if os.path.isdir(os.path.join(unidic2ud.DOWNLOAD_DIR,UniDic)):
                    self.mecab=unidic2ud.load(UniDic,None).mecab
                    self.model=self.UniDic2ChaPASUD
            except:
                pass
        else:
            # Default pipeline: feed raw text straight to the chapas2ud script.
            self.model=lambda s:subprocess.check_output([CHAPAS2UD,"-I","RAW"],input=s.encode("utf-8")).decode("utf-8")
        self.vocab=vocab
    def __call__(self,text):
        """Parse *text* and return a spaCy Doc with tags, deps and NE labels."""
        # Normalize: drop CRs and widen ASCII brackets to their full-width
        # forms so downstream tools do not treat them as markup.
        t=text.replace("\r","").replace("(","(").replace(")",")").replace("[","[").replace("]","]").replace("{","{").replace("}","}")
        u=self.model(t) if t else ""
        vs=self.vocab.strings
        r=vs.add("ROOT")
        # Per-token attribute columns collected from the CoNLL-U output.
        words=[]
        lemmas=[]
        pos=[]
        tags=[]
        morphs=[]
        heads=[]
        deps=[]
        spaces=[]
        norms=[]
        ent_iobs=[]
        ent_types=[]
        bunsetu=[]
        for t in u.split("\n"):
            # Skip blank lines and CoNLL-U comment lines.
            if t=="" or t.startswith("#"):
                continue
            s=t.split("\t")
            if len(s)!=10:
                continue
            id,form,lemma,upos,xpos,feats,head,deprel,_,misc=s
            words.append(form)
            lemmas.append(vs.add(lemma))
            pos.append(vs.add(upos))
            tags.append(vs.add(xpos))
            morphs.append(feats)
            if deprel=="root":
                heads.append(0)
                deps.append(r)
            else:
                # spaCy heads are stored relative to the token's own index.
                heads.append(int(head)-int(id))
                deps.append(vs.add(deprel))
            spaces.append(False if "SpaceAfter=No" in misc else True)
            # Transliteration (when present in MISC) becomes the NORM attr.
            i=misc.find("Translit=")
            norms.append(vs.add(form if i<0 else misc[i+9:]))
            # Named-entity label, encoded as NE=B-TYPE / NE=I-TYPE in MISC.
            i=misc.find("NE=")
            if i<0:
                ent_iobs.append(2)
                ent_types.append(0)
            else:
                j=misc.find("|",i)
                if j<0:
                    j=len(misc)
                if misc[i+3:i+4]=="B":
                    ent_iobs.append(3)
                else:
                    ent_iobs.append(1)
                ent_types.append(vs.add(misc[i+5:j]))
            # Bunsetsu B/I label, defaulting to "I" when absent.
            bunsetu.append("I")
            if misc.startswith("BunsetuBILabel="):
                bunsetu[-1]=misc[15:16]
        doc=Doc(self.vocab,words=words,spaces=spaces)
        a=numpy.array(list(zip(lemmas,pos,tags,deps,heads,norms,ent_iobs,ent_types)),dtype="uint64")
        doc.from_array([LEMMA,POS,TAG,DEP,HEAD,NORM,ENT_IOB,ENT_TYPE],a)
        # spaCy v2 exposes is_tagged/is_parsed flags; newer spaCy removed
        # them, where morph features are set per token instead.
        try:
            doc.is_tagged=True
            doc.is_parsed=True
        except:
            for i,j in enumerate(morphs):
                if j!="_" and j!="":
                    doc[i].set_morph(j)
        doc.user_data["bunsetu_bi_labels"]=bunsetu
        return doc
    def ChamameWebAPI(self,sentence):
        """POST *sentence* to NINJAL's Web-Chamame service (hand-built
        multipart/form-data request) and return the CSV response text."""
        import random,urllib.request,json
        f={ self.dictkey:"unidic-spoken" if self.UniDic=="spoken" else self.UniDic,
            "st":sentence+"\n",
            "f1":"1",
            "f2":"1",
            "f3":"1",
            "f4":"1",
            "f5":"1",
            "f9":"1",
            "f10":"1",
            "out-e":"csv",
            "c-code":"utf-8"
        }
        # Random alphanumeric multipart boundary.
        b="".join(random.choice("abcdefghijklmnopqrstuvwxyz0123456789") for i in range(10))
        d="\n".join("--"+b+"\nContent-Disposition:form-data;name="+k+"\n\n"+v for k,v in f.items())+"\n--"+b+"--\n"
        h={ "Content-Type":"multipart/form-data;charset=utf-8;boundary="+b }
        u=urllib.request.Request("https://chamame.ninjal.ac.jp/chamamebin/webchamame.php",d.encode(),h)
        with urllib.request.urlopen(u) as r:
            q=r.read()
        return q.decode("utf-8").replace("\r","")
    def ChamameWeb2ChaPASUD(self,text):
        """Web-Chamame CSV -> MeCab-style lattice -> IPADIC -> CaboCha ->
        CoNLL-U, via the bundled awk/chapas2ud subprocess pipeline."""
        import subprocess
        s=self.ChamameWebAPI(text)
        m=""
        for t in s.split("\n"):
            w=t.split(",")
            if len(w)<9:
                continue
            # Column 1 holds the sentence B/I flag; "B" starts a new sentence.
            if w[1]=="B":
                if m!="":
                    m+="EOS\n"
            elif w[1]!="I":
                continue
            # Pad the POS field so missing sub-categories become "*".
            p=(w[5]+"-*-*-*-*").split("-")
            m+=w[2]+"\t"+",".join([p[0],p[1],p[2],p[3],"*" if w[6]=="" else w[6],"*" if w[7]=="" else w[7],w[4],w[3],w[2],w[8],w[9]])+"\n"
        m+="EOS\n"
        t=subprocess.check_output(["awk","-f",UNIDIC2IPADIC],input=m.encode("utf-8"))
        u=subprocess.check_output(["cabocha","-f","1","-n","1","-I","1"],input=t)
        return subprocess.check_output([CHAPAS2UD],input=u).decode("utf-8")
    def UniDic2ChaPASUD(self,text):
        """Local MeCab/UniDic -> IPADIC -> CaboCha -> CoNLL-U pipeline."""
        import subprocess
        m=self.mecab(text)
        t=subprocess.check_output(["awk","-f",UNIDIC2IPADIC],input=m.encode("utf-8"))
        u=subprocess.check_output(["cabocha","-f","1","-n","1","-I","1"],input=t)
        return subprocess.check_output([CHAPAS2UD],input=u).decode("utf-8")
def load(UniDic=None):
  """Create a ChaPASLanguage pipeline; *UniDic* selects the dictionary variant."""
  return ChaPASLanguage(UniDic)
def bunsetu_spans(doc):
  """Return the list of bunsetu (phrase) Spans covering *doc*.

  *doc* may be a Doc, a Span or a single Token; any other type falls
  through and (as before) returns None implicitly.
  """
  # isinstance() instead of type()==: idiomatic and accepts subclasses.
  if isinstance(doc,Doc):
    # Bunsetu starts are the tokens labelled "B"; len(doc) is the sentinel
    # end so zip() yields (start, next_start) pairs.
    b=[i for i,j in enumerate(doc.user_data["bunsetu_bi_labels"]) if j=="B"]
    b.append(len(doc))
    return [Span(doc,i,j) for i,j in zip(b,b[1:])]
  elif isinstance(doc,Span):
    b=doc[0].doc.user_data["bunsetu_bi_labels"]
    # If the span starts mid-bunsetu ("I"), include that bunsetu as well.
    s=[bunsetu_span(doc[0])] if b[doc[0].i]=="I" else []
    for t in doc:
      if b[t.i]=="B":
        s.append(bunsetu_span(t))
    return s
  elif isinstance(doc,Token):
    return [bunsetu_span(doc)]
def bunsetu_span(token):
  """Return the Span of the bunsetu (phrase) that contains *token*.

  The per-token "B"/"I" labels are joined into one string and a sentinel
  "B" is appended, so rindex() finds the bunsetu start at or before the
  token and index() finds the next bunsetu start (or the end of the doc).
  """
  b="".join(token.doc.user_data["bunsetu_bi_labels"])+"B"
  return Span(token.doc,b.rindex("B",0,token.i+1),b.index("B",token.i+1))
| StarcoderdataPython |
379093 | import gym
import argparse
# from gym.envs.mujoco import HalfCheetahEnv
from rlkit.envs.navigation2d.navigation2d import Navigation2d
from rlkit.envs.mujoco.ant import AntEnv
from rlkit.envs.mujoco.half_cheetah import HalfCheetahEnv
import torch
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.sac.msdads.msdads_env_replay_buffer import MSDADSEnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger
from rlkit.torch.sac.diayn.diayn_path_collector import DIAYNMdpPathCollector
from rlkit.samplers.data_collector.step_collector import MdpStepCollector
from rlkit.torch.sac.diayn.policies import SkillTanhGaussianPolicy, MakeDeterministic
from rlkit.torch.networks import FlattenMlp
from rlkit.torch.sac.msdads.msdads_torch_rl_algorithm import MSDADSTorchRLAlgorithm
from rlkit.torch.sac.msdads.msdads import MSDADSTrainer
from rlkit.torch.sac.gcs.gcs_path_collector import GCSMdpPathCollector, GCSMdpPathCollector2, GCSMdpPathCollector3
from rlkit.torch.sac.gcs.policies import UniformSkillTanhGaussianPolicy
from rlkit.torch.sac.gcs.networks import FlattenBNMlp
from rlkit.torch.sac.msdads.skill_dynamics import SkillDynamics
def experiment(variant, args):
    """Build and train the MSDADS agent described by *variant* on ``args.env``.

    Constructs twin Q-networks (plus target copies), a skill-dynamics model,
    a uniform-skill policy, exploration/evaluation collectors and a replay
    buffer, wires them into an MSDADSTrainer and runs the RL algorithm.

    :param variant: experiment hyper-parameter dict (see ``__main__``)
    :param args: parsed CLI args providing ``env``, ``skill_dim``, ``ends_dim``
    """
    expl_env, eval_env = get_env(str(args.env))
    # Observation size after dropping the excluded indices (e.g. x position).
    obs_dim = expl_env.observation_space.low.size - (len(variant['exclude_obs_ind']) if variant['exclude_obs_ind'] else 0)
    action_dim = eval_env.action_space.low.size
    skill_dim = args.skill_dim
    # ends_dim = expl_env.observation_space.low.size
    # Dimensionality of the "end state" predicted by the skill dynamics.
    ends_dim = args.ends_dim
    M = variant['layer_size']
    # Twin Q-functions and their target copies, each conditioned on
    # (observation, action, skill).
    qf1 = FlattenBNMlp(
        input_size=obs_dim + action_dim + skill_dim,
        output_size=1,
        hidden_sizes=[M, M],
        batch_norm=variant['batch_norm'],
    )
    qf2 = FlattenBNMlp(
        input_size=obs_dim + action_dim + skill_dim,
        output_size=1,
        hidden_sizes=[M, M],
        batch_norm=variant['batch_norm'],
    )
    target_qf1 = FlattenBNMlp(
        input_size=obs_dim + action_dim + skill_dim,
        output_size=1,
        hidden_sizes=[M, M],
        batch_norm=variant['batch_norm'],
    )
    target_qf2 = FlattenBNMlp(
        input_size=obs_dim + action_dim + skill_dim,
        output_size=1,
        hidden_sizes=[M, M],
        batch_norm=variant['batch_norm'],
    )
    # Predicts the end state reached by a skill from (observation, skill).
    skill_dynamics = SkillDynamics(
        input_size=obs_dim + skill_dim,
        state_dim=ends_dim,
        hidden_sizes=[M, M],
        num_components=1,
        # NOTE(review): batch norm is hard-coded on here, ignoring
        # variant['batch_norm'] -- confirm this is intentional.
        batch_norm=True,#variant['batch_norm']
        std=[1.] * ends_dim * 1
    )
    # goal_buffer = GCSGoalBuffer(
    #     variant['goal_buffer_size'],
    #     goal_dim=ends_dim,
    # )
    # Skill-conditioned policy sampling skills uniformly in [-1, 1]^skill_dim.
    policy = UniformSkillTanhGaussianPolicy(
        obs_dim=obs_dim + skill_dim,
        action_dim=action_dim,
        hidden_sizes=[M, M],
        skill_dim=skill_dim,
        low=[-1] * skill_dim,
        high=[1] * skill_dim,
    )
    # Deterministic (mean-action) policy used only for evaluation rollouts.
    eval_policy = MakeDeterministic(policy)
    eval_path_collector = DIAYNMdpPathCollector(
        eval_env,
        eval_policy,
    )
    expl_step_collector = GCSMdpPathCollector3(
        expl_env,
        policy,
        exclude_obs_ind=variant['exclude_obs_ind'],
        goal_ind=variant['goal_ind'],
        skill_horizon=variant['skill_horizon'],
        # render=True
    )
    replay_buffer = MSDADSEnvReplayBuffer(
        variant['replay_buffer_size'],
        expl_env,
        skill_dim,
        ends_dim,
        variant['algorithm_kwargs']['max_path_length']
    )
    trainer = MSDADSTrainer(
        env=eval_env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        skill_dynamics=skill_dynamics,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        exclude_obs_ind=variant['exclude_obs_ind'],
        **variant['trainer_kwargs']
    )
    algorithm = MSDADSTorchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_step_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        **variant['algorithm_kwargs']
    )
    # Move all networks to the configured device, then run training.
    algorithm.to(ptu.device)
    algorithm.train()
def get_env(name):
    """Return an ``(exploration_env, evaluation_env)`` pair for *name*.

    Known names: ``'test'`` (2-D navigation), ``'Ant'`` and ``'Half-cheetah'``;
    any other name is forwarded to ``gym.make``.
    """
    if name == 'test':
        expl_env, eval_env = Navigation2d(), Navigation2d()
        # expl_env.set_random_start_state(True)
        # eval_env.set_random_start_state(True)
        return NormalizedBoxEnv(expl_env), NormalizedBoxEnv(eval_env)
    elif name == 'Ant':
        return NormalizedBoxEnv(AntEnv(expose_all_qpos=True)), NormalizedBoxEnv(AntEnv(expose_all_qpos=True))
    elif name == 'Half-cheetah':
        return NormalizedBoxEnv(HalfCheetahEnv(expose_all_qpos=True)), NormalizedBoxEnv(HalfCheetahEnv(expose_all_qpos=True))
    # BUG FIX: the literal string 'name' was passed to gym.make, so every
    # fallback environment id failed; pass the variable instead.
    return NormalizedBoxEnv(gym.make(name)), NormalizedBoxEnv(gym.make(name))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('env', type=str,
                        help='environment')
    parser.add_argument('--skill_dim', type=int, default=2,
                        help='skill dimension')
    parser.add_argument('--ends_dim', type=int, default=2,
                        help='end_state dimension')
    args = parser.parse_args()
    # Experiment hyper-parameters passed to experiment() and the logger.
    # noinspection PyTypeChecker
    variant = dict(
        algorithm="GCS2",
        version="normal",
        layer_size=128,
        # NOTE(review): replay buffer of only 100 transitions looks unusually
        # small (cf. sd/goal buffers of 1E4) -- confirm this is intentional.
        replay_buffer_size=int(1E2),
        sd_buffer_size=int(1E4),
        goal_buffer_size=int(1E4),
        # Observation indices hidden from the policy/critics (index 0 here).
        exclude_obs_ind=[0],
        goal_ind=None,
        skill_horizon=200,
        batch_norm=False,
        algorithm_kwargs=dict(
            num_epochs=3000, #1000
            num_eval_steps_per_epoch=0,
            num_trains_per_train_loop=64,
            num_expl_steps_per_train_loop=1000,
            num_trains_discriminator_per_train_loop=8,
            min_num_steps_before_training=0,
            max_path_length=200,
            batch_size=128, #256
            num_steps=2,
        )
        ,
        trainer_kwargs=dict(
            discount=0.,
            soft_target_tau=5e-3,
            target_update_period=1,
            policy_lr=3E-4,
            qf_lr=3E-4,
            dyn_lr=3E-4,
            gamma=0.9,
            reward_scale=1,
            use_automatic_entropy_tuning=False,
        ),
    )
    # Log under MSDADS_<skill_dim>_<env>, snapshotting every 100 epochs.
    setup_logger('MSDADS_' + str(args.skill_dim) + '_' + args.env, variant=variant,snapshot_mode="gap_and_last",
                 snapshot_gap=100,)
    # ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)
    experiment(variant, args)
| StarcoderdataPython |
11390832 | import pathlib
import helpers
import numpy as np
import pytest
import meshio
@pytest.mark.parametrize(
    "mesh",
    [
        # helpers.empty_mesh,
        helpers.tri_mesh
    ],
)
def test_wkt(mesh):
    """Round-trip *mesh* through the WKT writer/reader and compare."""
    # The closure that merely forwarded to meshio.wkt.write added nothing;
    # pass the writer function straight through.
    helpers.write_read(meshio.wkt.write, meshio.wkt.read, mesh, 1.0e-12)
@pytest.mark.parametrize(
    "filename, ref_sum, ref_num_cells",
    [("simple.wkt", 4, 2), ("whitespaced.wkt", 3.2, 2)],
)
def test_reference_file(filename, ref_sum, ref_num_cells):
    """Read a reference WKT mesh and check its point sum and triangle count."""
    mesh_path = pathlib.Path(__file__).resolve().parent / "meshes" / "wkt" / filename
    mesh = meshio.read(mesh_path)
    rel_tol = 1.0e-5
    point_sum = np.sum(mesh.points)
    assert abs(point_sum - ref_sum) < rel_tol * abs(ref_sum)
    assert mesh.cells[0].type == "triangle"
    assert len(mesh.cells[0].data) == ref_num_cells
| StarcoderdataPython |
9693286 | from .node_client import NodeCommClient
from .popup_manager import PopupManager
from .service_proxy import ServiceProxy
from .editor_client import cli, EditorClient
from .popup_manager import get_popup_manager
from .logger import log
from . import logger
__all__ = [
'cli',
'EditorClient',
'logger',
'log',
'get_popup_manager',
'NodeCommClient',
'json_helpers',
'PopupManager',
'ServiceProxy',
'work_scheduler'
] | StarcoderdataPython |
4820982 | <gh_stars>0
from collections import namedtuple

# A simple 2-D point with named x/y fields.
Point = namedtuple('Point', 'x, y')

points_of_a_triangle = (
    Point(x=1, y=3),
    Point(x=4, y=5),
    Point(x=3, y=0)
)

# Unpack each vertex into its coordinates and print them, one blank line
# between vertices.
for vertex_x, vertex_y in points_of_a_triangle:
    print(f'X Coordinate {vertex_x}')
    print(f'Y Coordinate {vertex_y}')
    print()
| StarcoderdataPython |
6403009 | <reponame>David-Lor/twitterscraper<filename>twitterscraper/tests/test_twitter.py
import datetime
import datetimerange
import pytest
from twitterscraper.services import TwitterAPIClient, TwitterNitterClient
from twitterscraper.models import TwitterTweet
from twitterscraper.settings import load_settings
from twitterscraper.utils import datetime_to_timestamp, timestamp_to_datetime
from .base import BaseTest
class BaseTwitterTest(BaseTest):
    """Base class providing Twitter API and Nitter clients to tests."""

    # Shared clients, initialised once per test class in setup_class.
    client_twitterapi: TwitterAPIClient
    client_nitter: TwitterNitterClient

    @classmethod
    def setup_class(cls):
        """Create the API clients from the application's Twitter settings."""
        super().setup_class()
        settings = load_settings().twitter
        cls.client_twitterapi = TwitterAPIClient(
            api_key=settings.keys.key,
            api_secret=settings.keys.secret,
            api_token=settings.keys.token,
        )
        cls.client_nitter = TwitterNitterClient(settings.nitter_baseurl)
class TestNitter(BaseTwitterTest):
    """Tests for the Nitter scraping client."""

    @pytest.mark.asyncio
    async def test_get_tweets_in_range_possumeveryhour(self):
        """Fetch @possumeveryhour tweets in a fixed range and compare with a
        known-good snapshot (the account posts exactly one tweet per hour,
        so the expected timestamps are an hourly DateTimeRange)."""
        # NOTE(review): userid is unused in this test.
        userid = "123456"
        username = "possumeveryhour"
        from_datetime = datetime.datetime.fromisoformat("2022-01-01T00:00:00+00:00")
        to_datetime = datetime.datetime.fromisoformat(
            "2022-01-03T14:59:59+00:00")  # datetimerange 'to' is inclusive, but we want exclusive
        expected_last_datetime = datetime.datetime.fromisoformat("2022-01-03T14:00:00+00:00")
        # One expected tweet per hour between from_datetime and to_datetime.
        expected_tweets_datetimes = list(
            datetimerange.DateTimeRange(from_datetime, to_datetime).range(datetime.timedelta(hours=1)))
        # Snapshot of the real tweets in that range (texts intentionally empty).
        expected_tweets = [
            TwitterTweet(tweet_id='1477067061964775424', text='', timestamp=1640995200, is_reply=False),
            TwitterTweet(tweet_id='1477082160490242053', text='', timestamp=1640998800, is_reply=False),
            TwitterTweet(tweet_id='1477097253261217796', text='', timestamp=1641002400, is_reply=False),
            TwitterTweet(tweet_id='1477112354328489988', text='', timestamp=1641006000, is_reply=False),
            TwitterTweet(tweet_id='1477127454141652996', text='', timestamp=1641009600, is_reply=False),
            TwitterTweet(tweet_id='1477142551849320449', text='', timestamp=1641013200, is_reply=False),
            TwitterTweet(tweet_id='1477157652945936397', text='', timestamp=1641016800, is_reply=False),
            TwitterTweet(tweet_id='1477172751299493888', text='', timestamp=1641020400, is_reply=False),
            TwitterTweet(tweet_id='1477187850777108481', text='', timestamp=1641024000, is_reply=False),
            TwitterTweet(tweet_id='1477202950120517637', text='', timestamp=1641027600, is_reply=False),
            TwitterTweet(tweet_id='1477218051787530244', text='', timestamp=1641031200, is_reply=False),
            TwitterTweet(tweet_id='1477233147817381891', text='', timestamp=1641034800, is_reply=False),
            TwitterTweet(tweet_id='1477248250797305860', text='', timestamp=1641038400, is_reply=False),
            TwitterTweet(tweet_id='1477263348748230659', text='', timestamp=1641042000, is_reply=False),
            TwitterTweet(tweet_id='1477278448691425281', text='', timestamp=1641045600, is_reply=False),
            TwitterTweet(tweet_id='1477293548722692097', text='', timestamp=1641049200, is_reply=False),
            TwitterTweet(tweet_id='1477308646719639553', text='', timestamp=1641052800, is_reply=False),
            TwitterTweet(tweet_id='1477323746310594568', text='', timestamp=1641056400, is_reply=False),
            TwitterTweet(tweet_id='1477338848011116547', text='', timestamp=1641060000, is_reply=False),
            TwitterTweet(tweet_id='1477353947820089345', text='', timestamp=1641063600, is_reply=False),
            TwitterTweet(tweet_id='1477369046018445318', text='', timestamp=1641067200, is_reply=False),
            TwitterTweet(tweet_id='1477384142929240069', text='', timestamp=1641070800, is_reply=False),
            TwitterTweet(tweet_id='1477399249860112386', text='', timestamp=1641074400, is_reply=False),
            TwitterTweet(tweet_id='1477414344195383296', text='', timestamp=1641078000, is_reply=False),
            TwitterTweet(tweet_id='1477429444126121985', text='', timestamp=1641081600, is_reply=False),
            TwitterTweet(tweet_id='1477444541120720898', text='', timestamp=1641085200, is_reply=False),
            TwitterTweet(tweet_id='1477459644067065856', text='', timestamp=1641088800, is_reply=False),
            TwitterTweet(tweet_id='1477474743129452544', text='', timestamp=1641092400, is_reply=False),
            TwitterTweet(tweet_id='1477489842829279233', text='', timestamp=1641096000, is_reply=False),
            TwitterTweet(tweet_id='1477504940163670020', text='', timestamp=1641099600, is_reply=False),
            TwitterTweet(tweet_id='1477520038349479937', text='', timestamp=1641103200, is_reply=False),
            TwitterTweet(tweet_id='1477535140813459463', text='', timestamp=1641106800, is_reply=False),
            TwitterTweet(tweet_id='1477550240848879617', text='', timestamp=1641110400, is_reply=False),
            TwitterTweet(tweet_id='1477565336883011588', text='', timestamp=1641114000, is_reply=False),
            TwitterTweet(tweet_id='1477580436813623297', text='', timestamp=1641117600, is_reply=False),
            TwitterTweet(tweet_id='1477595539059445762', text='', timestamp=1641121200, is_reply=False),
            TwitterTweet(tweet_id='1477610638113492994', text='', timestamp=1641124800, is_reply=False),
            TwitterTweet(tweet_id='1477625735468699652', text='', timestamp=1641128400, is_reply=False),
            TwitterTweet(tweet_id='1477640835374239749', text='', timestamp=1641132000, is_reply=False),
            TwitterTweet(tweet_id='1477655933237006341', text='', timestamp=1641135600, is_reply=False),
            TwitterTweet(tweet_id='1477671037374509056', text='', timestamp=1641139200, is_reply=False),
            TwitterTweet(tweet_id='1477686134981505030', text='', timestamp=1641142800, is_reply=False),
            TwitterTweet(tweet_id='1477701237483130884', text='', timestamp=1641146400, is_reply=False),
            TwitterTweet(tweet_id='1477716334997876738', text='', timestamp=1641150000, is_reply=False),
            TwitterTweet(tweet_id='1477731431979835394', text='', timestamp=1641153600, is_reply=False),
            TwitterTweet(tweet_id='1477746533244231691', text='', timestamp=1641157200, is_reply=False),
            TwitterTweet(tweet_id='1477761633887965188', text='', timestamp=1641160800, is_reply=False),
            TwitterTweet(tweet_id='1477776731129929733', text='', timestamp=1641164400, is_reply=False),
            TwitterTweet(tweet_id='1477791836190216193', text='', timestamp=1641168000, is_reply=False),
            TwitterTweet(tweet_id='1477806929418276871', text='', timestamp=1641171600, is_reply=False),
            TwitterTweet(tweet_id='1477822031324499974', text='', timestamp=1641175200, is_reply=False),
            TwitterTweet(tweet_id='1477837129585725443', text='', timestamp=1641178800, is_reply=False),
            TwitterTweet(tweet_id='1477852235027685376', text='', timestamp=1641182400, is_reply=False),
            TwitterTweet(tweet_id='1477867334308188167', text='', timestamp=1641186000, is_reply=False),
            TwitterTweet(tweet_id='1477882431927693316', text='', timestamp=1641189600, is_reply=False),
            TwitterTweet(tweet_id='1477897532105711621', text='', timestamp=1641193200, is_reply=False),
            TwitterTweet(tweet_id='1477912630174097414', text='', timestamp=1641196800, is_reply=False),
            TwitterTweet(tweet_id='1477927728561197058', text='', timestamp=1641200400, is_reply=False),
            TwitterTweet(tweet_id='1477942825207615488', text='', timestamp=1641204000, is_reply=False),
            TwitterTweet(tweet_id='1477957927331713025', text='', timestamp=1641207600, is_reply=False),
            TwitterTweet(tweet_id='1477973024452362243', text='', timestamp=1641211200, is_reply=False),
            TwitterTweet(tweet_id='1477988124374573063', text='', timestamp=1641214800, is_reply=False),
            TwitterTweet(tweet_id='1478003225479548939', text='', timestamp=1641218400, is_reply=False)
        ]
        tweets = await self.client_nitter.get_tweets_in_range(
            username=username,
            from_timestamp=datetime_to_timestamp(from_datetime),
            to_timestamp=datetime_to_timestamp(to_datetime),
            include_replies=True
        )
        # Sort chronologically before comparing with the hourly expectations.
        tweets = sorted(tweets, key=lambda tweet: tweet.timestamp)
        tweets_datetimes = [timestamp_to_datetime(tweet.timestamp) for tweet in tweets]
        assert tweets_datetimes == expected_tweets_datetimes
        assert tweets_datetimes[-1] == expected_last_datetime
        assert tweets == expected_tweets
| StarcoderdataPython |
11331929 | import configparser
import os
import logging
class Config(object):
    """Manage reading and writing a MADE project configuration file.

    The configuration lives in ``made.config`` inside the project folder and
    is split into four sections: work products, inputs, project and pipeline.
    Missing sections are created on construction, so callers can set options
    without checking for section existence first.
    """

    # File name and section names used throughout the project.
    config_file_name = "made.config"
    section_wp = 'work_products'
    section_inputs = 'inputs'
    section_project = 'project'
    section_pipeline = 'pipeline'

    def __init__(self, folder):
        """Load (or initialise) the configuration stored in *folder*.

        :param folder: project directory containing (or to contain) made.config
        """
        self.project_folder = folder
        self.config = configparser.ConfigParser()
        self.path = os.path.join(folder, self.config_file_name)
        # ConfigParser.read silently ignores a missing file, so this works
        # both for existing projects and brand-new ones.
        self.config.read(self.path)
        logging.getLogger("my logger").debug("Configuring from: " + self.path)
        # Ensure all standard sections exist so setters never fail.
        for section in (self.section_inputs, self.section_wp,
                        self.section_project, self.section_pipeline):
            if not self.config.has_section(section):
                self.config.add_section(section)

    def has_config_file(self):
        """Return True if a made.config file already exists on disk."""
        if os.path.exists(self.path):
            logging.getLogger("my logger").info("Config file already exists")
            return True
        logging.getLogger("my logger").debug("Config file does not exist")
        return False

    def get_path(self):
        """Return the templates path.

        NOTE(review): the 'templates' section is never created by this class,
        so this raises configparser.NoSectionError unless the file on disk
        provides it -- confirm intended.
        """
        return self.config.get('templates', 'path')

    def has_section_wp(self):
        """Return True if the work-products section exists."""
        return self.config.has_section(self.section_wp)

    def add_section_wp(self):
        """Add the work-products section (already ensured by __init__)."""
        return self.config.add_section(self.section_wp)

    def add_section_pipeline(self):
        """Add the pipeline section (already ensured by __init__)."""
        return self.config.add_section(self.section_pipeline)

    def add_option_wp_prefix(self, option_value='wp'):
        """Set the work-product prefix (default ``'wp'``)."""
        self.config.set(self.section_wp, 'prefix', option_value)

    def get_project_name(self):
        """Return the configured project name."""
        return self.config.get(Config.section_project, 'name')

    def add_option_project_name(self, option_value):
        """Set the project name."""
        # TODO validate option value
        self.config.set(self.section_project, 'name', option_value)

    def get_option_wp_prefix(self):
        """Return the work-product prefix, creating the default if unset."""
        if not self.config.has_option(self.section_wp, 'prefix'):
            self.add_option_wp_prefix()
        return self.config.get(self.section_wp, 'prefix')

    def add_option_inputs_root(self, option_value='s3'):
        """Set the input folder type ('s3' or 'file')."""
        self.config.set(self.section_inputs, 'root', option_value)

    def add_option_inputs_S3bucket(self, option_value):
        """Set the S3 bucket name used for inputs."""
        self.config.set(self.section_inputs, 'bucket', option_value)

    def get_S3bucket_name(self):
        """Return the configured S3 bucket name."""
        return self.config.get(Config.section_inputs, 'bucket')

    def get_inputs_root(self):
        """Return the configured input folder type."""
        return self.config.get(Config.section_inputs, 'root')

    def write(self):
        """Persist the configuration to made.config in the project folder."""
        # Context manager guarantees the file is closed even on write errors
        # (the original left the handle open on failure and printed the path
        # to stdout as debug noise).
        with open(self.path, 'w') as cfgfile:
            self.config.write(cfgfile)
if __name__ == '__main__':
    # Manual smoke test: build a config for the current directory, populate
    # the default options, show the prefix and write the file out.
    config = Config(os.getcwd())
    config.add_option_wp_prefix()
    config.add_option_inputs_root()
    print(config.get_option_wp_prefix())
    config.write()
| StarcoderdataPython |
5167471 | <reponame>mkudlej/3scale-tests
"""
Rewrite /spec/functional_specs/do_not_send_openresty_version_spec.rb
When requesting non existing endpoint openresty version should not be sent
in the response body or in the response header
"""
import backoff
import pytest
from testsuite import rawobj
@pytest.fixture(scope="module")
def service_proxy_settings():
    """Proxy settings pointing the service at the echo API backend."""
    backend_url = "https://echo-api.example.local"
    return rawobj.Proxy(backend_url)
@pytest.fixture(scope="module")
def client(api_client):
    """
    Client configured not to retry requests.

    By default, the failed requests are retried by the api_client.
    As 404 is the desired outcome of one of the tests, the client is
    configured not to retry requests to avoid long time execution.
    """
    no_retry_statuses = {503, 404}
    return api_client(disable_retry_status_list=no_retry_statuses)
@backoff.on_predicate(
    backoff.fibo,
    lambda response: response.headers.get("server", "") not in ("openresty", "envoy"),
    8, jitter=None)
def make_requests(client):
    """Poll the gateway until it answers itself (503 from apicast/envoy,
    since the backend is unavailable), retrying up to 8 times."""
    return client.get("/anything")
@pytest.mark.issue("https://issues.jboss.org/browse/THREESCALE-1989")
def test_do_not_send_openresty_version(client):
    """
    Request a non-existing endpoint and verify that the openresty version
    is leaked neither in the response headers nor in the response body.
    """
    resp = make_requests(client)
    assert resp.status_code == 503
    assert "server" in resp.headers
    server_header = resp.headers["server"]
    if server_header == "envoy":
        pytest.skip("envoy edge proxy in use")
    assert server_header == "openresty"
    assert "<center>openresty</center>" in resp.text
| StarcoderdataPython |
11211703 | <reponame>lsst-camera-dh/IandT-jobs
#!/usr/bin/env python
"""
Producer script for BOT_acq_recovery harnessed job.
Given an acq_run number, specified in the lcatr.cfg file, this job
will collect the symlinks to the frames from all of the retry attempts
in the BOT_acq job's working directories for that run and copy the
symlinks to the current working directory. The activityIds for the
retry attempts will be iterated over in reverse order so that the
first encountered symlink with a given frame name will be the last one
taken, and the data associated with that symlink will be used in the
aggregated data. Subsequent symlinks with that frame name are
presumably from prior acquisitions attempts and are more likely to be
corrupted or incomplete and so will be ignored.
"""
import os
import glob
import shutil
import pathlib
import siteUtils
# Get the acq_run to use for the data recovery and aggregation from
# lcatr.cfg.
acq_run = os.environ['LCATR_ACQ_RUN']

# Get any bad frames from a text file specified in lcatr.cfg.
try:
    with open(os.environ['LCATR_BAD_FRAME_LIST'], 'r') as fd:
        bad_frames = [_.strip() for _ in fd]
except KeyError:
    # No bad-frame list configured; treat every frame as usable.
    bad_frames = []

staging_dir = os.path.join(os.environ['LCATR_STAGE_ROOT'],
                           siteUtils.getUnitType(), siteUtils.getUnitId())

outdir = '.'
acqs_dir = os.path.join(staging_dir, acq_run, 'BOT_acq', 'v0')
# Iterate activityIds in reverse so the most recent retry attempt wins: the
# first symlink seen for a given frame name is kept and earlier (likely
# incomplete) attempts are ignored.
job_id_dirs = sorted(glob.glob(os.path.join(acqs_dir, '[0-9]*')), reverse=True)
for job_id_dir in job_id_dirs:
    frame_dirs = glob.glob(os.path.join(job_id_dir, '*'))
    for frame_dir in frame_dirs:
        dest = os.path.join(outdir, os.path.basename(frame_dir))
        # Copy frame symlinks (as symlinks) and .cfg files, skipping names
        # already collected from a later attempt.
        if ((os.path.islink(frame_dir) or frame_dir.endswith('.cfg'))
            and not os.path.lexists(dest)):
            shutil.copyfile(frame_dir, dest, follow_symlinks=False)

# Delete any folders with bad data
fits_images = []
for bad_frame in bad_frames:
    pattern = os.path.join('*', f'*{bad_frame}*.fits')
    fits_images.extend(glob.glob(pattern))
# Remove the symlinked frame directories that contain a bad FITS image.
bad_symlinks = set([os.path.dirname(_) for _ in fits_images])
for bad_symlink in bad_symlinks:
    os.remove(bad_symlink)

# Marker file telling downstream steps to keep the symlinks as-is.
pathlib.Path('PRESERVE_SYMLINKS').touch()
| StarcoderdataPython |
5173564 | <gh_stars>0
"""
Basic LSTM.
"""
import torch
import torch.nn as nn
from src.models.base_torch_model import BaseTorchModel
class LSTM(BaseTorchModel):
    """Basic LSTM text classifier.

    Embeds token ids, runs them through a (possibly bidirectional, possibly
    stacked) LSTM and classifies from the final hidden state of the last
    layer.
    """

    def __init__(self, config, vocab) -> None:
        """Build the network from the experiment configuration.

        :param config: configuration dict; reads the ``network``,
            ``dataloader_params`` and ``dropout`` entries
        :param vocab: vocabulary providing ``__len__`` and, when pre-trained
            embeddings are requested, a ``vectors`` attribute
        """
        super().__init__(config)
        self._config = config
        self._batch_size = config['dataloader_params']['batch_size']
        net_conf = self._config['network']
        # Either load frozen/fine-tunable pre-trained embeddings or train
        # fresh ones from scratch.
        if net_conf['pretrained_embeddings']:
            # NOTE(review): the message reports len(vocab), i.e. the
            # vocabulary size, not the embedding dimension.
            self._logger.info('Using pre-trained embeddings of size {}'.format(
                len(vocab)
            ))
            self._embedding = nn.Embedding.from_pretrained(
                torch.FloatTensor(vocab.vectors),
                freeze=net_conf['freeze_embeddings']
            )
        else:
            self._logger.info('Will train embeddings')
            self._embedding = nn.Embedding(
                len(vocab),
                net_conf['embedding_size']
            )
        # nn.LSTM's own dropout applies between stacked layers only; the
        # separate Dropout below acts on the embeddings.
        self._lstm = nn.LSTM(
            input_size=net_conf['embedding_size'],
            hidden_size=net_conf['hidden_size'],
            num_layers=net_conf['num_layers'],
            bias=net_conf['bias'],
            dropout=net_conf['dropout'],
            bidirectional=net_conf['bidirectional']
        )
        self._dropout = nn.Dropout(self._config['dropout'])
        self._output = nn.Linear(
            net_conf['hidden_size'], net_conf['num_classes']
        )

    @property
    def num_classes(self) -> int:
        """Number of output classes, as declared in the network config."""
        return self._config['network']['num_classes']

    def forward(self, x):
        """Forward pass: token ids -> class logits.

        :param x: LongTensor of shape (sequence_length, batch_size)
        :returns: logits of shape (batch_size, num_classes)
        """
        x = self._embedding(x)
        x = self._dropout(x)
        # x.shape: (sequence_length, batch_size, embedding_dim)
        output, (h_n, c_n) = self._lstm(x)
        # Classify from the last layer's final hidden state.
        # NOTE(review): for a bidirectional LSTM h_n[-1] is only the backward
        # direction of the last layer -- confirm this is intended.
        logits = self._output(h_n[-1])
        return logits
| StarcoderdataPython |
1743145 | # -*- coding: utf-8 -*-
"""
pybitcoin
~~~~~
:copyright: (c) 2014-2016 by Halfmoon Labs, Inc.
:license: MIT, see LICENSE for more details.
"""
import opcodes
from .network import broadcast_transaction, send_to_address, get_unspents, \
embed_data_in_blockchain, make_send_to_address_tx, make_op_return_tx, \
analyze_private_key, serialize_sign_and_broadcast, \
sign_all_unsigned_inputs
from .scripts import make_pay_to_address_script, make_op_return_script, \
script_to_hex
from .serialize import serialize_input, serialize_output, \
serialize_transaction, deserialize_transaction
from .outputs import make_op_return_outputs, make_pay_to_address_outputs
from .utils import flip_endian, variable_length_int
| StarcoderdataPython |
5161108 | <reponame>RevansChen/online-judge
# Python - 3.6.0
# Codewars kata solution: convert a numeric string to an integer.
# int() already does exactly this, so alias it directly.
string_to_number = int
| StarcoderdataPython |
1868821 | <filename>model/ipopt_predict.py
import sys
import os
ARGLIST = sys.argv
ARGLEN = len(ARGLIST)
derp = "(options) output_file_name integration_steps\n -h for options\n -u for usage and disclaimers\n"
options = " -P path : specify a standard PATH (including end /) for all files assuming standard names which is overwritten by other options\n\
-e path : specify an equations.txt to use. Otherwise looks in current directory\n\
-s path : specify a specs file to use to find NSKIP. Otherwise looks in current directory\n\
-d path : specify the IPOPT output path file to use (data.dat). Otherwise looks in current directory\n\
-p path : specify the IPOPT output parameter file to use (param.dat). Otherwise looks in current directory\n\
-c path : specify a directory to find stimulus files (including end /). Otherwise looks in current directory\n\
-r int : specify the number of integration steps per model time step. Default is currently 10\n"
usage = "This code is for forward integration of the estimates provided by IPOPT. This code takes the end of the path estimate and parameter estimates as the starting point and integrates these forward using RK4. This code requires an output file name and the number of time steps to integrate as input. There are as of yet not much in the way of error messages, but most errors are due to files with an improper number of or order of entries. If you successfully did a run with IPOPT the files used for that run and created by that run will work. If a segfault occurs, the most common reason is stimulus files are not where you think they are or are not long enough.\n"
equations_file = "equations.txt"
specs_file = "specs.txt"
data_file = "data.dat"
param_file = "param.dat"
cpath = ''
PATH = ''
RESO = '10'
ispath=0
isequa=0
isspec=0
isdata=0
ispara=0
iscpat=0
isreso=0
argnum = 1
if ARGLEN==1:
print derp
sys.exit(0)
if ARGLIST[1] == '-h':
print options
sys.exit(0)
if ARGLIST[1] == '-u':
print usage
sys.exit(0)
while argnum < (ARGLEN -1):
if ARGLIST[argnum] == '-e':
equations_file_temp = ARGLIST[argnum+1]
isequa=1
argnum=argnum+2
elif ARGLIST[argnum] == '-s':
specs_file_temp = ARGLIST[argnum+1]
isspec=1
argnum=argnum+2
elif ARGLIST[argnum] == '-d':
data_file_temp = ARGLIST[argnum+1]
isdata=1
argnum=argnum+2
elif ARGLIST[argnum] == '-p':
param_file_temp = ARGLIST[argnum+1]
ispara=1
argnum=argnum+2
elif ARGLIST[argnum] == '-P':
PATH = ARGLIST[argnum+1]
ispath=1
argnum=argnum+2
elif ARGLIST[argnum] == '-c':
cpath_temp = ARGLIST[argnum+1]
argnum=argnum+2
elif ARGLIST[argnum] == '-r':
RESO = ARGLIST[argnum+1]
argnum=argnum+2
else:
break
outfile = ARGLIST[argnum]
NTIME = ARGLIST[argnum+1]
if ispath:
equations_file = PATH+"equations.txt"
specs_file = PATH+"specs.txt"
data_file = PATH+"data.dat"
param_file = PATH+"param.dat"
cpath = PATH
if isequa:
equations_file = equations_file_temp
if isdata:
data_file = data_temp_file
if isspec:
specs_file = specs_file_temp
if ispara:
param_file = param_file_temp
if iscpat:
cpath = cpath_temp
# ---------------------------------------------------------------------------
# Parse equations.txt: problem sizes, vector-field equations, variable names.
# ---------------------------------------------------------------------------
file = open(equations_file,"r")
temp=[]
for line in file:
    if line.startswith('#'): # Pound used as comment in text file
        continue
    elif line.startswith('\\'): # In case file has UTF-8 markers
        continue
    elif line=='':
        continue
    else:
        temp.append(line)
file.close()
# Strip trailing whitespace and drop blank lines.
h=[]
for i in range(len(temp)):
    temp1=temp[i].rstrip( )
    if temp1!='':
        h.append(temp1)
# Second non-comment line declares the sizes:
# NSTAT,NPARA,NCOUP,NSTIM,NFUNC[,NMEAS]  (NMEAS defaults to NCOUP).
a=h[1].split(',')
NSTAT = int(a[0])
NPARA = int(a[1])
NCOUP = int(a[2])
NSTIM = int(a[3])
NFUNC = int(a[4])
if len(a)>5:
    NMEAS = int(a[5])
else:
    NMEAS=NCOUP
# Sanity-check the declared counts against the actual number of lines.
if len(h)!=(3+2*NSTAT+NPARA+NCOUP+NSTIM+NFUNC+NMEAS):
    print "improper equations.txt - check the declared number of variables with what are given\n"
    print 'number of non-comment lines in equations.txt : '+str(len(h))+'\n'
    print 'number expected based on declared variable numbers : '+str(3+2*NSTAT+NPARA+NCOUP+NSTIM+NFUNC+NMEAS)+'\n'
    sys.exit()
# Slice the remaining lines into equation strings and name lists.
FEQNSTR = []
VARSTR = []
for i in range(NSTAT):
    FEQNSTR.append(h[2+i])
    VARSTR.append(h[3+NSTAT+i])
OBJSTR = h[2+NSTAT]
PRMSTR = []
for i in range(NPARA):
    PRMSTR.append(h[3+2*NSTAT+i])
CPLSTR = []
for i in range(NCOUP):
    CPLSTR.append(h[3+2*NSTAT+NPARA+i])
MSRSTR = []
for i in range(NMEAS):
    MSRSTR.append(h[3+2*NSTAT+NPARA+NCOUP+i])
STMSTR = []
for i in range(NSTIM):
    STMSTR.append(h[3+2*NSTAT+NPARA+NCOUP+NMEAS+i])
# Rewrite each equation in C syntax: state names -> x[j], parameters -> p[j],
# couplings/measurements -> 0 (no data during prediction), stimuli -> stim[j].
OUTFUNC = []
for i in range(NSTAT):
    temp = FEQNSTR[i]
    for j in range(NSTAT):
        temp = temp.replace(VARSTR[j],"x["+str(j)+"]")
    for j in range(NPARA):
        temp = temp.replace(PRMSTR[j],"p["+str(j)+"]")
    for j in range(NCOUP):
        temp = temp.replace(CPLSTR[j],'0')
    for j in range(NMEAS):
        temp = temp.replace(MSRSTR[j],'0')
    for j in range(NSTIM):
        temp = temp.replace(STMSTR[j],'stim['+str(j)+']')
    OUTFUNC.append(temp)
# ---------------------------------------------------------------------------
# Parse param.dat: estimated parameter values (4th tab-separated column).
# ---------------------------------------------------------------------------
file = open(param_file,"r")
temp=[]
for line in file:
    if line.startswith('#'): # Pound used as comment in text file
        continue
    elif line.startswith('\\'): # In case file has UTF-8 markers
        continue
    else:
        temp.append(line)
file.close()
PLIST=[]
for i in range(len(temp)):
    temp1=temp[i].rstrip( )
    a=temp1.split('\t')
    if temp1!='':
        PLIST.append(a[3])
        print a[3]
if len(PLIST)!=NPARA:
    print 'error : number of parameter values in param.dat does not equal NP in equations.txt\n\n aborting\n'
    sys.exit(0)
# ---------------------------------------------------------------------------
# Parse data.dat: take the final line of the IPOPT path estimate as the
# initial condition for the forward integration.
# ---------------------------------------------------------------------------
file = open(data_file,'r')
#lazy way to get last line
time=0
for line in file:
    temp = line
    time = time+1
#print time
file.close()
temp = temp.rstrip( )
a = temp.split() #no argument in split assumes any amount of whitespace is a delimiter
if len(a)!=(NSTAT+NCOUP+NMEAS+1):
    print 'warning : data file appears to be from a source other than that generated by IPOPT\n'
    print '          be sure the data file you are using corresponds to the correct model.\n'
# Initial state values (column 0 is the time index).
XLIST = []
for i in range(NSTAT):
    XLIST.append(a[i+1])
# ---------------------------------------------------------------------------
# Parse the specs file: NSKIP, the model time step and the stimulus files.
# ---------------------------------------------------------------------------
file = open(specs_file,'r')
temp=[]
for line in file:
    if line.startswith('#'): # Pound used as comment in text file
        continue
    elif line.startswith('\\'): # In case file has UTF-8 markers
        continue
    else:
        temp.append(line)
file.close()
h=[]
for i in range(len(temp)):
    h.append(temp[i].rstrip( ))
NSKIP = h[1]
# Absolute time index of the initial condition within the full data record.
time = time+int(NSKIP)
# The spec file stores 2*dt (midpoint discretisation), hence the 0.5 factor.
DT = str(float(h[2])*0.5)
ISTIMFILE=[]
for i in range(NSTIM):
    ISTIMFILE.append(h[3+NMEAS+i])
#uncomment these if errors occur, it may help finding them
#print XLIST
#print PLIST
#print ISTIMFILE
#print time
#sys.exit(0)
########################################
####### make vector field ########
########################################
file = open("RKTEMP.cpp","w")
file.write(
'#include <cmath>\n'
'#include <iostream>\n'
'#include <fstream>\n'
'#include <string>\n'
'#include <iomanip>\n'
'#include <sstream>\n'
)
if NFUNC!=0:
file.write(
'#include \"'+PATH+'myfunctions.cpp\"\n'
)
file.write(
'#define RESO '+RESO+'\n'
'#define NSTAT '+str(NSTAT)+'\n'
'#define NPARA '+str(NPARA)+'\n'
'#define NSTIM '+str(NSTIM)+'\n'
'#define NTIME RESO*'+NTIME+'\n'
'#define NMEAS '+str(NMEAS)+'\n'
'#define DT '+DT+'\n'
'#define dt DT/((double) RESO)\n'
'using namespace std;\n'
'void Func(double *dx, double *x, double *p, double *stim);\n\n'
'int main(int argc, char **argv){\n\n'
' double *K1 = new double[NSTAT];\n'
' double *K2 = new double[NSTAT];\n'
' double *K3 = new double[NSTAT];\n'
' double *K4 = new double[NSTAT];\n'
' double *X = new double[NSTAT];\n'
' double *STIM = new double[2*NSTIM];\n'
' double *Stimstep = new double[NSTIM];\n'
' double *STIMTEMP = new double[3*NSTIM];\n'
' double *P = new double[NPARA];\n'
' int i,j,k;\n'
' double Temp[NSTAT];\n'
' const int WIDTH=13;\n'
' const int PRECISION=6;\n\n'
)
j= 0
for i in PLIST:
file.write(
' P['+str(j)+'] = '+i+';\n'
)
j+=1
j= 0
for i in XLIST:
file.write(
' X['+str(j)+'] = '+i+';\n'
)
j+=1
file.write(
' ifstream stim[NSTIM];\n'
)
for i in range(len(ISTIMFILE)):
file.write(
' stim['+str(i)+'].open(\"'+cpath+ISTIMFILE[i]+'\");\n'
#' cout<<\"'+cpath+ISTIMFILE[i]+'\"<<endl;\n'
)
file.write(
' ofstream *outfile=new ofstream;\n'
' outfile->open(\"'+outfile+'\");\n'
' (*outfile)<<setw(WIDTH)<<setprecision(PRECISION)<<'+str(time)+';\n'
' (*outfile)<<setw(WIDTH)<<setprecision(PRECISION)<<'+str(time)+'*DT;\n'
' for(i=0;i<NSTAT;i++){\n'
' (*outfile)<<setw(WIDTH)<<setprecision(PRECISION)<<X[i];\n'
' }\n\n'
' (*outfile)<<endl;\n'
)
if NSTIM !=0:
file.write(
#' cout<<NSTIM*('+str(time)+')<<endl;\n'
' for(j=0;j<NSTIM*('+str(time)+');j++){\n' #this should skip until the right place in the stimulus files
' STIM[j%NSTIM+NSTIM] = STIM[j%NSTIM];\n'
' stim[j%NSTIM]>>STIM[j%NSTIM];\n\n'
' }\n'
' for(j=0;j<NSTIM;j++){\n'
' Stimstep[j] = (STIM[j+NSTIM]-STIM[j])/((double) RESO);\n'
' STIMTEMP[j] = STIM[j];\n'
' STIMTEMP[j+NSTIM] = STIM[j] + .5*Stimstep[j];\n'
' STIMTEMP[j+2*NSTIM] = STIM[j] + Stimstep[j];\n'
' }\n\n'
)
file.write(
' for(i=0;i<NTIME;i++){\n'
' if(X[0] != X[0]){\n'
' cout<<"NAN error"<<endl;\n'
' break;\n'
' }\n'
' Func(K1,X,P,&STIMTEMP[0]);\n'
' for(j=0;j<NSTAT;j++)\n'
' Temp[j] = X[j]+.5*dt*K1[j];\n\n'
' Func(K2,Temp,P,&STIMTEMP[NSTIM]);\n'
' for(j=0;j<NSTAT;j++)\n'
' Temp[j] = X[j]+.5*dt*K2[j];\n\n'
' Func(K3,Temp,P,&STIMTEMP[NSTIM]);\n'
' for(j=0;j<NSTAT;j++)\n'
' Temp[j] = X[j]+dt*K3[j];\n\n'
' Func(K4,Temp,P,&STIMTEMP[2*NSTIM]);\n'
' for(j=0;j<NSTAT;j++)\n'
' X[j] = X[j]+1.0/6.0*dt*(K1[j]+2.0*K2[j]+2.0*K3[j]+K4[j]);\n\n'
' if((i+1)%RESO==0){\n'
)
#if NSTIM != 0:
file.write(
' for(j=0;j<NSTIM;j++){\n'
' STIM[j]=STIM[j+NSTIM];\n'
' stim[j]>>STIM[j+NSTIM];\n'
' Stimstep[j] = (STIM[j+NSTIM]-STIM[j])/((double) RESO);\n'
' }\n'
)
file.write(
' (*outfile)<<setw(WIDTH)<<setprecision(PRECISION)<<'+str(time)+'+i/RESO;\n'
' (*outfile)<<setw(WIDTH)<<setprecision(PRECISION)<<('+str(time)+'+i/RESO)*DT;\n'
' for(j=0;j<NSTAT;j++)\n'
' (*outfile)<<setw(WIDTH)<<setprecision(PRECISION)<<X[j];\n\n'
' //cout<<(i+1)/RESO<<endl;\n'
' (*outfile)<<endl;\n'
' }\n'
)
if NSTIM != 0:
file.write(
' for(j=0;j<NSTIM;j++){\n'
' STIMTEMP[j]=STIMTEMP[j+2*NSTIM];\n'
' STIMTEMP[j+NSTIM]=STIMTEMP[j]+.5*Stimstep[j];\n'
' STIMTEMP[j+2*NSTIM]=STIMTEMP[j]+Stimstep[j];\n'
' }\n'
)
file.write(
' }\n'
' return 0;\n'
'}\n'
)
file.write(
'void Func(double *dx, double *x, double *p, double *stim){\n'
)
for i in range(NSTAT):
file.write(
' dx['+str(i)+'] = '+OUTFUNC[i]+';\n'
)
file.write('}\n\n')
file.close()
os.system("g++ -o theproblem -lm RKTEMP.cpp")
os.system("./theproblem")
os.system("rm RKTEMP.cpp")
os.system("rm theproblem")
| StarcoderdataPython |
9682990 | <reponame>bendavis/gehome<filename>gehomesdk/erd/values/laundry/tumble_status.py
import enum
@enum.unique
class TumbleStatus(enum.Enum):
    """Tumble-dry setting reported by a GE laundry appliance.

    Member values are the human-readable strings used by the SDK when
    presenting the ERD value; NOT_AVAILABLE means the appliance does not
    report this feature.
    """
    NOT_AVAILABLE = "N/A"
    ENABLE = "Enabled"
    DISABLE = "Disabled"
| StarcoderdataPython |
6529988 | <reponame>aollio/toys
#!/usr/bin/env python3
import time
import multiprocessing
__author__ = '<NAME>'
__email__ = '<EMAIL>'
def say(message):
    """Print *message* three times, sleeping one second after each print."""
    remaining = 3
    while remaining > 0:
        print(message)
        time.sleep(1)
        remaining -= 1
def main():
    """Fan three `say` jobs out to a 4-worker process pool and wait for them."""
    worker_pool = multiprocessing.Pool(processes=4)
    for index in range(3):
        worker_pool.apply_async(func=say, args=('hello %s' % index,))
    worker_pool.close()  # no further tasks will be submitted
    worker_pool.join()   # block until every async job has finished
    print('done. sub process(es)')
if __name__ == '__main__':
main()
| StarcoderdataPython |
9716510 | # Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import os.path
import shutil
import tempfile
import fixtures
import glance_store
from oslo_config import cfg
from oslo_db import options
import glance.common.client
from glance.common import config
import glance.db.sqlalchemy.api
from glance import tests as glance_tests
from glance.tests import utils as test_utils
TESTING_API_PASTE_CONF = """
[pipeline:glance-api]
pipeline = versionnegotiation gzip unauthenticated-context rootapp
[pipeline:glance-api-caching]
pipeline = versionnegotiation gzip unauthenticated-context cache rootapp
[pipeline:glance-api-cachemanagement]
pipeline =
versionnegotiation
gzip
unauthenticated-context
cache
cache_manage
rootapp
[pipeline:glance-api-fakeauth]
pipeline = versionnegotiation gzip fakeauth context rootapp
[pipeline:glance-api-noauth]
pipeline = versionnegotiation gzip context rootapp
[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v2: apiv2app
[app:apiversions]
paste.app_factory = glance.api.versions:create_resource
[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory
[filter:versionnegotiation]
paste.filter_factory =
glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
[filter:gzip]
paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
[filter:cache_manage]
paste.filter_factory =
glance.api.middleware.cache_manage:CacheManageFilter.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
CONF = cfg.CONF
class ApiTest(test_utils.BaseTestCase):
    """Functional-test base class that boots the Glance API as a WSGI app.

    setUp builds a fully self-contained environment in a temp dir: a SQLite
    database, a filesystem image store, property-protection config, and the
    paste-deploy pipeline, then exposes the app through an httplib2 adapter.
    """

    def setUp(self):
        super(ApiTest, self).setUp()
        # Per-test scratch directory; the fixture removes it on cleanup.
        self.test_dir = self.useFixture(fixtures.TempDir()).path
        self._configure_logging()
        self._setup_database()
        self._setup_stores()
        self._setup_property_protection()
        # Subclasses may override api_flavor / api_paste_conf to pick a
        # different paste pipeline (e.g. caching or fakeauth).
        self.glance_api_app = self._load_paste_app(
            'glance-api',
            flavor=getattr(self, 'api_flavor', ''),
            conf=getattr(self, 'api_paste_conf', TESTING_API_PASTE_CONF),
        )
        self.http = test_utils.Httplib2WsgiAdapter(self.glance_api_app)

    def _setup_property_protection(self):
        """Copy the canned property-protections config into the test dir."""
        self._copy_data_file('property-protections.conf', self.test_dir)
        self.property_file = os.path.join(self.test_dir,
                                          'property-protections.conf')

    def _configure_logging(self):
        """Quiet down noisy third-party loggers during the test run."""
        self.config(default_log_levels=[
            'amqplib=WARN',
            'sqlalchemy=WARN',
            'boto=WARN',
            'suds=INFO',
            'keystone=INFO',
            'eventlet.wsgi.server=DEBUG'
        ])

    def _setup_database(self):
        """Point Glance at a per-test SQLite DB, reusing a cached empty DB.

        The first test run creates an empty schema via db_sync and caches it
        in a tempfile whose path is published through the
        GLANCE_DB_TEST_SQLITE_FILE env var; later runs just copy that file,
        which is much faster than re-running migrations.
        """
        sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
        options.set_defaults(CONF, connection=sql_connection)
        glance.db.sqlalchemy.api.clear_db_env()
        glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
        if glance_db_env in os.environ:
            # use the empty db created and cached as a tempfile
            # instead of spending the time creating a new one
            db_location = os.environ[glance_db_env]
            shutil.copyfile(db_location, "%s/tests.sqlite" % self.test_dir)
        else:
            test_utils.db_sync()
            # copy the clean db to a temp location so that it
            # can be reused for future tests
            (osf, db_location) = tempfile.mkstemp()
            os.close(osf)
            shutil.copyfile('%s/tests.sqlite' % self.test_dir, db_location)
            os.environ[glance_db_env] = db_location
            # cleanup the temp file when the test suite is
            # complete
            def _delete_cached_db():
                try:
                    os.remove(os.environ[glance_db_env])
                except Exception:
                    glance_tests.logger.exception(
                        "Error cleaning up the file %s" %
                        os.environ[glance_db_env])
            atexit.register(_delete_cached_db)

    def _setup_stores(self):
        """Configure glance_store with a filesystem backend under test_dir."""
        glance_store.register_opts(CONF)
        image_dir = os.path.join(self.test_dir, "images")
        self.config(group='glance_store',
                    filesystem_store_datadir=image_dir)
        glance_store.create_stores()

    def _load_paste_app(self, name, flavor, conf):
        """Write *conf* to a paste .ini file and load the named WSGI app."""
        conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name)
        with open(conf_file_path, 'w') as conf_file:
            conf_file.write(conf)
            conf_file.flush()
        return config.load_paste_app(name, flavor=flavor,
                                     conf_file=conf_file_path)

    def tearDown(self):
        # Drop the sqlalchemy engine cache so the next test gets a fresh DB.
        glance.db.sqlalchemy.api.clear_db_env()
        super(ApiTest, self).tearDown()
| StarcoderdataPython |
1776396 | <reponame>nxphi47/eye_gesture_app_wth
#!/usr/bin/env python
from __future__ import print_function
import tensorflow as tf
from keras.layers.merge import Concatenate, Add, Dot, Multiply
import glob
import os
from PIL import Image
import numpy as np
from keras import backend as K
from keras.layers import Input, Activation, Conv2D, Dense, Dropout, Lambda, \
LSTM, Bidirectional, TimeDistributed, MaxPooling2D, BatchNormalization, AveragePooling2D, Flatten
from keras.models import Model, Sequential
from sklearn.model_selection import train_test_split
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import utils
import base_model
# Convolutional blocks
def add_conv_layer(model, filters, kernel_size,
                   conv_num=1, use_bias=False,
                   activation='relu', pooling=None, batch_norm=False,
                   input_shape=False, padding='valid', dropout=0.0, stride=1):
    """Append *conv_num* Conv2D layers to *model*, then (optionally) one
    BatchNormalization, activation, pooling and dropout layer.

    `input_shape`, when given, is attached to the first convolution only
    (required for the first layer of a Sequential model). `pooling` is
    'max_pool', 'avg_pool' or None; dropout is applied only for
    0 < dropout < 1. Returns the same (mutated) model.
    """
    conv_kwargs = dict(filters=filters,
                       kernel_size=(kernel_size, kernel_size),
                       padding=padding,
                       strides=[stride, stride],
                       use_bias=use_bias)
    for layer_idx in range(conv_num):
        if input_shape and layer_idx == 0:
            model.add(Conv2D(input_shape=input_shape, **conv_kwargs))
        else:
            model.add(Conv2D(**conv_kwargs))
    if batch_norm:
        model.add(BatchNormalization())
    if activation and activation != '':
        model.add(Activation(activation))
    if pooling == 'max_pool':
        model.add(MaxPooling2D())
    elif pooling == 'avg_pool':
        model.add(AveragePooling2D())
    else:
        print('no pooling')
    if 0.0 < dropout < 1.0:
        model.add(Dropout(dropout))
    return model
def CNN_block(input_shape, print_fn=print):
    """Build the per-frame convolutional feature extractor.

    Three stacks of two 4x4 convolutions (the middle stack downsamples with
    stride 2), each followed by batch norm + ReLU, then a flatten and a
    32-unit dense layer with batch norm + ReLU. The model summary is
    written through *print_fn*.
    """
    bias = False
    use_bn = True
    pool_mode = None        # no pooling between stacks
    pad_mode = 'valid'
    drop_rate = -1          # outside (0, 1): add_conv_layer skips dropout

    net = Sequential()
    net = add_conv_layer(net, filters=16, conv_num=2, kernel_size=4, stride=1,
                         use_bias=bias, padding=pad_mode, pooling=pool_mode,
                         batch_norm=use_bn, input_shape=input_shape,
                         dropout=drop_rate)
    net = add_conv_layer(net, filters=16, conv_num=2, kernel_size=4, stride=2,
                         use_bias=bias, padding=pad_mode, pooling=pool_mode,
                         batch_norm=use_bn, dropout=drop_rate)
    net = add_conv_layer(net, filters=16, conv_num=2, kernel_size=4, stride=1,
                         use_bias=bias, padding=pad_mode, pooling=pool_mode,
                         batch_norm=use_bn, dropout=drop_rate)

    net.add(Flatten())
    net.add(Dense(32,))
    if use_bn:
        net.add(BatchNormalization())
    net.add(Activation(activation='relu'))

    print_fn('----------- CNN model ------------------')
    net.summary(print_fn=print_fn)
    print_fn('----------- <CNN model> -----------------')
    return net
def CNN_RNN_Sequential_model(print_f=print,
                             sequence_length=15,
                             input_dim=64,
                             label_set=None
                             ):
    """Build the CNN+BiLSTM eye-gesture classifier.

    Each frame of the (sequence_length, input_dim, input_dim, 3) input is
    scaled from [0, 255] to [-1, 1], passed through the shared CNN_block via
    TimeDistributed, and the frame features are aggregated by a
    bidirectional LSTM before a softmax over the gesture labels.

    :param print_f: callable used for the model summary output.
    :param sequence_length: number of frames per sample.
    :param input_dim: height/width of each (square) frame.
    :param label_set: output class names; defaults to the six gestures.
    :return: an uncompiled keras Model.
    """
    if label_set is None:
        label_set = ['left', 'right', 'up', 'down', 'center', 'double_blink']
    inputs = Input(shape=(sequence_length, input_dim, input_dim, 3))
    # Normalize uint8 pixel values into [-1, 1].
    preprocess = Lambda(lambda x: (x - 127.5) / 127.5)(inputs)
    cnn_input_shape = (input_dim, input_dim, 3)
    # One shared CNN applied to every frame of the sequence.
    timedistributed = TimeDistributed(CNN_block(cnn_input_shape, print_fn=print_f))(preprocess)
    feed_input = Bidirectional(LSTM(units=32))(timedistributed)
    out_layer = Dense(len(label_set), activation='softmax')(feed_input)
    model = Model(inputs=inputs, outputs=out_layer)
    # BUG FIX: removed a stray no-argument `model.predict()` call here —
    # Keras' predict() requires input data, so building the model raised.

    print_f('----------- CNN sequential model -----------------')
    model.summary(print_fn=print_f)
    print_f('----------- <CNN sequential model> -----------------')
    return model
class CNN_RNN_Sequential(base_model.ClassiferKerasModel):
    """Trainable wrapper around CNN_RNN_Sequential_model.

    All constructor arguments are forwarded verbatim to
    base_model.ClassiferKerasModel; compile() materialises the keras model.
    """

    def __init__(self,
                 config_file,
                 job_dir,
                 checkpoint_path,
                 print_f=print,
                 sequence_length=15,
                 input_dim=64,
                 label_set=None,
                 batch_norm=False
                 ):
        # NOTE(review): base class signature is not visible here — assumed to
        # accept these arguments positionally in this order; confirm against
        # base_model.ClassiferKerasModel.
        super().__init__(config_file, job_dir, checkpoint_path, print_f, sequence_length, input_dim, label_set, batch_norm)

    def compile(self, **kwargs):
        """Build the keras model and compile it with Adam + cross-entropy."""
        self.load_config()
        # Learning phase 1 = training mode (affects BatchNorm/Dropout).
        K.set_learning_phase(1)
        self.model = CNN_RNN_Sequential_model(self.print_f, self.sequence_length, self.input_dim, self.label_set)
        self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy', 'mae'], )

    # NOTE: the original author left draft data-loading/training helpers
    # (process_feed_dict, process_data, fit) commented out here; they were
    # removed as dead code — recover them from version control if needed.
| StarcoderdataPython |
8065334 | # python game with pygame : Jumping stickman
import pygame
import sys
# step1 : set screen, fps
# step2 : show stickman, jump stickman
# step3 : show tree, move tree
# Initialise pygame and the window title before any display call.
pygame.init()
pygame.display.set_caption('Jumping stickman')
# Window size in pixels (width x height).
MAX_WIDTH = 800
MAX_HEIGHT =1000
def main():
    """Run the game loop: a stickman that jumps over a scrolling tree.

    Any key press starts a jump; closing the window exits the process.
    Runs at 30 frames per second until quit.
    """
    # set screen, fps
    screen = pygame.display.set_mode((MAX_WIDTH, MAX_HEIGHT))
    fps = pygame.time.Clock()
    # stickman sprites (two leg poses alternated to fake a run animation)
    imgStick1 = pygame.image.load('images/stickman1.png')
    imgStick2 = pygame.image.load('images/stickman2.png')
    stick_height = imgStick1.get_size()[1]
    stick_bottom = MAX_HEIGHT - stick_height  # y position when standing
    stick_x = 50
    stick_y = stick_bottom
    jump_top = 500       # y coordinate where the ascent turns into descent
    leg_swap = True      # which leg sprite to draw this frame
    is_bottom = True     # standing on the ground (may start a new jump)
    is_go_up = False     # currently in the ascending half of a jump
    # tree obstacle scrolling right-to-left
    imgTree = pygame.image.load('images/tree.png')
    tree_height = imgTree.get_size()[1]
    tree_x = MAX_WIDTH
    tree_y = MAX_HEIGHT - tree_height
    while True:
        screen.fill((255, 255, 255))
        # event check: quit on window close, jump on any key (only if grounded)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == pygame.KEYDOWN:
                if is_bottom:
                    is_go_up = True
                    is_bottom = False
        # stickman move: 10 px/frame up while ascending, down while falling
        if is_go_up:
            stick_y -= 10.0
        elif not is_go_up and not is_bottom:
            stick_y += 10.0
        # stickman top and bottom check (flip to falling / land on ground)
        if is_go_up and stick_y <= jump_top:
            is_go_up = False
        if not is_bottom and stick_y >= stick_bottom:
            is_bottom = True
            stick_y = stick_bottom
        # tree move: scroll left and wrap around to the right edge
        tree_x -= 12.0
        if tree_x <= 0:
            tree_x = MAX_WIDTH
        # draw tree
        screen.blit(imgTree, (tree_x, tree_y))
        # draw stickman, alternating leg sprites every frame
        if leg_swap:
            screen.blit(imgStick1, (stick_x, stick_y))
            leg_swap = False
        else:
            screen.blit(imgStick2, (stick_x, stick_y))
            leg_swap = True
        # update the display and cap the frame rate at 30 fps
        pygame.display.update()
        fps.tick(30)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4865174 | # MIT License
# Copyright (c) Facebook, Inc. and its affiliates.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import models
import argparse
import torch
import numpy as np
import time
from inception_score_pytorch.inception_score import inception_score as compute_IS
# CLI: the single positional argument is the path to a training checkpoint.
parser = argparse.ArgumentParser()
parser.add_argument('input')
args = parser.parse_args()
INPUT_PATH = args.input
# Evaluation constants (CIFAR-sized 32x32 RGB samples).
CUDA = True
BATCH_SIZE = 1000
N_CHANNEL = 3
RESOLUTION = 32
NUM_SAMPLES = 50000
DEVICE = 'cpu'
# Load the checkpoint on CPU first; the generator is moved to GPU later.
checkpoint = torch.load(INPUT_PATH, map_location=DEVICE)
# The training-time args are stored inside the checkpoint; rebind `args`
# so the generator is rebuilt with the same architecture hyperparameters.
args = argparse.Namespace(**checkpoint['args'])
MODEL = args.model
N_LATENT = args.num_latent
N_FILTERS_G = args.num_filters_gen
BATCH_NORM_G = True
def get_inception_score():
    """Generate NUM_SAMPLES images from random latents (in BATCH_SIZE
    chunks) with the global `gen` and return their inception score."""
    latents = torch.randn(NUM_SAMPLES, N_LATENT)
    generated = []
    offset = 0
    while offset < NUM_SAMPLES:
        batch = latents[offset:offset + BATCH_SIZE]
        if CUDA:
            batch = batch.cuda(0)
        generated.append(gen(batch).cpu().data.numpy())
        offset += BATCH_SIZE
    stacked = np.concatenate(generated, axis=0)
    return compute_IS(torch.from_numpy(stacked), resize=True, cuda=True)
print "Init..."
if MODEL == "resnet":
gen = models.ResNet32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, BATCH_NORM_G)
elif MODEL == "dcgan":
gen = models.DCGAN32Generator(N_LATENT, N_CHANNEL, N_FILTERS_G, batchnorm=BATCH_NORM_G)
t_0 = time.time()
t = t_0
print "Eval..."
gen.load_state_dict(checkpoint['state_gen'])
if CUDA:
gen.cuda(0)
inception_score = get_inception_score()[0]
s = time.time()
print "Time: %.2f; done: IS" % (s - t)
t = s
for j, param in enumerate(gen.parameters()):
param.data = checkpoint['gen_param_avg'][j]
if CUDA:
gen = gen.cuda(0)
inception_score_avg = get_inception_score()[0]
s = time.time()
print "Time: %.2f; done: IS Avg" % (s - t)
t = s
for j, param in enumerate(gen.parameters()):
param.data = checkpoint['gen_param_ema'][j]
if CUDA:
gen = gen.cuda(0)
inception_score_ema = get_inception_score()[0]
s = time.time()
print "Time: %.2f; done: IS EMA" % (s - t)
print 'IS: %.2f, IS Avg: %.2f, IS EMA: %.2f' % (inception_score, inception_score_avg, inception_score_ema)
print "Total Time: %.2f" % (s - t_0)
| StarcoderdataPython |
9716532 | import abc
class BaseRestriction(abc.ABC):
    """
    Base class for all restrictions.

    A restriction is identified by its concrete class and its penalty
    weight; two restrictions compare (and hash) equal iff both match.
    """

    def __init__(self, penalty=0):
        self.penalty = penalty

    def __repr__(self):
        cls = self.__class__.__name__
        return f"{cls}(penalty={self.penalty})"

    def __eq__(self, other):
        # BUG FIX: the old implementation read other.penalty unconditionally,
        # so comparing a restriction with any non-restriction object raised
        # AttributeError instead of returning False.
        if not isinstance(other, BaseRestriction):
            return NotImplemented
        names_match = self.__class__.__name__ == other.__class__.__name__
        penalties_match = self.penalty == other.penalty
        return names_match and penalties_match

    def __hash__(self):
        return hash(self._key())

    def _key(self):
        # (class name, penalty) uniquely identifies a restriction.
        cls = self.__class__.__name__
        val = self.penalty
        return (cls, val)

    def change_penalty(self, new_penalty):
        """Replace this restriction's penalty weight."""
        self.penalty = new_penalty
class WardRestriction(BaseRestriction):
    """
    Base class for ward level restrictions.

    A ward restriction is scored bed-by-bed: the ward's total penalty is the
    sum of `_evaluate_bed` over every bed in the ward.

    Wards can be initialised with the desired restrictions and penalties:

    `ward = Ward(
        name=name,
        rooms=rooms,
        restrictions=[NameOfRestriction(penalty)]
    )`

    Ward restrictions can also be appended or modified later:

    `ward.restrictions.append(NameOfRestriction(penalty))`
    `ward.restrictions[0].change_penalty(new_panalty)`
    """

    def evaluate(self, ward):
        """Sum the per-bed penalties across every bed in *ward*."""
        total = 0
        for bed in ward.beds:
            total += self._evaluate_bed(bed)
        return total

    @abc.abstractmethod
    def _evaluate_bed(self, bed):
        """Return the penalty contributed by a single bed."""
class RoomRestriction(BaseRestriction):
    """
    Base class for room level restrictions. These restrictions are evaluated
    based on the state of the entire room/bed bay. For example,
    adding a female patient to an all male room will incur the penalty once,
    rather than multiple times for each occupied bed.

    Rooms can be initialised with the desired restrictions and penalties:

    `room = Room(
        name=name,
        beds=beds,
        restrictions=[NameOfRestriction(penalty)]
    )`

    Room restrictions can also be appended or modified later:

    `room.restrictions.append(NameOfRestriction(penalty))`
    `room.restrictions[0].change_penalty(new_panalty)`
    """

    def evaluate(self, room):
        """
        Evaluate the room level restriction.
        """
        return self._evaluate_room(room)

    # FIX: was @abc.abstractclassmethod, which is deprecated and wrongly
    # declared the hook as a classmethod although evaluate() calls it as an
    # instance method; sibling WardRestriction already uses @abstractmethod.
    @abc.abstractmethod
    def _evaluate_room(self, room):
        """
        Determine the penalty for the room in question.
        """
class PatientRestriction(BaseRestriction):
    """
    Base class for patient level restrictions. These restrictions evaluated
    based on the bed/room/ward allocated to a patient. For example, if the
    patient needed to be in a sideroom but was assigned to a shared
    bedbay a penalty will incur.

    Patients are automatically initalised with relevent restrictions and
    their associated penalties based on key attributes. The penalties can be
    modified:

    `patient.restrictions[0].change_penalty(new_penalty)`
    """

    def evaluate(self, patient):
        """
        Evaluate the patient level restriction.
        """
        return self._evaluate_patient(patient)

    # FIX: was @abc.abstractclassmethod (deprecated, and the hook is called
    # as an instance method); aligned with WardRestriction's @abstractmethod.
    @abc.abstractmethod
    def _evaluate_patient(self, patient):
        """
        Determine the penalty for the patient in question.
        """
| StarcoderdataPython |
9766558 | import datetime
from collections import deque
from typing import List, Deque
import redis
from redisolar.dao.base import MetricDaoBase
from redisolar.dao.redis.base import RedisDaoBase
from redisolar.models import Measurement
from redisolar.models import MeterReading
from redisolar.models import MetricUnit
from redisolar.schema import MeasurementSchema
#l = ["a", "b"]
#print(f"list : {l}")
#site_id, value, unit, time = 1, 14, 'whU', datetime.datetime(2020, 10, 20, 18, 11, 12)
#m=Measurement(site_id, value, unit, time)
#print(f"{m}")
# Scratch demonstrations of comprehension syntax (list/set/dict) and zip().
# FIX: removed the dataset artifact "| StarcoderdataPython |" that was fused
# onto the final print line, which made this file a SyntaxError.

# List comprehension with a filter: letters of "human" that sort >= 'h'.
print([letter for letter in "human" if letter >= 'h'])

# Dict built up incrementally, then a set comprehension over its items.
fruits = dict()
fruits['apple'] = "green"
fruits.update({'orange': 'yellow', 'kiwi': 'green', 'guava': 'green', 'grape': 'red'})
print({fruit_type for (fruit_type, color) in fruits.items() if color == 'green'})

# Set comprehension: the duplicate 'chris' collapses inside the set.
students = set(('justin', 'chris', 'issac', 'kai', 'chris'))
print(students)
print({student for student in students if 'c' in student})

# zip() yields an iterator of pairs; dict() consumes it.
a = ("John", "Charles", "Mike")
b = ("Jenny", "Christy", "Monica")
x = zip(a, b)
print(f"{dict(x)} is of type {type(x)}")

t = set(["asb", "bad"])
print(f"{type(t)} -> {t}")
3573753 | <gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
from keras.optimizers import SGD
#model, X, Y_oh, x_ph=x_ph, y_ph=y_ph, sess=sess,
# model_path='./saved_model/lenet_dogscats',nb_epochs=NB_EPOCHS,
# nb_batches=NB_BATCHES, nb_rows=NB_ROWS, nb_cols=NB_COLS,
# nb_channels=NB_CHANNELS, nb_classes=NB_CLASSES)
def run(model, X, Y, optimizer=None, nb_epochs=30, nb_batches=128):
    """Compile *model*, train on the 'train' split with validation on
    'valid', then evaluate on 'test'.

    :param model: keras model to compile and fit (modified in place).
    :param X: dict of input arrays with keys 'train', 'valid', 'test'.
    :param Y: dict of one-hot label arrays with the same keys.
    :param optimizer: keras optimizer; defaults to SGD with Nesterov momentum.
    :param nb_epochs: number of training epochs.
    :param nb_batches: batch size for fit/evaluate.
    :return: (history, score) — the fit History and the test metrics.
    """
    # FIX: identity comparison with `is None` instead of `== None` (PEP 8
    # E711; `==` can be hijacked by operator overloading).
    if optimizer is None:
        optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    # compile the model
    print('Model compile...')
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    # run the training
    print('Model fit...')
    print(X['train'].shape, Y['train'].shape)
    history = model.fit(X['train'],
                        Y['train'],
                        epochs=nb_epochs,
                        batch_size=nb_batches,
                        validation_data=(X['valid'], Y['valid']))
    # Evaluate the model on test data
    score = model.evaluate(X['test'], Y['test'], batch_size=nb_batches)
    print('Score : ', score)
    return history, score
| StarcoderdataPython |
3376905 | from os.path import join
from torchvision import transforms
from datasets import DatasetFromFolder as data_loader
from datasets_real import DatasetFromFolder as data_loader_real
from torch.utils.data import DataLoader
def transform():
    """Return the per-sample input pipeline: PIL image -> float tensor.

    Only ToTensor is applied; augmentation, resizing and normalisation
    steps were deliberately left out by the original author.
    """
    steps = [transforms.ToTensor()]
    return transforms.Compose(steps)
def get_training_set(data_dir, data_augmentation):
    """Build the folder-backed training dataset rooted at *data_dir*."""
    return data_loader(data_dir, data_augmentation, transform=transform())
def get_training_set_real(data_dir, data_augmentation):
    """Build the real-image training dataset rooted at *data_dir*."""
    return data_loader_real(data_dir, data_augmentation, transform=transform())
| StarcoderdataPython |
120087 | <reponame>oil-rope/oil-and-rope
from ..viewsets.registration import ProfileViewSet, UserViewSet
from .routers import OilAndRopeDefaultRouter
# Register the registration-related viewsets on the project's default DRF
# router; `urls` is consumed by the urlpatterns include.
router = OilAndRopeDefaultRouter()
router.register(prefix=r'user', viewset=UserViewSet, basename='user')
router.register(prefix=r'profile', viewset=ProfileViewSet, basename='profile')
urls = router.urls
| StarcoderdataPython |
8147391 | import logging
import traceback
import sys
from typing import List
from aiotinydb import AIOTinyDB, AIOJSONStorage
from aiotinydb.middleware import CachingMiddleware
from discord.ext import commands
from utils.singleton import singleton
@singleton
class Bot:
    """Process-wide wrapper around a discord.py commands.Bot.

    Holds the channel ids used for logging, welcomes and reports, plus the
    path to the TinyDB file backing the censored-words store. Positional and
    extra keyword arguments are forwarded to commands.Bot.
    """
    logger = logging.getLogger("bot")

    def __init__(
            self, *args,
            # NOTE(review): annotations say int/List[int]/str but every
            # default is None — these are effectively Optional; confirm and
            # switch to Optional[...] once typing.Optional is imported.
            allowed_server_ids: List[int] = None, log_channel_id: int = None, welcome_channel_id: int = None,
            reports_user_channel_id: int = None, reports_admin_channel_id: int = None,
            db_file: str = None,
            **kwargs
    ):
        self.bot = commands.Bot(*args, **kwargs)
        self.allowed_server_ids = allowed_server_ids
        self.log_channel_id = log_channel_id
        self.welcome_channel_id = welcome_channel_id
        self.reports_user_channel_id = reports_user_channel_id
        self.reports_admin_channel_id = reports_admin_channel_id
        self.db_file = db_file

    def censored_words_db(self) -> AIOTinyDB:
        """Open the async TinyDB (with write caching) backing censored words."""
        return AIOTinyDB(self.db_file, storage=CachingMiddleware(AIOJSONStorage))

    async def log(self, message: str) -> None:
        """Send *message* to the configured log channel."""
        await (self.bot.get_channel(self.log_channel_id)).send(message)

    @property
    def reports_enabled(self) -> bool:
        # Reports need both the user-facing and the admin channel configured.
        return bool(self.reports_admin_channel_id) and bool(self.reports_user_channel_id)
| StarcoderdataPython |
9639099 | # -*- coding: utf-8 -*-
"""Templates for common tasks in ML and statistics."""
import numpy as np
from functools import partial
from .core import plot, bar, hist
import math
prob_hist = partial(
hist,
ylab="Observation Count (Valid)",
xlab="Probability Bucket",
bins=np.arange(0, 1.01, 0.05),
)
prob_hist.__doc__ = """Histogram for charting probabilities."""
pr_curve = partial(
plot,
labels=["Recall", "Precision"],
xlab="Threshold Cutoff for Positive Class",
ylab="Precision or Recall",
title="Choosing a Threshold",
markers=["g-", "g--", "b-", "b--", "r-", "r--"],
pct_ticks=(False, True),
grid=True,
)
pr_curve.__doc__ = """Dashed line chart for charting precision and recall curve."""
acc_vs_cov = partial(
plot,
xlab="Document Coverage",
ylab="Document Accuracy",
grid=True,
markers=["k--", "ko-", "ks-"],
xticks=np.arange(0, 1.05, 0.1),
markersize=8,
title="Accuracy vs. Document Coverage",
pct_ticks=(True, True),
)
pr_curve.__doc__ = """Dashed line chart for accuracy and coverage."""
expected = [0.55, 0.65, 0.75, 0.85, 0.95]
grp_labels = [f"{10*math.floor(10*i)}-{10+10*math.floor(10*i)}%" for i in expected]
calib = partial(
bar,
x=grp_labels,
xlab="Probability Bucket",
ylab="Accuracy",
ylim=[0.4, 1],
grid=True,
alpha=0.8,
pct_ticks=True,
)
pr_curve.__doc__ = """For assessing model calibration."""
| StarcoderdataPython |
328051 | import FWCore.ParameterSet.Config as cms
process = cms.Process("CONVERT")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.load("Configuration.Geometry.GeometryIdeal_cff")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.DTGeometryAlInputDB = cms.ESProducer("DTGeometryESModule",
appendToDataLabel = cms.string('idealForInputDB'),
applyAlignment = cms.bool(False),
alignmentsLabel = cms.string(''),
fromDDD = cms.bool(True)
)
process.CSCGeometryAlInputDB = cms.ESProducer("CSCGeometryESModule",
appendToDataLabel = cms.string('idealForInputDB'),
debugV = cms.untracked.bool(False),
useGangedStripsInME1a = cms.bool(False),
alignmentsLabel = cms.string(''),
useOnlyWiresInME1a = cms.bool(False),
useRealWireGeometry = cms.bool(True),
useCentreTIOffsets = cms.bool(False),
applyAlignment = cms.bool(False),
fromDDD = cms.bool(True),
fromDD4hep = cms.bool(False)
)
process.DTGeometryAlOutputXML = cms.ESProducer("DTGeometryESModule",
appendToDataLabel = cms.string('idealForOutputXML'),
applyAlignment = cms.bool(False),
alignmentsLabel = cms.string(''),
fromDDD = cms.bool(True)
)
process.CSCGeometryAlOutputXML = cms.ESProducer("CSCGeometryESModule",
appendToDataLabel = cms.string('idealForOutputXML'),
debugV = cms.untracked.bool(False),
useGangedStripsInME1a = cms.bool(False),
alignmentsLabel = cms.string(''),
useOnlyWiresInME1a = cms.bool(False),
useRealWireGeometry = cms.bool(True),
useCentreTIOffsets = cms.bool(False),
applyAlignment = cms.bool(False),
fromDDD = cms.bool(True),
fromDD4hep = cms.bool(False)
)
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
process.CondDBSetup,
connect = cms.string("sqlite_file:NEW.db"),
toGet = cms.VPSet(
cms.PSet(record = cms.string("DTAlignmentRcd"), tag = cms.string("DTAlignmentRcd")),
cms.PSet(record = cms.string("DTAlignmentErrorExtendedRcd"), tag = cms.string("DTAlignmentErrorExtendedRcd")),
cms.PSet(record = cms.string("CSCAlignmentRcd"), tag = cms.string("CSCAlignmentRcd")),
cms.PSet(record = cms.string("CSCAlignmentErrorExtendedRcd"), tag = cms.string("CSCAlignmentErrorExtendedRcd"))))
process.inertGlobalPositionRcd = cms.ESSource("PoolDBESSource",
process.CondDBSetup,
connect = cms.string("sqlite_file:inertGlobalPositionRcd.db"),
toGet = cms.VPSet(cms.PSet(record = cms.string("GlobalPositionRcd"), tag = cms.string("inertGlobalPositionRcd"))))
process.MuonGeometryDBConverter = cms.EDAnalyzer("MuonGeometryDBConverter",
input = cms.string("db"),
dtLabel = cms.string(""),
cscLabel = cms.string(""),
shiftErr = cms.double(1000.),
angleErr = cms.double(6.28),
getAPEs = cms.bool(True),
output = cms.string("xml"),
outputXML = cms.PSet(
fileName = cms.string("REPLACEME.xml"),
relativeto = cms.string("ideal"),
survey = cms.bool(False),
rawIds = cms.bool(False),
eulerAngles = cms.bool(False),
precision = cms.int32(10),
suppressDTBarrel = cms.untracked.bool(True),
suppressDTWheels = cms.untracked.bool(True),
suppressDTStations = cms.untracked.bool(True),
suppressDTChambers = cms.untracked.bool(False),
suppressDTSuperLayers = cms.untracked.bool(False),
suppressDTLayers = cms.untracked.bool(False),
suppressCSCEndcaps = cms.untracked.bool(True),
suppressCSCStations = cms.untracked.bool(True),
suppressCSCRings = cms.untracked.bool(True),
suppressCSCChambers = cms.untracked.bool(False),
suppressCSCLayers = cms.untracked.bool(False)))
process.Path = cms.Path(process.MuonGeometryDBConverter)
| StarcoderdataPython |
3314201 | import re
from six import text_type
"""Translate strings to and from SOAP 1.2 XML name encoding
Implements rules for mapping application defined name to XML names
specified by the w3 SOAP working group for SOAP version 1.2 in
Appendix A of "SOAP Version 1.2 Part 2: Adjuncts", W3C Working Draft
17, December 2001, <http://www.w3.org/TR/soap12-part2/#namemap>
Also see <http://www.w3.org/2000/xp/Group/xmlp-issues>.
Author: <NAME> <<EMAIL>>
Date:: 2002-04-25
Version 0.9.0
"""
ident = "$Id$"
def _NCNameChar(x):
return x.isalpha() or x.isdigit() or x == "." or x == '-' or x == "_"
def _NCNameStartChar(x):
return x.isalpha() or x == "_"
def _toUnicodeHex(x):
hexval = hex(ord(x[0]))[2:]
hexlen = len(hexval)
# Make hexval have either 4 or 8 digits by prepending 0's
if (hexlen == 1):
hexval = "000" + hexval
elif (hexlen == 2):
hexval = "00" + hexval
elif (hexlen == 3):
hexval = "0" + hexval
elif (hexlen == 4):
hexval = "" + hexval
elif (hexlen == 5):
hexval = "000" + hexval
elif (hexlen == 6):
hexval = "00" + hexval
elif (hexlen == 7):
hexval = "0" + hexval
elif (hexlen == 8):
hexval = "" + hexval
else:
raise Exception("Illegal Value returned from hex(ord(x))")
return "_x" + hexval + "_"
def _fromUnicodeHex(x):
return eval(r'u"\u' + x[2:-1] + '"')
def toXMLname(string):
    """Convert string to a XML name.

    An optional namespace prefix (everything before the first ':') is kept
    verbatim; only the local part is escaped per the SOAP 1.2 name mapping.
    """
    prefix = None
    localname = string
    if string.find(':') != -1:
        prefix, localname = string.split(':', 1)

    text = text_type(localname)
    length = len(localname)
    out = []
    for pos, ch in enumerate(text):
        if pos < length - 1 and ch == u'_' and text[pos + 1] == u'x':
            # Escape the escape introducer "_x" itself.
            out.append(u'_x005F_')
        elif (pos == 0 and length >= 3
              and text[0] in u'xX' and text[1] in u'mM' and text[2] in u'lL'):
            # XML names may not begin with any casing of "xml".
            out.append(u'_xFFFF_' + ch)
        elif (not _NCNameChar(ch)) or (pos == 0 and not _NCNameStartChar(ch)):
            out.append(_toUnicodeHex(ch))
        else:
            out.append(ch)

    mangled = u''.join(out)
    if prefix:
        return "%s:%s" % (prefix, mangled)
    return mangled
def fromXMLname(string):
    """Convert XML name to unicode string."""
    # Drop the "leading xml" guard markers, then expand each 4-digit escape.
    stripped = re.sub(r'_xFFFF_', '', string)
    return re.sub(
        r'_x[0-9A-Fa-f]{4}_',
        lambda matchobj: _fromUnicodeHex(matchobj.group(0)),
        stripped,
    )
| StarcoderdataPython |
6566424 | #!/usr/bin/env python3
import json
from mock import patch
from unittest import TestCase
from pytest import fixture
from distribution.domain.building import Building
from distribution.rest import rest
from distribution.service import locationservice
from distribution.foundation.exceptions import DomainIdError
@fixture
def domain_buildings():
    """Three Building domain objects (each wrapping a hive), sorted by id.

    xcoords are 16/32/2 and ycoords 48/11/21, so min/max and sort tests
    below have unambiguous expected values.
    """
    return [
        Building({"id": 1, "address": "Karlsplatz",
                  "xcoord": 16, "ycoord": 48,
                  "hive": {"id": 11, "name": "Karlsplatz",
                           "demand": -1, "free": 1}}),
        Building({"id": 2, "address": "Westbahnhof",
                  "xcoord": 32, "ycoord": 11,
                  "hive": {"id": 12, "name": "Westbahnhof",
                           "demand": -1, "free": 3}}),
        Building({"id": 3, "address": "Stephansplatz",
                  "xcoord": 2, "ycoord": 21,
                  "hive": {"id": 13, "name": "Stephansplatz",
                           "demand": -1, "free": 8}})]
@fixture
def json_reachable():
    """Raw reachability edges (hive id -> hive id, distance in meters).

    Two edges end at hive 13 (3000 and 2500), which the average-distance
    test relies on.
    """
    return [{"id": 1, "start": {"id": 11},
             "end": {"id": 13}, "distance": 3000},
            {"id": 2, "start": {"id": 12},
             "end": {"id": 13}, "distance": 2500},
            {"id": 3, "start": {"id": 12},
             "end": {"id": 11}, "distance": 2500}]
# NOTE(review): these tests call the fixture functions directly (e.g.
# json_reachable()); pytest 4+ raises an error for direct fixture calls —
# confirm the pinned pytest version or convert the fixtures to plain helpers.
@patch('distribution.rest.rest.get_reachable_buildings')
def test_get_average_distance_to(mock_reachable):
    """Average distance to hive 13 over its two edges: (3000 + 2500) / 2."""
    reachable_buildings = json.dumps(json_reachable())
    mock_reachable.return_value = json.loads(reachable_buildings)
    reachable_buildings = locationservice.get_average_distance_to(13)
    expected_hives = 5500 / 2
    assert reachable_buildings == expected_hives


@patch('distribution.rest.rest.get_reachable_buildings')
def test_get_distance_between(mock_reachable):
    """Distance of the single edge 12 -> 13."""
    reachable_buildings = json.dumps(json_reachable())
    mock_reachable.return_value = json.loads(reachable_buildings)
    distance = locationservice.get_distance_between(12, 13)
    expected_distance = 2500
    assert distance == expected_distance


@patch('distribution.rest.rest.get_reachable_buildings')
def test_get_distance_between_return_error_code(mock_reachable):
    """Unknown hive id 14 must raise DomainIdError.

    NOTE(review): uses the unbound TestCase.assertRaises form instead of
    pytest.raises — works, but is unidiomatic for a pytest suite.
    """
    reachable_buildings = json.dumps(json_reachable())
    mock_reachable.return_value = json.loads(reachable_buildings)
    with TestCase.assertRaises(TestCase, DomainIdError) as die:
        locationservice.get_distance_between(12, 14)
# Coordinate helpers are exercised against the domain_buildings() fixture
# (xcoords 16/32/2, ycoords 48/11/21). See NOTE above about calling
# fixture functions directly.
def test_get_x_values_descending_false():
    # Default order is ascending.
    x = locationservice.get_x_values(domain_buildings())
    expected_x = [2, 16, 32]
    assert x == expected_x


def test_get_x_descending_true():
    x = locationservice.get_x_values(domain_buildings(), True)
    expected_x = [32, 16, 2]
    assert x == expected_x


def test_get_y_descending_false():
    y = locationservice.get_y_values(domain_buildings())
    expected_y = [11, 21, 48]
    assert y == expected_y


def test_get_y_descending_true():
    y = locationservice.get_y_values(domain_buildings(), True)
    expected_y = [48, 21, 11]
    assert y == expected_y


def test_get_hives_by_x():
    # Only building index 2 (Stephansplatz) has xcoord == 2.
    hives = locationservice.get_buildings_by_x(2, domain_buildings())
    expected_hives = [domain_buildings()[2]]
    assert hives == expected_hives


def test_get_hives_by_y():
    # Only building index 1 (Westbahnhof) has ycoord == 11.
    hives = locationservice.get_buildings_by_y(11, domain_buildings())
    expected_hives = [domain_buildings()[1]]
    assert hives == expected_hives


def test_get_upper_x():
    x = locationservice.get_upper_x(domain_buildings())
    expected_x = 32
    assert x == expected_x


def test_get_upper_y():
    y = locationservice.get_upper_y(domain_buildings())
    expected_y = 48
    assert y == expected_y


def test_get_lower_x():
    x = locationservice.get_lower_x(domain_buildings())
    expected_x = 2
    assert x == expected_x


def test_get_lower_y():
    y = locationservice.get_lower_y(domain_buildings())
    expected_y = 11
    assert y == expected_y
5056339 | # -*-coding:utf-8-*-
'''
使用post方法对有道进行在线翻译
通过抓取有道词典的在线翻译 http://fanyi.youdao.com/
抓取有道词典的请求头信息,来进行POST提交,进行在线翻译
有道的请求头
POST http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule HTTP/1.1
Host: fanyi.youdao.com
Proxy-Connection: keep-alive
Content-Length: 209
Accept: application/json, text/javascript, */*; q=0.01
Origin: http://fanyi.youdao.com
X-Requested-With: XMLHttpRequest
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
Referer: http://fanyi.youdao.com/
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.8
Cookie: _ntes_nnid=1137c1e9883cff16999be4aab1e1d797,1498894433080; OUTFOX_SEARCH_USER_ID_NCOO=1914719784.6467974; OUTFOX_SEARCH_USER_ID=1284781035@192.168.127.12; JSESSIONID=aaay4thmnwBSb5l81rp4v; ___rl__test__cookies=1503494985244
i=I+love++Python&from=AUTO&to=AUTO&smartresult=dict&client=fanyideskweb&salt=1503494985247&sign=ceadbb605aceeb14ec60bc6e717686ec&doctype=json&version=2.1&keyfrom=fanyi.web&action=FY_BY_REALTIME&typoResult=true
=====================================
WebForms:
i I love Python
from AUTO
to AUTO
smartresult dict
client fanyideskweb
salt 1503494985247
sign ceadbb605aceeb14ec60bc6e717686ec
doctype json
version 2.1
keyfrom fanyi.web
action FY_BY_REALTIME
typoResult true
'''
import urllib
import urllib2
# Target URL of the POST request (legacy, unauthenticated Youdao endpoint;
# differs from the signed translate_o endpoint described in the header above).
url = "http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=null"
headers = {"User-Agent": "Mozilla...."}

# Text to translate:
# txt="i love python"
txt = "性"

# Form fields captured with Fiddler (WebForms panel).
formdata = {
    "type": "AUTO",
    "i": txt,
    "doctype": "json",
    "xmlVersion": "1.8",
    "keyfrom": "fanyi.web",
    "ue": "UTF-8",
    "action": "FY_BY_ENTER",
    "typoResult": "true"
}

# URL-encode the form, POST it, and print the raw JSON response.
# NOTE(review): Python 2 only (urllib/urllib2); no error handling on the
# network call.
data = urllib.urlencode(formdata)
request = urllib2.Request(url, data = data, headers = headers)
response = urllib2.urlopen(request)
print response.read()
'''
发送POST请求时,需要特别注意headers的一些属性:
Content-Length: 144: 是指发送的表单数据长度为144,也就是字符个数是144个。
X-Requested-With: XMLHttpRequest :表示Ajax异步请求。
Content-Type: application/x-www-form-urlencoded : 表示浏览器提交 Web 表单时使用,表单数据会按照 name1=value1&name2=value2 键值对形式进行编码。
''' | StarcoderdataPython |
98763 | <gh_stars>1-10
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.effects.UsesEffectNode
from pandac.PandaModules import *
class UsesEffectNode(NodePath):
    """NodePath subclass owning a billboarded child node for attaching effects.

    Decompiled source (uncompyle6); structure preserved as-is.
    """
    __module__ = __name__

    def __init__(self, offset=3.0):
        # NOTE(review): NodePath.__init__ is never called here and
        # self.attachNewNode is used immediately — this appears to rely on a
        # subclass (or Panda3D internals) initializing the NodePath base
        # first. Confirm before reusing standalone.
        self.billboardNode = self.attachNewNode('billboardNode')
        # Billboard about the +Z axis with the given camera-facing offset.
        self.billboardNode.node().setEffect(BillboardEffect.make(Vec3(0, 0, 1), 0, 1, offset, NodePath(), Point3(0, 0, 0)))
        self.effectNode = self.billboardNode.attachNewNode('effectNode')

    def getEffectParent(self):
        """Return the node under which effects should be parented."""
        return self.effectNode

    def resetEffectParent(self):
        """Re-attach the billboard subtree directly under this node."""
        self.billboardNode.reparentTo(self)

    def delete(self):
        """Detach the effect nodes and drop the references."""
        self.effectNode.removeNode()
        self.billboardNode.removeNode()
        del self.effectNode
        del self.billboardNode
67543 | <gh_stars>0
# Simple Python 3.x Sample for receiving CAN Frames via IP / UDP to PEAK-System Gateway
# (c) 2022 PEAK-System technik GmbH
# This is a SAMPLE - it is NOT optimzed - it is for training - we are no Python Gurus...
# Author: U.W.
# www.peak-system.com
from ctypes.wintypes import BYTE
import socket
from turtle import end_fill
# change for your need
localIP = "0.0.0.0"      # bind address ("0.0.0.0" = all local interfaces)
localPort = 58204        # UDP port the PEAK gateway sends to

# buffer for payload in IP Frame
bufferSize = 1024

# CAN FD DLC code (0-15) -> payload length in bytes.
DLC_to_LEN = [0,1,2,3,4,5,6,7,8,12,16,20,24,32,48,64]
##########################################################################################
# Simple function to decode first CAN Frame in package - needs to be extended as needed  #
# not included until now is the support of multiple CAN Frames in one package            #
# (check package size in Byte 1/2)                                                       #
##########################################################################################
def Decode_CAN_IP_Data(message):
    """Decode and print the first CAN frame in one PEAK-gateway UDP payload.

    *message* is the raw datagram bytes; output goes to stdout only and
    nothing is returned. Byte offsets below follow the PEAK IP frame layout
    (type at byte 3, flags at 22-23, CAN id at 24-27, DLC at 21, data from
    byte 28).
    """
    # Frame type byte: 0x80/0x81 classic CAN (w/o CRC), 0x90/0x91 CAN FD.
    if message[3] == 0x80:
        print("CAN 2.0a/b Frame ", end='')
    elif message[3] == 0x81:
        print("CAN 2.0a/b Frame with CRC ", end='')
    elif message[3] == 0x90:
        print("CAN FD Frame ", end='')
    elif message[3] == 0x91:
        print("CAN FD Frame with CRC ", end='')
    else:
        # Not a CAN frame: report and stop decoding.
        print("no CAN Frame")
        return

    # CAN message flags (byte 22-23, big endian unsigned):
    # 0x40 = Error State Indicator, 0x20 = Bit Rate Switch, 0x10 = Extended Data Length.
    CAN_MSG_FLAG = int.from_bytes(message[22:24], byteorder='big', signed=False)
    if CAN_MSG_FLAG & 0x40:
        print("Error State Indicator set ", end='')
    if CAN_MSG_FLAG & 0x20:
        print("Bit Rate Switch aktive ", end='')
    if CAN_MSG_FLAG & 0x10:
        print("Frame use Extended Data Lenght ", end='')

    # CAN-ID field (bytes 24-27) includes the message-type bits.
    CAN_ID_NUM = int.from_bytes(message[24:28], byteorder='big', signed=False)
    # Isolate bits 30/31 (extended id / RTR) and shift them into a byte.
    CAN_MSG_Type = CAN_ID_NUM & 0xC0000000
    CAN_MSG_Type = CAN_MSG_Type >> 24
    if CAN_MSG_Type & 0x80:
        print("Ext 29 Bit")
    else:
        print("Std 11 Bit")
    if CAN_MSG_Type & 0x40:
        print("RTR")
    # Mask out the type bits, leaving the plain CAN identifier.
    CAN_ID_NUM = CAN_ID_NUM & 0x3FFFFFFF
    print("CAN ID: " + "0x" + '{:x}'.format(CAN_ID_NUM) + " ", end='')

    # DLC code (byte 21).
    DLC = message[21]
    print("CAN DLC: " + format(DLC) + " ", end='')

    # For CAN FD the DLC is a code, not a byte count - translate via table.
    LEN = DLC_to_LEN[DLC]
    if LEN > 8:   # only CAN FD frames have DLC codes above 8
        print("CAN Data Byte counter: " + format(LEN), end='')

    # Print all data bytes, max. 8 per row; DB0 lives at byte 28 (27+1).
    i = 1
    print("\nData Bytes: ")
    while (i <= LEN):
        print("DB[" + '{:02d}'.format(i-1) + "]:" + "0x" + '{:02X}'.format(message[27+i]) + " ", end='')
        if (i % 8) == 0:   # limit to max 8 DB in one row
            print("")
        i = i + 1
    print("\n------------------------------------------------------------------------------\n")
##########
# main...
##########

# Create a datagram (UDP) socket and bind it to the configured address/port.
UDPServerSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
UDPServerSocket.bind((localIP, localPort))
print("UDP server up and listening")

# Listen for incoming datagrams forever; each datagram holds CAN frame data
# from the PEAK gateway.
while(True):
    bytesAddressPair = UDPServerSocket.recvfrom(bufferSize)
    message = bytesAddressPair[0]   # raw payload bytes
    address = bytesAddressPair[1]   # (ip, port) of the gateway

    clientMsg = "Data from Gateway:{}".format(message)
    clientIP = "Gateway IP Address:{}".format(address[0])
    clientPort = "Port:{}".format(address[1])
    print(clientIP + " " + clientPort)

    # Decode and print the first CAN frame in the package.
    Decode_CAN_IP_Data(message)

    # ...do what you want / need
    # print(clientMsg)
    # or sending a reply to client
    # UDPServerSocket.sendto(bytesToSend, address)
# This module implements a handler for serial_for_url("no_op://").
from panoptes.utils.serial_handlers import NoOpSerial

# Export it as Serial so that it will be picked up by PySerial's
# serial_for_url (which looks for a module-level name "Serial").
Serial = NoOpSerial
4977598 | <gh_stars>0
from rest_framework.decorators import api_view
from rest_framework.response import Response
from core.authors.models import Author
from django.contrib.auth import authenticate
QUERY = "login"
def handle_request(request):
    """Validate a login request and return (success, message, userId).

    userId is the Author id on success and None otherwise. Raises KeyError
    when the request body lacks username/password/query, which the view
    maps to a 400 response.
    """
    username = request.data["username"]
    password = request.data["password"]

    # Reject requests whose query field does not match this endpoint.
    if request.data["query"] != QUERY:
        return (False, "The query value was not correct", None)

    user = authenticate(username=username, password=password)
    if not user:
        return (False, "The provided credentials were incorrect", None)

    # Only administrator-approved authors may sign in.
    author = Author.objects.get(user=user)
    if not author.approved:
        return (False, "This account has not been approved by an administrator yet", None)

    return (True, "You have been signed in successfully", author.id)
@api_view(['POST'])
def login(request):
    """POST /login: authenticate a user and report success plus the author id.

    Returns 200 with the author id on success, 400 otherwise. Any exception
    from handle_request (e.g. a missing body field) is reported as a
    missing-parameters error.
    """
    try:
        success, message, userId = handle_request(request)
    except:
        # NOTE(review): bare except also converts unexpected server-side
        # errors into this client-error response — consider narrowing to
        # KeyError.
        return Response({
            "query": QUERY,
            "success": False,
            "message": "The body did not contain all of the required parameters"
        }, status=400)

    response = Response({
        "query": QUERY,
        "success": success,
        "message": message,
        "userId": str(userId)   # NOTE: becomes the string "None" on failure
    })
    if success:
        response.status_code = 200
    else:
        response.status_code = 400
    return response
1797437 | <reponame>Db2-DTE-POC/db2shift
#
# Set up Jupyter MAGIC commands "sql".
# %sql will return results from a DB2 select statement or execute a DB2 command
#
# IBM 2019: <NAME>
# Version 2019-10-03
#
from __future__ import print_function
from IPython.display import HTML as pHTML, Image as pImage, display as pdisplay, Javascript as Javascript
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic, needs_local_scope)
import ibm_db
import pandas
import ibm_db_dbi
import json
import getpass
import os
import pickle
import time
import sys
import re
import warnings
warnings.filterwarnings("ignore")

# Python Hack for Input between 2 and 3: make input() behave like Python 3's
# on a Python 2 interpreter.
try:
    input = raw_input
except NameError:
    pass

# Default %sql option values and connection parameters; persisted to disk by
# save_settings() and restored by load_settings().
_settings = {
    "maxrows"  : 10,
    "maxgrid"  : 5,
    "runtime"  : 1,
    "display"  : "PANDAS",
    "database" : "",
    "hostname" : "localhost",
    "port"     : "50000",
    "protocol" : "TCPIP",
    "uid"      : "DB2INST1",
    "pwd"      : "password",
    "ssl"      : ""
}

# Runtime environment capabilities (filled in by the probes below).
_environment = {
    "jupyter" : True,
    "qgrid"   : True
}

# Default qgrid grid options used when DISPLAY GRID is selected.
_display = {
    'fullWidthRows': True,
    'syncColumnCellResize': True,
    'forceFitColumns': False,
    'defaultColumnWidth': 150,
    'rowHeight': 28,
    'enableColumnReorder': False,
    'enableTextSelectionOnCells': True,
    'editable': False,
    'autoEdit': False,
    'explicitInitialization': True,
    'maxVisibleRows': 5,
    'minVisibleRows': 5,
    'sortable': True,
    'filterable': False,
    'highlightSelectedCell': False,
    'highlightSelectedRow': True
}

# Connection settings for statements
_connected = False
_hdbc = None        # ibm_db connection handle
_hdbi = None        # ibm_db_dbi (DB-API) connection
_stmt = []
_stmtID = []
_stmtSQL = []
_vars = {}
_macros = {}
_flags = []
_debug = False

# Db2 Error Messages and Codes (updated by db2_error after each statement)
sqlcode = 0
sqlstate = "0"
sqlerror = ""
sqlelapsed = 0

# Check to see if QGrid is installed; fall back to PANDAS display if not.
try:
    import qgrid
    qgrid.set_defaults(grid_options=_display)
except:
    _environment['qgrid'] = False

# Check if we are running in iPython or Jupyter.
try:
    if (get_ipython().config == {}):
        _environment['jupyter'] = False
        _environment['qgrid'] = False
    else:
        _environment['jupyter'] = True
except:
    _environment['jupyter'] = False
    _environment['qgrid'] = False
def setOptions(inSQL):
    """Parse and apply %sql OPTION settings.

    Recognized keywords (each followed by a value, except LIST):
    MAXROWS n, MAXGRID n, RUNTIME seconds, DISPLAY GRID|PANDAS, LIST.
    Unknown tokens are skipped. Settings are persisted via save_settings().
    """
    global _settings, _display

    cParms = inSQL.split()
    cnt = 0

    while cnt < len(cParms):
        if cParms[cnt].upper() == 'MAXROWS':
            if cnt+1 < len(cParms):
                try:
                    _settings["maxrows"] = int(cParms[cnt+1])
                except Exception as err:
                    errormsg("Invalid MAXROWS value provided.")
                    pass
                cnt = cnt + 1
            else:
                errormsg("No maximum rows specified for the MAXROWS option.")
                return
        elif cParms[cnt].upper() == 'MAXGRID':
            if cnt+1 < len(cParms):
                try:
                    maxgrid = int(cParms[cnt+1])
                    if (maxgrid <= 5):      # Minimum window size is 5
                        maxgrid = 5
                    # Bug fix: store the *clamped* value (the raw parameter
                    # was previously written to _display, defeating the
                    # 5-row minimum) and record it in _settings so that
                    # LIST reports the value actually in effect.
                    _settings["maxgrid"] = maxgrid
                    _display["maxVisibleRows"] = maxgrid
                    try:
                        import qgrid
                        qgrid.set_defaults(grid_options=_display)
                    except:
                        _environment['qgrid'] = False
                except Exception as err:
                    errormsg("Invalid MAXGRID value provided.")
                    pass
                cnt = cnt + 1
            else:
                # Bug fix: message referred to MAXROWS (copy-paste).
                errormsg("No maximum rows specified for the MAXGRID option.")
                return
        elif cParms[cnt].upper() == 'RUNTIME':
            if cnt+1 < len(cParms):
                try:
                    _settings["runtime"] = int(cParms[cnt+1])
                except Exception as err:
                    errormsg("Invalid RUNTIME value provided.")
                    pass
                cnt = cnt + 1
            else:
                errormsg("No value provided for the RUNTIME option.")
                return
        elif cParms[cnt].upper() == 'DISPLAY':
            if cnt+1 < len(cParms):
                if (cParms[cnt+1].upper() == 'GRID'):
                    _settings["display"] = 'GRID'
                elif (cParms[cnt+1].upper() == 'PANDAS'):
                    _settings["display"] = 'PANDAS'
                else:
                    errormsg("Invalid DISPLAY value provided.")
                cnt = cnt + 1
            else:
                errormsg("No value provided for the DISPLAY option.")
                return
        elif (cParms[cnt].upper() == 'LIST'):
            print("(MAXROWS) Maximum number of rows displayed: " + str(_settings["maxrows"]))
            print("(MAXGRID) Maximum grid display size: " + str(_settings["maxgrid"]))
            print("(RUNTIME) How many seconds to a run a statement for performance testing: " + str(_settings["runtime"]))
            print("(DISPLAY) Use PANDAS or GRID display format for output: " + _settings["display"])
            return
        else:
            cnt = cnt + 1

    save_settings()
def sqlhelp():
    """Display the list of %sql statement options (HTML table in Jupyter,
    plain text otherwise)."""
    global _environment

    if (_environment["jupyter"] == True):
        # HTML fragments substituted into the template via format(**locals()).
        sd = '<td style="text-align:left;">'
        ed1 = '</td>'
        ed2 = '</td>'
        sh = '<th style="text-align:left;">'
        eh1 = '</th>'
        eh2 = '</th>'
        sr = '<tr>'
        er = '</tr>'
        helpSQL = """
<h3>SQL Options</h3>
<p>The following options are available as part of a SQL statement. The options are always preceded with a
minus sign (i.e. -q).
<table>
{sr}
{sh}Option{eh1}{sh}Description{eh2}
{er}
{sr}
{sd}a, all{ed1}{sd}Return all rows in answer set and do not limit display{ed2}
{er}
{sr}
{sd}d{ed1}{sd}Change SQL delimiter to "@" from ";"{ed2}
{er}
{sr}
{sd}e, echo{ed1}{sd}Echo the SQL command that was generated after macro and variable substituion.{ed2}
{er}
{sr}
{sd}h, help{ed1}{sd}Display %sql help information.{ed2}
{er}
{sr}
{sd}j{ed1}{sd}Create a pretty JSON representation. Only the first column is formatted{ed2}
{er}
{sr}
{sd}json{ed1}{sd}Retrieve the result set as a JSON record{ed2}
{er}
{sr}
{sd}q, quiet{ed1}{sd}Quiet results - no answer set or messages returned from the function{ed2}
{er}
{sr}
{sd}r, array{ed1}{sd}Return the result set as an array of values{ed2}
{er}
{sr}
{sd}sampledata{ed1}{sd}Create and load the EMPLOYEE and DEPARTMENT tables{ed2}
{er}
{sr}
{sd}t,time{ed1}{sd}Time the following SQL statement and return the number of times it executes in 1 second{ed2}
{er}
{sr}
{sd}grid{ed1}{sd}Display the results in a scrollable grid{ed2}
{er}
</table>
"""
    else:
        helpSQL = """
SQL Options
The following options are available as part of a SQL statement. Options are always
preceded with a minus sign (i.e. -q).
Option Description
a, all Return all rows in answer set and do not limit display
d Change SQL delimiter to "@" from ";"
e, echo Echo the SQL command that was generated after substitution
h, help Display %sql help information
j Create a pretty JSON representation. Only the first column is formatted
json Retrieve the result set as a JSON record
q, quiet Quiet results - no answer set or messages returned from the function
r, array Return the result set as an array of values
t,time Time the SQL statement and return the execution count per second
grid Display the results in a scrollable grid
"""

    helpSQL = helpSQL.format(**locals())

    if (_environment["jupyter"] == True):
        pdisplay(pHTML(helpSQL))
    else:
        print(helpSQL)
def connected_help():
    """Display help for the %sql CONNECT command (HTML in Jupyter, plain
    text otherwise)."""
    # HTML fragments substituted into the template via format(**locals()).
    sd = '<td style="text-align:left;">'
    ed = '</td>'
    sh = '<th style="text-align:left;">'
    eh = '</th>'
    sr = '<tr>'
    er = '</tr>'

    if (_environment['jupyter'] == True):
        helpConnect = """
<h3>Connecting to Db2</h3>
<p>The CONNECT command has the following format:
<p>
<pre>
%sql CONNECT TO <database> USER <userid> USING <password|?> HOST <ip address> PORT <port number> <SSL>
%sql CONNECT CREDENTIALS <varname>
%sql CONNECT CLOSE
%sql CONNECT RESET
%sql CONNECT PROMPT - use this to be prompted for values
</pre>
<p>
If you use a "?" for the password field, the system will prompt you for a password. This avoids typing the
password as clear text on the screen. If a connection is not successful, the system will print the error
message associated with the connect request.
<p>
The <b>CREDENTIALS</b> option allows you to use credentials that are supplied by Db2 on Cloud instances.
The credentials can be supplied as a variable and if successful, the variable will be saved to disk
for future use. If you create another notebook and use the identical syntax, if the variable
is not defined, the contents on disk will be used as the credentials. You should assign the
credentials to a variable that represents the database (or schema) that you are communicating with.
Using familiar names makes it easier to remember the credentials when connecting.
<p>
<b>CONNECT CLOSE</b> will close the current connection, but will not reset the database parameters. This means that
if you issue the CONNECT command again, the system should be able to reconnect you to the database.
<p>
<b>CONNECT RESET</b> will close the current connection and remove any information on the connection. You will need
to issue a new CONNECT statement with all of the connection information.
<p>
If the connection is successful, the parameters are saved on your system and will be used the next time you
run an SQL statement, or when you issue the %sql CONNECT command with no parameters.
<p>If you issue CONNECT RESET, all of the current values will be deleted and you will need to
issue a new CONNECT statement.
<p>A CONNECT command without any parameters will attempt to re-connect to the previous database you
were using. If the connection could not be established, the program to prompt you for
the values. To cancel the connection attempt, enter a blank value for any of the values. The connection
panel will request the following values in order to connect to Db2:
<table>
{sr}
{sh}Setting{eh}
{sh}Description{eh}
{er}
{sr}
{sd}Database{ed}{sd}Database name you want to connect to.{ed}
{er}
{sr}
{sd}Hostname{ed}
{sd}Use localhost if Db2 is running on your own machine, but this can be an IP address or host name.
{er}
{sr}
{sd}PORT{ed}
{sd}The port to use for connecting to Db2. This is usually 50000.{ed}
{er}
{sr}
{sd}SSL{ed}
{sd}If you are connecting to a secure port (50001) with SSL then you must include this keyword in the connect string.{ed}
{sr}
{sd}Userid{ed}
{sd}The userid to use when connecting (usually DB2INST1){ed}
{er}
{sr}
{sd}Password{ed}
{sd}No password is provided so you have to enter a value{ed}
{er}
</table>
"""
    else:
        helpConnect = """\
Connecting to Db2
The CONNECT command has the following format:
%sql CONNECT TO database USER userid USING password | ?
HOST ip address PORT port number SSL
%sql CONNECT CREDENTIALS varname
%sql CONNECT CLOSE
%sql CONNECT RESET
If you use a "?" for the password field, the system will prompt you for a password.
This avoids typing the password as clear text on the screen. If a connection is
not successful, the system will print the error message associated with the connect
request.
The CREDENTIALS option allows you to use credentials that are supplied by Db2 on
Cloud instances. The credentials can be supplied as a variable and if successful,
the variable will be saved to disk for future use. If you create another notebook
and use the identical syntax, if the variable is not defined, the contents on disk
will be used as the credentials. You should assign the credentials to a variable
that represents the database (or schema) that you are communicating with. Using
familiar names makes it easier to remember the credentials when connecting.
CONNECT CLOSE will close the current connection, but will not reset the database
parameters. This means that if you issue the CONNECT command again, the system
should be able to reconnect you to the database.
CONNECT RESET will close the current connection and remove any information on the
connection. You will need to issue a new CONNECT statement with all of the connection
information.
If the connection is successful, the parameters are saved on your system and will be
used the next time you run an SQL statement, or when you issue the %sql CONNECT
command with no parameters. If you issue CONNECT RESET, all of the current values
will be deleted and you will need to issue a new CONNECT statement.
A CONNECT command without any parameters will attempt to re-connect to the previous
database you were using. If the connection could not be established, the program to
prompt you for the values. To cancel the connection attempt, enter a blank value for
any of the values. The connection panel will request the following values in order
to connect to Db2:
Setting Description
Database Database name you want to connect to
Hostname Use localhost if Db2 is running on your own machine, but this can
be an IP address or host name.
PORT The port to use for connecting to Db2. This is usually 50000.
Userid The userid to use when connecting (usually DB2INST1)
Password <PASSWORD> provided so you have to enter a value
SSL Include this keyword to indicate you are connecting via SSL (usually port 50001)
"""

    helpConnect = helpConnect.format(**locals())

    if (_environment['jupyter'] == True):
        pdisplay(pHTML(helpConnect))
    else:
        print(helpConnect)
# Prompt for Connection information
def connected_prompt():
    """Interactively collect Db2 connection values into _settings.

    Returns False (cancel) if any prompt is answered with a blank value,
    True once all values are stored. Also resets maxrows/maxgrid/runtime
    to their defaults.
    """
    global _settings

    _database = ''
    _hostname = ''
    _port = ''
    _uid = ''
    _pwd = ''
    _ssl = ''

    print("Enter the database connection details (Any empty value will cancel the connection)")
    _database = input("Enter the database name: ");
    if (_database.strip() == ""): return False
    _hostname = input("Enter the HOST IP address or symbolic name: ");
    if (_hostname.strip() == ""): return False
    _port = input("Enter the PORT number: ");
    if (_port.strip() == ""): return False
    _ssl = input("Is this a secure (SSL) port (y or n)");
    if (_ssl.strip() == ""): return False
    # NOTE(review): any answer other than "n" (not just "y") enables SSL.
    if (_ssl == "n"):
        _ssl = ""
    else:
        _ssl = "Security=SSL;"
    _uid = input("Enter Userid on the DB2 system: ").upper();
    if (_uid.strip() == ""): return False
    _pwd = getpass.getpass("Password [password]: ");
    if (_pwd.strip() == ""): return False

    _settings["database"] = _database.strip()
    _settings["hostname"] = _hostname.strip()
    _settings["port"] = _port.strip()
    _settings["uid"] = _uid.strip()
    _settings["pwd"] = _pwd.strip()
    _settings["ssl"] = _ssl.strip()
    _settings["maxrows"] = 10
    _settings["maxgrid"] = 5
    _settings["runtime"] = 1

    return True
# Split port and IP addresses
def split_string(in_port, splitter=":"):
    """Split an address of the form "ip<splitter>port" into (ip, port).

    Returns (ip, None) when no splitter is present. Only the first two
    split fields are considered, matching the original behavior.

    Fix: removed the unused ``global _settings`` declaration — this
    function never reads or writes module state.
    """
    checkports = in_port.split(splitter)
    ip = checkports[0]
    port = checkports[1] if len(checkports) > 1 else None
    return ip, port
# Parse the CONNECT statement and execute if possible
def parseConnect(inSQL, local_ns):
    """Parse a %sql CONNECT command and attempt the connection.

    Recognized keywords: TO db, USER uid, USING pwd|?, HOST ip[:port],
    PORT n, SSL, CREDENTIALS var, PROMPT, CLOSE, RESET. Updates _settings
    and finishes by calling db2_doConnect() (except for CLOSE/RESET, which
    return early).
    """
    global _settings, _connected

    _connected = False

    cParms = inSQL.split()
    cnt = 0

    # SSL is off unless the SSL keyword appears in this CONNECT statement.
    _settings["ssl"] = ""

    while cnt < len(cParms):
        if cParms[cnt].upper() == 'TO':
            if cnt+1 < len(cParms):
                _settings["database"] = cParms[cnt+1].upper()
                cnt = cnt + 1
            else:
                errormsg("No database specified in the CONNECT statement")
                return
        elif cParms[cnt].upper() == "SSL":
            _settings["ssl"] = "Security=SSL;"
            cnt = cnt + 1
        elif cParms[cnt].upper() == 'CREDENTIALS':
            if cnt+1 < len(cParms):
                credentials = cParms[cnt+1]
                # Evaluate the variable name in the caller's namespace;
                # falls back to a pickled copy on disk when undefined.
                tempid = eval(credentials, local_ns)
                if (isinstance(tempid, dict) == False):
                    errormsg("The CREDENTIALS variable (" + credentials + ") does not contain a valid Python dictionary (JSON object)")
                    return
                if (tempid == None):
                    fname = credentials + ".pickle"
                    try:
                        with open(fname, 'rb') as f:
                            _id = pickle.load(f)
                    except:
                        errormsg("Unable to find credential variable or file.")
                        return
                else:
                    _id = tempid
                try:
                    # Copy the credential fields and cache them to disk for
                    # future sessions.
                    _settings["database"] = _id["db"]
                    _settings["hostname"] = _id["hostname"]
                    _settings["port"] = _id["port"]
                    _settings["uid"] = _id["username"]
                    _settings["pwd"] = _id["password"]
                    try:
                        fname = credentials + ".pickle"
                        with open(fname, 'wb') as f:
                            pickle.dump(_id, f)
                    except:
                        errormsg("Failed trying to write Db2 Credentials.")
                        return
                except:
                    errormsg("Credentials file is missing information. db/hostname/port/username/password required.")
                    return
            else:
                errormsg("No Credentials name supplied")
                return
            cnt = cnt + 1
        elif cParms[cnt].upper() == 'USER':
            if cnt+1 < len(cParms):
                _settings["uid"] = cParms[cnt+1].upper()
                cnt = cnt + 1
            else:
                errormsg("No userid specified in the CONNECT statement")
                return
        elif cParms[cnt].upper() == 'USING':
            if cnt+1 < len(cParms):
                _settings["pwd"] = cParms[cnt+1]
                # A literal "?" means: prompt for the password instead of
                # echoing it in the notebook.
                if (_settings["pwd"] == '?'):
                    _settings["pwd"] = getpass.getpass("Password [password]: ") or "password"
                cnt = cnt + 1
            else:
                errormsg("No password specified in the CONNECT statement")
                return
        elif cParms[cnt].upper() == 'HOST':
            if cnt+1 < len(cParms):
                hostport = cParms[cnt+1].upper()
                ip, port = split_string(hostport)
                if (port == None): _settings["port"] = "50000"
                _settings["hostname"] = ip
                cnt = cnt + 1
            else:
                errormsg("No hostname specified in the CONNECT statement")
                return
        elif cParms[cnt].upper() == 'PORT':
            if cnt+1 < len(cParms):
                _settings["port"] = cParms[cnt+1].upper()
                cnt = cnt + 1
            else:
                errormsg("No port specified in the CONNECT statement")
                return
        elif cParms[cnt].upper() == 'PROMPT':
            if (connected_prompt() == False):
                print("Connection canceled.")
                return
            else:
                cnt = cnt + 1
        elif cParms[cnt].upper() in ('CLOSE', 'RESET'):
            # Close the existing connection; RESET additionally forgets the
            # database name so a full CONNECT is required next time.
            try:
                result = ibm_db.close(_hdbc)
                _hdbi.close()
            except:
                pass
            success("Connection closed.")
            if cParms[cnt].upper() == 'RESET':
                _settings["database"] = ''
            return
        else:
            cnt = cnt + 1

    _ = db2_doConnect()
def db2_doConnect():
    """Establish the Db2 connection described by _settings.

    Returns True on success (also persisting the settings to disk), False
    when no database is configured or the connect attempt fails. Sets the
    module-level _hdbc (ibm_db) and _hdbi (DB-API) handles.
    """
    global _hdbc, _hdbi, _connected, _runtime
    global _settings

    if _connected == False:
        if len(_settings["database"]) == 0:
            return False

    dsn = (
        "DRIVER={{IBM DB2 ODBC DRIVER}};"
        "DATABASE={0};"
        "HOSTNAME={1};"
        "PORT={2};"
        "PROTOCOL=TCPIP;"
        "UID={3};"
        "PWD={4};{5}").format(_settings["database"],
                              _settings["hostname"],
                              _settings["port"],
                              _settings["uid"],
                              _settings["pwd"],
                              _settings["ssl"])

    # Get a database handle (hdbc) and a statement handle (hstmt) for subsequent access to DB2
    try:
        _hdbc = ibm_db.connect(dsn, "", "")
    except Exception as err:
        db2_error(False, True)   # errormsg(str(err))
        _connected = False
        _settings["database"] = ''
        return False

    try:
        _hdbi = ibm_db_dbi.Connection(_hdbc)
    except Exception as err:
        db2_error(False, True)   # errormsg(str(err))
        _connected = False
        _settings["database"] = ''
        return False

    _connected = True

    # Save the values for future use
    save_settings()

    success("Connection successful.")
    return True
def load_settings():
    """Restore _settings from db2connect.pickle if it exists (best effort).

    Missing or unreadable files are silently ignored, leaving the defaults
    in place.
    """
    # This routine will load the settings from the previous session if they exist
    global _settings

    fname = "db2connect.pickle"

    try:
        with open(fname, 'rb') as f:
            _settings = pickle.load(f)
        # Reset runtime to 1 since it would be unexpected to keep the same value between connections
        _settings["runtime"] = 1
        _settings["maxgrid"] = 5
    except:
        pass

    return
def save_settings():
    """Persist the current _settings dictionary to db2connect.pickle.

    On failure an error message is displayed and the function returns.

    Fix: the original contained two identical try/except blocks and wrote
    the pickle file twice on the success path; a single write suffices.
    """
    # This routine will save the current settings if they exist
    global _settings

    fname = "db2connect.pickle"

    try:
        with open(fname, 'wb') as f:
            pickle.dump(_settings, f)
    except:
        errormsg("Failed trying to write Db2 Configuration Information.")

    return
def db2_error(quiet, connect=False):
    """Capture and optionally display the most recent ibm_db error.

    Parameters:
        quiet   - when True, record the error in the globals but print nothing.
        connect - when True, read the connection error message instead of the
                  statement error message.

    Side effects: sets the globals ``sqlerror`` (message text), ``sqlstate``
    and ``sqlcode`` parsed out of the driver message.  When the driver call
    itself fails, the sentinels "Unknown error." / -99999 are stored.
    Returns None.
    """
    global sqlerror, sqlcode, sqlstate, _environment

    try:
        if (connect == False):
            errmsg = ibm_db.stmt_errormsg().replace('\r', ' ')
            errmsg = errmsg[errmsg.rfind("]")+1:].strip()
        else:
            errmsg = ibm_db.conn_errormsg().replace('\r', ' ')
            errmsg = errmsg[errmsg.rfind("]")+1:].strip()
        sqlerror = errmsg

        # Extract "SQLSTATE=xxxxx" from the message text (terminated by a blank
        # or the end of the string).
        msg_start = errmsg.find("SQLSTATE=")
        if (msg_start != -1):
            msg_end = errmsg.find(" ", msg_start)
            if (msg_end == -1):
                msg_end = len(errmsg)
            sqlstate = errmsg[msg_start+9:msg_end]
        else:
            sqlstate = "0"

        # Extract "SQLCODE=nnn" and convert it to an int when possible.
        msg_start = errmsg.find("SQLCODE=")
        if (msg_start != -1):
            msg_end = errmsg.find(" ", msg_start)
            if (msg_end == -1):
                msg_end = len(errmsg)
            sqlcode = errmsg[msg_start+8:msg_end]
            try:
                sqlcode = int(sqlcode)
            except Exception:
                pass
        else:
            sqlcode = 0

    except Exception:
        # The driver call itself failed (e.g. ibm_db not loaded / no handle).
        errmsg = "Unknown error."
        sqlcode = -99999
        sqlstate = "-99999"
        sqlerror = errmsg
        return

    # NOTE: the SQLSTATE/SQLCODE parsing used to be repeated verbatim here,
    # outside the try block, re-doing the identical work a second time.  The
    # redundant copy has been removed.

    if quiet == True: return

    if (errmsg == ""): return

    html = '<p><p style="border:2px; border-style:solid; border-color:#FF0000; background-color:#ffe6e6; padding: 1em;">'

    if (_environment["jupyter"] == True):
        pdisplay(pHTML(html+errmsg+"</p>"))
    else:
        print(errmsg)
# Print out an error message (red box in Jupyter, plain text otherwise)
def errormsg(message):
    """Display *message* as an error; empty messages are ignored."""
    global _environment

    if message == "":
        return

    html = '<p><p style="border:2px; border-style:solid; border-color:#FF0000; background-color:#ffe6e6; padding: 1em;">'
    if _environment["jupyter"] == True:
        pdisplay(pHTML(html + message + "</p>"))
    else:
        print(message)
def success(message):
    """Print an informational message; empty strings print nothing."""
    if message == "":
        return
    print(message)
def debug(message, error=False):
    """Pretty-print *message* in a monospace box.

    Continuation lines are indented according to the running count of
    unbalanced parentheses, which gives SQL/macro text a rough structural
    layout.  ``error=True`` uses the red (error) styling, otherwise green.
    In Jupyter the text is rendered as HTML; elsewhere it is printed.
    Returns None.
    """
    global _environment

    if (_environment["jupyter"] == True):
        spacer = "<br>" + " "
    else:
        spacer = "\n "

    if (message != ""):

        lines = message.split('\n')
        msg = ""
        indent = 0
        for line in lines:
            # Unbalanced '(' on a line increases the indent depth; unbalanced
            # ')' decreases it before the line is emitted.
            delta = line.count("(") - line.count(")")
            if (msg == ""):
                msg = line
                indent = indent + delta
            else:
                if (delta < 0): indent = indent + delta
                msg = msg + spacer * (indent*2) + line
                if (delta > 0): indent = indent + delta

            # Never let mismatched parentheses drive the indent negative
            if (indent < 0): indent = 0

        if (error == True):
            html = '<p><pre style="font-family: monospace; border:2px; border-style:solid; border-color:#FF0000; background-color:#ffe6e6; padding: 1em;">'
        else:
            html = '<p><pre style="font-family: monospace; border:2px; border-style:solid; border-color:#008000; background-color:#e6ffe6; padding: 1em;">'

        if (_environment["jupyter"] == True):
            pdisplay(pHTML(html + msg + "</pre></p>"))
        else:
            print(msg)

    return
def setMacro(inSQL, parms):
    """Register *inSQL* as the body of a %sql macro.

    *parms* is the "define <name>" command line; its second token is taken
    (upper-cased) as the macro name.  Returns None, or None after reporting
    an error when no name was supplied.
    """
    global _macros

    tokens = parms.split()
    if len(tokens) < 2:
        errormsg("No command name supplied.")
        return None

    _macros[tokens[1].upper()] = inSQL
    return
def checkMacro(in_sql):
    """Expand *in_sql* when its first token names a defined macro.

    Returns the macro expansion, or the original string unchanged when the
    input is empty or no macro matches.
    """
    global _macros

    if not in_sql:                       # Nothing to do
        return in_sql

    tokens = parseArgs(in_sql, None)     # Reduce the string into tokens
    name = tokens[0].upper()             # Macro names are stored upper-cased

    if name in _macros:
        # runMacro returns either the original SQL or the generated SQL
        return runMacro(_macros[name], in_sql, tokens)

    return in_sql                        # No macro by this name
def parseCallArgs(macro):
    """Parse ``name(arg, arg, ...)`` text into its name and argument pairs.

    Scans *macro* character by character: everything up to the first "(",
    blank or newline is the procedure/macro name; the remainder is split on
    commas into parameters.  Text inside quotes ('...', "..." or [...]) is
    kept verbatim, an empty parameter becomes ["null","null"], and each
    parameter is further split into [name, value] by splitassign().

    Returns (name, parms) where parms is a list of [parm_name, parm_value].
    """
    quoteChar = ""
    inQuote = False
    inParm = False
    name = ""
    parms = []
    parm = ''

    # NOTE(review): 'sql' is assigned here but never used afterwards.
    sql = macro
    for ch in macro:
        if (inParm == False):
            # Still collecting the name portion before the argument list
            if (ch in ["(", " ", "\n"]):
                inParm = True
            else:
                name = name + ch
        else:
            if (inQuote == True):
                if (ch == quoteChar):
                    inQuote = False
                    #if (quoteChar == "]"):
                    #    parm = parm + "'"
                else:
                    parm = parm + ch
            elif (ch in ("\"", "\'", "[")):  # Do we have a quote
                if (ch == "["):
                    # parm = parm + "'"
                    quoteChar = "]"          # [...] closes with "]"
                else:
                    quoteChar = ch
                inQuote = True
            elif (ch == ")"):
                # End of the argument list - flush the final parameter
                if (parm != ""):
                    parm_name, parm_value = splitassign(parm)
                    parms.append([parm_name, parm_value])
                parm = ""
                break
            elif (ch == ","):
                if (parm != ""):
                    parm_name, parm_value = splitassign(parm)
                    parms.append([parm_name, parm_value])
                else:
                    # An empty slot between commas becomes a null parameter
                    parms.append(["null", "null"])
                parm = ""
            else:
                parm = parm + ch

    # Input ended without a closing ")" - flush whatever was collected
    if (inParm == True):
        if (parm != ""):
            parm_name, parm_value = splitassign(parm)
            parms.append([parm_name, parm_value])

    return(name, parms)
def splitassign(arg):
    """Split a ``name=value`` parameter string into its two halves.

    Returns (name, value).  Without an "=" the name is "null" and the whole
    (stripped) string is the value; "name=" with no right-hand side yields
    the value "null"; matching surrounding quotes on the value are removed.
    """
    name = "null"
    value = "null"

    arg = arg.strip()
    eq = arg.find("=")
    if eq == -1:
        # No assignment present: the whole string is the value
        return name, arg

    name = arg[:eq].strip()
    rhs = arg[eq+1:].strip()
    if rhs == "":
        return name, value               # "x=" -> value stays "null"

    quote = rhs[0]
    if quote in ("'", '"') and rhs[-1:] == quote:
        value = rhs[1:-1]                # Strip the matching surrounding quotes
    else:
        value = rhs

    return name, value
def parseArgs(argin, _vars):
    """Split *argin* into blank-separated tokens.

    Quoted runs ('...' or "...") keep their blanks and their quote
    characters.  Each completed token is passed through subvars() for
    {variable} substitution; an empty token (two blanks in a row) becomes
    the literal string "null".  Returns the list of tokens.
    """
    args = []
    token = ''
    quote = ""           # Quote character we are currently inside, if any

    for ch in argin.lstrip():
        if quote:
            token += ch                  # Inside quotes everything is kept
            if ch == quote:
                quote = ""               # Closing quote ends the quoted run
        elif ch in ('"', "'"):
            quote = ch                   # Opening quote (kept in the token)
            token += ch
        elif ch == " ":
            # Token boundary: substitute variables, or record "null" for an
            # empty slot between consecutive blanks.
            args.append(subvars(token, _vars) if token != "" else "null")
            token = ""
        else:
            token += ch

    if token != "":
        args.append(subvars(token, _vars))

    return args
def runMacro(script, in_sql, tokens):
    """Execute a %sql macro *script* and return the SQL text it generates.

    The macro language supports: ``#`` comment lines, ``if a op b`` /
    ``else`` / ``endif`` conditionals (string comparison), ``var name value``
    assignments, ``echo`` / ``exit`` messages, ``pass``, and ``return``.
    Any other line is substituted (via subvars) and appended to the result.

    Variables: {0} is the macro name, {1}..{n} the invocation tokens and
    {argc} the argument count.  Returns the generated SQL ('' on exit or
    unmatched if/endif, *in_sql* on a malformed if clause).
    """
    result = ""
    code = script.split("\n")
    level = 0
    # runlevel[n] records whether statements at if-nesting depth n execute
    runlevel = [True, False, False, False, False, False, False, False, False, False]
    ifcount = 0
    _vars = {}

    for i in range(0, len(tokens)):
        vstr = str(i)
        _vars[vstr] = tokens[i]

    if (len(tokens) == 0):
        _vars["argc"] = "0"
    else:
        _vars["argc"] = str(len(tokens) - 1)

    for line in code:
        line = line.strip()
        if (line == "" or line == "\n"): continue
        if (line[0] == "#"): continue   # A comment line starts with a # in the first position of the line

        args = parseArgs(line, _vars)   # Get all of the arguments
        if (args[0] == "if"):
            ifcount = ifcount + 1
            if (runlevel[level] == False):   # You can't execute this statement
                continue
            level = level + 1
            if (len(args) < 4):
                print("Macro: Incorrect number of arguments for the if clause.")
                return in_sql            # BUG FIX: was "insql", a NameError
            arg1 = args[1]
            arg2 = args[3]
            # Strip matching surrounding quotes from the comparison value
            if (len(arg2) > 2):
                ch1 = arg2[0]
                ch2 = arg2[-1:]
                if (ch1 in ['"', "'"] and ch1 == ch2):
                    arg2 = arg2[1:-1].strip()

            op = args[2]
            if (op in ["=", "=="]):
                runlevel[level] = (arg1 == arg2)
            elif (op in ["<=", "=<"]):
                runlevel[level] = (arg1 <= arg2)
            elif (op in [">=", "=>"]):
                runlevel[level] = (arg1 >= arg2)
            elif (op in ["<>", "!="]):
                runlevel[level] = (arg1 != arg2)
            elif (op in ["<"]):
                runlevel[level] = (arg1 < arg2)
            elif (op in [">"]):
                runlevel[level] = (arg1 > arg2)
            else:
                print("Macro: Unknown comparison operator in the if statement:" + op)
                continue

        elif (args[0] in ["exit", "echo"] and runlevel[level] == True):
            # Join the remaining (substituted) tokens into one message
            msg = ""
            for msgline in args[1:]:
                if (msg == ""):
                    msg = subvars(msgline, _vars)
                else:
                    msg = msg + " " + subvars(msgline, _vars)
            if (msg != ""):
                if (args[0] == "echo"):
                    debug(msg, error=False)
                else:
                    debug(msg, error=True)
            if (args[0] == "exit"): return ''

        elif (args[0] == "pass" and runlevel[level] == True):
            pass

        elif (args[0] == "var" and runlevel[level] == True):
            value = ""
            for val in args[2:]:
                if (value == ""):
                    value = subvars(val, _vars)
                else:
                    value = value + " " + subvars(val, _vars)
            value = value.strip()        # BUG FIX: strip() result was discarded
            _vars[args[1]] = value

        elif (args[0] == 'else'):
            # Flip the current branch only when the else belongs to the
            # innermost active if
            if (ifcount == level):
                runlevel[level] = not runlevel[level]

        elif (args[0] == 'return' and runlevel[level] == True):
            return(result)

        elif (args[0] == "endif"):
            ifcount = ifcount - 1
            if (ifcount < level):
                level = level - 1
                if (level < 0):
                    print("Macro: Unmatched if/endif pairs.")
                    return ''

        else:
            # A plain text line: substitute variables and append to the output
            if (runlevel[level] == True):
                if (result == ""):
                    result = subvars(line, _vars)
                else:
                    result = result + "\n" + subvars(line, _vars)

    return(result)
def subvars(script, _vars):
    """Substitute ``{name}`` references in *script* from the _vars dict.

    ``{^name}`` upper-cases the value; ``{*n}`` (n numeric) joins variables
    n, n+1, ... with blanks.  Unknown names become "null" ("" for {*n}).
    With ``_vars`` None the script is returned unchanged.
    """
    if (_vars == None): return script

    remainder = script
    result = ""
    done = False

    while done == False:
        bv = remainder.find("{")
        if (bv == -1):
            done = True
            continue
        ev = remainder.find("}")
        if (ev == -1):
            done = True
            continue

        result = result + remainder[:bv]
        vvar = remainder[bv+1:ev]
        remainder = remainder[ev+1:]

        upper = False
        allvars = False

        if (vvar == ""):
            # BUG FIX: an empty "{}" used to raise IndexError on vvar[0];
            # treat it like an unknown variable instead.
            result = result + "null"
            continue

        if (vvar[0] == "^"):
            upper = True
            vvar = vvar[1:]
        elif (vvar[0] == "*"):
            vvar = vvar[1:]
            allvars = True
        else:
            pass

        if (vvar in _vars):
            if (upper == True):
                items = _vars[vvar].upper()
            elif (allvars == True):
                try:
                    iVar = int(vvar)
                except Exception:
                    return(script)
                # Concatenate _vars[n], _vars[n+1], ... while they exist
                items = ""
                sVar = str(iVar)
                while sVar in _vars:
                    if (items == ""):
                        items = _vars[sVar]
                    else:
                        items = items + " " + _vars[sVar]
                    iVar = iVar + 1
                    sVar = str(iVar)
            else:
                items = _vars[vvar]
        else:
            if (allvars == True):
                items = ""
            else:
                items = "null"

        result = result + items

    if (remainder != ""):
        result = result + remainder

    return(result)
def sqlTimer(hdbc, runtime, inSQL):
    """Repeatedly execute *inSQL* against *hdbc* for *runtime* seconds.

    Used by the %sql -t option to benchmark a statement.  Returns the number
    of completed executions, or -1 when any execution fails (the error is
    reported through db2_error, quietly when -q/-quiet is set).
    """
    count = 0
    t_end = time.time() + runtime

    while time.time() < t_end:
        try:
            stmt = ibm_db.exec_immediate(hdbc, inSQL)
            if (stmt == False):
                db2_error(flag(["-q", "-quiet"]))
                return(-1)
            # Free the result set each iteration so handles don't accumulate
            ibm_db.free_result(stmt)
        except Exception as err:
            db2_error(False)
            return(-1)
        count = count + 1

    return(count)
def splitargs(arguments):
    """Split an argument string into typed [value, isString, isNumber] triples.

    *arguments* may optionally be wrapped in "(...)".  It is split on commas
    that are outside quotes; quoted values become strings (quotes removed),
    numeric-looking values become int/float, everything else is kept as the
    raw text with both flags False.  Returns the list of triples ([] for an
    empty input).
    """
    # Removed: an unused "import types" and two dead locals (inarg, an
    # unused "result = []" initialisation) from the original.

    results = []

    # Strip the string and remove the ( and ) characters if they are at the
    # beginning and end of the string.
    step1 = arguments.strip()
    if (len(step1) == 0): return(results)   # Not much to do here - no args found

    if (step1[0] == '(' and step1[-1:] == ')'):
        step2 = step1[1:-1].strip()
    else:
        step2 = step1

    # Now we have a string without brackets. Start scanning for commas.
    quoteCH = ""
    pos = 0
    arg = ""
    args = []

    while pos < len(step2):
        ch = step2[pos]
        if (quoteCH == ""):              # Are we in a quote?
            if (ch in ('"', "'")):       # Check to see if we are starting a quote
                quoteCH = ch
                arg = arg + ch
                pos += 1
            elif (ch == ","):            # Are we at the end of a parameter?
                arg = arg.strip()
                args.append(arg)
                arg = ""
                pos += 1
            else:                        # Continue collecting the string
                arg = arg + ch
                pos += 1
        else:
            if (ch == quoteCH):          # Are we at the end of a quote?
                arg = arg + ch           # Add the quote to the string
                pos += 1                 # Increment past the quote
                quoteCH = ""             # Stop quote checking (maybe!)
            else:
                pos += 1
                arg = arg + ch

    # Flush whatever is left (covers both a trailing arg and an unclosed quote)
    if (arg != ""):
        arg = arg.strip()
        args.append(arg)

    for arg in args:
        if (len(arg) > 0):
            if (arg[0] in ('"', "'")):
                # Quoted: strip the surrounding quotes, keep as a string
                value = arg[1:-1]
                isString = True
                isNumber = False
            else:
                isString = False
                isNumber = False
                try:
                    # NOTE: eval() is used only to recognise numeric literals.
                    # The input is the notebook user's own cell text, but eval
                    # on genuinely untrusted text would be dangerous.
                    value = eval(arg)
                    if (type(value) == int):
                        isNumber = True
                    elif (isinstance(value, float) == True):
                        isNumber = True
                    else:
                        value = arg
                except Exception:
                    value = arg
        else:
            value = ""
            isString = False
            isNumber = False

        results.append([value, isString, isNumber])

    return results
def sqlParser(sqlin, local_ns):
    """Classify an SQL string and substitute ``:variable`` references.

    Returns (sql_cmd, encoded_sql): the upper-cased first keyword ('' when
    none is found) and the SQL with every ``:name`` outside quotes replaced
    by the value of ``name`` from *local_ns* (via getContents / addquotes).
    Strings are quoted, numbers stringified, lists expanded to a comma
    separated sequence, "0x..." values passed through raw, and unknown names
    left as ``:name``.
    """
    sql_cmd = ""
    encoded_sql = sqlin

    # Raw string: "\s" in a plain literal is a deprecated escape sequence
    firstCommand = r"(?:^\s*)([a-zA-Z]+)(?:\s+.*|$)"

    findFirst = re.match(firstCommand, sqlin)

    if (findFirst == None):   # We did not find a match so we just return the empty string
        return sql_cmd, encoded_sql

    cmd = findFirst.group(1)
    sql_cmd = cmd.upper()

    #
    # Scan the input string looking for variables in the format :var. If no : is found just return.
    # Var must be alpha+number+_ to be valid
    #

    if (':' not in sqlin):    # A quick check to see if parameters are in here, but not fool-proof!
        return sql_cmd, encoded_sql

    inVar = False
    inQuote = ""
    varName = ""
    encoded_sql = ""

    STRING = 0
    NUMBER = 1
    LIST = 2
    RAW = 3

    for ch in sqlin:
        if (inVar == True):   # We are collecting the name of a variable
            if (ch.upper() in "@_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789[]"):
                varName = varName + ch
                continue
            else:
                if (varName == ""):
                    # BUG FIX: this used to assign to a misspelled variable
                    # "encode_sql", silently dropping the ":" from the output.
                    encoded_sql = encoded_sql + ":"
                elif (varName[0] in ('[', ']')):
                    encoded_sql = encoded_sql + ":" + varName
                else:
                    if (ch == '.'):  # If the variable name is stopped by a period, assume no quotes are used
                        flag_quotes = False
                    else:
                        flag_quotes = True
                    varValue, varType = getContents(varName, flag_quotes, local_ns)
                    if (varValue == None):
                        encoded_sql = encoded_sql + ":" + varName
                    else:
                        if (varType == STRING):
                            encoded_sql = encoded_sql + varValue
                        elif (varType == NUMBER):
                            encoded_sql = encoded_sql + str(varValue)
                        elif (varType == RAW):
                            encoded_sql = encoded_sql + varValue
                        elif (varType == LIST):
                            start = True
                            for v in varValue:
                                if (start == False):
                                    encoded_sql = encoded_sql + ","
                                if (isinstance(v, int) == True):      # Integer value
                                    encoded_sql = encoded_sql + str(v)
                                elif (isinstance(v, float) == True):
                                    encoded_sql = encoded_sql + str(v)
                                else:
                                    flag_quotes = True
                                    try:
                                        if (v.find('0x') == 0):       # Just guessing this is a hex value at beginning
                                            encoded_sql = encoded_sql + v
                                        else:
                                            encoded_sql = encoded_sql + addquotes(v, flag_quotes)   # String
                                    except Exception:
                                        encoded_sql = encoded_sql + addquotes(str(v), flag_quotes)
                                start = False
                encoded_sql = encoded_sql + ch
                varName = ""
                inVar = False
        elif (inQuote != ""):
            # Inside a quoted literal: copy verbatim, no substitution
            encoded_sql = encoded_sql + ch
            if (ch == inQuote): inQuote = ""
        elif (ch in ("'", '"')):
            encoded_sql = encoded_sql + ch
            inQuote = ch
        elif (ch == ":"):     # This might be a variable
            varName = ""
            inVar = True
        else:
            encoded_sql = encoded_sql + ch

    # The string ended while still inside a variable name
    if (inVar == True):
        varValue, varType = getContents(varName, True, local_ns)   # We assume the end of a line is quoted
        if (varValue == None):
            encoded_sql = encoded_sql + ":" + varName
        else:
            if (varType == STRING):
                encoded_sql = encoded_sql + varValue
            elif (varType == NUMBER):
                encoded_sql = encoded_sql + str(varValue)
            elif (varType == LIST):
                flag_quotes = True
                start = True
                for v in varValue:
                    if (start == False):
                        encoded_sql = encoded_sql + ","
                    if (isinstance(v, int) == True):      # Integer value
                        encoded_sql = encoded_sql + str(v)
                    elif (isinstance(v, float) == True):
                        encoded_sql = encoded_sql + str(v)
                    else:
                        try:
                            if (v.find('0x') == 0):       # Just guessing this is a hex value
                                encoded_sql = encoded_sql + v
                            else:
                                encoded_sql = encoded_sql + addquotes(v, flag_quotes)    # String
                        except Exception:
                            encoded_sql = encoded_sql + addquotes(str(v), flag_quotes)
                    start = False

    return sql_cmd, encoded_sql
def getContents(varName, flag_quotes, local_ns):
    """Resolve *varName* in *local_ns* and classify the resulting value.

    Only simple variables are checked, i.e. arrays and lists are not parsed.
    Returns (value, type) where type is STRING(0), NUMBER(1), LIST(2) or
    RAW(3); (None, 0) when the name cannot be evaluated.  Dicts are
    serialised to quoted JSON, strings quoted via addquotes(), and values
    starting with "0x" are passed through unquoted as RAW.
    """
    STRING = 0
    NUMBER = 1
    LIST = 2
    RAW = 3
    # Removed an unused DICT = 4 constant; narrowed two bare "except:" blocks.

    try:
        # NOTE: eval() resolves the name in the caller's namespace; the input
        # is the notebook user's own variable name, not external data.
        value = eval(varName, None, local_ns)
    except Exception:
        return (None, STRING)

    if isinstance(value, dict):          # JSON dictionary -> quoted JSON text
        return (addquotes(value, flag_quotes), STRING)
    elif isinstance(value, list):        # List - tricky; caller expands the elements
        return (value, LIST)
    elif isinstance(value, int):         # Integer value
        return (value, NUMBER)
    elif isinstance(value, float):       # Float value
        return (value, NUMBER)
    else:
        try:
            # The pattern needs to be in the first position (0 in Python terms)
            if value.find('0x') == 0:    # Just guessing this is a hex value
                return (value, RAW)
            else:
                return (addquotes(value, flag_quotes), STRING)    # String
        except Exception:
            return (addquotes(str(value), flag_quotes), RAW)
def addquotes(inString, flag_quotes):
    """Return *inString* as SQL literal text.

    Dicts are serialised to JSON first.  When *flag_quotes* is set the text
    is wrapped in single quotes with embedded single quotes doubled.
    """
    serialized = json.dumps(inString) if isinstance(inString, dict) else inString

    if flag_quotes == False:
        return serialized

    # Convert single quotes to two single quotes and wrap the value
    return "'" + serialized.replace("'", "''") + "'"
def checkOption(args_in, option, vFalse=False, vTrue=True):
    """Look for *option* inside *args_in*.

    Returns (remaining, flag): the argument string with the option text
    removed (and re-stripped) plus *vTrue* when found, otherwise the
    stripped input unchanged plus *vFalse*.
    """
    remaining = args_in.strip()

    if remaining == "" or option not in remaining:
        return remaining, vFalse

    # Remove the option text and tidy up what is left
    remaining = remaining.replace(option, " ").strip()
    return remaining, vTrue
def findProc(procname):
    """Check the system catalog for stored procedure *procname*.

    Returns 1 when the procedure declares at least one result set, 0 when it
    declares none, and None when it cannot be found (an error message is
    displayed in that case).
    """
    global _hdbc, _hdbi, _connected, _runtime

    # Split the procedure name into schema.procname if appropriate
    upper_procname = procname.upper()
    schema, proc = split_string(upper_procname, ".")  # Expect schema.procname
    if (proc == None):
        proc = schema

    # Call ibm_db.procedures to see if the procedure does exist
    schema = "%"   # Search all schemas; only the procedure name is matched

    try:
        stmt = ibm_db.procedures(_hdbc, None, schema, proc)
        if (stmt == False):   # Error executing the code
            errormsg("Procedure " + procname + " not found in the system catalog.")
            return None
        result = ibm_db.fetch_tuple(stmt)
        # NOTE(review): assumes column 5 of the ibm_db.procedures result row
        # is the number of result sets - confirm against the driver docs.
        resultsets = result[5]
        if (resultsets >= 1): resultsets = 1
        return resultsets
    except Exception as err:
        errormsg("Procedure " + procname + " not found in the system catalog.")
        return None
def getColumns(stmt):
    """Return the column names and types of a prepared/executed statement.

    Walks the statement's fields with ibm_db.field_name/field_type until
    field_name returns False.  Returns (columns, types) as two parallel
    lists, or None on error (reported through db2_error).
    """
    columns = []
    types = []
    colcount = 0
    try:
        colname = ibm_db.field_name(stmt, colcount)
        coltype = ibm_db.field_type(stmt, colcount)
        while (colname != False):
            columns.append(colname)
            types.append(coltype)
            colcount += 1
            colname = ibm_db.field_name(stmt, colcount)
            coltype = ibm_db.field_type(stmt, colcount)
        return columns, types
    except Exception as err:
        db2_error(False)
        return None
def parseCall(hdbc, inSQL, local_ns):
    """Execute a CALL statement and return its results.

    Parses ``CALL proc(arg, ...)``, resolves ``:variable`` arguments from
    *local_ns*, invokes ibm_db.callproc, and returns (depending on flags and
    whether the procedure has a result set): a list of rows (-r/-array, with
    the column names as the first row), a displayed pandas DataFrame, the
    output parameter values, or None on any error.
    """
    global _hdbc, _hdbi, _connected, _runtime, _environment

    # Check to see if we are connected first
    if (_connected == False):   # Check if you are connected
        db2_doConnect()
        if _connected == False: return None

    remainder = inSQL.strip()
    procName, procArgs = parseCallArgs(remainder[5:])  # Assume that CALL ... is the format

    resultsets = findProc(procName)
    if (resultsets == None): return None

    argvalues = []

    if (len(procArgs) > 0):   # We have arguments to consider
        for arg in procArgs:
            varname = arg[1]
            if (len(varname) > 0):
                if (varname[0] == ":"):
                    # A ":var" argument - look the value up in the namespace
                    checkvar = varname[1:]
                    varvalue = getContents(checkvar, True, local_ns)
                    if (varvalue == None):
                        errormsg("Variable " + checkvar + " is not defined.")
                        return None
                    argvalues.append(varvalue)
                else:
                    if (varname.upper() == "NULL"):
                        argvalues.append(None)
                    else:
                        argvalues.append(varname)
            else:
                if (varname.upper() == "NULL"):
                    argvalues.append(None)
                else:
                    argvalues.append(varname)

    try:
        # callproc returns (stmt, out_args...) when arguments are supplied,
        # or just the statement handle when there are none
        if (len(procArgs) > 0):
            argtuple = tuple(argvalues)
            result = ibm_db.callproc(_hdbc, procName, argtuple)
            stmt = result[0]
        else:
            result = ibm_db.callproc(_hdbc, procName)
            stmt = result

        if (resultsets == 1 and stmt != None):
            columns, types = getColumns(stmt)
            if (columns == None): return None

            rows = []
            rowlist = ibm_db.fetch_tuple(stmt)
            while (rowlist):
                row = []
                colcount = 0
                for col in rowlist:
                    # Coerce each column to a Python type based on its SQL type
                    try:
                        if (types[colcount] in ["int", "bigint"]):
                            row.append(int(col))
                        elif (types[colcount] in ["decimal", "real"]):
                            row.append(float(col))
                        elif (types[colcount] in ["date", "time", "timestamp"]):
                            row.append(str(col))
                        else:
                            row.append(col)
                    except:
                        row.append(col)
                    colcount += 1
                rows.append(row)
                rowlist = ibm_db.fetch_tuple(stmt)

            if flag(["-r", "-array"]):
                rows.insert(0, columns)
                if len(procArgs) > 0:
                    # Return the rows plus the procedure's output parameters
                    allresults = []
                    allresults.append(rows)
                    for x in result[1:]:
                        allresults.append(x)
                    return allresults   # rows,returned_results
                else:
                    return rows
            else:
                df = pandas.DataFrame.from_records(rows, columns=columns)
                if flag("-grid") or _settings['display'] == 'GRID':
                    if (_environment['qgrid'] == False):
                        with pandas.option_context('display.max_rows', None, 'display.max_columns', None):
                            pdisplay(df)
                    else:
                        try:
                            pdisplay(qgrid.show_grid(df))
                        except:
                            errormsg("Grid cannot be used to display data with duplicate column names. Use option -a or %sql OPTION DISPLAY PANDAS instead.")
                        return
                else:
                    if flag(["-a", "-all"]) or _settings["maxrows"] == -1:   # All of the rows
                        with pandas.option_context('display.max_rows', None, 'display.max_columns', None):
                            pdisplay(df)
                    else:
                        return df
        else:
            # No result set: only the output parameters (if any) are returned
            if len(procArgs) > 0:
                allresults = []
                for x in result[1:]:
                    allresults.append(x)
                return allresults   # rows,returned_results
            else:
                return None

    except Exception as err:
        db2_error(False)
        return None
def parsePExec(hdbc, inSQL):
    """Handle the PREPARE and EXECUTE ... USING statements for %sql.

    PREPARE: prepares the remaining SQL (expanding a "?*n" shorthand into n
    parameter markers), stores the handle in the _stmt/_stmtID globals and
    returns the statement id (False on error).
    EXECUTE: looks up a previously prepared statement by id, binds any USING
    parameters (constants or global variables, optionally typed as
    "name@dec|bin|int|char"), executes it and returns True, the fetched
    result rows, or False on error.
    """
    import ibm_db
    global _stmt, _stmtID, _stmtSQL, sqlcode

    cParms = inSQL.split()
    parmCount = len(cParms)
    if (parmCount == 0): return(None)   # Nothing to do but this shouldn't happen

    keyword = cParms[0].upper()   # Upper case the keyword

    if (keyword == "PREPARE"):    # Prepare the following SQL
        uSQL = inSQL.upper()
        found = uSQL.find("PREPARE")
        sql = inSQL[found+7:].strip()

        try:
            # Replace each "?*n" shorthand with n comma-separated markers.
            # Raw string: the plain "\?" literal is a deprecated escape.
            pattern = r"\?\*[0-9]+"
            findparm = re.search(pattern, sql)
            while findparm != None:
                found = findparm.group(0)
                count = int(found[2:])
                markers = ('?,' * count)[:-1]
                sql = sql.replace(found, markers)
                findparm = re.search(pattern, sql)

            stmt = ibm_db.prepare(hdbc, sql)   # Check error code here
            if (stmt == False):
                db2_error(False)
                return(False)

            # Use the address embedded in the handle's repr as the statement id
            stmttext = str(stmt).strip()
            stmtID = stmttext[33:48].strip()

            if (stmtID in _stmtID) == False:
                _stmt.append(stmt)             # Prepare and return STMT to caller
                _stmtID.append(stmtID)
            else:
                stmtIX = _stmtID.index(stmtID)
                _stmt[stmtIX] = stmt           # BUG FIX: was "_stmt[stmtiX]" (NameError)

            return(stmtID)

        except Exception as err:
            print(err)
            db2_error(False)
            return(False)

    if (keyword == "EXECUTE"):    # Execute the prepare statement
        if (parmCount < 2): return(False)   # No stmtID available

        stmtID = cParms[1].strip()
        if (stmtID in _stmtID) == False:
            errormsg("Prepared statement not found or invalid.")
            return(False)

        stmtIX = _stmtID.index(stmtID)
        stmt = _stmt[stmtIX]

        try:
            if (parmCount == 2):    # Only the statement handle available
                result = ibm_db.execute(stmt)   # Run it
            elif (parmCount == 3):  # Not quite enough arguments
                errormsg("Missing or invalid USING clause on EXECUTE statement.")
                sqlcode = -99999
                return(False)
            else:
                using = cParms[2].upper()
                if (using != "USING"):   # Bad syntax again
                    errormsg("Missing USING clause on EXECUTE statement.")
                    sqlcode = -99999
                    return(False)

                uSQL = inSQL.upper()
                found = uSQL.find("USING")
                parmString = inSQL[found+5:].strip()
                parmset = splitargs(parmString)

                if (len(parmset) == 0):
                    errormsg("Missing parameters after the USING clause.")
                    sqlcode = -99999
                    return(False)

                parms = []
                parm_count = 0

                CONSTANT = 0
                VARIABLE = 1
                const = [0]
                const_cnt = 0

                for v in parmset:
                    parm_count = parm_count + 1

                    if (v[1] == True or v[2] == True):   # v[1] true if string, v[2] true if num
                        parm_type = CONSTANT
                        const_cnt = const_cnt + 1
                        if (v[2] == True):
                            if (isinstance(v[0], int) == True):     # Integer value
                                sql_type = ibm_db.SQL_INTEGER
                            elif (isinstance(v[0], float) == True): # Float value
                                sql_type = ibm_db.SQL_DOUBLE
                            else:
                                sql_type = ibm_db.SQL_INTEGER
                        else:
                            sql_type = ibm_db.SQL_CHAR
                        const.append(v[0])
                    else:
                        parm_type = VARIABLE

                        # See if the variable has a type associated with it varname@type
                        varset = v[0].split("@")
                        parm_name = varset[0]

                        parm_datatype = "char"

                        # Does the variable exist?
                        if (parm_name not in globals()):
                            errormsg("SQL Execute parameter " + parm_name + " not found")
                            sqlcode = -99999
                            return(False)   # BUG FIX: was lowercase "false" (NameError)

                        if (len(varset) > 1):   # Type provided
                            parm_datatype = varset[1]

                        if (parm_datatype == "dec" or parm_datatype == "decimal"):
                            sql_type = ibm_db.SQL_DOUBLE
                        elif (parm_datatype == "bin" or parm_datatype == "binary"):
                            sql_type = ibm_db.SQL_BINARY
                        elif (parm_datatype == "int" or parm_datatype == "integer"):
                            sql_type = ibm_db.SQL_INTEGER
                        else:
                            sql_type = ibm_db.SQL_CHAR

                    try:
                        if (parm_type == VARIABLE):
                            result = ibm_db.bind_param(stmt, parm_count, globals()[parm_name], ibm_db.SQL_PARAM_INPUT, sql_type)
                        else:
                            result = ibm_db.bind_param(stmt, parm_count, const[const_cnt], ibm_db.SQL_PARAM_INPUT, sql_type)
                    except Exception:
                        result = False

                    if (result == False):
                        errormsg("SQL Bind on variable " + parm_name + " failed.")
                        sqlcode = -99999
                        return(False)   # BUG FIX: was lowercase "false" (NameError)

                result = ibm_db.execute(stmt)   # ,tuple(parms))

            if (result == False):
                errormsg("SQL Execute failed.")
                return(False)

            if (ibm_db.num_fields(stmt) == 0): return(True)   # Command successfully completed

            return(fetchResults(stmt))

        except Exception as err:
            db2_error(False)
            return(False)

        return(False)

    return(False)
def fetchResults(stmt):
    """Fetch all rows of an executed statement.

    Output format depends on the parsed flags: by default (and with
    -r/-array) a list of lists whose first row is the column names; with
    -json a list of dicts keyed by lower-cased column names.  Column values
    are coerced to int/float/str based on their SQL type.  Side effect: sets
    the global ``sqlcode`` to 100 when no rows were fetched, else 0.
    """
    global sqlcode

    rows = []
    columns, types = getColumns(stmt)

    # By default we assume that the data will be an array
    is_array = True

    # Check what type of data we want returned - array or json
    if (flag(["-r", "-array"]) == False):
        # See if we want it in JSON format, if not it remains as an array
        if (flag("-json") == True):
            is_array = False

    # Set column names to lowercase for JSON records
    if (is_array == False):
        columns = [col.lower() for col in columns]   # Convert to lowercase for each of access

    # First row of an array has the column names in it
    if (is_array == True):
        rows.append(columns)

    result = ibm_db.fetch_tuple(stmt)
    rowcount = 0
    while (result):
        rowcount += 1
        if (is_array == True):
            row = []
        else:
            row = {}
        colcount = 0
        for col in result:
            # Coerce each column based on its declared SQL type; on any
            # conversion failure the raw value is kept.
            try:
                if (types[colcount] in ["int", "bigint"]):
                    if (is_array == True):
                        row.append(int(col))
                    else:
                        row[columns[colcount]] = int(col)
                elif (types[colcount] in ["decimal", "real"]):
                    if (is_array == True):
                        row.append(float(col))
                    else:
                        row[columns[colcount]] = float(col)
                elif (types[colcount] in ["date", "time", "timestamp"]):
                    if (is_array == True):
                        row.append(str(col))
                    else:
                        row[columns[colcount]] = str(col)
                else:
                    if (is_array == True):
                        row.append(col)
                    else:
                        row[columns[colcount]] = col
            except:
                if (is_array == True):
                    row.append(col)
                else:
                    row[columns[colcount]] = col
            colcount += 1
        rows.append(row)
        result = ibm_db.fetch_tuple(stmt)

    if (rowcount == 0):
        sqlcode = 100   # SQL convention: +100 means "no data found"
    else:
        sqlcode = 0

    return rows
def parseCommit(sql):
    """Handle the COMMIT [HOLD], ROLLBACK and AUTOCOMMIT ON|OFF commands.

    COMMIT and ROLLBACK also invalidate all cached prepared statements
    (the _stmt/_stmtID lists), except for COMMIT HOLD which keeps them.
    Does nothing when there is no active connection.  Returns None.
    """
    global _hdbc, _hdbi, _connected, _runtime, _stmt, _stmtID, _stmtSQL

    if (_connected == False): return   # Nothing to do if we are not connected

    cParms = sql.split()
    if (len(cParms) == 0): return      # Nothing to do but this shouldn't happen

    keyword = cParms[0].upper()        # Upper case the keyword

    if (keyword == "COMMIT"):          # Commit the work that was done
        try:
            result = ibm_db.commit(_hdbc)   # Commit the connection
            if (len(cParms) > 1):
                keyword = cParms[1].upper()
                if (keyword == "HOLD"):
                    # COMMIT HOLD keeps the prepared statement handles alive
                    return

            # A plain COMMIT invalidates all prepared statements
            del _stmt[:]
            del _stmtID[:]

        except Exception as err:
            db2_error(False)

        return

    if (keyword == "ROLLBACK"):        # Rollback the work that was done
        try:
            result = ibm_db.rollback(_hdbc)   # Rollback the connection
            del _stmt[:]
            del _stmtID[:]

        except Exception as err:
            db2_error(False)

        return

    if (keyword == "AUTOCOMMIT"):      # Is autocommit on or off
        if (len(cParms) > 1):
            op = cParms[1].upper()     # Need ON or OFF value
        else:
            return

        try:
            if (op == "OFF"):
                ibm_db.autocommit(_hdbc, False)
            elif (op == "ON"):
                ibm_db.autocommit(_hdbc, True)
            return
        except Exception as err:
            db2_error(False)
            return

    return
def setFlags(inSQL):
    """Strip leading "-flag" options from *inSQL* into the global _flags.

    Flags are recognised only before the first non-flag, non-blank
    character; once real text starts the rest of the string is copied
    verbatim.  Returns the input with the recognised flags removed.
    """
    global _flags

    _flags = []          # Delete all of the current flag settings

    outSQL = ""
    current = ""
    inFlag = False
    ignore = False       # Set once real (non-flag) text begins

    for ch in inSQL:
        if ignore:
            outSQL += ch
            continue

        if inFlag:
            if ch == " ":
                _flags.append(current)   # A blank terminates the flag
                inFlag = False
            else:
                current += ch
        elif ch == "-":
            current = "-"                # Start collecting a new flag
            inFlag = True
        elif ch == " ":
            outSQL += ch
        else:
            outSQL += ch                 # First real character: stop scanning
            ignore = True

    if inFlag:
        _flags.append(current)           # Flag ran to the end of the string

    return outSQL
def flag(inflag):
    """Return True when *inflag* was present on the %sql command line.

    *inflag* may be a single flag string or a list of alternative
    spellings; any match against the parsed global _flags list wins.
    """
    global _flags

    candidates = inflag if isinstance(inflag, list) else [inflag]
    for item in candidates:
        if item in _flags:
            return True
    return False
def splitSQL(inputString, delimiter):
    """Split *inputString* on *delimiter*, ignoring delimiters inside quotes.

    Both single and double quotes are honoured; quote characters are kept in
    the output.  A trailing empty statement is dropped.  Returns the list of
    statements ([] for a blank input).
    """
    statements = []
    current = ""
    quote = ""

    text = inputString.strip()
    if len(text) == 0:
        return statements                # Not much to do here - no args found

    for ch in text:
        if ch in ('"', "'"):             # Quote characters toggle quoted mode
            current += ch                # Keep appending the characters to the current arg
            if ch == quote:
                quote = ""               # This closes the quote we are in
            elif quote == "":
                quote = ch               # This opens a new quoted run
            # A different quote character inside a quote is just text
        elif quote != "":
            current += ch                # Still in a quote
        elif ch == delimiter:
            statements.append(current)   # Statement boundary
            current = ""
        else:
            current += ch

    if current != "":
        statements.append(current)

    return statements
@magics_class
class DB2(Magics):
    """IPython magic providing the %sql (line) and %%sql (cell) commands.

    Dispatches special keywords (CONNECT, DEFINE, OPTION, COMMIT/ROLLBACK/
    AUTOCOMMIT, PREPARE, EXECUTE, CALL) to their handlers, and runs everything
    else as SQL against the current Db2 connection, returning results as a
    pandas DataFrame, raw arrays or JSON depending on the command-line flags.
    """

    @needs_local_scope
    @line_cell_magic
    def sql(self, line, cell=None, local_ns=None):
        """Entry point for %sql / %%sql.

        *line* is the magic's command line (flags plus optional SQL), *cell*
        the cell body for %%sql, and *local_ns* the caller's namespace used
        for :variable substitution.  Returns rows/DataFrame/JSON for queries,
        a handler-specific value for the special commands, or None.
        """
        # Before we event get started, check to see if you have connected yet. Without a connection we
        # can't do anything. You may have a connection request in the code, so if that is true, we run those,
        # otherwise we connect immediately

        # If your statement is not a connect, and you haven't connected, we need to do it for you

        global _settings, _environment
        global _hdbc, _hdbi, _connected, _runtime, sqlstate, sqlerror, sqlcode, sqlelapsed

        # If you use %sql (line) we just run the SQL. If you use %%SQL the entire cell is run.

        flag_cell = False
        flag_output = False
        sqlstate = "0"
        sqlerror = ""
        sqlcode = 0
        sqlelapsed = 0

        start_time = time.time()
        end_time = time.time()

        # Macros gets expanded before anything is done
        SQL1 = setFlags(line.strip())
        SQL1 = checkMacro(SQL1)           # Update the SQL if any macros are in there
        SQL2 = cell

        if flag("-sampledata"):           # Check if you only want sample data loaded
            if (_connected == False):
                if (db2_doConnect() == False):
                    errormsg('A CONNECT statement must be issued before issuing SQL statements.')
                    return

            db2_create_sample(flag(["-q", "-quiet"]))
            return

        if SQL1 == "?" or flag(["-h", "-help"]):   # Are you asking for help
            sqlhelp()
            return

        if len(SQL1) == 0 and SQL2 == None: return   # Nothing to do here

        # Check for help
        if SQL1.upper() == "? CONNECT":   # Are you asking for help on CONNECT
            connected_help()
            return

        sqlType, remainder = sqlParser(SQL1, local_ns)   # What type of command do you have?

        # Dispatch the special command keywords to their handlers
        if (sqlType == "CONNECT"):        # A connect request
            parseConnect(SQL1, local_ns)
            return
        elif (sqlType == "DEFINE"):       # Create a macro from the body
            result = setMacro(SQL2, remainder)
            return
        elif (sqlType == "OPTION"):
            setOptions(SQL1)
            return
        elif (sqlType == 'COMMIT' or sqlType == 'ROLLBACK' or sqlType == 'AUTOCOMMIT'):
            parseCommit(remainder)
            return
        elif (sqlType == "PREPARE"):
            pstmt = parsePExec(_hdbc, remainder)
            return(pstmt)
        elif (sqlType == "EXECUTE"):
            result = parsePExec(_hdbc, remainder)
            return(result)
        elif (sqlType == "CALL"):
            result = parseCall(_hdbc, remainder, local_ns)
            return(result)
        else:
            pass

        # Everything else is ordinary SQL; prefer the line, else the cell
        sql = SQL1

        if (sql == ""): sql = SQL2
        if (sql == ""): return            # Nothing to do here

        if (_connected == False):
            if (db2_doConnect() == False):
                errormsg('A CONNECT statement must be issued before issuing SQL statements.')
                return

        if _settings["maxrows"] == -1:    # Set the return result size
            pandas.reset_option('display.max_rows')
        else:
            pandas.options.display.max_rows = _settings["maxrows"]

        # Strip "--" line comments, then flatten the statement onto one line
        runSQL = re.sub('.*?--.*$', "", sql, flags=re.M)
        remainder = runSQL.replace("\n", " ")

        # -d/-delim switches the statement delimiter from ";" to "@"
        if flag(["-d", "-delim"]):
            sqlLines = splitSQL(remainder, "@")
        else:
            sqlLines = splitSQL(remainder, ";")
        flag_cell = True

        # For each line figure out if you run it as a command (db2) or select (sql)
        for sqlin in sqlLines:            # Run each command
            sqlin = checkMacro(sqlin)     # Update based on any macros
            sqlType, sql = sqlParser(sqlin, local_ns)   # Parse the SQL
            if (sql.strip() == ""): continue
            if flag(["-e", "-echo"]): debug(sql, False)

            if flag("-t"):
                # -t benchmarks the statement instead of returning results
                cnt = sqlTimer(_hdbc, _settings["runtime"], sql)   # Given the sql and parameters, clock the time
                if (cnt >= 0): print("Total iterations in %s second(s): %s" % (_settings["runtime"], cnt))
                return(cnt)

            else:

                try:                      # See if we have an answer set
                    stmt = ibm_db.prepare(_hdbc, sql)
                    if (ibm_db.num_fields(stmt) == 0):   # No, so we just execute the code
                        result = ibm_db.execute(stmt)    # Run it
                        if (result == False):            # Error executing the code
                            db2_error(flag(["-q", "-quiet"]))
                            continue

                        rowcount = ibm_db.num_rows(stmt)

                        if (rowcount == 0 and flag(["-q", "-quiet"]) == False):
                            errormsg("No rows found.")

                        continue                         # Continue running

                    elif flag(["-r", "-array", "-j", "-json"]):   # raw, json, format json
                        row_count = 0
                        resultSet = []
                        try:
                            result = ibm_db.execute(stmt)   # Run it
                            if (result == False):           # Error executing the code
                                db2_error(flag(["-q", "-quiet"]))
                                return

                            if flag("-j"):                  # JSON single output
                                row_count = 0
                                json_results = []
                                while (ibm_db.fetch_row(stmt)):
                                    row_count = row_count + 1
                                    jsonVal = ibm_db.result(stmt, 0)
                                    jsonDict = json.loads(jsonVal)
                                    json_results.append(jsonDict)
                                flag_output = True
                                if (row_count == 0): sqlcode = 100
                                return(json_results)

                            else:
                                return(fetchResults(stmt))

                        except Exception as err:
                            db2_error(flag(["-q", "-quiet"]))
                            return

                    else:
                        # Default path: let pandas fetch the answer set
                        try:
                            df = pandas.read_sql(sql, _hdbi)

                        except Exception as err:
                            db2_error(False)
                            return

                        if (len(df) == 0):
                            sqlcode = 100
                            if (flag(["-q", "-quiet"]) == False):
                                errormsg("No rows found")
                            continue

                        flag_output = True
                        if flag("-grid") or _settings['display'] == 'GRID':   # Check to see if we can display the results
                            if (_environment['qgrid'] == False):
                                with pandas.option_context('display.max_rows', None, 'display.max_columns', None):
                                    print(df.to_string())
                            else:
                                try:
                                    pdisplay(qgrid.show_grid(df))
                                except:
                                    errormsg("Grid cannot be used to display data with duplicate column names. Use option -a or %sql OPTION DISPLAY PANDAS instead.")
                                return
                        else:
                            if flag(["-a", "-all"]) or _settings["maxrows"] == -1:   # All of the rows
                                pandas.options.display.max_rows = None
                                pandas.options.display.max_columns = None
                                return df   # print(df.to_string())
                            else:
                                pandas.options.display.max_rows = _settings["maxrows"]
                                pandas.options.display.max_columns = None
                                return df   # pdisplay(df) # print(df.to_string())

                except:
                    db2_error(flag(["-q", "-quiet"]))
                    continue                # return

        end_time = time.time()
        sqlelapsed = end_time - start_time

        if (flag_output == False and flag(["-q", "-quiet"]) == False): print("Command completed.")
# Register the Magic extension in Jupyter
ip = get_ipython()            # The running IPython shell instance
ip.register_magics(DB2)       # Makes %sql / %%sql available in this session
load_settings()               # Restore connection settings from the previous session
success("Db2 Extensions Loaded.")
1864490 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Program Name: mult_add.py
# <NAME>
# 07/14/2016
# Python Version 3.4
# Description: A function that takes in 3 parameters (a,b,c) and does the following operation a*b+c
# multAdd function
def multAdd(a, b, c):
    """Return a * b + c: multiply the first two arguments, then add the third."""
    product = a * b
    return product + c
# Main function
def main():
    """Demo driver: evaluate multAdd on fixed sample numbers and print the result."""
    num1, num2, num3 = 6, 10, 4
    answer = multAdd(num1, num2, num3)
    print("Num 1 = %i, Num 2 = %i, Num 3 = %i; Answer: %i" % (num1, num2, num3, answer))
main()
"""
Num 1 = 6, Num 2 = 10, Num 3 = 4; Answer: 64
"""
| StarcoderdataPython |
116212 | <gh_stars>0
from django.conf.urls import url
from pretix_covid_certificates.views import CovidCertificatesSettings
# URL routes for the pretix COVID-certificates plugin: a single event-level
# control-panel page for configuring certificate settings.
urlpatterns = [
    url(
        # organizer/event slugs are captured so pretix can scope the view
        # to a specific event; "covidcerts" is this plugin's URL segment.
        r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/covidcerts/$",
        CovidCertificatesSettings.as_view(),
        name="settings",
    ),
]
| StarcoderdataPython |
3527499 | <reponame>BjornChrisnach/Python_6hour_course
# Error handling demo: read a line and echo it as an integer.
# Only a non-numeric entry should be rejected, so catch ValueError
# specifically — the original bare `except:` would also swallow
# KeyboardInterrupt/SystemExit and hide unrelated bugs.
text = input("Username: ")
try:
    number = int(text)
except ValueError:
    print("Invalid Username")
else:
    # Success path kept out of the try block so only int() is guarded.
    print(number)
| StarcoderdataPython |
8106028 | # -*- coding: utf-8 -*-
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
from tutorial import styles
from server import app
def s(string_block):
    """Strip the indentation run from a triple-quoted markdown block.

    NOTE(review): str.replace removes the target string anywhere in the
    text, not only at line starts, so it assumes the markdown content
    never contains a legitimate occurrence of that whitespace run (e.g.
    four-space code indents). The replace target appears to be an indent
    run whose exact width may have been collapsed in transcription —
    confirm against the original file.
    """
    return string_block.replace(' ', '')
# # # # # # #
# Authenticating to Dash Deployment Server with SSH
# # # # # # #
Ssh = html.Div(children=[
html.H1('Authenticating to Dash Deployment Server with SSH'),
dcc.Markdown(s('''
In Plotly Enterprise 2.4.0 and above, you can deploy your apps using
either HTTPS or SSH. If you are deploying with HTTPS, then you do not
need to set up an SSH key. Thus, you can skip this tutorial and go
straight to
[Initialize Dash Apps on Dash Deployment Server](/dash-deployment-server/initialize).
If you are deploying with SSH then you need to add a SSH Key to the
Dash Deployment Server. SSH Keys are used to authenticate your git
session with the server. Deploying with SSH takes a little bit more
time to set up but it allows you to deploy without typing in your
username and password each time. Continue below for instructions on
how to generate and add a SSH Key.
***
''')),
dcc.Markdown(s('''
#### Why Deploy with SSH?
We recommend deploying with HTTPS for most of our users. However, there
are a few cases where deploying with SSH is advantageous:
- If your Dash Deployment Server is using a **self-signed certificate**,
deploying with HTTPS
[requires some extra, challenging configuration](https://stackoverflow.com/questions/11621768/).
In these cases, it will be easier to set up deploying with SSH.
- If your Dash Deployment Server is configured with **SAML**, then the
HTTPS method will not work.
***
#### Already Have an SSH Key?
If you already have an SSH key that you've used in other
services, you can use that key instead of generating a new one.
For instructions on how to add an existing SSH Key to the Dash Deployment
Server, jump to **Copy and Add SSH Key**.
***
## Generate and Add an SSH Key
''')),
dcc.Markdown(s('''
#### Which OS Are You Using?
''')),
dcc.RadioItems(
id='platform',
options=[
{'label': i, 'value': i} for i in
['Windows', 'Mac', 'Linux']],
value='Windows',
labelStyle={'display': 'inline-block'}
),
html.Div(id='instructions')
])
@app.callback(Output('instructions', 'children'),
[Input('platform', 'value')])
def display_instructions(platform):
return [
(dcc.Markdown(s('''
These instructions assume that you are using
**Git Bash** on Windows, which is included in the
official [Git for Windows release](https://git-scm.com/download/win).
''')) if platform == 'Windows' else
''),
dcc.Markdown(s('''
***
#### Generate a New SSH Key
''')),
dcc.Markdown(s(
'**1. Open Git Bash**' if platform == 'Windows' else
'**1. Open Terminal**'
)),
dcc.Markdown(s('''
**2. Generate Key**
This command will walk you
through a few instructions.
''')),
dcc.SyntaxHighlighter(
('$ ssh-keygen -t rsa -b 4096 -C "<EMAIL>"'),
customStyle=styles.code_container,
language='python'
),
dcc.Markdown(s('''
***
#### Check the SSH-Agent
**1. Ensure the ssh-agent is running:**
''')),
dcc.SyntaxHighlighter(
('$ eval $(ssh-agent -s)' if platform == 'Windows' else
'$ eval "$(ssh-agent -s)"'),
customStyle=styles.code_container,
language='python'
),
dcc.Markdown(s('''
**2. Run `ssh-add`**
Replace `id_rsa` with the name of the key that you
created above if it is different.
''')),
dcc.SyntaxHighlighter(
('$ ssh-add ~/.ssh/id_rsa' if platform == 'Windows' else
'$ ssh-add -k ~/.ssh/id_rsa'),
customStyle=styles.code_container,
language='python'
),
dcc.Markdown(s('''
***
#### Copy and Add SSH Key
**1. Copy the SSH key to your clipboard.**
Replace `id_rsa.pub` with the name of the key that you
created above if it is different.
''')),
dcc.SyntaxHighlighter(
('$ clip < ~/.ssh/id_rsa.pub' if platform == 'Windows' else
'$ pbcopy < ~/.ssh/id_rsa.pub' if platform == 'Mac' else
'$ sudo apt-get install xclip\n$ xclip -sel clip < ~/.ssh/id_rsa.pub'),
customStyle=styles.code_container,
language='python'
),
dcc.Markdown(s('''
**2. Open the Dash Deployment Server UI**
You can find the Dash Deployment Server UI by selecting "Dash App"
from Plotly's "Create" menu.
> *The Dash App item in the Create menu takes you to the Dash
Deployment Server UI*
''')),
html.Img(
alt='Dash App Create Menu',
src='/assets/images/dds/open-dds-ui.png',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
**3. Add SSH Key**
Select **SSH Keys** in the top navigation menu of the Dash
Deployment Server UI. Here, select **Add Key** and in the 'Add
SSH Key' modal, paste in your SSH Key.
''')),
html.Img(
alt='Add SSH Key',
src='/assets/images/dds/add-ssh-key.png',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
**4. Confirm it Has Been Added**
Once you've added an SSH key, it should be added to your list of SSH
Keys like the image below.
''')),
html.Img(
alt='List of SSH Keys',
src='/assets/images/dds/list-of-ssh-keys.png',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
***
#### Modify SSH Config
Next, specify a custom port in your SSH config. By default, this
should be `3022` but your server administrator may have set it to
something different.
This file is located in `~/.ssh/config`. If it's not there, then
create it. Add the following lines to
this file, replacing `your-dash-deployment-server` with the domain of
your Dash Deployment Server (without `http://` or `https://`).
''')),
dcc.SyntaxHighlighter('''Host your-dash-deployment-server
Port 3022''', customStyle=styles.code_container),
(dcc.Markdown(s('''
If you're having trouble opening this file, you can run
`$ open ~/.ssh/config` which will open the file using your default
editor. If the file doesn't exist, then you can open that hidden
folder with just `$ open ~/.ssh`
''')) if platform == 'Mac' else ''),
(dcc.Markdown(s('''
Please be careful not to save your SSH config as a .txt file as
it will not be recognized by Git when deploying your applications.
If you are using Notepad to create your SSH config, you can force the
removal of the .txt extension by naming the file "config", including
the quotes, in the Save As dialog box.
''')) if platform == 'Windows' else ''),
dcc.Markdown(s('''
***
If you have successfully added your SSH Key, advance to
[**Part 1. Initialize Dash Apps on Dash Deployment Server**](/dash-deployment-server/initialize).
'''))
]
# # # # # # #
# Initialize
# # # # # # #
Initialize = html.Div(children=[
html.H1('Part 1. Initialize Dash Apps on Dash Deployment Server'),
dcc.Markdown(s('''
> This is the *1st* deployment chapter of the [Dash Deployment Server Documentation](/dash-deployment-server).
> The [next chapter](/dash-deployment-server/deployment) covers deploying a Dash App on Dash Deployment Server.
Before creating or deploying a dash app locally, you need to initialize
an app on Dash Deployment Server. This can be achieved using the Dash
Deployment Server UI.
''')),
dcc.Markdown(s('''
***
1. Navigate to the Dash Deployment Server UI by selecting **Dash App**
from the **+ Create** located in the top right-hand corner.
''')),
html.Img(
alt='Dash Deployment Server UI',
src='/assets/images/dds/open-dds-ui.png',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
2. In the top right-hand corner select **Create App**. The
'Create Dash App' modal should appear. Here, name your dash app
(app names must start with a lower case letter and may
contain only lower case letters, numbers, and -) and then
hit **Create**. It is important to keep in mind that this name is going
to be part of the URL for your application.
''')),
html.Img(
alt='Initialize App',
src='/assets/images/dds/add-app.PNG',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
3. After you have created the app, it should appear in your list of
apps.
''')),
html.Img(
alt='List of Apps',
src='/assets/images/dds/list-of-apps.PNG',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
4. Now, select the dash app to access the app overview.
''')),
html.Img(
alt='Dash App Overview',
src='/assets/images/dds/app-overview.PNG',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
If you have successfully initialized an app, advance to
[**Part 2. Deploy Dash Apps on Dash Deployment Server**](/dash-deployment-server/deployment).
If you have encountered any issues see [**Troubleshooting**](/dash-deployment-server)
for help.
''')),
])
# # # # # # #
# Requirements
# # # # # # #
Requirements = html.Div(children=[
html.H1('Application Structure'),
dcc.Markdown(s(
'''
To deploy dash apps to the Dash Deployment Server, there
are a few files required for successful deployment. Below is a common
dash app folder structure and a brief description of each files function.
***
## Folder Reference
```
Dash_App/
|-- assets/
|-- app.css
|-- app.py
|-- .gitignore
|-- Procfile
|-- requirements.txt
|-- runtime.txt
```
***
## Files Reference
`app.py`
This is the entry point to your application, it contains your Dash app code.
This file must contain a line that defines the `server` variable:
```server = app.server```
***
`.gitignore`
Determines which files and folders are ignored in git, and therefore
ignored (i.e. not copied to the server) when you deploy your application.
An example of its contents would be:
```
venv
*.pyc
.DS_Store
.env
```
***
`Procfile`
Declares what commands are run by app's containers. This is commonly,
```web: gunicorn app:server --workers 4``` where app refers to the file
`app.py` and server refers to the variable named server inside that file.
gunicorn is the web server that will run your application, make sure to
add this in your requirements.txt file.
***
`requirements.txt`
Describes the app's python dependencies. For example,
```
dash==0.21.1
dash-auth==1.0.1
dash-renderer==0.11.3
dash-core-components==0.22.1
dash-html-components==0.9.0
```
***
`runtime.txt`
This file specifies python runtime. For example, its contents would be
`python-2.7.15` or `python-3.6.6`.
***
`assets`
An optional folder that contains CSS stylesheets, images, or
custom JavaScript files. [Learn more about assets](/external-resources).
'''))
])
# # # # # # #
# Deploy App
# # # # # # #
Deploy = html.Div(children=[
html.H1('Part 2. Deploy Dash Apps on Dash Deployment Server'),
dcc.Markdown(s(
'''
> This is the *2nd* deployment chapter of the [Dash Deployment Server Documentation](/dash-deployment-server).
> The [previous chapter](/dash-deployment-server/initialize) covered initializing a Dash App on Dash Deployment Server.
To deploy an app to your Dash Deployment Server, you can either choose
to deploy a cloned sample app, create a new app following the tutorial,
or an existing app that you created locally and are ready to deploy.
''')),
dcc.Markdown(s(
'''
***
#### Which OS Are You Using?
''')),
dcc.RadioItems(
id='platform-2',
options=[
{'label': i, 'value': i} for i in
['Windows', 'Mac', 'Linux']],
value='Windows',
labelStyle={'display': 'inline-block'}
),
html.Div(id='instructions-2'),
dcc.RadioItems(
id='deploy-method',
options=[
{'label': i, 'value': i} for i in
['HTTPS', 'SSH']],
value='HTTPS',
labelStyle={'display': 'inline-block'}
),
html.Div(id='remote-and-deploy-instructions'),
])
@app.callback(Output('instructions-2', 'children'),
[Input('platform-2', 'value')])
def display_instructions2(platform):
return [
dcc.Markdown(s(
'''
***
#### What Would You Like To Do?
If you haven't deployed an app you can get started by selecting
**Clone Sample App** to clone our sample app, which is already setup
for deployment. Alternatively, you can select **Create New App** to
run through creating and deploying an app from the beginning.
Otherwise, if you already have an existing app locally that you would
like to deploy, then select **Deploy Existing App**.
''')),
dcc.Tabs(id="tabs", children=[
dcc.Tab(label='Clone Sample App', children=[
html.Div([
dcc.Markdown(s(
'''
#### Clone the [Dash On Premise Sample App](https://github.com/plotly/dash-on-premise-sample-app) from GitHub.
''')),
(dcc.Markdown(s('''
First, install [Git for Windows](https://git-scm.com/download/win).
Then, in Git Bash:
''')) if platform == 'Windows' else
''),
dcc.SyntaxHighlighter(s(
'''$ git clone https://github.com/plotly/dash-on-premise-sample-app.git'''),
customStyle=styles.code_container),
dcc.Markdown(s(
'''
***
#### Modify `config.py`
Read through `config.py` and modify the values as necessary.
If Dash Deployment Server was set up with "path-based routing"
(the default), then you will just need to change the
`DASH_APP_NAME` to be equal to the name of the Dash App that you
set earlier.
''')),
dcc.Markdown(s(
'''
***
#### Configure your Dash Deployment Server to be your Git remote
In the root of your folder, run the following command to create a
remote host to your new app on Dash Deployment Server.
##### Which Deployment Method Are You Using?
For most use cases, Plotly recommends using HTTPS as it
doesn't require any extra configuration. However, if
you are using self-signed certificates or if your server
has SAML enabled, then you should deploy with SSH.
[Configure SSH Authentication](/dash-deployment-server/ssh).
''')),
])
]),
dcc.Tab(label='Create New App', children=[
html.Div([
(dcc.Markdown(s('''
First, install [Git for Windows](https://git-scm.com/download/win).
Then, in Git Bash:
''')) if platform == 'Windows' else
''),
dcc.Markdown(s(
'''
#### Create a New Folder
''')),
dcc.SyntaxHighlighter(
'''$ mkdir dash_app_example
$ cd dash_app_example''', customStyle=styles.code_container),
dcc.Markdown(s(
'''
***
#### Initialize the Folder with `git` and a `virtualenv`
''')),
dcc.SyntaxHighlighter(
('''$ git init # initializes an empty git repo
$ virtualenv venv # creates a virtualenv called "venv"
$ source venv/bin/activate # uses the virtualenv''') if platform != 'Windows' else (
'''$ git init # initializes an empty git repo
$ virtualenv venv # creates a virtualenv called "venv"
$ source venv/Scripts/activate # uses the virtualenv'''), customStyle=styles.code_container),
dcc.Markdown(s(
'''
`virtualenv` creates a fresh Python instance. You will need
to reinstall your app's dependencies with this virtualenv:
''')),
dcc.SyntaxHighlighter(
'''$ pip install dash
$ pip install dash-renderer
$ pip install dash-core-components
$ pip install dash-html-components
$ pip install plotly''', customStyle=styles.code_container),
dcc.Markdown(s(
'''
You will also need a new dependency, `gunicorn`, for
deploying the app:
''')),
dcc.SyntaxHighlighter(
'''$ pip install gunicorn''', customStyle=styles.code_container),
dcc.Markdown(s(
'''
***
#### Create Relevant Files For Deployment
Create the following files in your project folder:
**`app.py`**
`app.py` This is the entry point to your application,
it contains your Dash app code. This file must contain a
line that defines the server variable: `server = app.server`
''')),
dcc.SyntaxHighlighter(
'''import os
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
app = dash.Dash(__name__, external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css'])
server = app.server
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
app.layout = html.Div([
html.H2('Hello World'),
dcc.Dropdown(
id='dropdown',
options=[{'label': i, 'value': i} for i in ['LA', 'NYC', 'MTL']],
value='LA'
),
html.Div(id='display-value')
])
@app.callback(Output('display-value', 'children'),
[Input('dropdown', 'value')])
def display_value(value):
return 'You have selected "{}"'.format(value)
if __name__ == '__main__':
app.run_server(debug=True)''',
customStyle=styles.code_container, language='python'),
dcc.Markdown(s('''
***
**`.gitignore`**
`.gitignore` Determines which files and folders are
ignored in git, and therefore ignored (i.e. not copied
to the server) when you deploy your application.
''')),
dcc.SyntaxHighlighter(
'''venv
*.pyc
.DS_Store
.env''', customStyle=styles.code_container),
dcc.Markdown(s(
'''
***
**`Procfile`**
Declares what commands are run by app's containers. This is
commonly, `web: gunicorn app:server --workers 4` where app
refers to the file `app.py` and server refers to the variable
named server inside that file. gunicorn is the web server
that will run your application, make sure to add this in
your requirements.txt file.
''')),
dcc.SyntaxHighlighter(
'''web: gunicorn app:server --workers 4''', customStyle=styles.code_container),
dcc.Markdown(s(
'''
***
**`requirements.txt`**
`requirements.txt` Describes the app's python dependencies.
You can fill this file in automatically with:
''')),
dcc.SyntaxHighlighter(
'''$ pip freeze > requirements.txt''', customStyle=styles.code_container),
dcc.Markdown(s(
'''
***
**`runtime.txt`**
`runtime.txt` This file specifies python runtime.
For example, its contents would be `python-2.7.15` or
`python-3.6.6`
''')),
dcc.Markdown(s(
'''
***
#### Configure your Dash Deployment Server to be your Git remote
In the root of your folder, run the following command to create a
remote host to your new app on Dash Deployment Server.
##### Which Deployment Method Are You Using?
For most use cases, Plotly recommends using HTTPS as it
doesn't require any extra configuration. However, if
you are using self-signed certificates or if your server
has SAML enabled, then you should deploy with SSH.
[Configure SSH Authentication](/dash-deployment-server/ssh).
''')),
])
]),
dcc.Tab(label='Deploy Existing App', children=[
html.Div([
(dcc.Markdown(s('''
First, install [Git for Windows](https://git-scm.com/download/win).
Then, in Git Bash:
''')) if platform == 'Windows' else
''),
dcc.Markdown(s(
'''
#### Initialize the Folder With Git
''')),
dcc.SyntaxHighlighter(
'''$ cd <your-folder-name>
$ git init # initializes an empty git repo''', customStyle=styles.code_container),
dcc.Markdown(s(
'''
***
#### Configure your Dash Deployment Server to be your Git remote
In the root of your folder, run the following command to create a
remote host to your new app on Dash Deployment Server.
##### Which Deployment Method Are You Using?
For most use cases, Plotly recommends using HTTPS as it
doesn't require any extra configuration. However, if
you are using self-signed certificates or if your server
has SAML enabled, then you should deploy with SSH.
[Configure SSH Authentication](/dash-deployment-server/ssh).
''')),
])
]),
]),
]
@app.callback(Output('remote-and-deploy-instructions', 'children'),
              [Input('deploy-method', 'value')])
def display_remote_and_deploy_instructions(method):
    """Render `git remote add` and `git push` instructions for *method*.

    *method* is 'HTTPS' or 'SSH', supplied by the 'deploy-method' radio items.

    NOTE(review): this callback was originally also named
    ``display_instructions2``, colliding with the callback defined earlier in
    this module. Dash registers callbacks at decoration time so both still
    fired, but the second ``def`` shadowed the first at module level; renamed
    to remove the collision. Also fixes user-facing typos ("This commands",
    "depoly", "unsuccesful", "organizations").
    """
    return [
        # Empty markdown block kept from the original (renders nothing).
        dcc.Markdown(s('''
''')),
        # The remote URL differs between the SSH and HTTPS deployment flows.
        dcc.SyntaxHighlighter(s(
            '''$ git remote add plotly dokku@your-dash-deployment-server:your-dash-app-name''' if method == 'SSH' else
            '''$ git remote add plotly https://your-dash-deployment-server/GIT/your-dash-app-name'''),
            customStyle=styles.code_container,
            language='python'
        ),
        dcc.Markdown(s('''
Replace `your-dash-app-name` with the name of your Dash App that you
supplied in the Dash Deployment Server and `your-dash-deployment-server`
with the domain of the Dash Deployment Server.

For example, if your Dash App name was `my-first-dash-app`
and the domain of your organization's Dash Deployment Server was
`dash.plotly.acme-corporation.com`, then this command would be
`git remote add plotly dokku@dash.plotly.acme-corporation.com:my-first-dash-app`.
''' if method == 'SSH' else '''
Replace `your-dash-app-name` with the name of your Dash App that
you supplied in the Dash Deployment Server and `your-dash-deployment-server`
with the domain of the Dash Deployment Server.

For example, if your Dash App name was `my-first-dash-app`
and the domain of your organization's Dash Deployment Server was
`dash.plotly.acme-corporation.com`, then this command would be
`git remote add plotly https://dash.plotly.acme-corporation.com/GIT/my-first-dash-app`.
''')),
        dcc.Markdown(s('''
***

#### Deploying Changes

Now, you are ready to upload this folder to your Dash Deployment Server.
Files are transferred to the server using `git`:
''')),
        dcc.SyntaxHighlighter(s(
            '''$ git status # view the changed files
$ git diff # view the actual changed lines of code
$ git add . # add all the changes
$ git commit -m 'a description of the changes'
$ git push plotly master'''), customStyle=styles.code_container, language='python'),
        dcc.Markdown(s('''
These commands will push the code in this folder to the
Dash Deployment Server and while doing so, will install the
necessary python packages and run your application
automatically.

Whenever you make changes to your Dash code,
you will need to run those `git` commands above.

If you install any other Python packages, add those packages to
the `requirements.txt` file. Packages that are included in this
file will be installed automatically by the Dash Deployment Server.
''')),
        dcc.Markdown(s('''
***

#### Deploy Failed?

If your deploy has been unsuccessful, you can check that you have the
[necessary files required for deployment](/dash-deployment-server/application-structure),
or if you have a specific error, take a look at
[Common Errors](/dash-deployment-server/troubleshooting).
'''))
    ]
# # # # # # #
# Dash App Authentication
# # # # # # #
Authentication = html.Div(children=[
html.H1('Dash App Authentication'),
dcc.Markdown(s('''
The `dash-auth` package provides login through your Plotly
Enterprise accounts. For example, the discussion below describes how
`dash-auth` works in the
[On-Premise Sample App](https://github.com/plotly/dash-on-premise-sample-app/).
***
#### Modify the `config.py` File
This file contains several settings that are used in your app.
It's kept in a separate file so that it's easy for you to
transfer from app to app.
*Read through this file and modify the variables as appropriate.*
''')),
dcc.Markdown(s('''
***
#### Redeploy Your App
Your app should now have a Dash Deployment Server login screen.
You can manage the permissions of the app in your list of files
at `https://<your-plotly-domain>/organize`.
'''))
])
# # # # # # #
# Configuring System Dependencies
# # # # # # #
ConfigSys = html.Div(children=[
html.H1('Configuring System Dependencies'),
dcc.Markdown(s('''
In some cases you may need to install and configure system
dependencies. Examples include installing and configuring
database drivers or the Java JRE environment.
Dash Deployment Server supports these actions through an
`apt-packages` file and a `predeploy` script.
We have a collection of sample apps that install common system
level dependencies. These applications are _ready to deploy_:
- [Oracle cx_Oracle Database](https://github.com/plotly/dash-on-premise-sample-app/pull/2#issue-144246327)
- [Pyodbc Database Driver](https://github.com/plotly/dash-on-premise-sample-app/pull/3#issue-144272510)
If you need help configuring complex system level dependencies, please
reach out to our [support](/dash-deployment-server/support) team.
***
#### Install Apt Packages
In the root of your application folder create a file called
`apt-packages`. Here you may specify apt packages to be
installed with one package per line. For example to install
the ODBC driver we could include an `apt-packages` file that
looks like:
''')),
dcc.SyntaxHighlighter(s('''unixodbc
unixodbc-dev
'''), customStyle=styles.code_container, language="text"),
dcc.Markdown(s('''
***
#### Configure System Dependencies
You may include a pre-deploy script that executes in
your Dash App's environment. For the case of adding an
ODBC driver we need to add ODBC initialization files into
the correct systems paths. To do so we include the ODBC
initialization files in the application folder and then
copy them into system paths in the pre-deploy script.
##### Add A Pre-Deploy Script
Let's generate a file to do this. Note that the file can
have any name as we must specify the name in an application
configuration file `app.json`.
For the purposes of this example we assume we have
named it `setup_pyodbc` and installed it in the root of our
application folder.
''')),
dcc.SyntaxHighlighter(s('''cp /app/odbc.ini /etc/odbc.ini
cp /app/odbcinst.ini /etc/odbcinst.ini
'''), customStyle=styles.code_container, language="text"),
dcc.Markdown(s('''
##### Run Pre-Deploy Script Using `app.json`
Next we must instruct Dash Deployment Server to run our `setup_pyodbc`
file by adding a JSON configuration file named `app.json`
into the root of our application folder.
''')),
dcc.SyntaxHighlighter(s('''{
\t"scripts": {
\t\t"dokku": {
\t\t\t"predeploy": "/app/setup_pyodbc"
\t\t}
\t}
}
'''), customStyle=styles.code_container, language='json'),
dcc.Markdown(s('''
***
Now when the application is deployed it will install the apt
packages specified in `apt-packages` and run the setup file
specified in `app.json`. In this case it allows us to install
and then configure the ODBC driver.
To see this example code in action
[check out our ODBC example](https://github.com/plotly/dash-on-premise-sample-app/pull/3#issue-144272510)
On-Premise application.
'''))
])
# # # # # # #
# Redis
# # # # # # #
Redis = html.Div(children=[
html.H1('Create and Link Redis Database'),
dcc.Markdown(s('''
Redis is a powerful in memory database that is well suited for many Dash
applications. In particular, you can use Redis to:
- Save application data
- Enable queued and background processes with Celery.
[Redis and Celery Demo App](https://github.com/plotly/dash-redis-demo)
- Cache data from your callbacks across processes.
[Caching in Dash with Redis](/performance)
While Redis is an _in memory database_, Dash Deployment Server regularly
backs up its data to the underlying server. So, it's safe for production
usage. Dash Deployment Server can dynamically spin up and manage secure
instances of Redis for your application.
''')),
dcc.Markdown(s('''
***
#### Enable Redis Databases
In Plotly Enterprise 2.5.0, Redis Databases are always enabled.
For previous versions, navigate to Plotly On-Premise Server Settings
(`https://<your.plotly.domain>:8800/settings`), then under **Special Options
& Customizations** select **Enable Dash Customizations** and **Enable Redis
Databases** for Dash Apps.
''')),
html.Img(
alt='Enable Redis Databases',
src='/assets/images/dds/enable-redis.PNG',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
***
#### Create and Link (via UI)
You can create one redis instance that is used by multiple apps or you
can create a unique Redis Database for each individual app.
To start, we recommending creating a unique Redis Database for each
Dash App. It will be easier for you to ensure that one application doesn't
override the data from a separate application.
In Plotly Enterprise 2.5.0 it is possible to create and link a Redis
Database to your Dash App using the Dash Deployment Server UI.
Here, you have two options:
**1.** Create a database before initializing an app.
**2.** Create and link a database after an app has been initialized.
##### Create a Database Before Initializing an App
If you haven't initialized an app yet, select **Databases** situated in the
top navigation menu. Next, select **Create Database**, then in the
'Create Database' modal, add the name of your database. We recommend using
a convention like using the name of your application and adding `-redis`
to the end, e.g. `my-dash-app-redis`.
Once your Redis Database has been created, you'll notice that it is
added to your list of databases.
''')),
html.Img(
alt='Create Database',
src='/assets/images/dds/create-redis-db.PNG',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
Next, navigate to **Apps** and create a new app (for more info see
['Part 1. Initialize Dash Apps on Dash Deployment Server'](/dash-deployment-server/initialize)),
in the 'Create App' modal you have the option of linking a database.
Here, use the dropdown to select the database that you created previously
(see image below).
''')),
html.Img(
alt='Link Database',
src='/assets/images/dds/link-redis-db.PNG',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
##### Create and Link a Database After an App Has Been Initialized.
In the Dash Deployment Server UI, select the app then navigate
to the settings page. In Databases, use the dropdown to select
**create and link database** then **Add**.
''')),
html.Img(
alt='Create and Link Database in App',
src='/assets/images/dds/create-and-link-redis-db.PNG',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
***
#### Create and Link (via Command Line)
While it is now possible to create and link Redis Databases via the
Dash Deployment Server UI, it is still possible to create and link a Redis
database via the command line (using ssh):
''')),
dcc.SyntaxHighlighter(s(
"""$ ssh dokku@YOUR_DASH_SERVER redis:create SERVICE-NAME
$ ssh dokku@YOUR_DASH_SERVER redis:link SERVICE-NAME APP-NAME"""),
customStyle=styles.code_container,
language='python'
),
dcc.Markdown(s('''
In the commands above, replace:
* `YOUR_DASH_SERVER` with the name of your Dash server
(same as when you run `git remote add`)
* `SERVICE-NAME` with the name you want for your Redis service
* `APP-NAME` with the name of your app (as specified in the
Dash App Manager).
''')),
dcc.Markdown(s('''
***
#### Referencing Redis in Your Code
You can reference your Redis Database with the `os.environ` module:
''')),
dcc.SyntaxHighlighter(s(
"""redis_instance = redis.StrictRedis.from_url(os.environ["REDIS_URL"])"""),
customStyle=styles.code_container,
language='python'
),
dcc.Markdown(s('''
***
#### Running Redis on Your Local Machine
To get started, see the [Redis documentation](https://redis.io/documentation)
to download Redis and set up a local instance.
By referencing Redis in our code, we'll need to add the variable to our
local environment as well. One easy way to do this is to define the
variable on-the-fly when you run `python app.py`.
''')),
dcc.SyntaxHighlighter(s("$ REDIS_URL=redis://<your-redis-instance-ip>:6379 python app.py"),
customStyle=styles.code_container,
language='python'),
dcc.Markdown(s('''
##### Windows Users
Installing Redis from source on windows can be tricky. If you have the
"Windows Subsystem for Linux", we recommend using that and installing
the Redis in that linux environment. Otherwise, we recommend installing
these [64-bit binary releases of Redis](https://github.com/ServiceStack/redis-windows#option-3-running-microsofts-native-port-of-redis).
'''))
])
# # # # # # #
# Linking a Celery Process
# # # # # # #
Celery = html.Div(children=[
html.H1('Linking a Celery Process'),
dcc.Markdown(s(
'''
Celery is a reliable asynchronous task queue/job queue that supports both
real-time processing and task scheduling in production systems. This makes
Celery well suited for Dash Applications. For example:
- Enable queued and background processes with Celery.
[Redis and Celery Demo App](https://github.com/plotly/dash-redis-demo)
- Periodically update an App's data.
[Redis and Celery Periodic Updates Demo App](https://github.com/plotly/dash-redis-celery-periodic-updates)
For more information about Celery, visit
[Celery's documentation](http://docs.celeryproject.org/en/latest/).
''')),
])
# # # # # # #
# Env Vars
# # # # # # #
EnvVars = html.Div(children=[
html.H1('Setting Environment Variables'),
dcc.Markdown(s('''
In Plotly Enterprise 2.5.0, you can store secrets as environment variables
instead of in your application. It's good practice to keep application
secrets like database passwords outside of your code so that they aren't
mistakenly exposed or shared. Instead of storing these secrets in code,
you can store them as environment variables and your Dash Application code
can reference them dynamically.
''')),
dcc.Markdown(s('''
***
#### Add Environment Variables
To add environment variables via the Dash Deployment Server UI,
navigate to the application settings. Here, use the text boxes to
add the environmental variable name and value. For example, `"DATABASE_USER"`
and `"DATABASE_PASSWORD"`.
''')),
html.Img(
alt='Add Environment Variables',
src='/assets/images/dds/add-env-variable.PNG',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
***
#### Referencing Environment Variables in Your Code
You can reference these variables with the `os.environ` module:
''')),
dcc.SyntaxHighlighter(s(
"""database_password = os.environ['DATABASE_PASSWORD']"""),
customStyle=styles.code_container,
language='python'
),
dcc.Markdown(s('''
Alternatively, if the variable isn't in your environment and you want
to fallback to some other value, use:
''')),
dcc.SyntaxHighlighter(s(
"""database_password = os.environ.get('DATABASE_PASSWORD', '<PASSWORD>')"""),
customStyle=styles.code_container,
language='python'
),
dcc.Markdown(s('''
***
#### Defining Environment Variables In Your Local Environment
By referencing these environment variables in our code, we'll need to add
these variables to our local environment as well. One easy way to do
this is to define the variables on-the-fly when you run `python app.py`.
That is, instead of running `python app.py`, run:
```
$ DATABASE_USER=chris DATABASE_PASSWORD=<PASSWORD> python app.py
```
Alternatively, you can define them for your session by "exporting" them:
''')),
dcc.SyntaxHighlighter(s("""$ export DATABASE_USER=chris
$ export DATABASE_PASSWORD=<PASSWORD>
$ python app.py"""),
customStyle=styles.code_container,
language='python'
),
dcc.Markdown(s('''
***
#### Delete Environment Variables
To remove an environment variable via the Dash Deployment Server UI,
navigate to the application settings. Here, simply click the red
cross situated to the right-hand side of the environment variable.
''')),
html.Img(
alt='Delete Environment Variables',
src='/assets/images/dds/remove-env-variable.PNG',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
])
# # # # # # #
# Local Directories
# # # # # # #
# Page layout: "Mapping Local Directories" chapter of the Dash Deployment
# Server docs (static markdown copy, screenshots, and code samples).
LocalDir = html.Div(children=[
    html.H1('Mapping Local Directories Examples and Reference'),
    dcc.Markdown(s('''
    In Dash Deployment Server, Dash Apps are run in isolated containers.
    Dash Deployment Server builds the entire system for each individual app
    from scratch, including installing a fresh instance of Python, installing
    dependencies, and more. This isolation and containerization is great: it
    allows for one app's dependencies to not impact the next app's and,
    from a security perspective, ensures that applications can't modify or
    access the underlying server. One part of this isolation is that each app
    has its own "ephemeral" filesystem. This means that:
    - By default, files that are saved in the app's environment aren't
      persisted across deploys.
    - By default, files (even networked file systems) that are on the actual
      physical server aren't actually accessible to the application.
    Starting in Plotly Enterprise 2.5.0, you can map filesystems from the
    underlying server into the application. This allows you to save files
    persistently as well as read files from the underlying server, including
    networked file systems.
    Since this feature has security implications, only users with
    admin/superuser privileges are allowed to map directories onto apps.
    Before you get started, ask your current administrator to grant you
    admin/superuser privileges as shown below.
    ***
    #### Add Admin/Superuser Privileges
    As administrator, navigate to the admin panel
    `https://<your.plotly.domain>/admin/` and select **Users**. From the list
    of users, select the user you wish to edit. Next, check both the
    **Staff status** and **Superuser status** box to give the user
    admin/superuser privileges, which will allow the user to map
    directories onto apps.
    ''')),
    html.Img(
        alt='Add Admin/Superuser Status',
        src='/assets/images/dds/add-superuser.PNG',
        style={
            'width': '100%', 'border': 'thin lightgrey solid',
            'border-radius': '4px'
        }
    ),
    dcc.Markdown(s('''
    ***
    #### Add Directory Mapping
    To add a directory mapping via the Dash Deployment Server UI,
    navigate to the application **Settings** and scroll down to
    **Directory Mappings**. Here, use the text boxes to
    add the **Host Path** and **App Path**. For example, `/srv/app-data`
    and `/data`.
    ''')),
    html.Img(
        alt='Add Directory Mapping',
        src='/assets/images/dds/add-dir-map.PNG',
        style={
            'width': '100%', 'border': 'thin lightgrey solid',
            'border-radius': '4px'
        }
    ),
    dcc.Markdown(s('''
    ***
    #### Referencing the File System in Your Code
    If you have mapped the directory from `/srv/app-data` to `/data`, then you
    can read files from this folder in your application with the following code:
    ''')),
    dcc.SyntaxHighlighter(s("""import os
file_pathname = os.path.join('data', 'some-file.csv')"""),
    customStyle=styles.code_container,
    language='python'
    ),
    dcc.Markdown(s('''
    In some cases, the filesystems that you reference in your deployed
    application may be different from those that you reference locally.
    In your application code, you can check which environment you are in
    with the following code:
    ''')),
    dcc.SyntaxHighlighter(
        """if 'DASH_APP' in os.environ:
    # this is a deployed app
    filepath = os.path.join('data', 'my-dataset.csv')
else:
    # local file path
    filepath = os.path.join('Users', 'chris', 'data', 'my-dataset.csv')""",
        customStyle=styles.code_container,
        language='python'
    ),
    dcc.Markdown(s('''
    ***
    #### Recommendations
    If you are mounting a filesystem, we have the following recommendations:
    - Try to isolate the data that you need into its own, app-specific folder
    - Do not mount the entire filesystem
    - Do not mount system directories, like those under `/usr`.
    - As per the
    ["Filesystem Hierarchy Standard (FHS)"](https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard),
    folders inside the `/srv` folder would be a good, conventional place
    to host app level data.
    - This feature also works with networked filesystems. Note that this
    requires some extra configuration in the underlying server by your
    server administrator. In particular, the network filesystem should be
    added to the `/etc/fstab` file on the underlying server. For more
    information, see this
    [RHEL7 and CentOS documentation on CIFS and NFS](https://www.certdepot.net/rhel7-mount-unmount-cifs-nfs-network-file-systems/)
    , the official [Ubuntu NFS documentation](https://help.ubuntu.com/lts/serverguide/network-file-system.html.en),
    the official [Ubuntu CIFS documentation](https://wiki.ubuntu.com/MountWindowsSharesPermanently)
    or [contact our support team](/dash-deployment-server/support).
    ***
    #### Remove Directory Mapping
    To remove directory mappings via the Dash Deployment Server UI,
    navigate to the application **Settings** and scroll down to
    **Directory Mappings**. Next, use the red cross situated to the
    right-hand side of the environment variable.
    ''')),
    html.Img(
        alt='Remove Directory Mapping',
        src='/assets/images/dds/remove-dir-map.PNG',
        style={
            'width': '100%', 'border': 'thin lightgrey solid',
            'border-radius': '4px'
        }
    ),
])
# # # # # # #
# Staging App
# # # # # # #
# Page layout: "Create a Staging Dash App" chapter. Fixes the git example:
# the subcommand is `git remote add <name> <url>`, not `git add remote`.
StagingApp = html.Div(children=[
    html.H1('Create a Staging Dash App'),
    dcc.Markdown(s(
    '''
    Once you have deployed your application, your end-users will expect that
    it is stable and ready for consumption. So, what do you do if you want to
    test out or share some changes on the server? We recommend creating
    separate applications: one for "production" consumption and another one
    for testing. You will share the URL of the "production" app to your end
    users and you will use your "testing" app to try out different changes
    before you send them to your production app. With Dash Deployment
    Server, creating a separate testing app is easy:
    ***
    ### Initialize a New Dash App
    [Initialize a new app](/dash-deployment-server/initialize) in the Dash
    Deployment Server UI. We recommend giving it the same name as your
    other app but appending `-stage` to it (e.g. `analytics-stage`).
    ***
    ### Configure a New Git Remote
    Add a new remote that points to this URL. In this example,
    we'll name the remote "stage":
    ''')),
    dcc.SyntaxHighlighter(s(
    '''$ git remote add stage https://your-dash-deployment-server/GIT/your-dash-app-name-stage'''),
    customStyle=styles.code_container, language='python'),
    dcc.Markdown(s(
    '''
    ***
    ### Deploy Changes to Your Staging App
    Now, you can deploy your changes to this app just like you would
    with your other app. Instead of `$ git push plotly master`, you'll deploy
    to your staging app with:
    ''')),
    dcc.SyntaxHighlighter(s(
    '''$ git push stage master'''),
    customStyle=styles.code_container, language='python'),
])
# # # # # # #
# Common Errors
# # # # # # #
# Page layout: "Common Errors" chapter. Fixes three text defects: the prose
# now quotes the same version as the log (`dash==0.29.1`, not `dash==29.1`),
# the unbalanced quote in the second summary ("in 'app'"), and the grammar
# of "if it is differs to".
Troubleshooting = html.Div(children=[
    html.H1('Common Errors'),
    dcc.Markdown(s(
    '''
    This section describes some of the common errors you may encounter when
    trying to deploy to the Dash Deployment Server, and provides information
    about how to resolve these errors. If you can't find the information
    you're looking for, or need help, [contact our support team](/dash-deployment-server/support).
    ***
    ''')),
    dcc.Markdown(s(
    '''
    #### Deploying with Self-Signed Certificates?
    ''')),
    dcc.SyntaxHighlighter(s(
    '''fatal: unable to access 'https://<your-dash-deployment-server>/GIT/your-dash-app-name/': SSL certificate problem: self signed certificate'''),
    customStyle=styles.code_container, language='python'),
    dcc.Markdown(s(
    '''
    We recommend deploying with HTTPS for most of our users.
    However, if your Dash Deployment Server is using a **self-signed
    certificate**, deploying with HTTPS
    [requires some extra, challenging configuration](https://stackoverflow.com/questions/11621768/).
    In these cases, it will be easier to set up deploying with SSH.
    ***
    #### Deployment Failing?
    ''')),
    html.Details([
        html.Summary("Could not find a version that satisfies the requirement"),
        dcc.SyntaxHighlighter(
        '''...
remote: -----> Cleaning up...
remote: -----> Building my-dash-app from herokuish...
remote: -----> Injecting apt repositories and packages ...
remote: -----> Adding BUILD_ENV to build environment...
remote: -----> Python app detected
remote:  !     The latest version of Python 2 is python-2.7.15 (you are using python-2.7.13, which is unsupported).
remote:  !     We recommend upgrading by specifying the latest version (python-2.7.15).
remote:        Learn More: https://devcenter.heroku.com/articles/python-runtimes
remote: -----> Installing python-2.7.13
remote: -----> Installing pip
remote: -----> Installing requirements with pip
remote:        Collecting dash==0.29.1 (from -r /tmp/build/requirements.txt (line 1))
remote:        Could not find a version that satisfies the requirement dash==0.29.1 (from -r /tmp/build/requirements.txt (line 1)) (from versions: 0.17.4, 0.17.5, 0.17.7, 0.17.8rc1, 0.17.8rc2, 0.17.8rc3, 0.18.0, 0.18.1, 0.18.2, 0.18.3rc1, 0.18.3, 0.19.0, 0.20.0, 0.21.0, 0.21.1, 0.22.0rc1, 0.22.0rc2, 0.22.0, 0.23.1, 0.24.0, 0.24.1rc1, 0.24.1, 0.24.2, 0.25.0)
remote:        No matching distribution found for dash==0.29.1 (from -r /tmp/build/requirements.txt (line 1))''',
        customStyle=styles.code_container, language='python'),
        dcc.Markdown(s(
        '''
        If you're seeing the error above, it is likely that there is an error in
        your `requirements.txt` file. To resolve, check the versioning in your
        `requirements.txt` file. For example, the above failed because
        `dash==0.29.1` isn't a version of dash. If you're working in a virtualenv then
        you can check your versioning with the command:
        ''')),
        dcc.SyntaxHighlighter('$ pip list', customStyle=styles.code_container, language='python'),
        dcc.Markdown(s(
        '''
        if it differs from your `requirements.txt`, you can update it with the command:
        ''')),
        dcc.SyntaxHighlighter('$ pip freeze > requirements.txt', customStyle=styles.code_container, language='python'),
        dcc.Markdown(s(
        '''
        For more information see [Application Structure](/dash-deployment-server/application-structure).
        '''))
    ]),
    html.Details([
        html.Summary("Failed to find application object 'server' in 'app'"),
        dcc.SyntaxHighlighter(
        '''...
remote: Failed to find application object 'server' in 'app'
remote: [2018-08-16 16:00:49 +0000] [181] [INFO] Worker exiting (pid: 181)
remote: [2018-08-16 16:00:49 +0000] [12] [INFO] Shutting down: Master
remote: [2018-08-16 16:00:49 +0000] [12] [INFO] Reason: App failed to load.
remote: [2018-08-16 16:00:51 +0000] [12] [INFO] Starting gunicorn 19.9.0
remote: [2018-08-16 16:00:51 +0000] [12] [INFO] Listening at: http://0.0.0.0:5000 (12)
remote: [2018-08-16 16:00:51 +0000] [12] [INFO] Using worker: sync
remote: [2018-08-16 16:00:51 +0000] [179] [INFO] Booting worker with pid: 179
remote: [2018-08-16 16:00:51 +0000] [180] [INFO] Booting worker with pid: 180''',
        customStyle=styles.code_container, language='python'),
        dcc.Markdown(s(
        '''
        Deployment fails with the above message when you have failed to declare
        `server` in your `app.py` file. Check your `app.py` file and confirm that
        you have `server = app.server`.
        For more information see
        [Application Structure](/dash-deployment-server/application-structure).
        '''))
    ]),
    html.Details([
        html.Summary("Got permission denied while trying to connect to the Docker daemon socket"),
        dcc.SyntaxHighlighter(s(
        '''$ Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock: Get http://%2Fvar%2Frun%2Fdocker.sock/v1.38/containers/json?all=1&filters=%7B%22label%22%3A%7B%22dokku%22%3Atrue%7D%2C%22status%22%3A%7B%22exited%22%3Atrue%7D%7D: dial unix /var/run/docker.sock: connect: permission denied'''),
        customStyle=styles.code_container, language='python'),
        dcc.Markdown(s(
        '''
        If you're receiving the above user permission error, please
        [contact support](/dash-deployment-server/support)
        '''))
    ]),
])
# # # # # # #
# Analytics
# # # # # # #
Analytics = html.Div(children=[
html.H1('Dash App Analytics'),
dcc.Markdown(s('''
#### Dash App Analytics
After you have successfully deployed a Dash App to the Dash Deployment
Server, you can monitor app performance via the app analytics and logs.
Here, navigate to the Dash Deployment Server UI and select the app to
display analytics.
''')),
html.Img(
alt='App Analytics',
src='/assets/images/dds/analytics.png',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
])
# # # # # # #
# Logs
# # # # # # #
Logs = html.Div(children=[
html.H1('Dash App Logs'),
dcc.Markdown(s('''
***
#### Dash App Logs (via UI)
If you have successfully deployed a Dash App to the Dash Deployment
Server, you can view the app's logs via the Dash Deployment Server UI.
From your list of apps, open the app and then select **Logs**.
''')),
html.Img(
alt='App Logs',
src='/assets/images/dds/logs.png',
style={
'width': '100%', 'border': 'thin lightgrey solid',
'border-radius': '4px'
}
),
dcc.Markdown(s('''
***
#### Dash App Logs (via Command Line)
Alternatively, the above can be accomplished via the command line.
To view the logs for a specific Dash App run the following command
in your terminal:
''')),
dcc.SyntaxHighlighter(s(
'''$ ssh dokku@<your-dash-domain> logs <your-app-name> --num -1'''),
customStyle=styles.code_container, language='python'),
dcc.Markdown(s('''
This will work for any application that you own. This command
authenticates with the server with ssh.
[Configure SSH Authentication](/dash-deployment-server/ssh).
**Options**
- `--num`, `-n`: The number of lines to display. By default, 100
lines are displayed.
Set to -1 to display _all_ of the logs. Note that we only store logs
from the latest app deploy.
- `--tail`, `-t`: Continuously stream the logs.
- `--quiet`, `-q`: Display the raw logs without colors, times, and names.
''')),
])
# # # # # # #
# Support
# # # # # # #
Support = html.Div(children=[
html.H1('Plotly Enterprise Support'),
dcc.Markdown(s('''
***
#### Need to Contact Support?
If you encounter any issues deploying your app you can email
`<EMAIL>`. It is helpful to include any error
messages you encounter as well as available logs. See [App Logs](/dash-deployment-server/logs) on how
to obtain Dash App logs. Additionally, see below for the Plotly Enterprise support
bundle.
''')),
dcc.Markdown(s('''
***
#### Enterprise Support Bundle
If you're requested to send the full support bundle you can
download this from your Plotly Enterprise Server Manager
(e.g. `https://<your.plotly.domain>:8800`). Please note you
will need admin permissions to access the Server Manager.
Navigate to the Server Manager and then select the Support tab.
There you will see the option to download the support bundle.
'''))
])
| StarcoderdataPython |
3368370 | <filename>day_14_a.py
def insert_masked_data(memory: dict, bitmask: list, t: int, b: list) -> None:
    """Apply *bitmask* to the data bits *b* and store the result at address *t*.

    Mask semantics (2020 day 14, part one): an ``X`` in the mask keeps the
    corresponding data bit, while ``0``/``1`` overwrite it.

    :param memory: Maps address -> list of 36 bit characters.
    :param bitmask: 36-character mask made of ``X``/``0``/``1``.
    :param t: Target memory address.
    :param b: 36 data bit characters (``"0"``/``"1"``), most significant first.
    """
    # The original implementation first fetched any value previously stored
    # at this address, but every position is overwritten below, so the old
    # value is irrelevant: simply replace it.  With a 36-char mask, zipping
    # mask and data caps the result at 36 bits exactly as before.
    memory[t] = [d if m == "X" else m for m, d in zip(bitmask, b)]
def read_docking_data(s: str) -> int:
    """Run the docking initialization program and sum all memory values.

    The input contains ``mask = <36-char mask>`` lines, which set the active
    bitmask, and ``mem[<addr>] = <value>`` lines, which write the masked
    value to that address.

    :param s: Raw puzzle input.
    :return: Sum of all values left in memory after the program runs.
    """
    bitmask = None
    memory = {}
    for line in (raw.strip() for raw in s.strip("\n").splitlines()):
        lhs, rhs = line.split(" = ")
        if lhs == "mask":
            bitmask = list(rhs)
        else:
            # "mem[<addr>]" -> <addr>; slicing the string directly avoids the
            # list()/join round-trip of the original implementation.  (The
            # original also rebound its own parameter `s` here.)
            address = int(lhs[4:-1])
            bits = list(bin(int(rhs))[2:].zfill(36))
            insert_masked_data(memory, bitmask, address, bits)
    # Each stored value is a list of 36 bit characters.
    return sum(int("".join(bits), 2) for bits in memory.values())
def run_tests():
    """Check the solver against the worked example from the puzzle text."""
    sample = (
        "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\n"
        "mem[8] = 11\n"
        "mem[7] = 101\n"
        "mem[8] = 0"
    )
    # The puzzle states this program leaves 165 in memory overall.
    assert read_docking_data(sample) == 165
def run() -> int:
    """Solve the puzzle for the real input file and return the answer."""
    with open("inputs/input_14.txt") as puzzle_input:
        return read_docking_data(puzzle_input.read())
# Validate against the sample vector first, then print the real answer.
if __name__ == "__main__":
    run_tests()
    print(run())
| StarcoderdataPython |
1989376 | import json
import urllib2
import requests
# Endpoint of the service under test.
# NOTE(review): hard-coded LAN address — presumably a local test rig; confirm.
url = 'http://192.168.3.11:8080/post'
def main():
    # NOTE: this is Python 2 code (urllib2 module, print statements).
    # Load the list of request payloads from a JSON fixture file.
    f = open("test_vector.json", "r")
    test_vec = json.load(f)
    f.close()
    # POST each payload to the endpoint and echo request/response pairs.
    for d in test_vec:
        print "\nRequest:\n %s"%d
        data = json.dumps(d)
        clen = len(data)
        req = urllib2.Request(url, data, {'Content-Type': 'application/json', 'Content-Length': clen})
        f = urllib2.urlopen(req)
        response = f.read()
        print "\nResponse:\n %s"%response
        f.close()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
245144 | import typing
def py_sorted(numbers: typing.List[int]) -> typing.List[int]:
    """Sort integers using Python's built-in ``sorted``.

    Args:
        numbers (typing.List[int]): A list of integers.

    Returns:
        typing.List[int]: A new list with the integers in ascending order;
        the input list is left untouched.
    """
    # Delegate entirely to the built-in Timsort implementation.
    ordered = sorted(numbers)
    return ordered
def selection_sort(numbers: typing.List[int]) -> typing.List[int]:
    """Sort integers in place with the Selection Sort algorithm.

    Args:
        numbers (typing.List[int]): A list of integers.

    Returns:
        typing.List[int]: The same list, ordered from smallest value to
        largest value.
    """
    length = len(numbers)
    for target in range(length):
        # Find the index of the smallest remaining element. ``min`` returns
        # the first minimal index, matching a strict "<" comparison scan.
        smallest = min(range(target, length), key=numbers.__getitem__)
        # Swap it into its final position.
        numbers[target], numbers[smallest] = numbers[smallest], numbers[target]
    return numbers
def insertion_sort(numbers: typing.List[int]) -> typing.List[int]:
    """Sort integers in place with the Insertion Sort algorithm.

    Args:
        numbers (typing.List[int]): A list of integers.

    Returns:
        typing.List[int]: The same list, ordered from smallest value to
        largest value.
    """
    # Element 0 is trivially a sorted prefix, so start inserting from 1.
    for position in range(1, len(numbers)):
        current = numbers[position]
        scan = position - 1
        # Shift larger elements one slot to the right until the insertion
        # point for ``current`` is found (stable: equal elements keep order).
        while scan >= 0 and numbers[scan] > current:
            numbers[scan + 1] = numbers[scan]
            scan -= 1
        numbers[scan + 1] = current
    return numbers
# Dictionary of valid algorithms. Should be more secure than using
# eval(): callers dispatch by string key without evaluating arbitrary code.
algorithms_dict = {
    "insertion_sort": insertion_sort,
    "py_sorted": py_sorted,
    "selection_sort": selection_sort,
}
| StarcoderdataPython |
1943790 | <reponame>NursultanBeken/leetcode_practice
"""
Sliding window technique
"""
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """
        Length of the longest substring of ``s`` without repeating
        characters, via a sliding window: ``left..i`` always holds a
        duplicate-free window whose characters are mirrored in ``char_set``.
        Runs in O(n) time and O(min(n, alphabet)) space.
        :type s: str
        :rtype: int
        """
        char_set = set()
        result = 0
        left = 0
        for i in range(len(s)):
            # remove duplicate char while it is in the set
            # (shrinks the window from the left until s[i] can be admitted)
            while s[i] in char_set:
                char_set.remove(s[left])
                left = left+1
            char_set.add(s[i])
            # window [left, i] is duplicate-free; record its length
            result = max(result, i - left +1)
        return result | StarcoderdataPython |
6574634 | """Shared functions for fixes."""
import logging
import os
from functools import lru_cache
import dask.array as da
import iris
import pandas as pd
from cf_units import Unit
from scipy.interpolate import interp1d
from esmvalcore.iris_helpers import var_name_constraint
logger = logging.getLogger(__name__)
def add_aux_coords_from_cubes(cube, cubes, coord_dict):
    """Add auxiliary coordinate to cube from another cube in list of cubes.
    Parameters
    ----------
    cube : iris.cube.Cube
        Input cube to which the auxiliary coordinates will be added.
    cubes : iris.cube.CubeList
        List of cubes which contains the desired coordinates as single cubes.
        Matched cubes are removed from this list as a side effect.
    coord_dict : dict
        Dictionary of the form ``coord_name: coord_dims``, where ``coord_name``
        is the ``var_name`` (:obj:`str`) of the desired coordinates and
        ``coord_dims`` a :obj:`tuple` of :obj:`int` describing the coordinate
        dimensions in ``cube``.
    Raises
    ------
    ValueError
        ``cubes`` do not contain a desired coordinate or multiple copies of
        it.
    """
    for (coord_name, coord_dims) in coord_dict.items():
        # Exactly one cube in `cubes` must match the coordinate's var_name.
        coord_cube = cubes.extract(var_name_constraint(coord_name))
        if len(coord_cube) != 1:
            raise ValueError(
                f"Expected exactly one coordinate cube '{coord_name}' in "
                f"list of cubes {cubes}, got {len(coord_cube):d}")
        coord_cube = coord_cube[0]
        aux_coord = cube_to_aux_coord(coord_cube)
        cube.add_aux_coord(aux_coord, coord_dims)
        # Consume the coordinate cube so that `cubes` is left holding only
        # entries that have not been turned into coordinates yet.
        cubes.remove(coord_cube)
def add_plev_from_altitude(cube):
    """Add pressure level coordinate from altitude coordinate.
    Parameters
    ----------
    cube : iris.cube.Cube
        Input cube.
    Raises
    ------
    ValueError
        ``cube`` does not contain coordinate ``altitude``.
    """
    if cube.coords('altitude'):
        height_coord = cube.coord('altitude')
        # The conversion table is in metres, so normalize units first.
        if height_coord.units != 'm':
            height_coord.convert_units('m')
        altitude_to_pressure = get_altitude_to_pressure_func()
        pressure_points = altitude_to_pressure(height_coord.core_points())
        # Convert the bounds too, but only when the source coordinate has any.
        if height_coord.core_bounds() is None:
            pressure_bounds = None
        else:
            pressure_bounds = altitude_to_pressure(height_coord.core_bounds())
        # Attach the new coordinate on the same dimensions as altitude.
        pressure_coord = iris.coords.AuxCoord(pressure_points,
                                              bounds=pressure_bounds,
                                              standard_name='air_pressure',
                                              units='Pa')
        cube.add_aux_coord(pressure_coord, cube.coord_dims(height_coord))
        return
    raise ValueError(
        "Cannot add 'air_pressure' coordinate, 'altitude' coordinate not "
        "available")
def add_altitude_from_plev(cube):
    """Add altitude coordinate from pressure level coordinate.
    Parameters
    ----------
    cube : iris.cube.Cube
        Input cube.
    Raises
    ------
    ValueError
        ``cube`` does not contain coordinate ``air_pressure``.
    """
    if cube.coords('air_pressure'):
        plev_coord = cube.coord('air_pressure')
        # The conversion table is in pascals, so normalize units first.
        if plev_coord.units != 'Pa':
            plev_coord.convert_units('Pa')
        pressure_to_altitude = get_pressure_to_altitude_func()
        altitude_points = pressure_to_altitude(plev_coord.core_points())
        # Convert the bounds too, but only when the source coordinate has any.
        if plev_coord.core_bounds() is None:
            altitude_bounds = None
        else:
            altitude_bounds = pressure_to_altitude(plev_coord.core_bounds())
        # Attach the new coordinate on the same dimensions as air_pressure.
        altitude_coord = iris.coords.AuxCoord(altitude_points,
                                              bounds=altitude_bounds,
                                              standard_name='altitude',
                                              units='m')
        cube.add_aux_coord(altitude_coord, cube.coord_dims(plev_coord))
        return
    raise ValueError(
        "Cannot add 'altitude' coordinate, 'air_pressure' coordinate not "
        "available")
def add_scalar_depth_coord(cube, depth=0.0):
    """Add scalar coordinate 'depth' with value of `depth`m.
    The coordinate is only added if the cube does not already have a
    'depth' coordinate; the cube is modified in place and returned.
    """
    logger.debug("Adding depth coordinate (%sm)", depth)
    depth_coord = iris.coords.AuxCoord(depth,
                                       var_name='depth',
                                       standard_name='depth',
                                       long_name='depth',
                                       units=Unit('m'),
                                       attributes={'positive': 'down'})
    # EAFP: cube.coord() raises if no matching coordinate exists, in which
    # case the scalar coordinate (empty dims tuple) is attached.
    try:
        cube.coord('depth')
    except iris.exceptions.CoordinateNotFoundError:
        cube.add_aux_coord(depth_coord, ())
    return cube
def add_scalar_height_coord(cube, height=2.0):
    """Add scalar coordinate 'height' with value of `height`m.
    The coordinate is only added if the cube does not already have a
    'height' coordinate; the cube is modified in place and returned.
    """
    logger.debug("Adding height coordinate (%sm)", height)
    height_coord = iris.coords.AuxCoord(height,
                                        var_name='height',
                                        standard_name='height',
                                        long_name='height',
                                        units=Unit('m'),
                                        attributes={'positive': 'up'})
    # EAFP: cube.coord() raises if no matching coordinate exists, in which
    # case the scalar coordinate (empty dims tuple) is attached.
    try:
        cube.coord('height')
    except iris.exceptions.CoordinateNotFoundError:
        cube.add_aux_coord(height_coord, ())
    return cube
def add_scalar_typeland_coord(cube, value='default'):
    """Add scalar coordinate 'typeland' with value of `value`.
    Only added when no 'area_type' coordinate exists yet; the cube is
    modified in place and returned.
    """
    logger.debug("Adding typeland coordinate (%s)", value)
    typeland_coord = iris.coords.AuxCoord(value,
                                          var_name='type',
                                          standard_name='area_type',
                                          long_name='Land area type',
                                          units=Unit('no unit'))
    # EAFP check: only attach when the coordinate lookup fails.
    try:
        cube.coord('area_type')
    except iris.exceptions.CoordinateNotFoundError:
        cube.add_aux_coord(typeland_coord, ())
    return cube
def add_scalar_typesea_coord(cube, value='default'):
    """Add scalar coordinate 'typesea' with value of `value`.
    Only added when no 'area_type' coordinate exists yet; the cube is
    modified in place and returned.
    """
    logger.debug("Adding typesea coordinate (%s)", value)
    typesea_coord = iris.coords.AuxCoord(value,
                                         var_name='type',
                                         standard_name='area_type',
                                         long_name='Ocean area type',
                                         units=Unit('no unit'))
    # EAFP check: only attach when the coordinate lookup fails.
    try:
        cube.coord('area_type')
    except iris.exceptions.CoordinateNotFoundError:
        cube.add_aux_coord(typesea_coord, ())
    return cube
def add_scalar_typesi_coord(cube, value='sea_ice'):
    """Add scalar coordinate 'typesi' with value of `value`.
    Only added when no 'area_type' coordinate exists yet; the cube is
    modified in place and returned.
    """
    logger.debug("Adding typesi coordinate (%s)", value)
    typesi_coord = iris.coords.AuxCoord(value,
                                        var_name='type',
                                        standard_name='area_type',
                                        long_name='Sea Ice area type',
                                        units=Unit('no unit'))
    # EAFP check: only attach when the coordinate lookup fails.
    try:
        cube.coord('area_type')
    except iris.exceptions.CoordinateNotFoundError:
        cube.add_aux_coord(typesi_coord, ())
    return cube
def cube_to_aux_coord(cube):
    """Build an :class:`iris.coords.AuxCoord` mirroring *cube*.

    The coordinate takes its points from the cube's (possibly lazy) data
    and copies the cube's naming metadata and units.
    """
    naming = {
        'var_name': cube.var_name,
        'standard_name': cube.standard_name,
        'long_name': cube.long_name,
        'units': cube.units,
    }
    return iris.coords.AuxCoord(points=cube.core_data(), **naming)
@lru_cache(maxsize=None)
def get_altitude_to_pressure_func():
    """Get function converting altitude [m] to air pressure [Pa].
    Returns
    -------
    callable
        Function that converts altitude to air pressure.
    """
    # Cached: the CSV table is parsed and the interpolator built only once.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    source_file = os.path.join(base_dir, 'us_standard_atmosphere.csv')
    data_frame = pd.read_csv(source_file, comment='#')
    # Cubic interpolation over the tabulated atmosphere profile,
    # extrapolating for altitudes outside the table's range.
    func = interp1d(data_frame['Altitude [m]'],
                    data_frame['Pressure [Pa]'],
                    kind='cubic',
                    fill_value='extrapolate')
    return func
def get_bounds_cube(cubes, coord_var_name):
    """Find bound cube for a given variable in a :class:`iris.cube.CubeList`.
    Parameters
    ----------
    cubes : iris.cube.CubeList
        List of cubes containing the coordinate bounds for the desired
        coordinate as single cube.
    coord_var_name : str
        ``var_name`` of the desired coordinate (without suffix ``_bnds`` or
        ``_bounds``).
    Returns
    -------
    iris.cube.Cube
        Bounds cube.
    Raises
    ------
    ValueError
        ``cubes`` do not contain the desired coordinate bounds or multiple
        copies of them.
    """
    # Both suffix conventions occur in the wild; try '_bnds' first, then
    # '_bounds', and return the first unambiguous match.
    for bounds in ('bnds', 'bounds'):
        bound_var = f'{coord_var_name}_{bounds}'
        cube = cubes.extract(var_name_constraint(bound_var))
        if len(cube) == 1:
            return cube[0]
        if len(cube) > 1:
            raise ValueError(
                f"Multiple cubes with var_name '{bound_var}' found")
    raise ValueError(
        f"No bounds for coordinate variable '{coord_var_name}' available in "
        f"cubes\n{cubes}")
@lru_cache(maxsize=None)
def get_pressure_to_altitude_func():
    """Get function converting air pressure [Pa] to altitude [m].
    Returns
    -------
    callable
        Function that converts air pressure to altitude.
    """
    # Cached: the CSV table is parsed and the interpolator built only once.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    source_file = os.path.join(base_dir, 'us_standard_atmosphere.csv')
    data_frame = pd.read_csv(source_file, comment='#')
    # Inverse of get_altitude_to_pressure_func: cubic interpolation with
    # extrapolation outside the tabulated pressure range.
    func = interp1d(data_frame['Pressure [Pa]'],
                    data_frame['Altitude [m]'],
                    kind='cubic',
                    fill_value='extrapolate')
    return func
def fix_bounds(cube, cubes, coord_var_names):
    """Fix bounds for cube that could not be read correctly by :mod:`iris`.
    Parameters
    ----------
    cube : iris.cube.Cube
        Input cube whose coordinate bounds will be fixed.
    cubes : iris.cube.CubeList
        List of cubes which contains the desired coordinate bounds as single
        cubes.
    coord_var_names : list of str
        ``var_name``s of the desired coordinates (without suffix ``_bnds`` or
        ``_bounds``).
    Raises
    ------
    ValueError
        ``cubes`` do not contain a desired coordinate bounds or multiple copies
        of them.
    """
    for coord_var_name in coord_var_names:
        coord = cube.coord(var_name=coord_var_name)
        # Coordinates that already carry bounds are left untouched.
        if coord.bounds is not None:
            continue
        bounds_cube = get_bounds_cube(cubes, coord_var_name)
        # Assign the (possibly lazy) bounds data from the matching cube.
        cube.coord(var_name=coord_var_name).bounds = bounds_cube.core_data()
        logger.debug("Fixed bounds of coordinate '%s'", coord_var_name)
def round_coordinates(cubes, decimals=5, coord_names=None):
    """Round dimensional coordinates of every cube in place.

    Cubes can be a list of Iris cubes, or an Iris `CubeList`; they are
    modified *in place* and also returned for convenience.

    Parameters
    ----------
    cubes : iris.cube.CubeList or list of iris.cube.Cube
        Cubes which are modified in place.
    decimals : int
        Number of decimals to round to.
    coord_names : list of str or None
        If ``None`` (or a falsey value), all dimensional coordinates are
        rounded; otherwise only the coordinates named here (and present on
        the cube) are rounded.

    Returns
    -------
    iris.cube.CubeList or list of iris.cube.Cube
        The modified input ``cubes``.
    """
    for cube in cubes:
        if coord_names:
            selected = [cube.coord(name) for name in coord_names
                        if cube.coords(name)]
        else:
            selected = cube.coords(dim_coords=True)
        for coordinate in selected:
            # Round lazily via dask; bounds only when present.
            coordinate.points = da.round(
                da.asarray(coordinate.core_points()), decimals)
            if coordinate.bounds is not None:
                coordinate.bounds = da.round(
                    da.asarray(coordinate.core_bounds()), decimals)
    return cubes
def fix_ocean_depth_coord(cube):
    """Normalize the metadata of a cube's ocean vertical level coordinate.

    Parameters
    ----------
    cube : iris.cube.Cube
        Input cube; its Z-axis coordinate is renamed to ``depth`` with
        units ``m`` and the ``positive: down`` attribute, in place.
    """
    z_coord = cube.coord(axis='Z')
    # Apply the canonical metadata in the same order as individual
    # assignments would.
    for attr, value in (
            ('standard_name', 'depth'),
            ('var_name', 'lev'),
            ('units', 'm'),
            ('long_name', 'ocean depth coordinate'),
            ('attributes', {'positive': 'down'}),
    ):
        setattr(z_coord, attr, value)
| StarcoderdataPython |
4968566 | <filename>src/toil/utils/toilStatus.py
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for reporting on job status."""
import logging
import os
import sys
from functools import reduce
from typing import Any, Dict, List, Optional, Set
from toil.common import Config, Toil, parser_with_common_options
from toil.job import JobDescription, JobException, ServiceJobDescription
from toil.jobStores.abstractJobStore import (NoSuchFileException,
NoSuchJobStoreException)
from toil.statsAndLogging import StatsAndLogging, set_logging_from_options
logger = logging.getLogger(__name__)
class ToilStatus:
"""Tool for reporting on job status."""
    def __init__(self, jobStoreName: str, specifiedJobs: Optional[List[str]] = None):
        """Resume the job store and collect the jobs to report on.

        :param jobStoreName: Locator of the Toil job store to inspect.
        :param specifiedJobs: If given, report only on these jobs;
            otherwise the whole job graph is traversed from the root job.
        """
        self.jobStoreName = jobStoreName
        self.jobStore = Toil.resumeJobStore(jobStoreName)
        if specifiedJobs is None:
            # No explicit selection: walk the full graph from the root.
            rootJob = self.fetchRootJob()
            logger.info('Traversing the job graph gathering jobs. This may take a couple of minutes.')
            self.jobsToReport = self.traverseJobGraph(rootJob)
        else:
            self.jobsToReport = self.fetchUserJobs(specifiedJobs)
    def print_dot_chart(self) -> None:
        """Print a dot output graph representing the workflow.

        Emits a Graphviz ``digraph`` to stdout: one node per reported job
        and one labelled edge per successor relationship, where the edge
        label is the successor's stack level.
        """
        print("digraph toil_graph {")
        print("# This graph was created from job-store: %s" % self.jobStoreName)
        # Make job IDs to node names map
        # NOTE(review): node names are taken from job.jobName, which is not
        # obviously unique across jobs — two jobs sharing a name would
        # collapse into one dot node. Confirm jobName uniqueness here.
        jobsToNodeNames: Dict[str, str] = dict(
            map(lambda job: (str(job.jobStoreID), job.jobName), self.jobsToReport)
        )
        # Print the nodes
        for job in set(self.jobsToReport):
            print(
                '{} [label="{} {}"];'.format(
                    jobsToNodeNames[str(job.jobStoreID)], job.jobName, job.jobStoreID
                )
            )
        # Print the edges
        for job in set(self.jobsToReport):
            for level, jobList in enumerate(job.stack):
                for childJob in jobList:
                    # Check, b/c successor may be finished / not in the set of jobs
                    if childJob in jobsToNodeNames:
                        print(
                            '%s -> %s [label="%i"];'
                            % (
                                jobsToNodeNames[str(job.jobStoreID)],
                                jobsToNodeNames[childJob],
                                level,
                            )
                        )
        print("}")
    def printJobLog(self) -> None:
        """Print the log file of every reported job to the terminal.

        Jobs without a log file get a placeholder line instead, so every
        job produces exactly one ``LOG_FILE_OF_JOB`` entry.
        """
        for job in self.jobsToReport:
            if job.logJobStoreFileID is not None:
                with job.getLogFileHandle(self.jobStore) as fH:
                    # TODO: This looks intended to be machine-readable, but the format is
                    # unspecified and no escaping is done. But keep these tags around.
                    print(StatsAndLogging.formatLogStream(fH, job_name=f"LOG_FILE_OF_JOB:{job} LOG:"))
            else:
                print(f"LOG_FILE_OF_JOB: {job} LOG: Job has no log file")
def printJobChildren(self) -> None:
"""Takes a list of jobs, and prints their successors."""
for job in self.jobsToReport:
children = "CHILDREN_OF_JOB:%s " % job
for level, jobList in enumerate(job.stack):
for childJob in jobList:
children += "\t(CHILD_JOB:%s,PRECEDENCE:%i)" % (childJob, level)
print(children)
    def printAggregateJobStats(self, properties: List[str], childNumber: int) -> None:
        """Prints a job's ID, log file, remaining tries, and other properties.

        :param properties: Property names collected for the whole job set;
            each flag is printed as ``NAME:True``/``NAME:False`` depending
            on membership in this collection.
        :param childNumber: Successor count printed for each job.
            NOTE(review): the same value is printed for every job — confirm
            whether a per-job count was intended.
        """
        for job in self.jobsToReport:
            # Format a property flag as "NAME:True"/"NAME:False".
            def lf(x: str) -> str:
                return "{}:{}".format(x, str(x in properties))
            print("\t".join(("JOB:%s" % job,
                             "LOG_FILE:%s" % job.logJobStoreFileID,
                             "TRYS_REMAINING:%i" % job.remainingTryCount,
                             "CHILD_NUMBER:%s" % childNumber,
                             lf("READY_TO_RUN"), lf("IS_ZOMBIE"),
                             lf("HAS_SERVICES"), lf("IS_SERVICE"))))
def report_on_jobs(self) -> Dict[str, Any]:
"""
Gathers information about jobs such as its child jobs and status.
:returns jobStats: Pairings of a useful category and a list of jobs which fall into it.
:rtype dict:
"""
hasChildren = []
readyToRun = []
zombies = []
hasLogFile: List[JobDescription] = []
hasServices = []
services: List[ServiceJobDescription] = []
properties = set()
for job in self.jobsToReport:
if job.logJobStoreFileID is not None:
hasLogFile.append(job)
childNumber = reduce(lambda x, y: x + y, list(map(len, job.stack)) + [0])
if childNumber > 0: # Total number of successors > 0
hasChildren.append(job)
properties.add("HAS_CHILDREN")
elif job.command is not None:
# Job has no children and a command to run. Indicates job could be run.
readyToRun.append(job)
properties.add("READY_TO_RUN")
else:
# Job has no successors and no command, so is a zombie job.
zombies.append(job)
properties.add("IS_ZOMBIE")
if job.services:
hasServices.append(job)
properties.add("HAS_SERVICES")
if isinstance(job, ServiceJobDescription):
services.append(job)
properties.add("IS_SERVICE")
jobStats = {'hasChildren': hasChildren,
'readyToRun': readyToRun,
'zombies': zombies,
'hasServices': hasServices,
'services': services,
'hasLogFile': hasLogFile,
'properties': properties,
'childNumber': childNumber}
return jobStats
@staticmethod
def getPIDStatus(jobStoreName: str) -> str:
"""
Determine the status of a process with a particular pid.
Checks to see if a process exists or not.
:return: A string indicating the status of the PID of the workflow as stored in the jobstore.
:rtype: str
"""
try:
jobstore = Toil.resumeJobStore(jobStoreName)
except NoSuchJobStoreException:
return 'QUEUED'
except NoSuchFileException:
return 'QUEUED'
try:
pid = jobstore.read_leader_pid()
try:
os.kill(pid, 0) # Does not kill process when 0 is passed.
except OSError: # Process not found, must be done.
return 'COMPLETED'
else:
return 'RUNNING'
except NoSuchFileException:
pass
return 'QUEUED'
@staticmethod
def getStatus(jobStoreName: str) -> str:
"""
Determine the status of a workflow.
If the jobstore does not exist, this returns 'QUEUED', assuming it has not been created yet.
Checks for the existence of files created in the toil.Leader.run(). In toil.Leader.run(), if a workflow completes
with failed jobs, 'failed.log' is created, otherwise 'succeeded.log' is written. If neither of these exist,
the leader is still running jobs.
:return: A string indicating the status of the workflow. ['COMPLETED', 'RUNNING', 'ERROR', 'QUEUED']
:rtype: str
"""
try:
jobstore = Toil.resumeJobStore(jobStoreName)
except NoSuchJobStoreException:
return 'QUEUED'
except NoSuchFileException:
return 'QUEUED'
try:
with jobstore.read_shared_file_stream('succeeded.log') as successful:
pass
return 'COMPLETED'
except NoSuchFileException:
try:
with jobstore.read_shared_file_stream('failed.log') as failed:
pass
return 'ERROR'
except NoSuchFileException:
pass
return 'RUNNING'
def fetchRootJob(self) -> JobDescription:
"""
Fetches the root job from the jobStore that provides context for all other jobs.
Exactly the same as the jobStore.loadRootJob() function, but with a different
exit message if the root job is not found (indicating the workflow ran successfully
to completion and certain stats cannot be gathered from it meaningfully such
as which jobs are left to run).
:raises JobException: if the root job does not exist.
"""
try:
return self.jobStore.load_root_job()
except JobException:
print('Root job is absent. The workflow has may have completed successfully.', file=sys.stderr)
raise
def fetchUserJobs(self, jobs: List[str]) -> List[JobDescription]:
"""
Takes a user input array of jobs, verifies that they are in the jobStore
and returns the array of jobsToReport.
:param list jobs: A list of jobs to be verified.
:returns jobsToReport: A list of jobs which are verified to be in the jobStore.
"""
jobsToReport = []
for jobID in jobs:
try:
jobsToReport.append(self.jobStore.load_job(jobID))
except JobException:
print('The job %s could not be found.' % jobID, file=sys.stderr)
raise
return jobsToReport
def traverseJobGraph(
self,
rootJob: JobDescription,
jobsToReport: Optional[List[JobDescription]] = None,
foundJobStoreIDs: Optional[Set[str]] = None,
) -> List[JobDescription]:
"""
Find all current jobs in the jobStore and return them as an Array.
:param rootJob: The root job of the workflow.
:param list jobsToReport: A list of jobNodes to be added to and returned.
:param set foundJobStoreIDs: A set of jobStoreIDs used to keep track of
jobStoreIDs encountered in traversal.
:returns jobsToReport: The list of jobs currently in the job graph.
"""
if jobsToReport is None:
jobsToReport = []
if foundJobStoreIDs is None:
foundJobStoreIDs = set()
if str(rootJob.jobStoreID) in foundJobStoreIDs:
return jobsToReport
foundJobStoreIDs.add(str(rootJob.jobStoreID))
jobsToReport.append(rootJob)
# Traverse jobs in stack
for jobs in rootJob.stack:
for successorJobStoreID in jobs:
if successorJobStoreID not in foundJobStoreIDs and self.jobStore.job_exists(successorJobStoreID):
self.traverseJobGraph(self.jobStore.load_job(successorJobStoreID), jobsToReport, foundJobStoreIDs)
# Traverse service jobs
for jobs in rootJob.services:
for serviceJobStoreID in jobs:
if self.jobStore.job_exists(serviceJobStoreID):
if serviceJobStoreID in foundJobStoreIDs:
raise RuntimeError('Service job was unexpectedly found while traversing ')
foundJobStoreIDs.add(serviceJobStoreID)
jobsToReport.append(self.jobStore.load_job(serviceJobStoreID))
return jobsToReport
def main() -> None:
    """Reports the state of a Toil workflow.

    Parses command-line options, opens the job store, gathers per-job
    statistics, and prints the requested report sections (per-job stats,
    logs, children, dot graph, and/or an aggregate summary).
    """
    parser = parser_with_common_options()
    # Reporting flags; everything defaults to off except the aggregate summary.
    parser.add_argument("--failIfNotComplete", action="store_true",
                        help="Return exit value of 1 if toil jobs not all completed. default=%(default)s",
                        default=False)
    parser.add_argument("--noAggStats", dest="stats", action="store_false",
                        help="Do not print overall, aggregate status of workflow.",
                        default=True)
    parser.add_argument("--printDot", action="store_true",
                        help="Print dot formatted description of the graph. If using --jobs will "
                             "restrict to subgraph including only those jobs. default=%(default)s",
                        default=False)
    parser.add_argument("--jobs", nargs='+',
                        help="Restrict reporting to the following jobs (allows subsetting of the report).",
                        default=None)
    parser.add_argument("--printPerJobStats", action="store_true",
                        help="Print info about each job. default=%(default)s",
                        default=False)
    parser.add_argument("--printLogs", action="store_true",
                        help="Print the log files of jobs (if they exist). default=%(default)s",
                        default=False)
    parser.add_argument("--printChildren", action="store_true",
                        help="Print children of each job. default=%(default)s",
                        default=False)
    options = parser.parse_args()
    set_logging_from_options(options)
    # NOTE(review): this bare-invocation check runs *after* parse_args(), which
    # may already have exited on missing required arguments — confirm ordering.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    config = Config()
    config.setOptions(options)
    try:
        status = ToilStatus(config.jobStore, options.jobs)
    except NoSuchJobStoreException:
        print('No job store found.')
        return
    except JobException:  # Workflow likely complete, user informed in ToilStatus()
        return
    jobStats = status.report_on_jobs()
    # Info to be reported.
    hasChildren = jobStats['hasChildren']
    readyToRun = jobStats['readyToRun']
    zombies = jobStats['zombies']
    hasServices = jobStats['hasServices']
    services = jobStats['services']
    hasLogFile = jobStats['hasLogFile']
    properties = jobStats['properties']
    childNumber = jobStats['childNumber']
    # Emit each requested report section.
    if options.printPerJobStats:
        status.printAggregateJobStats(properties, childNumber)
    if options.printLogs:
        status.printJobLog()
    if options.printChildren:
        status.printJobChildren()
    if options.printDot:
        status.print_dot_chart()
    if options.stats:
        print('Of the %i jobs considered, '
              'there are %i jobs with children, '
              '%i jobs ready to run, '
              '%i zombie jobs, '
              '%i jobs with services, '
              '%i services, '
              'and %i jobs with log files currently in %s.' %
              (len(status.jobsToReport), len(hasChildren), len(readyToRun), len(zombies),
               len(hasServices), len(services), len(hasLogFile), status.jobStore))
    if len(status.jobsToReport) > 0 and options.failIfNotComplete:
        # Upon workflow completion, all jobs will have been removed from job store
        exit(1)
| StarcoderdataPython |
3405984 | import os
import numpy as np
import nibabel as nib
from nilabels.tools.aux_methods.utils_path import connect_path_tail_head
from nilabels.tools.aux_methods.utils_nib import set_new_data
class LabelsFuser(object):
    """
    Facade of the methods in tools, for work with paths to images rather than
    with data.
    """
    def __init__(self, input_data_folder=None, output_data_folder=None):
        # Root folders used to resolve relative input/output paths.
        self.pfo_in = input_data_folder
        self.pfo_out = output_data_folder

    def _stack_and_save(self, list_pfi, output_name, output_tag):
        """
        Stack a list of 3D volumes into one 4D image and save it.

        :param list_pfi: paths (resolved against self.pfo_in) of the 3D images to stack.
        :param output_name: base name of the resulting 4D file.
        :param output_tag: additional tag appended to the output file name.
        :return: path to the saved 4D image.
        """
        list_pfi = [connect_path_tail_head(self.pfo_in, j) for j in list_pfi]
        # NOTE(review): get_data() is deprecated in recent nibabel; kept to
        # preserve the original dtype/scaling behaviour.
        stack = np.stack([nib.load(pfi).get_data() for pfi in list_pfi], axis=3)
        # Reuse the first image's header/affine for the stacked output.
        im_4d = set_new_data(nib.load(list_pfi[0]), stack)
        pfi_4d = connect_path_tail_head(self.pfo_out, '{0}_{1}.nii.gz'.format(output_name, output_tag))
        nib.save(im_4d, pfi_4d)
        return pfi_4d

    def create_stack_for_labels_fusion(self, pfi_target, pfi_result, list_pfi_segmentations, list_pfi_warped=None,
                                       seg_output_name='res_4d_seg', warp_output_name='res_4d_warp', output_tag=''):
        """
        Stack and fuse anatomical images and segmentations in a single command.
        :param pfi_target: path to file to the target of the segmentation
        :param pfi_result: path to file where to store the result.
        :param list_pfi_segmentations: list of the segmentations to fuse
        :param list_pfi_warped: list of the warped images to fuse
        :param seg_output_name: base name of the stacked 4D segmentation file.
        :param warp_output_name: base name of the stacked 4D warped-images file.
        :param output_tag: additional tag output.
        :return: the paths prepared to be provided to nifty_seg, in the following order
            [pfi_target, pfi_result, pfi_4d_seg, pfi_4d_warp]
            (pfi_4d_warp is None when no warped images are given).
        """
        pfi_target = connect_path_tail_head(self.pfo_in, pfi_target)
        pfi_result = connect_path_tail_head(self.pfo_out, pfi_result)
        # Save 4d stack of segmentations.
        pfi_4d_seg = self._stack_and_save(list_pfi_segmentations, seg_output_name, output_tag)
        # Save 4d stack of warped images, when available.
        if list_pfi_warped is None:
            pfi_4d_warp = None
        else:
            pfi_4d_warp = self._stack_and_save(list_pfi_warped, warp_output_name, output_tag)
        return pfi_target, pfi_result, pfi_4d_seg, pfi_4d_warp
| StarcoderdataPython |
6671680 | from .talk import Talk
| StarcoderdataPython |
1785153 | <reponame>mengalong/bter
# Copyright 2017~ mengalong <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import daiquiri
from six.moves.urllib import parse as urlparse
from stevedore import driver
logger = daiquiri.getLogger(__name__)
class DataBasePublisher(object):
    """Publish metering samples to a database backend.

    The database connection URL is read from the ``[database] connection``
    option of the supplied configuration. The URL scheme selects a storage
    driver from the ``bter.storage`` stevedore namespace, and that driver is
    instantiated with the parsed URL. Samples handed to
    :meth:`record_metering_sample` are inserted through that driver.

    NOTE(review): the previous docstring described a *file* publisher and its
    pipeline.yaml options; it did not match this class's behaviour.
    """
    def __init__(self, conf):
        # Configuration object providing the database connection URL.
        self.conf = conf
        self.url = self.conf.get('database', 'connection')
        parsed_url = urlparse.urlparse(self.url)
        logger.debug("The database url is:%s" % self.url)
        # Load the storage driver matching the URL scheme and instantiate it
        # with the parsed URL.
        d = driver.DriverManager(
            namespace="bter.storage",
            name=parsed_url.scheme,
            invoke_on_load=True,
            invoke_args=(parsed_url,)).driver
        self.database = d
        logger.debug("the database obj is:%s" % self.database)
    def record_metering_sample(self, sample):
        # Delegate persistence of a single sample to the loaded driver.
        self.database.insert(sample)
| StarcoderdataPython |
8179770 | <gh_stars>1-10
# 1 Gold Star
# The built-in <string>.split() procedure works
# okay, but fails to find all the words on a page
# because it only uses whitespace to split the
# string. To do better, we should also use punctuation
# marks to split the page into words.
# Define a procedure, split_string, that takes two
# inputs: the string to split and a string containing
# all of the characters considered separators. The
# procedure should return a list of strings that break
# the source string up by the characters in the
# splitlist.
def drop_empty(result):
    """Return a copy of *result* with every empty-string entry removed."""
    return [item for item in result if item != '']
def custom_split(source, char):
    """Split *source* on *char* and drop empty fragments.

    A single-space separator delegates to str.split() with no argument,
    which splits on any run of whitespace; every other separator uses
    str.split(char).
    """
    parts = source.split() if char == ' ' else source.split(char)
    # str.split(sep) yields empty strings between adjacent separators; drop
    # them here (removes the dead commented-out filter and the helper call).
    return [part for part in parts if part != '']
def split_string(source, splitlist):
    """Split *source* by every separator character in *splitlist*.

    The source is split on the first separator, then each resulting fragment
    is split on the next separator, and so on; empty fragments are dropped.
    """
    # Seed with the whole source rather than testing "if not splited": the old
    # first-iteration test was also falsy for an *empty* intermediate result,
    # which made later separators incorrectly re-split the original source.
    splited = [source]
    for separator in splitlist:
        refined = []
        for fragment in splited:
            refined += custom_split(fragment, separator)
        splited = refined
    return splited
def main():
    """Run split_string on the three sample inputs from the problem statement."""
    samples = [
        ("This is a test-of the,string separation-code!", " ,!-"),
        # expected: ['This', 'is', 'a', 'test', 'of', 'the', 'string', 'separation', 'code']
        ("After the flood ... all the colors came out.", " ."),
        # expected: ['After', 'the', 'flood', 'all', 'the', 'colors', 'came', 'out']
        ("First Name,Last Name,Street Address,City,State,Zip Code", ","),
        # expected: ['First Name', 'Last Name', 'Street Address', 'City', 'State', 'Zip Code']
    ]
    for text, separators in samples:
        print(split_string(text, separators))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6616563 | <reponame>rob-blackbourn/bareasgi<filename>bareasgi/http/http_callbacks.py
"""The http callbacks"""
from typing import Awaitable, Callable
from .http_request import HttpRequest
from .http_response import HttpResponse
# Signature of an HTTP request handler: receives the request and
# asynchronously produces the response.
HttpRequestCallback = Callable[
    [HttpRequest],
    Awaitable[HttpResponse]
]
# Signature of HTTP middleware: receives the request plus the downstream
# handler to delegate to, and asynchronously produces the response.
HttpMiddlewareCallback = Callable[
    [HttpRequest, HttpRequestCallback],
    Awaitable[HttpResponse]
]
| StarcoderdataPython |
3526288 | <reponame>yaya-cheng/FGNM<filename>attack_method.py
import numpy as np
import tensorflow as tf
import scipy.stats as st
from utils import *
slim = tf.contrib.slim
def project_kern(kern_size):
    """Build a uniform averaging kernel with a zeroed centre, replicated over 3 channels.

    Returns a (kern_size, kern_size, 3, 1) float32 kernel suitable for
    depthwise convolution, together with the padding radius kern_size // 2.
    """
    radius = kern_size // 2
    # Uniform weight over every cell except the centre, which is zeroed.
    base = np.ones((kern_size, kern_size), dtype=np.float32) / (kern_size ** 2 - 1)
    base[radius, radius] = 0.0
    base = base.astype(np.float32)
    # Replicate across the 3 colour channels, then add a trailing depth axis.
    per_channel = np.stack([base, base, base]).swapaxes(0, 2)
    return np.expand_dims(per_channel, 3), radius
def project_noise(x, stack_kern, kern_size):
    """Convolve *x* with the projection kernel, padding so spatial size is preserved.

    :param x: batched NHWC image tensor.
    :param stack_kern: per-channel kernel as returned by project_kern().
    :param kern_size: padding radius. NOTE(review): despite the name, callers
        appear to pass project_kern()'s second return value (kern_size // 2) —
        confirm against call sites.
    """
    # Zero-pad height and width by the radius, then apply the kernel to each
    # channel independently (depthwise convolution, no channel mixing).
    x = tf.pad(x, [[0,0],[kern_size,kern_size],[kern_size,kern_size],[0,0]], "CONSTANT")
    x = tf.nn.depthwise_conv2d(x, stack_kern, strides=[1, 1, 1, 1], padding='VALID')
    return x
def gkern(kernlen=21, nsig=3):
    """Returns a 2D Gaussian kernel array.

    The kernel is normalised to sum to 1, cast to float32, replicated over the
    3 colour channels, and given a trailing depth axis, yielding shape
    (kernlen, kernlen, 3, 1) for use with depthwise convolution.
    """
    # The redundant local "import scipy.stats as st" was removed; the module
    # already imports scipy.stats as st at the top of the file.
    # Sample the standard normal pdf at kernlen evenly spaced points in [-nsig, nsig].
    x = np.linspace(-nsig, nsig, kernlen)
    kern1d = st.norm.pdf(x)
    # Outer product gives the separable 2D kernel; normalise to unit mass.
    kernel_raw = np.outer(kern1d, kern1d)
    kernel = kernel_raw / kernel_raw.sum()
    kernel = kernel.astype(np.float32)
    stack_kernel = np.stack([kernel, kernel, kernel]).swapaxes(2, 0)
    stack_kernel = np.expand_dims(stack_kernel, 3)
    return stack_kernel
def input_diversity(FLAGS, input_tensor):
    """Input diversity: https://arxiv.org/abs/1803.06978

    With probability FLAGS.prob, randomly rescales the input to rnd x rnd
    (image_width <= rnd < image_resize) and zero-pads it back to
    image_resize x image_resize at a random offset; otherwise returns the
    input unchanged.
    """
    # Random target size for the resize.
    rnd = tf.random_uniform((), FLAGS.image_width, FLAGS.image_resize, dtype=tf.int32)
    rescaled = tf.image.resize_images(input_tensor, [rnd, rnd], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    # Split the remaining space into random top/left padding and the rest.
    h_rem = FLAGS.image_resize - rnd
    w_rem = FLAGS.image_resize - rnd
    pad_top = tf.random_uniform((), 0, h_rem, dtype=tf.int32)
    pad_bottom = h_rem - pad_top
    pad_left = tf.random_uniform((), 0, w_rem, dtype=tf.int32)
    pad_right = w_rem - pad_left
    padded = tf.pad(rescaled, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]], constant_values=0.)
    padded.set_shape((input_tensor.shape[0], FLAGS.image_resize, FLAGS.image_resize, 3))
    # Apply the transform only with probability FLAGS.prob.
    return tf.cond(tf.random_uniform(shape=[1])[0] < tf.constant(FLAGS.prob), lambda: padded, lambda: input_tensor)
| StarcoderdataPython |
3586211 | <gh_stars>1-10
import json
import xml.dom.minidom
# Read the JSON file contents, returning them as a dict
# with open('./answer.json', 'r', encoding='utf8')as fp:
# json_data = json.load(fp)
# print('这是文件中的json数据:', json_data)
# print('这是读取到文件数据的数据类型:', type(json_data))
# for every_img_ans in json_data:
# # print(every_img_ans)
# detect_info = every_img_ans["detect_info"]
# img_name = every_img_ans["img_name"]
# num_detInfo = len(detect_info)
# print(num_detInfo)
# if num_detInfo > 1:
# print("-------------------")
# break
# for i in range(num_detInfo):
# label = detect_info[i]["label"] # str
# score = detect_info[i]["score"] # float
# boxes = detect_info[i]["boxes"] # list
# Test method 1: visualise the detection results
def visResult(testTxt, testImgDir, testAnnoDir):
    """
    Visualise test results: walk the test-image list and read each image's
    VOC-style XML annotation, assigning a draw colour per object class.

    :param testTxt: path to the txt file listing test image base names
    :param testImgDir: directory containing the test images
    :param testAnnoDir: directory containing the annotation XML files
    :return: 0 on completion
    """
    with open(testTxt, 'r') as testTxt_f:
        testTxtLines = testTxt_f.readlines()
    for testTxtLine in testTxtLines:
        # readlines() keeps the trailing newline; strip it so the composed
        # paths do not contain '\n' (previously every file lookup failed).
        testTxtLine = testTxtLine.strip()
        if not testTxtLine:
            continue
        print(testTxtLine)
        testImgPath = testImgDir + "/" + testTxtLine + ".jpg"
        testAnnoPath = testAnnoDir + "/" + testTxtLine + ".xml"
        # Parse the VOC annotation for this image.
        curDom = xml.dom.minidom.parse(testAnnoPath)
        root = curDom.documentElement
        # Collect every annotated object element.
        objects = root.getElementsByTagName('object')
        for j in range(len(objects)):
            obj = objects[j]
            name = obj.getElementsByTagName('name')[0]
            cls = name.firstChild.data
            # Choose a colour per class. NOTE(review): the colours (and
            # testImgPath above) are computed but never used — the drawing
            # code appears to be unfinished.
            if cls == "car":
                color_turple = (0, 0, 0)
            elif cls == "truck":
                color_turple = (255, 255, 0)
            elif cls == "bus":
                color_turple = (255, 255, 255)
    return 0
# Hard-coded local dataset paths (vehicle-detection dataset); adjust these to
# your own environment before running this script.
testTxt = '/media/zzc/Backup Plus/数据集/车辆目标检测/临港相关数据集/LG/ImageSets/Main/test.txt'
testImgDir = '/media/zzc/Backup Plus/数据集/车辆目标检测/临港相关数据集/LG/JPEGImages'
testAnnoDir = '/media/zzc/Backup Plus/数据集/车辆目标检测/临港相关数据集/LG/Annotations'
visResult(testTxt, testImgDir, testAnnoDir)
| StarcoderdataPython |
3476406 | '''Common get info functions for ping'''
import logging
# Genie
from genie.utils import Dq
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def get_ping_message(
    device,
    interface,
    address,
    source,
    size,
    count,
):
    """ Get the ping result message for an interface

    (The previous summary said "Get ntp peer information", which did not
    match this function.)

    Args:
        device (`obj`): Device object
        interface (`str`): Given interface for the output
        address (`str`): Destination address used in command
        source (`str`): Source interface used in command
        size (`int`): Size value used in command
        count (`int`): Count value used in command
    Returns:
        result (`str`): message
    Raises:
        Exception: if the device output cannot be parsed
    """
    # Build the command via parenthesised literal concatenation; the previous
    # backslash continuation *inside* the string literal embedded the next
    # line's leading indentation into the command sent to the device.
    command = ('ping {address} source {source} size {size} '
               'do-not-fragment count {count}').format(
        address=address,
        source=source,
        size=size,
        count=count,
    )
    try:
        output = device.parse(command)
    except SchemaEmptyParserError as e:
        raise Exception("Failed to parse output with error.") from e
    # Example output
    # {
    #     'ping':
    #         {'address': '1.1.1.1',
    #          'data-bytes': 1400,
    #          'result': [
    #              {'bytes': 1408,
    #               'from': '1.1.1.1',
    #               'message': 'expected message',
    #               ...
    # Keep only the entries mentioning 'message' or the given interface.
    rout = output.q.contains(f'message|{interface}', regex=True).reconstruct()
    # {'ping': {'address': '1.1.1.1',
    #           'result': [{'message': '...', 'from': 'interface'},
    #           ...
    result_list = Dq(rout).get_values('result')
    # Return the message of the first matching result entry.
    message = result_list[0]['message']
    return message
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.