id | text | dataset_id
|---|---|---|
1746097 | import torch
import math
from Net import ActorCritic
from Utils import buffer
class PPOAgent():
def __init__(self, state_dim, action_dim, gamma, std, eps_clip, kepoch, lr, device):
self.gamma = gamma
self.std = std
self.eps_clip = eps_clip
self.kepoch = kepoch
self.device = device
self.declare_net(state_dim, action_dim)
self.opt = torch.optim.Adam(self.net.parameters(), lr=lr)
self.loss = torch.nn.MSELoss()
self.buffer = buffer()
def declare_net(self, state_dim, action_dim):
self.net = ActorCritic(state_dim, action_dim, self.std)
self.old_net = ActorCritic(state_dim, action_dim, self.std)
self.old_net.load_state_dict(self.net.state_dict())
@torch.no_grad()
def choose_action(self, state):
state = torch.FloatTensor(state).view(1, -1).to(self.device)
mu, cov_mat, _ = self.old_net(state)
cov_mat = cov_mat.to(self.device)
dist = self.old_net.dist(mu, cov_mat)
action = dist.sample()
log_prob = dist.log_prob(action)
self.buffer.bs.append(state)
self.buffer.ba.append(action)
self.buffer.blogp.append(log_prob)
        return action.detach().cpu().flatten()
def act(self, old_state, old_action):
mu, cov_mat, state_value = self.net(old_state)
cov_mat = cov_mat.to(self.device)
dist = self.net.dist(mu, cov_mat)
action_prob = dist.log_prob(old_action)
entropy = dist.entropy()
return action_prob, state_value, entropy
def learn(self):
self.net.train()
bs, ba, br, blogp, bd = self.buffer.get_atr()
# Monte Carlo estimate of rewards:
rewards = []
discounted_reward = 0
for reward, is_terminal in zip(reversed(br), reversed(bd)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (self.gamma * discounted_reward)
rewards.insert(0, discounted_reward)
# Normalizing the rewards:
rewards = torch.tensor(rewards, dtype=torch.float32).to(self.device).view((-1,1))
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
old_state = torch.stack(bs).to(self.device).detach()
old_action = torch.stack(ba).to(self.device).detach()
old_logp = torch.stack(blogp).to(self.device).detach()
for e in range(self.kepoch):
new_logp, state_value, entropy = self.act(old_state, old_action)
state_value = state_value.squeeze(2)
ratio = torch.exp(new_logp - old_logp.detach())
advs = rewards - state_value.detach()
surr1 = ratio * advs
            surr2 = torch.clamp(ratio, 1 - self.eps_clip, 1 + self.eps_clip) * advs
            loss = -torch.min(surr1, surr2) + 0.5 * self.loss(state_value, rewards) - 0.01 * entropy
self.opt.zero_grad()
loss.mean().backward()
self.opt.step()
self.old_net.load_state_dict(self.net.state_dict())
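# Hypothetical usage sketch (not part of the original file): a minimal training
# loop for this agent. The gym environment, hyperparameters, and the manual
# filling of the reward/done buffers (br/bd) are assumptions; choose_action
# above only fills bs/ba/blogp, so the caller must append rewards and dones.
if __name__ == '__main__':
    import gym
    env = gym.make("Pendulum-v1")
    agent = PPOAgent(state_dim=3, action_dim=1, gamma=0.99, std=0.5,
                     eps_clip=0.2, kepoch=40, lr=3e-4, device="cpu")
    state = env.reset()
    for t in range(2048):
        action = agent.choose_action(state)
        state, reward, done, _ = env.step(action.numpy())
        agent.buffer.br.append(reward)  # reward/done appended by the caller
        agent.buffer.bd.append(done)
        if done:
            state = env.reset()
    agent.learn()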
| StarcoderdataPython |
1763320 | <reponame>cybrnode/zkt-sdk-rest-api
from typing import List
import websockets
from fastapi.testclient import TestClient
from app.main import app
from pyzkaccess.data import User
client = TestClient(app)
class TestIDKWHATTOCALLTHISCLASS:
TEST_DEVICE_IP = "192.168.10.201"
def setup(self):
self.handle = self.connect(self.TEST_DEVICE_IP)
def connect(self, ip):
test_device_connstr = {"protocol": "TCP", "ip_address": ip, "port": 14370, "timeout": 4000, "passwd": ""}
response = client.post("/devices/connect", json=test_device_connstr)
assert response.status_code == 200
assert response.json().get("status", "") == "success"
assert response.json().get("handle", "") != ""
return response.json()["handle"]
def get_all_users(self) -> List[User]:
response = client.get(f"/devices/{self.handle}/users")
assert response.status_code == 200
assert isinstance(response.json(), list)
users = list(map(lambda u: User(**u), response.json()))
return users
def test_get_users_should_have_200_status_code(self):
self.get_all_users()
def test_delete_user_by_pin_should_delete_user(self):
users = self.get_all_users()
for user in users:
r = client.delete(f"/devices/{self.handle}/users/{user.Pin}").json()
assert r.get("status") == "success"
assert len(self.get_all_users()) == 0
def test_get_doors_should___well___get_doors(self):
response = client.get(f"/devices/{self.handle}/config/doors")
print(response.text)
assert response.status_code == 200
assert isinstance(response.json(), list)
async def hello(self):
print("HAAALOOO")
uri = f"ws://localhost:8000/{self.handle}/realtimelogs"
async with websockets.connect(uri) as websocket:
await websocket.send("Hello world!")
await websocket.recv()
def test_hello(self):
print("HAAALOOO")
with client.websocket_connect(f"/devices/{self.handle}/realtimelogs") as websocket:
expected_headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate",
"connection": "upgrade",
"host": "testserver",
"user-agent": "testclient",
"sec-websocket-key": "testserver==",
"sec-websocket-version": "13",
}
while True:
data = websocket.receive_json()
print(data)
assert data == {"headers": expected_headers}
# def test_update_user_should_update_the_user(self):
# raise NotImplementedError()
| StarcoderdataPython |
3245752 | <reponame>jonepatr/lets_face_it<filename>code/glow_pytorch/glow/modules.py
import numpy as np
import scipy.linalg
import torch
import torch.nn as nn
import torch.nn.functional as F
from glow_pytorch.glow import thops
class ActNorm2d(nn.Module):
"""
Activation Normalization
Initialize the bias and scale with a given minibatch,
so that the output per-channel have zero mean and unit variance for that.
After initialization, `bias` and `logs` will be trained as parameters.
"""
def __init__(self, num_features, scale=1.0):
super().__init__()
# register mean and scale
size = [1, num_features]
self.register_parameter("bias", nn.Parameter(torch.zeros(*size)))
self.register_parameter("logs", nn.Parameter(torch.zeros(*size)))
self.num_features = num_features
self.scale = float(scale)
self.inited = False
def _check_input_dim(self, input):
return NotImplemented
def initialize_parameters(self, input):
if not self.training:
return
assert input.device == self.bias.device
with torch.no_grad():
bias = thops.mean(input.clone(), dim=0, keepdim=True) * -1.0
vars = thops.mean((input.clone() + bias) ** 2, dim=0, keepdim=True)
logs = torch.log(self.scale / (torch.sqrt(vars) + 1e-6))
self.bias.data.copy_(bias.data)
self.logs.data.copy_(logs.data)
self.inited = True
def _center(self, input, reverse=False):
if not reverse:
return input + self.bias
else:
return input - self.bias
def _scale(self, input, logdet=None, reverse=False):
logs = self.logs
if not reverse:
input = input * torch.exp(logs)
else:
input = input * torch.exp(-logs)
if logdet is not None:
"""
logs is log_std of `mean of channels`
so we need to multiply on the channel length
"""
dlogdet = thops.sum(logs) * input.size(1)
if reverse:
dlogdet *= -1
logdet = logdet + dlogdet
return input, logdet
def forward(self, input, logdet=None, reverse=False):
if not self.inited:
self.initialize_parameters(input)
# no need to permute dims as old version
if not reverse:
# center and scale
input = self._center(input, reverse)
input, logdet = self._scale(input, logdet, reverse)
else:
# scale and center
input, logdet = self._scale(input, logdet, reverse)
input = self._center(input, reverse)
return input, logdet
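# Hypothetical sanity check (not in the original repo): after the data-dependent
# init, ActNorm2d should give roughly zero-mean, unit-variance output per
# feature. Assumes thops.mean/thops.sum mirror torch.mean/torch.sum and that
# inputs are (batch, num_features), matching the [1, num_features] buffers.
def _actnorm_demo():
    actnorm = ActNorm2d(num_features=8)
    x = torch.randn(64, 8) * 3.0 + 5.0          # arbitrary shift and scale
    y, logdet = actnorm(x, logdet=torch.zeros(64))
    print(y.mean(dim=0), y.std(dim=0))          # ~0 and ~1 per feature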
class LinearZeros(nn.Linear):
def __init__(self, in_channels, out_channels, logscale_factor=3):
super().__init__(in_channels, out_channels)
self.logscale_factor = logscale_factor
# set logs parameter
self.register_parameter("logs", nn.Parameter(torch.zeros(out_channels)))
# init
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
output = super().forward(input)
return output * torch.exp(self.logs * self.logscale_factor)
class Permute2d(nn.Module):
def __init__(self, num_channels, shuffle):
super().__init__()
self.num_channels = num_channels
        self.indices = np.arange(self.num_channels).astype(np.int64)
        self.indices_inverse = np.zeros((self.num_channels), dtype=np.int64)
for i in range(self.num_channels):
self.indices_inverse[self.indices[i]] = i
if shuffle:
self.reset_indices()
def reset_indices(self):
np.random.shuffle(self.indices)
for i in range(self.num_channels):
self.indices_inverse[self.indices[i]] = i
def forward(self, input, reverse=False):
assert len(input.size()) == 4
if not reverse:
return input[:, self.indices]
else:
return input[:, self.indices_inverse]
class InvertibleConv1x1(nn.Module):
def __init__(self, num_channels, LU_decomposed=False):
super().__init__()
w_shape = [num_channels, num_channels]
w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(np.float32)
if not LU_decomposed:
# Sample a random orthogonal matrix:
self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
else:
np_p, np_l, np_u = scipy.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_shape, dtype=np.float32), -1)
eye = np.eye(*w_shape, dtype=np.float32)
self.register_buffer("p", torch.Tensor(np_p.astype(np.float32)))
self.register_buffer("sign_s", torch.Tensor(np_sign_s.astype(np.float32)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(np.float32)))
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(np.float32)))
self.u = nn.Parameter(torch.Tensor(np_u.astype(np.float32)))
self.l_mask = torch.Tensor(l_mask)
self.eye = torch.Tensor(eye)
self.w_shape = w_shape
self.LU = LU_decomposed
def get_weight(self, input, reverse):
w_shape = self.w_shape
if not self.LU:
dlogdet = torch.slogdet(self.weight)[1] * input.size(1)
if not reverse:
weight = self.weight.view(w_shape[0], w_shape[1])
else:
weight = (
torch.inverse(self.weight.double())
.float()
.view(w_shape[0], w_shape[1])
)
return weight, dlogdet
else:
self.p = self.p.to(input.device)
self.sign_s = self.sign_s.to(input.device)
self.l_mask = self.l_mask.to(input.device)
self.eye = self.eye.to(input.device)
l = self.l * self.l_mask + self.eye
u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(
self.sign_s * torch.exp(self.log_s)
)
dlogdet = thops.sum(self.log_s) * input.size(1)
if not reverse:
w = torch.matmul(self.p, torch.matmul(l, u))
else:
l = torch.inverse(l.double()).float()
u = torch.inverse(u.double()).float()
w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
return w.view(w_shape[0], w_shape[1]), dlogdet
def forward(self, input, logdet=None, reverse=False):
"""
log-det = log|abs(|W|)| * pixels
"""
weight, dlogdet = self.get_weight(input, reverse)
if not reverse:
z = torch.matmul(input, weight)
if logdet is not None:
logdet = logdet + dlogdet
return z, logdet
else:
z = torch.matmul(input, weight)
if logdet is not None:
logdet = logdet - dlogdet
return z, logdet
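# Hypothetical invertibility check (not in the original repo), assuming
# (batch, channels) inputs as used by the matmul in forward above: mapping
# forward and then reverse should recover the input.
def _invconv_demo():
    conv = InvertibleConv1x1(num_channels=6, LU_decomposed=True)
    x = torch.randn(4, 6)
    z, _ = conv(x, logdet=torch.zeros(4))
    x_rec, _ = conv(z, logdet=torch.zeros(4), reverse=True)
    print(torch.allclose(x, x_rec, atol=1e-4))  # expect True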
class GaussianDiag:
Log2PI = float(np.log(2 * np.pi))
@staticmethod
def likelihood_simplified(x):
"""
lnL = -1/2 * { ln|Var| + ((X - Mu)^T)(Var^-1)(X - Mu) + kln(2*PI) }
k = 1 (Independent)
Var = logs ** 2
"""
return -0.5 * ((x ** 2) + GaussianDiag.Log2PI)
@staticmethod
def logp_simplified(x):
likelihood = GaussianDiag.likelihood_simplified(x)
return torch.sum(likelihood, dim=1)
@staticmethod
def likelihood(mean, logs, x):
"""
lnL = -1/2 * { ln|Var| + ((X - Mu)^T)(Var^-1)(X - Mu) + kln(2*PI) }
k = 1 (Independent)
Var = logs ** 2
"""
return -0.5 * (
logs * 2.0 + ((x - mean) ** 2) / torch.exp(logs * 2.0) + GaussianDiag.Log2PI
)
@staticmethod
def logp(mean, logs, x):
likelihood = GaussianDiag.likelihood(mean, logs, x)
return thops.sum(likelihood, dim=[1])
@staticmethod
def sample(output_shape, eps_std=1):
return torch.normal(
mean=torch.zeros_like(output_shape),
std=torch.ones_like(output_shape) * eps_std,
)
| StarcoderdataPython |
28691 | <gh_stars>0
from infra.controllers.contracts import HttpResponse
class NotFoundError(HttpResponse):
def __init__(self, message) -> None:
status_code = 404
self.message = message
body = {
'message': self.message
}
super().__init__(body, status_code)
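# Hypothetical usage sketch (not part of the original file): returned from a
# controller, this yields an HTTP 404 response with a JSON message body.
# The status_code attribute is assumed to be exposed by HttpResponse.
#
# response = NotFoundError('user not found')
# assert response.status_code == 404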
| StarcoderdataPython |
157727 | <filename>xpdf_python/debug.py
import sys
from wrapper import *
if __name__ == '__main__':
if len(sys.argv) > 1:
pdf_loc = sys.argv[1]
else:
pdf_loc = '/path/to/pdf'
test = to_text(pdf_loc)
    print(test)
| StarcoderdataPython |
198041 | import skimage
from skimage.measure import label, regionprops
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os
from imageio import imwrite
# following previous labeling method
lesion_class_label = 0
def convert(warped_mask):
bboxes = []
if warped_mask.max() == 1:
mask_contour, _ = cv2.findContours(warped_mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
label_slice = label(warped_mask)
props = regionprops(label_slice)
for prop in props:
# additionally construct bbox coordinates format matching miccai2018 notations
x_start, y_start, x_end, y_end = prop.bbox[1], prop.bbox[0], prop.bbox[3], prop.bbox[2]
# use [x_min, y_min, x_max, y_max] type
            coordinate = [x_start, y_start, x_end, y_end]
# append zero class label for lesion
coordinate.append(lesion_class_label)
bboxes.append(coordinate)
# print(len(props))
else:
bboxes.append(None)
return bboxes
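# Hypothetical quick check (not in the original file): convert() on a toy
# binary mask with two blobs should yield two [x_min, y_min, x_max, y_max,
# label] boxes.
def _convert_demo():
    toy = np.zeros((20, 20), dtype=np.uint8)
    toy[2:6, 3:8] = 1      # first lesion
    toy[10:15, 12:18] = 1  # second lesion
    print(convert(toy))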
if __name__ == '__main__':
metadata_path = os.path.join('/home/eunji/hdd/eunji/Data/liver_year1_dataset_extended_1904_preprocessed/ml_ready_phaselabel', "metadata.txt")
src_path = '/home/eunji/hdd/eunji/Data/liver_year1_dataset_extended_1904_preprocessed/ml_ready_phaselabel_align'
out_path = '/home/eunji/hdd/eunji/Data/liver_year1_dataset_extended_1904_preprocessed/ml_ready_phaselabel_align_clean'
data = []
with open(metadata_path, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip().split("|")
data.append(line[0])
os.makedirs(out_path, exist_ok=True)
for i in range(len(data)):
os.makedirs(os.path.join(out_path, data[i].split('/')[0]), exist_ok=True)
datapoint_name_relative = data[i]
datapoint_name_ct = datapoint_name_relative + '_ct.npy'
datapoint_name_mask = datapoint_name_relative + '_mask.npy'
datapoint_name_bbox = datapoint_name_relative + '_bbox.npy'
if os.path.isfile(os.path.join(src_path, datapoint_name_ct)):
ct_data = np.load(os.path.join(src_path, datapoint_name_ct))
mask_data = np.load(os.path.join(src_path, datapoint_name_mask))
orig_bbox = np.load(
os.path.join('/home/eunji/hdd/eunji/Data/liver_year1_dataset_extended_1904_preprocessed/ml_ready_phaselabel',
datapoint_name_bbox))
else:
continue
ct_data[ct_data < 0] = 0
ct_data[ct_data > 1] = 1
mask_data[mask_data > 0.5] = 1
mask_data[mask_data < 1] = 0
bbox_data = convert(mask_data)
if (len(bbox_data) != orig_bbox.shape[0]):
print("{}: {} -> {}".format(data[i], orig_bbox.shape[0], len(bbox_data)))
np.save(os.path.join(out_path, datapoint_name_ct), ct_data)
np.save(os.path.join(out_path, datapoint_name_mask), mask_data)
np.save(os.path.join(out_path, datapoint_name_bbox), bbox_data)
| StarcoderdataPython |
124453 | import logging
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.http import HttpResponseRedirect
from django.template.loader import render_to_string
from django.utils.encoding import smart_text
from django.contrib.auth.models import AnonymousUser
assert 'django.contrib.auth' in settings.INSTALLED_APPS
assert 'django.contrib.sessions' in settings.INSTALLED_APPS
assert 'viewas' in settings.INSTALLED_APPS
if hasattr(settings, 'AUTH_USER_MODEL'):
from django.contrib.auth import get_user_model
User = get_user_model()
else:
from django.contrib.auth.models import User
# tilde (keycode 192) as default key
VIEWAS_TOGGLE_KEY = getattr(settings, 'VIEWAS_TOGGLE_KEY', 192)
_HTML_TYPES = ('text/html', 'application/xhtml+xml')
def replace_insensitive(string, target, replacement):
"""
Similar to string.replace() but is case insensitive
Code borrowed from: http://forums.devshed.com/python-programming-11/case-insensitive-string-replace-490921.html
"""
no_case = string.lower()
index = no_case.rfind(target.lower())
if index >= 0:
return string[:index] + replacement + string[index + len(target):]
# no results so return the original string
return string
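# Illustrative example (hypothetical values): the replacement is spliced in at
# the last case-insensitive match of the target.
#
# >>> replace_insensitive("<BODY>hi</BODY>", "</body>", "<!--bar--></BODY>")
# '<BODY>hi<!--bar--></BODY>'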
class BaseMiddleware(object):
def can_run(self, request):
if not hasattr(request, 'user'):
return False
user = getattr(request, 'actual_user', request.user)
return user.is_superuser
class ViewAsHookMiddleware(BaseMiddleware):
"""
Authenticates a superuser as another user assuming a session variable is present.
"""
logger = logging.getLogger('viewas')
def get_user(self, username):
selector = User.USERNAME_FIELD + '__iexact'
query = {selector: username}
try:
return User.objects.get(**query)
except ObjectDoesNotExist:
# try to look up by email
if '@' in username:
try:
return User.objects.get(email__iexact=username)
except (MultipleObjectsReturned, ObjectDoesNotExist):
return None
return None
def login_as(self, request, username):
if request.user.get_username().lower() == username.lower():
return
if username == '':
if 'login_as' in request.session:
del request.session['login_as']
return
self.logger.info(
'User %r forced a login as %r at %s',
request.user.get_username(), username, request.get_full_path(),
extra={'request': request})
user = self.get_user(username)
if user:
request.user = user
request.session['login_as'] = request.user.get_username()
else:
messages.warning(request, "Did not find a user matching '%s'" % (username,))
if 'login_as' in request.session:
del request.session['login_as']
def process_request(self, request):
if not self.can_run(request):
return
request.actual_user = request.user
if 'login_as' in request.POST:
self.login_as(request, request.POST['login_as'])
return HttpResponseRedirect(request.get_full_path())
elif 'login_as' in request.session:
self.login_as(request, request.session['login_as'])
class ViewAsRenderMiddleware(BaseMiddleware):
tag = u'</body>'
def process_response(self, request, response):
if not self.can_run(request):
return response
if ('gzip' not in response.get('Content-Encoding', '') and
response.get('Content-Type', '').split(';')[0] in _HTML_TYPES):
response.content = replace_insensitive(
smart_text(response.content),
self.tag,
smart_text(self.render(request) + self.tag))
if response.get('Content-Length', None):
response['Content-Length'] = len(response.content)
return response
def render(self, request):
if not isinstance(request.user, AnonymousUser):
request.user.username = request.user.get_username()
if hasattr(request, 'actual_user'):
request.actual_user.username = request.actual_user.get_username()
return render_to_string('viewas/header.html', {
'VIEWAS_TOGGLE_KEY': VIEWAS_TOGGLE_KEY,
'request': request,
})
class ViewAsMiddleware(ViewAsHookMiddleware, ViewAsRenderMiddleware):
pass
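# A minimal settings sketch (assumptions: the module path below, and the
# old-style MIDDLEWARE_CLASSES hooks this middleware implements):
#
# MIDDLEWARE_CLASSES = [
#     ...,
#     'viewas.middleware.ViewAsMiddleware',
# ]
# VIEWAS_TOGGLE_KEY = 192  # keycode for tilde/backquote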
| StarcoderdataPython |
4833703 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: forever.py command args
"""
import sys
import time
import subprocess
def main():
try:
cmd = ' '.join(sys.argv[1:])
i = 1
while True:
            print('=== Iteration {} ==='.format(i))
status = subprocess.call(cmd, shell=True)
            print('=== exit status {} ==='.format(status))
i += 1
time.sleep(1)
except KeyboardInterrupt:
        print('^C detected')
if __name__ == '__main__':
main()
| StarcoderdataPython |
3270743 | <reponame>rraval/velocity-bingo
import random
from writer import Safe, runWriter, latex_escape
def writeMain(phrases, pages):
yield Safe(r'''
\documentclass[11pt]{article}
\usepackage[margin=0.25in,landscape]{geometry}
\usepackage{fontspec}
\usepackage{polyglossia}
\usepackage{array}
\defaultfontfeatures{Scale=MatchUppercase,Ligatures=TeX}
\setmainfont{TeX <NAME>}
\newlength{\bMargin}
\newlength{\bWidth}
\newlength{\bHeight}
\newlength{\bCellWidth}
\newlength{\bCellHeight}
\setlength{\parindent}{0pt}
\setlength{\parskip}{0pt}
\setlength{\tabcolsep}{0pt}
\setlength{\arrayrulewidth}{1pt}
\pagestyle{empty}
\newcolumntype{?}{!{\vrule width \bBorder}}
\newcolumntype{P}{%
@{\parbox[c][\bCellHeight]{0pt}{}}
>{\centering\arraybackslash}
m{\dimexpr\bCellWidth}
}
\begin{document}
\setlength{\bMargin}{0.25in}
\setlength{\bWidth}{0.5\dimexpr\textwidth-\bMargin}
\setlength{\bHeight}{0.5\dimexpr\textheight-\bMargin}
\setlength{\bCellWidth}{0.2\dimexpr\bWidth-6\arrayrulewidth}
\setlength{\bCellHeight}{0.2\dimexpr\bHeight-6\arrayrulewidth}
''')
for i in range(pages * 4):
yield from writeTable(phrases)
yield Safe(r'\allowbreak\hfill{}')
if (i % 2) == 1:
yield Safe(r'\par\vspace{\bMargin}')
yield Safe(r'\end{document}')
def writeTable(phrases):
sample = random.sample(phrases, 24)
sample = sample[:12] + ['"Um" or awkward pause'] + sample[12:]
yield Safe(r'\begin{tabular}{|P|P|P|P|P|}')
yield Safe(r'\hline{}')
for i, w in enumerate(sample):
yield Safe(r'\parbox{\dimexpr\bCellWidth-6pt\relax}{\centering{}')
yield w
yield Safe(r'}')
if (i % 5) == 4:
yield Safe(r'\\')
yield Safe(r'\hline ')
else:
yield Safe(r'&')
yield Safe(r'\end{tabular}')
if __name__ == '__main__':
import sys
with open('phrases.txt') as fp:
phrases = [l.strip() for l in fp.readlines()]
runWriter(sys.stdout, writeMain(phrases, pages=20), latex_escape)
| StarcoderdataPython |
134325 | <reponame>AurelienGasser/substra-backend
import os
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.throttling import AnonRateThrottle
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from libs.expiry_token_authentication import token_expire_handler, expires_at
from libs.user_login_throttle import UserLoginThrottle
from rest_framework.views import APIView
from substrapp.views.utils import get_channel_name
from django.conf import settings
class ExpiryObtainAuthToken(ObtainAuthToken):
authentication_classes = []
throttle_classes = [AnonRateThrottle, UserLoginThrottle]
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
if os.environ.get('TOKEN_STRATEGY', 'unique') == 'reuse':
token, created = Token.objects.get_or_create(user=user)
# token_expire_handler will check, if the token is expired it will generate new one
is_expired, token = token_expire_handler(token)
else:
# token should be new each time, remove the old one
Token.objects.filter(user=user).delete()
token = Token.objects.create(user=user)
return Response({
'token': token.key,
'expires_at': expires_at(token)
})
class Info(APIView):
def get(self, request, *args, **kwargs):
channel_name = get_channel_name(request)
channel = settings.LEDGER_CHANNELS[channel_name]
return Response({
'host': settings.DEFAULT_DOMAIN,
'channel': channel_name,
'config': {
'model_export_enabled': channel['model_export_enabled'],
}
})
obtain_auth_token = ExpiryObtainAuthToken.as_view()
info_view = Info.as_view()
| StarcoderdataPython |
3361406 | <reponame>RensDimmendaal/scikit-lego
import inspect
import numpy as np
from scipy.optimize import minimize_scalar
from sklearn.base import BaseEstimator, ClassifierMixin, OutlierMixin
from sklearn.mixture import GaussianMixture
from sklearn.utils import check_X_y
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted, check_array, FLOAT_DTYPES
from scipy.stats import gaussian_kde
def _check_gmm_keywords(kwargs):
for key in kwargs.keys():
if key not in inspect.signature(GaussianMixture).parameters.keys():
raise ValueError(f"Keyword argument {key} is not in `sklearn.mixture.GaussianMixture`")
class GMMClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, **gmm_kwargs):
self.gmm_kwargs = gmm_kwargs
def fit(self, X: np.array, y: np.array) -> "GMMClassifier":
"""
Fit the model using X, y as training data.
:param X: array-like, shape=(n_columns, n_samples, ) training data.
:param y: array-like, shape=(n_samples, ) training data.
:return: Returns an instance of self.
"""
X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
X = np.expand_dims(X, 1)
_check_gmm_keywords(self.gmm_kwargs)
self.gmms_ = {}
self.classes_ = unique_labels(y)
for c in self.classes_:
subset_x, subset_y = X[y == c], y[y == c]
self.gmms_[c] = GaussianMixture(**self.gmm_kwargs).fit(subset_x, subset_y)
return self
def predict(self, X):
check_is_fitted(self, ['gmms_', 'classes_'])
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
return self.classes_[self.predict_proba(X).argmax(axis=1)]
def predict_proba(self, X):
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
check_is_fitted(self, ['gmms_', 'classes_'])
res = np.zeros((X.shape[0], self.classes_.shape[0]))
for idx, c in enumerate(self.classes_):
res[:, idx] = self.gmms_[c].score_samples(X)
return np.exp(res)/np.exp(res).sum(axis=1)[:, np.newaxis]
class GMMOutlierDetector(OutlierMixin, BaseEstimator):
"""
The GMMDetector trains a Gaussian Mixture Model on a dataset X. Once
a density is trained we can evaluate the likelihood scores to see if
it is deemed likely. By giving a threshold this model might then label
outliers if their likelihood score is too low.
:param threshold: the limit at which the model thinks an outlier appears, must be between (0, 1)
:param gmm_kwargs: features that are passed to the `GaussianMixture` from sklearn
:param method: the method that the threshold will be applied to, possible values = [stddev, default=quantile]
If you select method="quantile" then the threshold value represents the
quantile value to start calling something an outlier.
If you select method="stddev" then the threshold value represents the
numbers of standard deviations before calling something an outlier.
"""
def __init__(self, threshold=0.99, method='quantile', random_state=42, **gmm_kwargs):
self.gmm_kwargs = gmm_kwargs
self.threshold = threshold
self.method = method
self.random_state = random_state
self.allowed_methods = ["quantile", "stddev"]
def fit(self, X: np.array, y=None) -> "GMMOutlierDetector":
"""
Fit the model using X, y as training data.
:param X: array-like, shape=(n_columns, n_samples,) training data.
:param y: ignored but kept in for pipeline support
:return: Returns an instance of self.
"""
# GMM sometimes throws an error if you don't do this
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
if len(X.shape) == 1:
X = np.expand_dims(X, 1)
if (self.method == "quantile") and ((self.threshold > 1) or (self.threshold < 0)):
raise ValueError(f"Threshold {self.threshold} with method {self.method} needs to be 0 < threshold < 1")
if (self.method == "stddev") and (self.threshold < 0):
raise ValueError(f"Threshold {self.threshold} with method {self.method} needs to be 0 < threshold ")
if self.method not in self.allowed_methods:
raise ValueError(f"Method not recognised. Method must be in {self.allowed_methods}")
_check_gmm_keywords(self.gmm_kwargs)
self.gmm_ = GaussianMixture(**self.gmm_kwargs, random_state=self.random_state).fit(X)
score_samples = self.gmm_.score_samples(X)
if self.method == "quantile":
self.likelihood_threshold_ = np.quantile(score_samples, 1 - self.threshold)
if self.method == "stddev":
density = gaussian_kde(score_samples)
max_x_value = minimize_scalar(lambda x: -density(x)).x
mean_likelihood = score_samples.mean()
new_likelihoods = score_samples[score_samples < max_x_value]
new_likelihoods_std = np.std(new_likelihoods - mean_likelihood)
self.likelihood_threshold_ = mean_likelihood - (self.threshold * new_likelihoods_std)
return self
def score_samples(self, X):
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
check_is_fitted(self, ['gmm_', 'likelihood_threshold_'])
if len(X.shape) == 1:
X = np.expand_dims(X, 1)
return self.gmm_.score_samples(X) * -1
def decision_function(self, X):
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) + self.likelihood_threshold_
def predict(self, X):
"""
Predict if a point is an outlier. If the output is 0 then
the model does not think it is an outlier.
:param X: array-like, shape=(n_columns, n_samples, ) training data.
:return: array, shape=(n_samples,) the predicted data. 1 for inliers, -1 for outliers.
"""
        predictions = (self.decision_function(X) >= 0).astype(int)
        predictions[predictions == 1] = -1
        predictions[predictions == 0] = 1
return predictions
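# Hypothetical usage sketch (not part of the library file): fit the detector on
# synthetic 2-D data; extra keyword arguments such as n_components are passed
# through to sklearn's GaussianMixture.
def _gmm_outlier_demo():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(500, 2))
    detector = GMMOutlierDetector(threshold=0.99, method="quantile", n_components=2)
    detector.fit(X)
    print(detector.predict(X)[:10])  # 1 = inlier, -1 = outlier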
| StarcoderdataPython |
84895 | #!/usr/bin/env python
import numpy as np
from tqdm import tqdm
from astropy.constants import G as Ggrav
from .low_level_utils import fast_dist
G = Ggrav.to('kpc Msun**-1 km**2 s**-2').value
def all_profiles(bins, positions, velocities, masses, two_dimensional=False, zcut=None,
                 ages=None, pbar_msg='Making profiles', nexpr=False):
"""
assumes all positions and velocities are rotated in the same way, such
that the angular momentum axis aligns with the z axis
if two_dimensional == False, then compute:
M(<r), M(r), rho = M(r)/dV, Vcirc = sqrt(GM(<r)/r), mag J(r), mag J(<r), J_z(r), J_z(<r)
if two_dimensional == True, then compute:
M(<R), M(R), rho = M(R)/dA, Vcirc = mean(vx**2 + vy**2), mag J(R), mag J(<R), J_z(R), J_z(<R)
:bins : array-like : sorted (from small to large) bin edges to use
:positions : array-like : particle positions, rotated such that z aligns with angular momentum axis
:velocities : array-like : particle velocities, rotated in the same way as the positions
:masses : array-like : particle masses, in the same order as positions and velocities
:two_dimensional : bool : whether or not to do 2D profiles
:pbar_msg: str : what to print for the pbar (total mass and number of particles is appended)
:nexpr : bool : whether or not to try to use numexpr to try to speed up the calculation
"""
if nexpr:
from numexpr import evaluate
print("Using numexpr for the masking and summing masses")
# work from outside in, throwing away particles as I no longer need them
assert positions.shape[0] == velocities.shape[0] == masses.shape[0]
m_of_r = np.empty(bins.size)
J_of_r = np.empty(bins.size)
Jz_of_r = np.empty(bins.size)
Jz_inside_r = np.empty(bins.size)
JinsideR = np.empty(bins.size)
specJinsideR = np.zeros(bins.size)
specJ_of_r = np.zeros(bins.size)
specJz_of_r = np.zeros(bins.size)
specJz_insideR = np.zeros(bins.size)
if ages is not None:
age_of_r = np.zeros(bins.size)
density = np.empty_like(m_of_r)
if two_dimensional:
vcirc = np.zeros(bins.size)
if two_dimensional:
x, y, z = positions.T
# distances are in the plane of the galaxy
distances = np.sqrt(x**2 + y**2)
else:
distances = fast_dist(positions) # center assumed to be at (0,0,0)
# throw away any particles beyond my last bin edge
msk = distances <= bins.max()
if two_dimensional:
msk = msk & (np.abs(z) <= zcut)
positions = positions[msk]
velocities = velocities[msk]
masses = masses[msk]
distances = distances[msk]
if ages is not None:
ages = ages[msk]
if two_dimensional:
x = x[msk]
y = y[msk]
# compute (angular) momenta for the particles:
# velocities should already have the halo at
pvec = (velocities.T*masses).T
# J = r cross p, and pos is assumed to have the halo at 0,0,0
Jvec = np.cross(positions, pvec)
del pvec
Jz = Jvec[:, 2]
if two_dimensional:
# calculate circular velocities:
# velocities in the plane of the disk
vx, vy = velocities[:, 0], velocities[:, 1]
V = np.vstack((vx, vy)).T # velocity vector in the plane of the disk
R = np.vstack((x, y)).T # distance vector in the plane of the disk
# use the definition of the dot product to find the angle between R and V, theta
# a dot b == mag(a) * mag(b) * cos(theta)
# => cos(theta) == a dot b / (mag(a) * mag(b))
# checked by hand -- does the dot product of R[ii] w/ V[ii]
R_dot_V = np.sum(R*V, axis=1)
mag_V = np.linalg.norm(V, axis=1)
# checked by hand -- gives the magnitdue of R[ii]
mag_R = np.linalg.norm(R, axis=1)
        # sanity check: mag_R should match the precomputed in-plane distances
        assert np.allclose(mag_R, distances)
theta = np.arccos(R_dot_V / (mag_R * mag_V))
# now that I know the angle, the circular velocity of each particle is going to be
# the magnitude of each velocity in the plane of the disk times the sin of angle between R and V
# -- if the angle is 0, then all the velocity is radial; if it's pi/2, then all the velocity is tangential (circular)
circular_velocities = mag_V*np.sin(theta)
# handle any nan (i.e. either R or V == 0) by replacing with a 0
print("Replacing {} NaNs with 0".format(
np.count_nonzero(np.isnan(circular_velocities))))
circular_velocities[np.isnan(circular_velocities)] = 0
# clean up to save memory
del R, V, theta
# make sure this is true because otherwise return will be nonsense since I use cumsum at the end
assert (np.sort(bins) == bins).all()
rev_bins = bins[::-1]
if two_dimensional:
pbar_msg += '; Mtot(R < {:.0f} kpc, Z < {:.1f} kpc)'.format(bins.max(), zcut)
else:
pbar_msg += '; Mtot(r < {:.0f} kpc)'.format(bins.max())
pbar_msg += ' = {:.2g} Msun, {:,} particles)'.format(
np.sum(masses), masses.size)
for ii in tqdm(range(len(rev_bins)), pbar_msg):
rhigh = rev_bins[ii]
if ii == len(rev_bins)-1:
rlow = 0
else:
rlow = rev_bins[ii+1]
assert rlow < rhigh
if two_dimensional:
shell_vol = 4.*np.pi*(rhigh**2 - rlow**2)
else:
shell_vol = 4./3.*np.pi*(rhigh**3 - rlow**3)
if nexpr:
# within_rhigh = evaluate("(distances <= rhigh)") #No need to do this -- I trim the particles before the loop and within the loop, so everything is within rhigh trivially
minsider = evaluate("sum(masses)")
inbin = evaluate("(distances > rlow)")
# sum up the masses where inbin, 0 otherwise
thism = evaluate("sum(where(inbin,masses,0))")
Jz_of_r[ii] = evaluate("sum(where(inbin,Jz,0))")
Jz_inside_r[ii] = evaluate("sum(Jz)")
# particles that are within rhigh but not in the bin. equivalent to (within_rhigh) & (logical_not( (distances>rlow) & (within_rhigh) )
# equivalent to False if not within_rhigh, so throws away outer particles
# equivalent to True & logical_not(True & True) = True & not(True) = True & False = False if distances > rlow and distances < rhigh
# equivalent to True & not(False & True) = True & not(False) = True if distances <= rlow
# keep = evaluate("~inbin") #but since I trim the particles so within_rhigh is trivially true (see above), this just reduces to not inbin, so no reason to calculate/store that
else:
# within_rhigh = distances <= rhigh
# &(within_rhigh) #works for both 2D and 3D
inbin = (distances > rlow)
minsider = np.sum(masses)
thism = np.sum(masses[inbin])
# keep = within_rhigh & (~inbin) #save logic as above
# just the z angular momentum for the particles int he bin, allowed to cancel
Jz_of_r[ii] = np.sum(Jz[inbin])
# Jz of all the particles inside R. should be smoother.
Jz_inside_r[ii] = np.sum(Jz)
m_of_r[ii] = thism
density[ii] = thism/shell_vol
# norm of the vector sum (sum(Jx), sum(Jy), sum(Jz)) of the angular momentum in the bin -- no need to mass weight because J is mass weighted
J_of_r[ii] = np.linalg.norm(np.sum(Jvec[inbin], axis=0))
# Do the same for all the particles inside the max of this bin; different because these can cancel differently
# remember that everything is within the max of this bin
JinsideR[ii] = np.linalg.norm(np.sum(Jvec, axis=0))
# normalize all those to the approrpiate specific value if m > 0.
if thism > 0:
specJ_of_r[ii] = J_of_r[ii]/thism
specJz_of_r[ii] = Jz_of_r[ii]/thism
if two_dimensional:
vcirc[ii] = np.average(
circular_velocities[inbin], weights=masses[inbin])
if ages is not None:
age_of_r[ii] = np.average(ages[inbin], weights=masses[inbin])
if minsider > 0:
specJinsideR[ii] = JinsideR[ii]/minsider
specJz_insideR[ii] = Jz_inside_r[ii]/minsider
distances = distances[~inbin]
masses = masses[~inbin]
positions = positions[~inbin]
velocities = velocities[~inbin]
Jvec = Jvec[~inbin]
Jz = Jz[~inbin]
if two_dimensional:
circular_velocities = circular_velocities[~inbin]
if ages is not None:
ages = ages[~inbin]
# swap everything back around so that I go from the inside out so that I can cumsum. remember bins is already sorted because I didn't swap it; I created rev_bins.
density = density[::-1]
m_of_r = m_of_r[::-1]
J_of_r = J_of_r[::-1]
Jz_of_r = Jz_of_r[::-1]
JinsideR = JinsideR[::-1]
Jz_inside_r = Jz_inside_r[::-1]
specJ_of_r = specJ_of_r[::-1]
specJz_of_r = specJz_of_r[::-1]
specJinsideR = specJinsideR[::-1]
specJz_insideR = specJz_insideR[::-1]
if ages is not None:
age_of_r = age_of_r[::-1]
mltr = np.cumsum(m_of_r)
Jltr = np.cumsum(J_of_r)
Jzltr = np.cumsum(Jz_of_r)
specJltr = np.cumsum(specJ_of_r)
specJzltr = np.cumsum(specJz_of_r)
# don't cumsum the "inside R" lines -- doesn't make much sense
if two_dimensional == False:
# calculate keplerian circular velocity
vcirc = np.sqrt(G*mltr/bins) # remember that bins didn't get reversed
else:
vcirc = vcirc[::-1]
# remember this gets saved directly, so be good about naming!
end = 'R' if two_dimensional else 'r'
toreturn = {
'density': density,
'M.of.'+end: m_of_r,
'J.of.'+end: J_of_r,
'Jz.of.'+end: Jz_of_r,
'J.inside'+end: JinsideR,
'Jz.inside'+end: Jz_inside_r,
'spec.J.of.'+end: specJ_of_r,
'spec.Jz.of.'+end: specJz_of_r,
'spec.Jinside'+end: specJinsideR,
'spec.Jz.insideR'+end: specJz_insideR,
'M.lt.'+end: mltr,
'J.lt.'+end: Jltr,
'Jz.lt.'+end: Jzltr,
'spec.J.lt.'+end: specJltr,
'spec.Jz.lt.'+end: specJzltr,
'vcirc': vcirc,
}
if ages is not None:
toreturn['age.of.'+end] = age_of_r
return toreturn
def particle_mass_profiles(part, species='all', bins=None, center_position=None, **kwargs):
'''
given part (a particle dictionary), call mass_profiles on the particles
bins can be either:
    * None -- defaults to logspace(-2, 0.5, 150)
    * a single integer N -- defaults to logspace(-2, 0.5, N)
    * a 3-item (low, high, num) -- expanded to logspace(low, high, num)
    * raw bin edges (any other length) -- passed directly
'''
import utilities as ut
species = ut.particle.parse_species(part, species)
center_position = ut.particle.parse_property(
part, 'center_position', center_position)
npart = np.sum([part[spec]['mass'].size for spec in species])
positions = np.empty((npart, 3))
masses = np.empty(npart)
left = 0
for spec in species:
right = left + part[spec]['mass'].size
positions[left:right] = part[spec]['position']
masses[left:right] = part[spec]['mass']
left = right
# shift so that the center is at [0, 0, 0]:
positions -= center_position
# now handle the bins:
if bins is None:
bins = np.logspace(-2, 0.5, 150)
elif isinstance(bins, int):
bins = np.logspace(-2, 0.5, bins)
elif len(bins) == 3:
bins = np.logspace(bins[0], bins[1], bins[2])
assert not np.isscalar(bins)
return mass_profiles(bins, positions, masses, **kwargs)
def mass_profiles(bins, positions, masses, pbar_msg='Making mass profiles', nexpr=False):
"""
computes:
M(<r), M(r), rho = M(r)/dV, Vcirc = sqrt(GM(<r)/r)
:bins : array-like : sorted (from small to large) bin edges to use
:positions : array-like : particle positions, with the center at 0,0,0
:masses : array-like : particle masses, in the same order as positions and velocities
:pbar_msg: str : what to print for the pbar (total mass and number of particles is appended)
:nexpr : bool : whether or not to try to use numexpr to try to speed up the calculation
"""
if nexpr:
from numexpr import evaluate
print("Using numexpr for the masking and summing masses")
# work from outside in, throwing away particles as I no longer need them
assert positions.shape[0] == masses.shape[0]
m_of_r = np.empty(bins.size)
density = np.empty_like(m_of_r)
distances = fast_dist(positions) # center assumed to be at (0,0,0)
# throw away any particles beyond my last bin edge
msk = distances <= bins.max()
positions = positions[msk]
masses = masses[msk]
distances = distances[msk]
# make sure this is true because otherwise return will be nonsense since I use cumsum at the end
assert (np.sort(bins) == bins).all()
rev_bins = bins[::-1]
pbar_msg += '; Mtot(r < {:.0f} kpc)'.format(bins.max())
pbar_msg += ' = {:.2g} Msun, {:,} particles)'.format(
np.sum(masses), masses.size)
for ii in tqdm(range(len(rev_bins)), pbar_msg):
rhigh = rev_bins[ii]
if ii == len(rev_bins)-1:
rlow = 0
else:
rlow = rev_bins[ii+1]
assert rlow <= rhigh
shell_vol = 4./3.*np.pi*(rhigh**3 - rlow**3)
if nexpr:
# within_rhigh = evaluate("(distances <= rhigh)") #No need to do this -- I trim the particles before the loop and within the loop, so everything is within rhigh trivially
minsider = evaluate("sum(masses)")
inbin = evaluate("(distances > rlow)")
# sum up the masses where inbin, 0 otherwise
thism = evaluate("sum(where(inbin,masses,0))")
# particles that are within rhigh but not in the bin. equivalent to (within_rhigh) & (logical_not( (distances>rlow) & (within_rhigh) )
# equivalent to False if not within_rhigh, so throws away outer particles
# equivalent to True & logical_not(True & True) = True & not(True) = True & False = False if distances > rlow and distances < rhigh
# equivalent to True & not(False & True) = True & not(False) = True if distances <= rlow
# keep = evaluate("~inbin") #but since I trim the particles so within_rhigh is trivially true (see above), this just reduces to not inbin, so no reason to calculate/store that
else:
# within_rhigh = distances <= rhigh
# &(within_rhigh) #works for both 2D and 3D
inbin = (distances > rlow)
minsider = np.sum(masses)
thism = np.sum(masses[inbin])
# keep = within_rhigh & (~inbin) #save logic as above
m_of_r[ii] = thism
density[ii] = thism/shell_vol
distances = distances[~inbin]
masses = masses[~inbin]
positions = positions[~inbin]
# swap everything back around so that I go from the inside out so that I can cumsum. remember bins is already sorted because I didn't swap it; I created rev_bins.
density = density[::-1]
m_of_r = m_of_r[::-1]
mltr = np.cumsum(m_of_r)
# calculate keplerian circular velocity
vcirc = np.sqrt(G*mltr/bins) # remember that bins didn't get reversed
# remember this gets saved directly, so be good about naming!
end = 'r'
toreturn = {
'density': density,
'M.of.'+end: m_of_r,
'M.lt.'+end: mltr,
'vcirc': vcirc,
'bins': bins,
}
return toreturn
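# Hypothetical quick-look usage (not in the original module): random particles
# centered on the origin; units follow the module (kpc, Msun, km/s).
def _mass_profiles_demo():
    rng = np.random.RandomState(0)
    pos = rng.normal(scale=50.0, size=(10000, 3))   # kpc
    mass = np.full(10000, 1e5)                      # Msun
    bins = np.logspace(-1, 2.3, 60)                 # kpc
    prof = mass_profiles(bins, pos, mass)
    print(prof['M.lt.r'][-1], prof['vcirc'].max())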
def mass_profiles_nopair(bins, positions, masses, pair_distance, pbar_msg='Making mass profiles', nexpr=False):
"""
computes:
M(<r), M(r), rho = M(r)/dV, Vcirc = sqrt(GM(<r)/r)
assumes that particles closer to second host (whcih is pair_distance from main
host) are removed already. removes the volume in that region from density
calculations.
:bins : array-like : sorted (from small to large) bin edges to use
:positions : array-like : particle positions, with the center at 0,0,0
:masses : array-like : particle masses, in the same order as positions and velocities
:pbar_msg: str : what to print for the pbar (total mass and number of particles is appended)
:nexpr : bool : whether or not to try to use numexpr to try to speed up the calculation
"""
if nexpr:
from numexpr import evaluate
print("Using numexpr for the masking and summing masses")
pair_midpoint_distance = pair_distance / 2.0
# work from outside in, throwing away particles as I no longer need them
assert positions.shape[0] == masses.shape[0]
m_of_r = np.empty(bins.size)
density = np.empty_like(m_of_r)
distances = fast_dist(positions) # center assumed to be at (0,0,0)
# throw away any particles beyond my last bin edge
msk = distances <= bins.max()
positions = positions[msk]
masses = masses[msk]
distances = distances[msk]
# make sure this is true because otherwise return will be nonsense since I use cumsum at the end
assert (np.sort(bins) == bins).all()
rev_bins = bins[::-1]
pbar_msg += '; Mtot(r < {:.0f} kpc)'.format(bins.max())
pbar_msg += ' = {:.2g} Msun, {:,} particles)'.format(
np.sum(masses), masses.size)
for ii in tqdm(range(len(rev_bins)), pbar_msg):
rhigh = rev_bins[ii]
if ii == len(rev_bins)-1:
rlow = 0
else:
rlow = rev_bins[ii+1]
assert rlow < rhigh
if rhigh <= pair_midpoint_distance:
shell_vol = 4./3.*np.pi*(rhigh**3 - rlow**3)
else:
# ok, more complicated because I need to subtract out the volume where the particles are trimmed
# from wikipedia's article on spherical caps:
# f the radius of the sphere is r and the height of the cap is h, then the volume of the spherical cap is:
# V= pi/3 * h^2 * (3r - h)
def cap_vol(r, h): return (np.pi/3.) * (h**2) * (3*r - h)
if rlow <= pair_midpoint_distance:
# then rhigh is over the border, but rlow is under it
vol_low = 4./3. * np.pi * rlow**3
else:
height_of_low_cap = rlow - pair_midpoint_distance
vol_of_low_cap = cap_vol(rlow, height_of_low_cap)
low_vol_total = 4./3. * np.pi * rlow**3
vol_low = low_vol_total - vol_of_low_cap
height_of_high_cap = rhigh - pair_midpoint_distance
vol_of_high_cap = cap_vol(rhigh, height_of_high_cap)
vol_high_total = (4./3.) * np.pi * rhigh**3
vol_high = vol_high_total - vol_of_high_cap
shell_vol = vol_high - vol_low
if nexpr:
# within_rhigh = evaluate("(distances <= rhigh)") #No need to do this -- I trim the particles before the loop and within the loop, so everything is within rhigh trivially
minsider = evaluate("sum(masses)")
inbin = evaluate("(distances > rlow)")
# sum up the masses where inbin, 0 otherwise
thism = evaluate("sum(where(inbin,masses,0))")
# particles that are within rhigh but not in the bin. equivalent to (within_rhigh) & (logical_not( (distances>rlow) & (within_rhigh) )
# equivalent to False if not within_rhigh, so throws away outer particles
# equivalent to True & logical_not(True & True) = True & not(True) = True & False = False if distances > rlow and distances < rhigh
# equivalent to True & not(False & True) = True & not(False) = True if distances <= rlow
# keep = evaluate("~inbin") #but since I trim the particles so within_rhigh is trivially true (see above), this just reduces to not inbin, so no reason to calculate/store that
else:
# within_rhigh = distances <= rhigh
# &(within_rhigh) #works for both 2D and 3D
inbin = (distances > rlow)
minsider = np.sum(masses)
thism = np.sum(masses[inbin])
# keep = within_rhigh & (~inbin) #save logic as above
m_of_r[ii] = thism
density[ii] = thism/shell_vol
distances = distances[~inbin]
masses = masses[~inbin]
positions = positions[~inbin]
# swap everything back around so that I go from the inside out so that I can cumsum. remember bins is already sorted because I didn't swap it; I created rev_bins.
density = density[::-1]
m_of_r = m_of_r[::-1]
mltr = np.cumsum(m_of_r)
# calculate keplerian circular velocity
vcirc = np.sqrt(G*mltr/bins) # remember that bins didn't get reversed
# remember this gets saved directly, so be good about naming!
end = 'r'
toreturn = {
'density': density,
'M.of.'+end: m_of_r,
'M.lt.'+end: mltr,
'vcirc': vcirc,
'bins': bins,
}
return toreturn
| StarcoderdataPython |
1612996 | from __future__ import division
'''
***********************************************************
File: softmaxModels.py
Allows for the creation and use of Softmax functions
Version 1.3.0: Added Discretization function
Version 1.3.1: Added Likelihood weighted Importance sampling
***********************************************************
'''
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Cohrint"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.3.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import numpy as np;
from random import random;
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as mvn
import warnings
import math
import copy
import time
from numpy.linalg import inv,det,svd,solve
from gaussianMixtures import Gaussian
from gaussianMixtures import GM
from mpl_toolkits.mplot3d import Axes3D
import scipy.linalg as linalg
from copy import deepcopy
from scipy import sparse
from sklearn.linear_model import LogisticRegression
class Softmax:
def __init__(self,weights= None,bias = None):
'''
Initialize with either:
1. Nothing, for empty softmax model
2. Vector of weights (n x d) and bias (nx1)
'''
self.weights = weights;
self.bias = bias;
if(self.weights is not None):
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def nullspace(self,A,atol=1e-13,rtol=0):
'''
Finds the nullspace of a matrix
'''
A = np.atleast_2d(A)
u, s, vh = svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
return ns;
def distance(self,x1,y1,x2,y2):
'''
The distance formula for 2d systems
'''
dist = (x1-x2)*(x1-x2) + (y1-y2)*(y1-y2);
dist = math.sqrt(dist);
return dist;
def buildRectangleModel(self,recBounds,steepness = 1):
'''
Builds a softmax model in 2 dimensions with a rectangular interior class
Inputs
recBounds: A 2x2 list, with the coordinates of the lower left and upper right corners of the rectangle
steepness: A scalar determining how steep the bounds between softmax classes are
'''
B = np.matrix([-1,0,recBounds[0][0],1,0,-recBounds[1][0],0,1,-recBounds[1][1],0,-1,recBounds[0][1]]).T;
M = np.zeros(shape=(12,15));
        #Boundary: Left|Near
rowSB = 0;
classNum1 = 1;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
        #Boundary: Right|Near
rowSB = 1;
classNum1 = 2;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
        #Boundary: Up|Near
rowSB = 2;
classNum1 = 3;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
        #Boundary: Down|Near
rowSB = 3;
classNum1 = 4;
classNum2 = 0;
for i in range(0,3):
M[3*rowSB+i,3*classNum2+i] = -1;
M[3*rowSB+i,3*classNum1+i] = 1;
A = np.hstack((M,B));
# print(np.linalg.matrix_rank(A))
# print(np.linalg.matrix_rank(M))
Theta = linalg.lstsq(M,B)[0].tolist();
weight = [];
bias = [];
for i in range(0,len(Theta)//3):
weight.append([Theta[3*i][0],Theta[3*i+1][0]]);
bias.append(Theta[3*i+2][0]);
steep = steepness;
self.weights = (np.array(weight)*steep).tolist();
self.bias = (np.array(bias)*steep).tolist();
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
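    # Hypothetical usage sketch (not in the original file): build a rectangle
    # model and evaluate the class probabilities by hand; the interior class
    # has index 0 and should dominate inside the rectangle.
    #
    # sm = Softmax();
    # sm.buildRectangleModel([[2, 2], [3, 4]], steepness=5);
    # x = np.array([2.5, 3.0]);
    # logits = np.array([np.dot(w, x) + b for w, b in zip(sm.weights, sm.bias)]);
    # probs = np.exp(logits - logits.max()); probs /= probs.sum();
    # print(probs);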
def buildOrientedRecModel(self,centroid,orient,length,width,steepness = 1):
'''
Builds a rectangular model at the specified centroid with the parameters given
'''
theta1 = orient*math.pi/180;
h = math.sqrt((width/2)*(width/2) + (length/2)*(length/2));
theta2 = math.asin((width/2)/h);
s1 = h*math.sin(theta1+theta2);
s2 = h*math.cos(theta1+theta2);
s3 = h*math.sin(theta1-theta2);
s4 = h*math.cos(theta1-theta2);
points = [];
points = [[centroid[0]+s2,centroid[1]+s1],[centroid[0]+s4,centroid[1]+s3],[centroid[0]-s2,centroid[1]-s1],[centroid[0]-s4,centroid[1]-s3]];
self.buildPointsModel(points,steepness=steepness);
def buildGeneralModel(self,dims,numClasses,boundries,B,steepness=1):
'''
        Builds a softmax model according to the full specification of boundaries and a normal vector
Inputs
dims: the dimensionality of the model
numClasses: the number of classes in the model
        boundries: a list of [2x1] lists specifying the boundaries required in the model
        B: a list of normals and constants for each boundary
steepness: A scalar determining how steep the bounds between softmax classes are
'''
M = np.zeros(shape=(len(boundries)*(dims+1),numClasses*(dims+1)));
for j in range(0,len(boundries)):
for i in range(0,dims+1):
M[(dims+1)*j+i,(dims+1)*boundries[j][1]+i] = -1;
M[(dims+1)*j+i,(dims+1)*boundries[j][0]+i] = 1;
A = np.hstack((M,B));
Theta = linalg.lstsq(M,B)[0].tolist();
weight = [];
bias = [];
for i in range(0,len(Theta)//(dims+1)):
wtmp=[];
for j in range(0,dims):
wtmp.append(Theta[(dims+1)*i+j][0])
weight.append(wtmp);
bias.append(Theta[(dims+1)*i+dims][0]);
steep = steepness;
self.weights = (np.array(weight)*steep).tolist();
self.bias = (np.array(bias)*steep).tolist();
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def buildPointsModel(self,points,steepness=1):
'''
Builds a 2D softmax model by constructing an interior class from the given points
Inputs
points: list of 2D points that construct a convex polygon
steepness: A scalar determining how steep the bounds between softmax classes are
'''
dims = 2;
pointsx = [p[0] for p in points];
pointsy = [p[1] for p in points];
centroid = [sum(pointsx)/len(points),sum(pointsy)/len(points)];
#for each point to the next, find the normal between them.
B = [];
for i in range(0,len(points)):
p1 = points[i];
if(i == len(points)-1):
p2 = points[0];
else:
p2 = points[i+1];
mid = [];
for i in range(0,len(p1)):
mid.append((p1[i]+p2[i])/2)
H = np.matrix([[p1[0],p1[1],1],[p2[0],p2[1],1],[mid[0],mid[1],1]]);
Hnull = (self.nullspace(H)).tolist();
distMed1 = self.distance(mid[0]+Hnull[0][0],mid[1]+Hnull[1][0],centroid[0],centroid[1]);
distMed2 = self.distance(mid[0]-Hnull[0][0],mid[1]-Hnull[1][0],centroid[0],centroid[1]);
if(distMed1 < distMed2):
Hnull[0][0] = -Hnull[0][0];
Hnull[1][0] = -Hnull[1][0];
Hnull[2][0] = -Hnull[2][0];
for j in Hnull:
B.append(j[0]);
B = np.matrix(B).T;
numClasses = len(points)+1;
boundries = [];
for i in range(1,numClasses):
boundries.append([i,0]);
M = np.zeros(shape=(len(boundries)*(dims+1),numClasses*(dims+1)));
for j in range(0,len(boundries)):
for i in range(0,dims+1):
M[(dims+1)*j+i,(dims+1)*boundries[j][1]+i] = -1;
M[(dims+1)*j+i,(dims+1)*boundries[j][0]+i] = 1;
A = np.hstack((M,B));
#print(np.linalg.matrix_rank(A))
#print(np.linalg.matrix_rank(M))
Theta = linalg.lstsq(M,B)[0].tolist();
weight = [];
bias = [];
for i in range(0,len(Theta)//(dims+1)):
weight.append([Theta[(dims+1)*i][0],Theta[(dims+1)*i+1][0]]);
bias.append(Theta[(dims+1)*i+dims][0]);
steep = steepness;
self.weights = (np.array(weight)*steep).tolist();
self.bias = (np.array(bias)*steep).tolist();
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def buildTriView(self,pose,length = 3,steepness = 2):
l = length;
#Without Cutting
triPoints = [[pose[0],pose[1]],[pose[0]+l*math.cos(2*-0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*-0.261799+math.radians(pose[2]))],[pose[0]+l*math.cos(2*0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*0.261799+math.radians(pose[2]))]];
#With Cutting
lshort = 0.5
triPoints = [[pose[0]+lshort*math.cos(2*0.261799+math.radians(pose[2])),pose[1]+lshort*math.sin(2*0.261799+math.radians(pose[2]))],[pose[0]+lshort*math.cos(2*-0.261799+math.radians(pose[2])),pose[1]+lshort*math.sin(2*-0.261799+math.radians(pose[2]))],[pose[0]+l*math.cos(2*-0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*-0.261799+math.radians(pose[2]))],[pose[0]+l*math.cos(2*0.261799+math.radians(pose[2])),pose[1]+l*math.sin(2*0.261799+math.radians(pose[2]))]];
self.buildPointsModel(triPoints,steepness=steepness);
def Estep(self,weight,bias,prior_mean,prior_var,alpha = 0.5,zeta_c = 1,softClassNum=0):
'''
Runs the Expectation step of the Variational Bayes algorithm
'''
#start the VB EM step
lamb = [0]*len(weight);
for i in range(0,len(weight)):
lamb[i] = self._lambda(zeta_c[i]);
hj = 0;
suma = 0;
for c in range(0,len(weight)):
if(softClassNum != c):
suma += weight[c];
tmp2 = 0;
for c in range(0,len(weight)):
tmp2+=lamb[c]*(alpha-bias[c])*weight[c];
hj = 0.5*(weight[softClassNum]-suma)+2*tmp2;
Kj = 0;
for c in range(0,len(weight)):
Kj += lamb[c]*weight[c]*weight[c];
Kj = Kj*2;
Kp = prior_var**-1;
hp = Kp*prior_mean;
Kl = Kp+Kj;
hl = hp+hj;
mean = (Kl**-1)*hl;
var = Kl**-1;
yc = [0]*len(weight);
yc2= [0]*len(weight);
for c in range(0,len(weight)):
yc[c] = weight[c]*mean + bias[c];
yc2[c] = weight[c]*(var + mean*mean)*weight[c] + 2*weight[c]*mean*bias[c] + bias[c]**2;
return [mean,var,yc,yc2];
def Mstep(self,m,yc,yc2,zeta_c,alpha,steps):
'''
Runs the Maximization Step of the Variational Bayes algorithm
'''
z = zeta_c;
a = alpha;
for i in range(0,steps):
for c in range(0,len(yc)):
z[c] = math.sqrt(yc2[c] + a**2 - 2*a*yc[c]);
num_sum = 0;
den_sum = 0;
for c in range(0,len(yc)):
num_sum += self._lambda(z[c])*yc[c];
den_sum += self._lambda(z[c]);
a = ((m-2)/4 + num_sum)/den_sum;
return [z,a]
def _lambda(self, zeta_c):
return 1 / (2 * zeta_c) * ( (1 / (1 + np.exp(-zeta_c))) - 0.5)
def calcCHat(self,prior_mean,prior_var,mean,var,alpha,zeta_c,yc,yc2,mod):
prior_var = np.matrix(prior_var);
prior_mean = np.matrix(prior_mean);
var_hat = np.matrix(var);
mu_hat = np.matrix(mean);
#KLD = 0.5*(np.log(prior_var/var) + prior_var**-1*var + (prior_mean-mean)*(prior_var**-1)*(prior_mean-mean));
KLD = 0.5 * (np.log(det(prior_var) / det(var_hat)) +
np.trace(inv(prior_var) .dot (var_hat)) +
(prior_mean - mu_hat).T .dot (inv(prior_var)) .dot
(prior_mean - mu_hat));
suma = 0;
for c in range(0,len(zeta_c)):
suma += 0.5 * (alpha + zeta_c[c] - yc[c]) \
- self._lambda(zeta_c[c]) * (yc2[c] - 2 * alpha
* yc[c] + alpha ** 2 - zeta_c[c] ** 2) \
- np.log(1 + np.exp(zeta_c[c]))
return yc[mod] - alpha + suma - KLD + 1;
def numericalProduct(self,prior,meas,low=0,high=5,res =100,vis = True):
'''
Multiplies a 1D softmax model by a 1D gaussian mixture over a range
For comparison to VB
'''
[x,softmax] = self.plot1D(low,high,res,vis = False);
prod = [0 for i in range(0,len(x))];
for i in range(0,len(x)):
prod[i] = prior.pointEval(x[i])*softmax[meas][i];
if(vis == False):
return [x,prod];
else:
plt.plot(x,prod);
plt.show();
def vb_update(self, measurement, prior_mean,prior_var):
'''
Runs the variational Bayes update
'''
w = np.array(self.weights)
b = np.array(self.bias)
m = len(w);
j = measurement;
xis = self.zeta_c;
alpha = self.alpha;
prior_var = np.array(prior_var);
prior_mean = np.array(prior_mean);
converged = False
EM_step = 0
while not converged and EM_step < 10000:
################################################################
# STEP 1 - EXPECTATION
################################################################
# PART A #######################################################
# find g_j
sum1 = 0
for c in range(m):
if c != j:
sum1 += b[c]
sum2 = 0
for c in range(m):
                sum2 += xis[c] / 2 \
+ self._lambda(xis[c]) * (xis[c] ** 2 - (b[c] - alpha) ** 2) \
- np.log(1 + np.exp(xis[c]))
g_j = 0.5 * (b[j] - sum1) + alpha * (m / 2 - 1) + sum2
# find h_j
sum1 = 0
for c in range(m):
if c != j:
sum1 += w[c]
sum2 = 0
for c in range(m):
sum2 += self._lambda(xis[c]) * (alpha - b[c]) * w[c]
h_j = 0.5 * (w[j] - sum1) + 2 * sum2
# find K_j
sum1 = 0
for c in range(m):
sum1 += self._lambda(xis[c]) * np.outer(w[c], (w[c]))
K_j = 2 * sum1
K_p = inv(prior_var)
g_p = -0.5 * (np.log(np.linalg.det(2 * np.pi * prior_var))) \
                + prior_mean.T .dot (K_p) .dot (prior_mean)
h_p = K_p .dot (prior_mean)
g_l = g_p + g_j
h_l = h_p + h_j
K_l = K_p + K_j
mu_hat = inv(K_l) .dot (h_l)
var_hat = inv(K_l)
# PART B #######################################################
y_cs = np.zeros(m)
y_cs_squared = np.zeros(m)
for c in range(m):
y_cs[c] = w[c].T .dot (mu_hat) + b[c]
y_cs_squared[c] = w[c].T .dot \
(var_hat + np.outer(mu_hat, mu_hat.T)) .dot (w[c]) \
+ 2 * w[c].T .dot (mu_hat) * b[c] + b[c] ** 2
################################################################
# STEP 2 - MAXIMIZATION
################################################################
for i in range(100): # n_{lc}
# PART A ######################################################
# Find xis
for c in range(m):
xis[c] = np.sqrt(y_cs_squared[c] + alpha ** 2 - 2 * alpha
* y_cs[c])
# PART B ######################################################
# Find alpha
num_sum = 0
den_sum = 0
for c in range(m):
num_sum += self._lambda(xis[c]) * y_cs[c]
den_sum += self._lambda(xis[c])
alpha = ((m - 2) / 4 + num_sum) / den_sum
################################################################
# STEP 3 - CONVERGENCE CHECK
################################################################
if EM_step == 0:
prev_log_c_hat = -1000 # Arbitrary value
KLD = 0.5 * (np.log(det(prior_var) / det(var_hat)) +
np.trace(inv(prior_var) .dot (var_hat)) +
(prior_mean - mu_hat).T .dot (inv(prior_var)) .dot
(prior_mean - mu_hat))
sum1 = 0
for c in range(m):
sum1 += 0.5 * (alpha + xis[c] - y_cs[c]) \
- self._lambda(xis[c]) * (y_cs_squared[c] - 2 * alpha
* y_cs[c] + alpha ** 2 - xis[c] ** 2) \
- np.log(1 + np.exp(xis[c]))
# <>TODO: don't forget Mun - unobserved parents!
# <>CHECK - WHY DO WE ADD +1 HERE??
log_c_hat = y_cs[j] - alpha + sum1 - KLD + 1
if np.abs(log_c_hat - prev_log_c_hat) < 0.001:
break
prev_log_c_hat = log_c_hat
EM_step += 1
# Resize parameters
if mu_hat.size == 1:
mu_post = mu_hat[0]
else:
mu_post = mu_hat
if var_hat.size == 1:
var_post = var_hat[0][0]
else:
var_post = var_hat
return mu_post, var_post, log_c_hat
def runVB(self,prior,softClassNum):
#For the one dimensional case only
post = GM();
weight = self.weights;
bias = self.bias;
alpha = self.alpha;
zeta_c = self.zeta_c;
for g in prior.Gs:
prevLogCHat = -1000;
count = 0;
while(count < 100000):
count = count+1;
[mean,var,yc,yc2] = self.Estep(weight,bias,g.mean,g.var,alpha,zeta_c,softClassNum = softClassNum);
[zeta_c,alpha] = self.Mstep(len(weight),yc,yc2,zeta_c,alpha,steps = 100);
logCHat = self.calcCHat(g.mean,g.var,mean,var,alpha,zeta_c,yc,yc2,mod=softClassNum);
if(abs(prevLogCHat - logCHat) < 0.00001):
break;
else:
prevLogCHat = logCHat;
post.addG(Gaussian(mean,var,g.weight*np.exp(logCHat).tolist()[0][0]))
return post;
def runVBND(self,prior,softClassNum):
#For the N dimensional Case
#Note: Cannot run 1D
post = GM();
for g in prior.Gs:
[mu,var,logCHat] = self.vb_update(softClassNum,g.mean,g.var);
mu = mu.tolist();
var = var.tolist();
post.addG(Gaussian(mu,var,g.weight*np.exp(logCHat)));
return post;
def pointEvalND(self,softClass,point):
#Evaluates the function at a point in any dimensionality.
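        # P(class = c | x) = exp(w_c . x + b_c) / sum_i exp(w_i . x + b_i)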
topIn = 0;
for i in range(0,len(self.weights[0])):
topIn+=self.weights[softClass][i]*point[i];
top = np.exp(topIn+self.bias[softClass]);
bottom = 0;
for i in range(0,self.size):
bottomIn = 0;
for j in range(0,len(self.weights[0])):
bottomIn += self.weights[i][j]*point[j];
bottom+=np.exp(bottomIn + self.bias[i]);
return top/bottom;
def plot1D(self,low=0,high = 5,res = 100,labels = None,vis = True):
x = [(i*(high-low)/res + low) for i in range(0,res)];
suma = [0]*len(x);
softmax = [[0 for i in range(0,len(x))] for j in range(0,len(self.weights))];
for i in range(0,len(x)):
tmp = 0;
for j in range(0,len(self.weights)):
tmp += math.exp(self.weights[j]*x[i] + self.bias[j]);
for j in range(0,len(self.weights)):
softmax[j][i] = math.exp(self.weights[j]*x[i] + self.bias[j]) /tmp;
if(vis ==True):
for i in range(0,len(self.weights)):
plt.plot(x,softmax[i]);
plt.ylim([0,1.1])
plt.xlim([low,high]);
if(labels is not None):
plt.legend(labels);
plt.show();
else:
return [x,softmax];
def plot2D(self,low = [0,0],high = [5,5],labels = None,vis = True,delta=0.1):
x, y = np.mgrid[low[0]:high[0]:delta, low[1]:high[1]:delta]
pos = np.dstack((x, y))
resx = int((high[0]-low[0])//delta)+1;
resy = int((high[1]-low[1])//delta)+1;
model = [[[0 for i in range(0,resy)] for j in range(0,resx)] for k in range(0,len(self.weights))];
for m in range(0,len(self.weights)):
for i in range(0,resx):
xx = (i*(high[0]-low[0])/resx + low[0]);
for j in range(0,resy):
yy = (j*(high[1]-low[1])/resy + low[1])
dem = 0;
for k in range(0,len(self.weights)):
dem+=np.exp(self.weights[k][0]*xx + self.weights[k][1]*yy + self.bias[k]);
model[m][i][j] = np.exp(self.weights[m][0]*xx + self.weights[m][1]*yy + self.bias[m])/dem;
dom = [[0 for i in range(0,resy)] for j in range(0,resx)];
for m in range(0,len(self.weights)):
for i in range(0,resx):
for j in range(0,resy):
dom[i][j] = np.argmax([model[h][i][j] for h in range(0,len(self.weights))]);
if(vis):
plt.contourf(x,y,dom,cmap = 'inferno');
fig = plt.figure()
ax = fig.gca(projection='3d');
colors = ['b','g','r','c','m','y','k','w','b','g'];
for i in range(0,len(model)):
ax.plot_surface(x,y,np.array(model[i]),color = colors[i]);
ax.set_xlabel('X/East Location (m)');
ax.set_ylabel('Y/West Location (m)');
ax.set_zlabel('Likelihood');
plt.show();
else:
return x,y,dom;
def plot3D(self,low=[-5,-5,-5],high=[5,5,5]):
fig = plt.figure();
ax = fig.add_subplot(111,projection='3d');
ax.set_xlabel('X Axis');
ax.set_ylabel('Y Axis');
ax.set_zlabel('Z Axis');
ax.set_xlim([low[0],high[0]]);
ax.set_ylim([low[1],high[1]]);
ax.set_zlim([low[2],high[2]]);
ax.set_title("3D Scatter of Dominant Softmax Classes")
for clas in range(1,self.size):
shapeEdgesX = [];
shapeEdgesY = [];
shapeEdgesZ = [];
#-5 to 5 on all dims
data = np.zeros(shape=(21,21,21));
for i in range(0,21):
for j in range(0,21):
for k in range(0,21):
data[i][j][k] = self.pointEvalND(clas,[(i-10)/2,(j-10)/2,(k-10)/2]);
if(data[i][j][k] > 0.1):
shapeEdgesX.append((i-10)/2);
shapeEdgesY.append((j-10)/2);
shapeEdgesZ.append((k-10)/2);
ax.scatter(shapeEdgesX,shapeEdgesY,shapeEdgesZ);
plt.show();
def logRegress(self,X,t,steepness = 1):
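        '''
        Fits a multinomial logistic regression to points X with class labels t,
        stores the steepness-scaled weights and biases on this Softmax model,
        and appends an all-zero weight/bias reference class. Also re-initializes
        the variational parameters alpha and zeta_c.
        '''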
dim = len(X[0]);
fitter = LogisticRegression(solver = 'newton-cg',multi_class = 'multinomial');
fitter.fit(X,t);
newCoef = fitter.coef_.tolist();
weights = [];
for i in range(0,len(newCoef)):
weights.append(newCoef[i]);
bias = [];
newBias = fitter.intercept_.tolist();
for i in range(0,len(newBias)):
bias.append(newBias[i]);
ze = [0]*dim;
weights.append(ze);
bias.append(0);
self.weights = (np.array(weights)*steepness).tolist();
self.bias = (np.array(bias)*steepness).tolist();
if(self.weights is not None):
self.size = len(self.weights);
self.alpha = 3;
self.zeta_c = [0]*len(self.weights);
for i in range(0,len(self.weights)):
self.zeta_c[i] = random()*10;
def discretize2D(self,softClass,low = [0,0],high = [5,5],delta=0.1):
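        '''
        Returns the softmax likelihood, summed over the classes in softClass,
        evaluated on a regular grid spanning [low, high] with spacing delta.
        '''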
x, y = np.mgrid[low[0]:high[0]:delta, low[1]:high[1]:delta]
pos = np.dstack((x, y))
resx = int((high[0]-low[0])//delta)+1;
resy = int((high[1]-low[1])//delta)+1;
likelihood = [[0 for i in range(0,resy)] for j in range(0,resx)];
for m in softClass:
for i in range(0,resx):
xx = (i*(high[0]-low[0])/resx + low[0]);
for j in range(0,resy):
yy = (j*(high[1]-low[1])/resy + low[1])
dem = 0;
for k in range(0,len(self.weights)):
dem+=np.exp(self.weights[k][0]*xx + self.weights[k][1]*yy + self.bias[k]);
likelihood[i][j] += np.exp(self.weights[m][0]*xx + self.weights[m][1]*yy + self.bias[m])/dem;
return likelihood;
def lwisUpdate(self,prior,softClass,numSamples,inverse = False):
#Runs a likelihood weighted importance sampling update on a given gaussian
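        # Uses the prior itself as the proposal q, weights each sample by
        # w_i = p(x_i) * L(x_i) / q(x_i), where L is the softmax class likelihood
        # (or 1 - L when inverse is True), then moment-matches a single Gaussian
        # to the weighted samples.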
q = GM();
q.addG(Gaussian(prior.mean,prior.var,1));
p = GM();
p.addG(prior);
x = q.sample(numSamples);
w = np.zeros(numSamples);
for i in range(0,numSamples):
if(not inverse):
w[i] = p.pointEval(x[i])*self.pointEvalND(softClass,x[i])/q.pointEval(x[i]);
else:
w[i] = p.pointEval(x[i])*(1-self.pointEvalND(softClass,x[i]))/q.pointEval(x[i]);
suma = sum(w);
for i in range(0,len(w)):
w[i] = w[i]/suma;
muHat = np.zeros(len(prior.mean));
for i in range(0,numSamples):
muHat = muHat + np.dot(x[i],w[i]);
varHat = np.zeros(shape = (len(prior.mean),len(prior.mean)));
for i in range(0,numSamples):
xi = np.asarray(x[i]);
varHat = varHat + w[i]*np.outer(xi,xi);
varHat = varHat - np.outer(muHat,muHat);
muHat = muHat.tolist();
varHat = varHat.tolist();
if(len(prior.mean) == 1):
muHat = muHat[0];
if(len(prior.var)==1):
varHat = varHat[0][0];
#Calculate Weights
#sample a bunch from the prior
tmp = GM();
tmp.addG(Gaussian(prior.mean,prior.var,1));
tmpSamps = tmp.sample(500);
#Find the likelihood at each sampled point
probs = np.zeros(500).tolist()
for i in range(0,500):
if(not inverse):
probs[i] = self.pointEvalND(softClass,tmpSamps[i]);
else:
probs[i] = 1-self.pointEvalND(softClass,tmpSamps[i]);
#Find the average likelihood, which is the weight factor
sumSamp = sum(probs)/500;
#Multiply the sampled weight factor by the previous weight
#or add in log space
logSamps = np.log(sumSamp);
logWeight = np.log(prior.weight)+logSamps;
#Extract final weight
weight = np.exp(logWeight);
post = Gaussian(muHat,varHat,weight);
return post;
def test1DSoftmax():
# weight = [-30,-20,-10,0];
# bias = [60,50,30,0];
weight = [-5,0];
bias = [5,0];
softClass = 0;
low = 0;
high = 5;
res = 100;
#Define Likelihood Model
a = Softmax(weight,bias);
#build a prior gaussian
prior = GM([2,4],[1,0.5],[1,0.5]);
#Get the posterior
post = a.runVB(prior,softClassNum = softClass);
a.plot1D(res=res,low = 0, high = 5);
#Plot Everything
[x0,classes] = a.plot1D(res = res,vis = False);
[x1,numApprox] = a.numericalProduct(prior,softClass,low=low,high=high,res = res,vis= False);
softClassLabels = ['Far left','Left','Far Right','Right'];
labels = ['likelihood','prior','VB Posterior','Numerical Posterior'];
[x2,pri] = prior.plot(low = low, high = high,num = res,vis = False);
[x3,pos] = post.plot(low = low, high = high,num = res,vis = False);
plt.plot(x0,classes[softClass]);
plt.plot(x2,pri);
plt.plot(x3,pos);
plt.plot(x1,numApprox);
plt.ylim([0,1.1])
plt.xlim([low,high])
plt.title("Fusion of prior with: " + softClassLabels[softClass]);
plt.legend(labels);
plt.show();
def test2DSoftmax():
#Specify Parameters
#2 1D robots obs model
#weight = [[0.6963,-0.6963],[-0.6963,0.6963],[0,0]];
#bias = [-0.3541,-0.3541,0];
#Colinear Problem
weight = [[-1.3926,1.3926],[-0.6963,0.6963],[0,0]];
bias = [0,.1741,0];
low = [0,0];
high = [5,5];
#Differencing Problem
#weight = [[0,1],[-1,1],[1,1],[0,2],[0,0]]
#bias = [1,0,0,0,0];
# low = [-5,-5];
# high = [5,5];
MMS = True;
softClass = 2;
detect = 0;
res = 100;
steep = 2;
for i in range(0,len(weight)):
for j in range(0,len(weight[i])):
weight[i][j] = weight[i][j]*steep;
bias[i] = bias[i]*steep;
#Define Likelihood Model
a = Softmax(weight,bias);
    [x1,y1,dom] = a.plot2D(low=low,high=high,vis=False);
    a.plot2D(low=low,high=high,vis=True);
#Define a prior
prior = GM();
prior.addG(Gaussian([2,4],[[1,0],[0,1]],1));
prior.addG(Gaussian([4,2],[[1,0],[0,1]],1));
prior.addG(Gaussian([1,3],[[1,0],[0,1]],1));
[x2,y2,c2] = prior.plot2D(low = low,high = high,res = res, vis = False);
if(MMS):
#run Variational Bayes
if(detect == 0):
post1 = a.runVBND(prior,0);
post2 = a.runVBND(prior,2);
post1.addGM(post2);
else:
post1 = a.runVBND(prior,1);
else:
post1 = a.runVBND(prior,softClass)
post1.normalizeWeights();
[x3,y3,c3] = post1.plot2D(low = low,high = high,res = res, vis = False);
post1.display();
softClassLabels = ['Near','Left','Right','Up','Down'];
detectLabels = ['No Detection','Detection']
#plot everything together
fig,axarr = plt.subplots(3,sharex= True,sharey = True);
axarr[0].contourf(x2,y2,c2,cmap = 'viridis');
axarr[0].set_title('Prior GM');
axarr[1].contourf(x1,y1,dom,cmap = 'viridis');
axarr[1].set_title('Likelihood Softmax');
axarr[2].contourf(x3,y3,c3,cmap = 'viridis');
if(MMS):
axarr[2].set_title('Posterior GM with observation:' + detectLabels[detect]);
else:
axarr[2].set_title('Posterior GM with observation:' + softClassLabels[softClass]);
fig.suptitle('2D Fusion of a Gaussian Prior with a Softmax Likelihood')
plt.show();
def testRectangleModel():
pz = Softmax();
pz.buildRectangleModel([[2,2],[3,4]],1);
#print('Plotting Observation Model');
#pz.plot2D(low=[0,0],high=[10,5],vis=True);
prior = GM();
for i in range(0,10):
for j in range(0,5):
prior.addG(Gaussian([i,j],[[1,0],[0,1]],1));
# prior.addG(Gaussian([4,3],[[1,0],[0,1]],1));
# prior.addG(Gaussian([7,2],[[4,1],[1,4]],3))
prior.normalizeWeights();
dela = 0.1;
x, y = np.mgrid[0:10:dela, 0:5:dela]
fig,axarr = plt.subplots(6);
axarr[0].contourf(x,y,prior.discretize2D(low=[0,0],high=[10,5],delta=dela));
axarr[0].set_title('Prior');
titles = ['Inside','Left','Right','Up','Down'];
for i in range(0,5):
post = pz.runVBND(prior,i);
c = post.discretize2D(low=[0,0],high=[10,5],delta=dela);
axarr[i+1].contourf(x,y,c,cmap='viridis');
axarr[i+1].set_title('Post: ' + titles[i]);
plt.show();
def testGeneralModel():
pz = Softmax();
pz.buildGeneralModel(2,4,[[1,0],[2,0],[3,0]],np.matrix([-1,1,-1,1,1,-1,0,-1,-1]).T);
#print('Plotting Observation Model');
#pz.plot2D(low=[0,0],high=[10,5],vis=True);
prior = GM();
for i in range(0,10):
for j in range(0,5):
prior.addG(Gaussian([i,j],[[1,0],[0,1]],1));
# prior.addG(Gaussian([4,3],[[1,0],[0,1]],1));
# prior.addG(Gaussian([7,2],[[4,1],[1,4]],3))
prior.normalizeWeights();
dela = 0.1;
x, y = np.mgrid[0:10:dela, 0:5:dela]
fig,axarr = plt.subplots(5);
axarr[0].contourf(x,y,prior.discretize2D(low=[0,0],high=[10,5],delta=dela));
axarr[0].set_title('Prior');
titles = ['Inside','Left','Right','Down'];
for i in range(0,4):
post = pz.runVBND(prior,i);
c = post.discretize2D(low=[0,0],high=[10,5],delta=dela);
axarr[i+1].contourf(x,y,c,cmap='viridis');
axarr[i+1].set_title('Post: ' + titles[i]);
plt.show();
def testPointsModel():
dims = 2;
#points = [[2,2],[2,4],[3,4],[3,2]];
#points = [[-2,-2],[-2,-1],[0,1],[2,-1],[2,-2]];
points = [[1,1],[1,2],[3,2],[6,1],[4,-1]];
#points = [[1,1],[3,5],[4,1],[3,0],[4,-2]];
pz = Softmax();
pz.buildPointsModel(points,steepness=5);
pz.plot2D(low=[-10,-10],high=[10,10],delta = 0.1,vis=True);
def testPlot3D():
dims = 3;
steep = 10;
'''
#Trapezoidal Pyramid Specs
numClasses = 7;
boundries = [[1,0],[2,0],[3,0],[4,0],[5,0],[6,0]];
B = np.matrix([0,0,-1,-1,-1,0,.5,-1,0,1,.5,-1,1,0,.5,-1,0,-1,.5,-1,0,0,1,-1]).T;
'''
#Octohedron Specs
numClasses = 9;
boundries = [];
for i in range(1,numClasses):
boundries.append([i,0]);
B = np.matrix([-1,-1,0.5,-1,-1,1,0.5,-1,1,1,0.5,-1,1,-1,0.5,-1,-1,-1,-0.5,-1,-1,1,-0.5,-1,1,1,-0.5,-1,1,-1,-0.5,-1]).T;
pz = Softmax();
pz.buildGeneralModel(dims=dims,numClasses=numClasses,boundries=boundries,B=B,steepness=steep);
pz2 = Softmax(deepcopy(pz.weights),deepcopy(pz.bias));
pz3 = Softmax(deepcopy(pz.weights),deepcopy(pz.bias));
pz4 = Softmax(deepcopy(pz.weights),deepcopy(pz.bias));
for i in range(0,len(pz2.weights)):
pz2.weights[i] = [pz2.weights[i][0],pz2.weights[i][2]]
for i in range(0,len(pz3.weights)):
pz3.weights[i] = [pz3.weights[i][1],pz3.weights[i][2]]
for i in range(0,len(pz4.weights)):
pz4.weights[i] = [pz4.weights[i][0],pz4.weights[i][1]]
fig = plt.figure();
[x,y,c] = pz2.plot2D(low=[-5,-5],high=[5,5],vis = False);
plt.contourf(x,y,c);
plt.xlabel('X Axis');
plt.ylabel('Z Axis');
plt.title('Slice Across Y Axis')
fig = plt.figure();
[x,y,c] = pz3.plot2D(low=[-5,-5],high=[5,5],vis = False);
plt.contourf(x,y,c);
plt.xlabel('Y Axis');
plt.ylabel('Z Axis');
plt.title('Slice Across X axis')
fig = plt.figure();
[x,y,c] = pz4.plot2D(low=[-5,-5],high=[5,5],vis = False);
plt.contourf(x,y,c);
plt.xlabel('X Axis');
plt.ylabel('Y Axis');
plt.title('Slice Across Z Axis');
pz.plot3D();
def testOrientRecModel():
cent = [4,4];
length = 3;
width = 2;
orient = 0;
pz = Softmax();
pz.buildOrientedRecModel(cent,orient,length,width);
pz.plot2D(low=[0,0],high=[10,10]);
def testTriView():
pz = Softmax();
pose = [2,1.4,15.3];
pz.buildTriView(pose,length=2,steepness=5);
pz.plot2D(low=[0,0],high=[10,10]);
def testMakeNear():
pzIn = Softmax();
pzOut = Softmax();
cent = [4,4];
orient = 0;
nearness = 2;
lengthIn = 3;
lengthOut = lengthIn+nearness;
widthIn = 2;
widthOut = widthIn+nearness;
pzIn.buildOrientedRecModel(cent,orient,lengthIn,widthIn,steepness=10);
pzOut.buildOrientedRecModel(cent,orient,lengthOut,widthOut,steepness=10);
#pzIn.plot2D(low=[0,0],high=[10,10]);
#pzOut.plot2D(low=[0,0],high=[10,10]);
b = GM();
for i in range(0,10):
for j in range(0,10):
b.addG(Gaussian([i,j],[[1,0],[0,1]],1));
b.normalizeWeights();
b1 = GM();
for i in range(1,5):
b1.addGM(pzIn.runVBND(b,i));
b1.normalizeWeights();
b2 = GM();
b2.addGM(pzOut.runVBND(b1,0));
b2.normalizeWeights();
fig,axarr = plt.subplots(3);
[x,y,c] = b.plot2D(low=[0,0],high=[10,10],vis=False);
axarr[0].contourf(x,y,c);
[x,y,c] = b1.plot2D(low=[0,0],high=[10,10],vis=False);
axarr[1].contourf(x,y,c);
[x,y,c] = b2.plot2D(low=[0,0],high=[10,10],vis=False);
axarr[2].contourf(x,y,c);
plt.show();
def testLogisticRegression():
X = [[1,3],[2,4],[2,2],[4,3]];
t = [0,0,1,1];
cols = ['r','b','g','y','w','k','m'];
a = Softmax();
a.logRegress(X,t,1);
#a.plot2D(vis = True);
[x,y,c] = a.plot2D(vis = False);
plt.contourf(x,y,c);
for i in range(0,len(X)):
plt.scatter(X[i][0],X[i][1],c=cols[t[i]]);
testPoint = [1,2];
winPercent = a.pointEvalND(1,testPoint);
lossPercent = a.pointEvalND(0,testPoint);
print('Win:' + str(winPercent),'Loss:' + str(lossPercent));
plt.show();
def testDiscritization():
centroid = [0,0];
orientation = 35;
steep = 10;
length = 3;
width = 2;
softClass = [1];
pz = Softmax();
pz.buildOrientedRecModel(centroid,orientation,length,width,steepness=steep);
[x,y,c] = pz.plot2D(low=[-5,-5],high=[5,5],vis=False);
fig,axarr = plt.subplots(2);
axarr[0].contourf(x,y,c);
c=pz.discretize2D(softClass,low=[-5,-5],high=[5,5]);
axarr[1].contourf(x,y,c);
plt.show();
def testLWIS():
pz = Softmax();
pose = [0,0,0];
pz.buildTriView(pose,length=2,steepness=10);
prior = GM();
#prior.addG(Gaussian([1,0],[[1,0],[0,1]],1));
for i in range(0,100):
prior.addG(Gaussian([np.random.random()*4-2,np.random.random()*4-2],[[0.1,0],[0,0.1]],1))
prior.normalizeWeights();
post = GM();
for g in prior:
post.addG(pz.lwisUpdate(g,0,500,inverse=True));
#post.display();
[x1,y1,c1] = prior.plot2D(low=[-5,-5],high=[5,5],vis=False);
[x3,y3,c3] = pz.plot2D(low=[-5,-5],high=[5,5],vis=False);
[x2,y2,c2] = post.plot2D(low=[-5,-5],high=[5,5],vis=False);
diffs = c2-c1;
print(np.amax(c2));
print(np.amax(diffs));
print(np.amin(diffs));
fig,axarr = plt.subplots(4);
axarr[0].contourf(x1,y1,c1);
axarr[0].set_title('Prior');
axarr[1].contourf(x3,y3,c3);
axarr[1].set_title('Likelihood');
axarr[2].contourf(x2,y2,c2);
axarr[2].set_title('Posterior');
axarr[3].contourf(x2,y2,diffs);
axarr[3].set_title('Diffs');
plt.show();
if __name__ == "__main__":
test1DSoftmax();
#test2DSoftmax();
#test4DSoftmax();
#testRectangleModel();
#testGeneralModel();
#testPointsModel();
#testPlot3D();
#testOrientRecModel();
#testTriView();
#testMakeNear();
#testLogisticRegression();
#testDiscritization();
#testLWIS();
| StarcoderdataPython |
4823791 | <gh_stars>1-10
from .utils import Atom, Residue, ActiveSite
import numpy as np
import pandas as pd
from sklearn.metrics import jaccard_similarity_score
from .k_means import *
from .agglomerative import *
aa3 = "ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR".split()
def compute_similarity(site_a, site_b):
"""
Compute the similarity between two given ActiveSite instances.
This will calculate the Euclidean distance between the numeric vector
representation of two active sites.
The numeric vector representation is based on counts of each
amino acid present at a given active site.
Input: two ActiveSite instances
Output: the similarity between them (a floating point number)
"""
return np.linalg.norm(site_a - site_b)
def cluster_by_partitioning(active_sites):
"""
Cluster a given set of ActiveSite instances using a partitioning method.
Will call to k means clustering implementation that is housed in another script
Input: a list of ActiveSite instances
Output: a clustering of ActiveSite instances
(this is really a list of clusters, each of which is list of
ActiveSite instances)
"""
cls, sc = k_means(active_sites)
return cls
def cluster_hierarchically(active_sites):
"""
    Cluster the given set of ActiveSite instances using a hierarchical algorithm.
Calls to agglomerative clustering algorithm housed in another script
Input: a list of ActiveSite instances
Output: a list of clusterings
    (each clustering is a list of lists of ActiveSite instances)
"""
cls, sc = agglomerative(active_sites)
return cls
def jacaard(clusters_a, clusters_b):
"""
    Jaccard similarity score from sklearn
    Input: two lists of cluster assignments
    Output: the similarity score (a floating point number)
"""
return jaccard_similarity_score(clusters_a, clusters_b)
| StarcoderdataPython |
141588 | <filename>tests/test_options.py
# SPDX-License-Identifier: Apache-2.0
"""
Tests topology.
"""
import unittest
import numpy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn import datasets
from skl2onnx import to_onnx, update_registered_converter
from skl2onnx.algebra.onnx_ops import OnnxIdentity, OnnxAdd
from test_utils import TARGET_OPSET
class DummyTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
TransformerMixin.__init__(self)
BaseEstimator.__init__(self)
def fit(self, X, y, sample_weight=None):
return self
def transform(self, X):
return X
def dummy_shape_calculator(operator):
op_input = operator.inputs[0]
operator.outputs[0].type.shape = op_input.type.shape
def dummy_converter(scope, operator, container):
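    # Encodes which option values the converter received into a distinctive
    # constant that is added to the input, so each test below can verify the
    # chosen option by searching the serialized ONNX graph for that number.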
X = operator.inputs[0]
out = operator.outputs
opv = container.target_opset
options = container.get_options(operator.raw_operator)
if len(options) == 0:
cst = numpy.array([57777], dtype=numpy.float32)
elif len(options) == 1:
opts = list(options.items())
if opts[0][0] == 'opt1':
if opts[0][1] is None:
cst = numpy.array([57789], dtype=numpy.float32)
elif opts[0][1]:
cst = numpy.array([57778], dtype=numpy.float32)
elif not opts[0][1]:
cst = numpy.array([57779], dtype=numpy.float32)
else:
raise AssertionError("Issue with %r." % options)
elif opts[0][0] == 'opt3':
if opts[0][1] is None:
cst = numpy.array([51789], dtype=numpy.float32)
elif opts[0][1] == 'r':
cst = numpy.array([56779], dtype=numpy.float32)
elif opts[0][1] == 't':
cst = numpy.array([58779], dtype=numpy.float32)
else:
raise AssertionError("Issue with %r." % options)
elif opts[0][0] == 'opt2':
if opts[0][1] is None:
cst = numpy.array([44444], dtype=numpy.float32)
elif isinstance(opts[0][1], int):
cst = numpy.array([opts[0][1]], dtype=numpy.float32)
else:
raise AssertionError("Issue with %r." % options)
else:
raise NotImplementedError()
else:
raise NotImplementedError()
id1 = OnnxIdentity(X, op_version=opv)
op = OnnxAdd(id1, cst, op_version=opv)
id2 = OnnxIdentity(op, output_names=out[:1],
op_version=opv)
id2.add_to(scope, container)
class TestOptions(unittest.TestCase):
@classmethod
def setUpClass(cls):
update_registered_converter(
DummyTransformer, "IdentityTransformer",
dummy_shape_calculator, dummy_converter,
options={'opt1': [False, True], 'opt2': None,
'opt3': ('r', 't'), 'opt4': -1})
def check_in(self, value, onx):
if str(value) not in str(onx):
raise AssertionError(
"Unable to find %r in\n%s" % (str(value), str(onx)))
def test_no_options(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET)
self.check_in('57777', model_onnx)
def test_options_list_true(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt1': True})
self.check_in('57778', model_onnx)
def test_options_list_false(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt1': False})
self.check_in('57779', model_onnx)
def test_options_list_outside_none(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt1': None})
self.check_in('57789', model_onnx)
def test_options_list_outside(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
with self.assertRaises(ValueError):
# value not allowed
to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt1': 'OUT'})
def test_options_integer(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
with self.assertRaises(TypeError):
# integer not allowed
to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt4': 44444})
def test_options_tuple1(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt3': 't'})
self.check_in('58779', model_onnx)
def test_options_tuple2(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt3': 'r'})
self.check_in('56779', model_onnx)
def test_options_tuple_none(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt3': None})
self.check_in('51789', model_onnx)
def test_options_tuple_out(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
with self.assertRaises(ValueError):
# value not allowed
to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt3': 'G'})
def test_options_none(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt2': None})
self.check_in('44444', model_onnx)
def test_options_num(self):
digits = datasets.load_digits(n_class=6)
Xd = digits.data[:20].astype(numpy.float32)
yd = digits.target[:20]
idtr = DummyTransformer().fit(Xd, yd)
model_onnx = to_onnx(idtr, Xd, target_opset=TARGET_OPSET,
options={'opt2': 33333})
self.check_in('33333', model_onnx)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
169690 | <gh_stars>1-10
"""
Script to calculate the surface area of gridded data.
The output from this script is used when summing up total precipitation and
total area of precipitation
Created: Oct 2016
Author: <NAME> <EMAIL>
"""
import os, errno
from netCDF4 import Dataset
import netCDF4
import numpy as np
import datetime as dt
import pandas
import xray
import Ngl
from scipy import stats
import math
import argparse
import resource
parser = argparse.ArgumentParser(description="map event data")
parser.add_argument('--Version',type=str,nargs=1,help='Version of Data, Standard, low, 6th_from6 etc')
parser.add_argument('--Data',type=str,nargs=1,help='type of Data, TRMM, ERAI, or CESM')
parser.add_argument('--startyr',metavar='startyr',type=int,nargs=1,help='start year for analysis')
parser.add_argument('--endyr',type=int,nargs=1,help='end year for analysis')
args = parser.parse_args()
print args.Data
pi = 3.14159 # pi
rE = 6.371E6 # radius of earth in m
Data = args.Data[0]
startyr = args.startyr[0]
endyr = args.endyr[0]
if Data == "TRMM":
filetimespan = "3hrly"
DirP = '/home/disk/eos4/rachel/Obs/TRMM/' + filetimespan + '/'
FileP = 'TRMM_' + str(startyr) + '-' + str(endyr) + '_3B42_3hrly_nonan.nc'
elif Data == "TRMM_ERAIgd":
filetimespan = "3hrly"
DirP = '/home/disk/eos4/rachel/Obs/TRMM/' + filetimespan + '/'
FileP = "regrid2ERAI_TRMM_3B42_" + str(startyr) + '-' + str(endyr) + ".nc"
elif Data == "ERAI":
filetimespan = "3hrly"
DirP = '/home/disk/eos4/rachel/Obs/ERAI/Precip_' + filetimespan + '/'
FileP = 'ERAI_Totalprecip_' + str(startyr) + '-' + str(endyr) + '_preprocess.nc'
elif Data == "ERA20C":
DirP = '/home/disk/eos4/rachel/Obs/ERA_20C/'
FileP = 'ERA_20C_Totalprecip_' + str(startyr) + '-' + str(endyr) + '_preprocess.nc'
elif Data == "CESM":
DirP = '/home/disk/eos4/rachel/EventTracking/Inputs/CESM/f.e13.FAMPIC5.ne120_ne120.1979_2012.001/'
FileP = 'f.e13.FAMIPC5.ne120_ne120_TotalPrecip_' + str(startyr) + '-' + str(endyr) + '.nc'
elif Data == "GPCP":
DirP = '/home/disk/eos4/rachel/Obs/GPCP/Daily/'
FileP = 'GPCP_1DD_v1.2_199610-201510.nc'
print DirP + FileP
#Get lons and lats
FileIn = xray.open_dataset(DirP + FileP)
if Data in ["CESM",'GPCP']:
lats = FileIn['lat']
lons = FileIn['lon']
else:
lats = FileIn['latitude']
lons = FileIn['longitude']
#convert to radians
latsr = lats * pi / 180.0
lonsr = lons * pi / 180.0
nlats = len(lats)
nlons = len(lons)
area = np.zeros([nlats,nlons],np.float)
lonvalue = np.zeros(nlons,np.float)
# Almost all grids will have equal longitude spacing, but just in case:
for ilon in range(0,nlons-1):
lonvalue[ilon] = abs(lonsr[ilon+1] - lonsr[ilon])
lonvalue[nlons-1] = abs(lonsr[nlons-1] - lonsr[nlons-2])
for ilat in range(0,nlats):
print ilat
# Based on: area above a latitude lat = 2piR^2(1 - sin(lat)
# Thus area between two latitudes: 2piR^2(sin(lat1) - sin(lat2))
# Break into 2pi and multiply by difference between lons: R^2(sin(lat1)-sin(lat2)) * (lon1 - lon2)
if ilat == 0:
        latvalue = abs(np.sin(0.5*(latsr[ilat+1] + latsr[ilat])) - np.sin(latsr[ilat]))
elif ilat == nlats-1:
latvalue = abs(np.sin(latsr[ilat]) - np.sin(0.5 * (latsr[ilat-1] + latsr[ilat])))
else:
latvalue = abs(np.sin(0.5*(latsr[ilat] + latsr[ilat+1])) - np.sin(0.5 * (latsr[ilat]+ latsr[ilat-1])))
for ilon in range(0,nlons):
area[ilat,ilon] = abs(rE * rE * latvalue * lonvalue[ilon])
#old version:
"""
if ilat == nlats-1:
xlength = (rE) * (latsr[ilat] - latsr[ilat-1])
else:
xlength = (rE) * (latsr[ilat+1] - latsr[ilat])
if ilon == nlons-1:
# Assuming constant longitude spacing!
ylength = (rE * math.cos(latsr[ilat])) * (lonsr[ilon] - lonsr[ilon-1])
else:
ylength = (rE * math.cos(latsr[ilat])) * (lonsr[ilon-1] - lonsr[ilon])
area[ilat,ilon] = xlength * ylength
"""
ncfile = Dataset(DirP + Data + "_SurfaceArea.nc", 'w')
ncfile.createDimension('lon', nlons)
ncfile.createDimension('lat', nlats)
SurfA = ncfile.createVariable('SurfaceArea','f4',('lat','lon'),fill_value=-9999)
SurfA[...] = area[...]
print np.sum(SurfA)
ncfile.close()
Ngl.end()
| StarcoderdataPython |
1706266 | # Collaborators (including web sites where you got help: (enter none if you didn't need help)
#
def factorial_calc(x): #you may choose the name of the parameter
    result = 1
    for i in range(2, x + 1):
        result *= i
    return result # be sure to return the factorial
if __name__ == '__main__':
# Test your code with this first
# Change the argument to try different values
print(factorial_calc(5))
# After you are satisfied with your results, use input() to prompt the user for a value:
#num = input("Enter value to factorialize: ")
#print(factorial_calc(int(num)))
| StarcoderdataPython |
1662512 | <reponame>KATO-Hiro/AtCoder
# -*- coding: utf-8 -*-
def main():
n, m = map(int, input().split())
for i in range(1, n + 1):
if i != m:
print(i)
exit()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3385504 | <reponame>Cynthia-Borot-PNE/Geotrek-admin
from geotrek.authent.serializers import StructureSerializer
from geotrek.common.serializers import PictogramSerializerMixin, BasePublishableSerializerMixin
from geotrek.infrastructure import models as infrastructure_models
class InfrastructureTypeSerializer(PictogramSerializerMixin):
class Meta:
model = infrastructure_models.InfrastructureType
fields = ('id', 'pictogram', 'label')
class InfrastructureSerializer(BasePublishableSerializerMixin):
type = InfrastructureTypeSerializer()
structure = StructureSerializer()
class Meta:
model = infrastructure_models.Infrastructure
id_field = 'id' # By default on this model it's topo_object = OneToOneField(parent_link=True)
geo_field = 'geom'
fields = ('id', ) + \
('id', 'structure', 'name', 'type') + \
BasePublishableSerializerMixin.Meta.fields
| StarcoderdataPython |
124049 | <filename>symupy/runtime/logic/states.py
"""
This module defines the basic states required to execute and launch a simulation.
The states are defined as:
* **Compliance**: This state is defined to check availability of files and candidates.
* **Connect**: This state is defined to process
* **Initialize**: This state perform initialization tasks. In example, loading the file scenarios into the simulator. Declaring initial conditions for the platoon, etc.
* **Preroutine**: Tasks to be done before the querying information from the simulator
* **Query**: Tasks of parsing data and querying information from the simulator
* **Control**: Perform decision tasks for the platoon
* **Push**: Push updated information to the simulator for platoon vehicles
* **Postroutine**: Performs tasks after the information has been pushed.
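
A minimal usage sketch (assumes only the states and the ``on_event`` API defined
below):

    state = Compliance()
    for event in ("Connect", "Initialize", "PreRoutine", "Query",
                  "Control", "Push", "PostRoutine", "Terminate"):
        state = state.on_event(event)
    # state is now Terminate, which absorbs any further events.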
"""
from .base import State
# Start of our states
class Compliance(State):
"""
The state which declares an status to check file compliance .
"""
def on_event(self, event: str) -> None:
if event == "Connect":
return Connect()
return self
class Connect(State):
"""
The state which declares the creation of a connection with the simulator
"""
def on_event(self, event: str):
if event == "Initialize":
return Initialize()
return self
class Initialize(State):
"""
The state which initializes values for the scenario simulation
"""
def on_event(self, event: str):
if event == "PreRoutine":
return PreRoutine()
return self
class PreRoutine(State):
"""
The state which performs task previous to the interaction with the simulator
"""
def on_event(self, event: str):
if event == "Query":
return Query()
return self
class Query(State):
"""
The state which retrieves information from the simulator
"""
def on_event(self, event: str):
if event == "Control":
return Control()
return self
class Control(State):
"""
The state which computes the control decision
"""
def on_event(self, event: str):
if event == "Push":
return Push()
return self
class Push(State):
"""
The state which pushes data back to the simulator
"""
def on_event(self, event: str):
if event == "PostRoutine":
return PostRoutine()
return self
class PostRoutine(State):
"""
The state which logs information or compute step indicators
"""
def on_event(self, event: str):
if event == "PreRoutine":
return PreRoutine()
elif event == "Terminate":
return Terminate()
return self
class Terminate(State):
"""
The state which declares the end of a simulation
"""
def on_event(self, event: str):
return self
# End of our states.
| StarcoderdataPython |
63509 | <filename>src/decisionengine_modules/GCE/sources/GCEBillingInfoSourceProxy.py
from decisionengine.framework.modules import Source, SourceProxy
GCEBillingInfoSourceProxy = SourceProxy.SourceProxy
Source.describe(GCEBillingInfoSourceProxy)
| StarcoderdataPython |
1643800 | '''
Copyright 2010 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Contains parser and data structures for serialising and deserialising the dacp binary
protocol
'''
import struct
import binascii
class string_content_element():
'''
A content element holding a string
name - the 4 letter element name
content - the element content string
'''
def __init__(self, name, content):
self.name = name
self.content = content
def get_bytes(self):
length = len(self.content)
return struct.pack(">4sI" + str(length) + "s", self.name, length, self.content)
def to_string(self, indent):
print indent + "S[" + self.name + "]: " + self.content
class number_content_element():
'''
A content element holding a number:
name - the 4 letter element name
content - the element content number
type - the number type code, must be one of: B (byte), H (short), I (integer), Q (long)
'''
def __init__(self, name, content, number_type):
self.name = name
self.content = content
self.type = number_type
self.number_types_by_type = { "B":1, "H":2, "I":4, "Q":8 }
if not self.number_types_by_type.has_key(self.type):
raise ValueError("Number type must be one of: B, H, I, Q not: " + self.type)
# check the number size
max_size = 2 ** (8 * self.number_types_by_type[self.type]) - 1
if (self.content < 0 or self.content > max_size):
raise ValueError("Number must be between 0 and %d, not %d" % (max_size, self.content))
def get_bytes(self):
number_type_length = self.number_types_by_type[self.type]
return struct.pack(">4sI" + self.type, self.name, number_type_length, self.content)
def to_string(self, indent):
print indent + self.type + "[" + self.name + "]: " + str(self.content)
class hex_content_element():
'''
A content element holding a hex string
name - the 4 letter element name
content - the element content as a hex string
'''
def __init__(self, name, content):
self.name = name
if len(content) % 2 == 0:
self.content = content.upper()
else:
# pad the hex string with a leading 0 if it has a non-even
# number of characters otherwise the conversion back to
# binary is more difficult
self.content = "0" + content.upper()
def get_bytes(self):
length = len(self.content) / 2
bytes = binascii.a2b_hex(self.content)
return struct.pack(">4sI" + str(length) + "s", self.name, length, bytes)
def to_string(self, indent):
print indent + "X[" + self.name + "]: " + self.content
class parent_element():
'''
An element holding a collection of other elements
name - the 4 letter element name
children - a list or tuple of child elements
'''
def __init__(self, name, children):
self.name = name
self.children = children
def get_bytes(self):
child_bytes = ''
for child in self.children:
child_bytes = child_bytes + child.get_bytes()
length = len(child_bytes)
return struct.pack(">4sI" + str(length) + "s", self.name, length, child_bytes)
def assert_self(self, name):
if self.name == name:
return self
else:
raise AssertionError("Expected element with name: " + name + " not: " + self.name)
def assert_child(self, child_name):
for child in self.children:
if child.name == child_name:
return child
raise AssertionError("Child with name: " + child_name + " does not exist for parent: " + self.name)
def to_string(self, indent):
print indent + "P[" + self.name + "]:"
for child in self.children:
child.to_string(indent + " ")
class parser_exception(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class parser():
def __init__(self):
self.nodes = ("arsv", "mupd", "msrv", "mdcl", "mccr", "cmst", "mlog", "agal", "mlcl", "mshl", "mlit", "abro", "abar", "apso", "caci", "avdb", "cmgt", "aply", "adbs", "cmpa")
self.strings = ("mcnm", "mcna", "minm", "cann", "cana", "canl", "asaa", "asal", "asar", "cmty", "cmnm")
self.number_types_by_length = { 1:"B", 2:"H", 4:"I", 8:"Q" }
def parse(self, data, assert_status = True, allow_null = False):
'''
Transform the supplied binary data into a structure of element objects. Returns
a parent_element.
assert_status - if true throws a parser_exception when the return code (mstt) != 200 (OK)
allow_null - if false throws a parser_exception when there are no data elements, otherwise return None
'''
server_response = self._parse(data)
if len(server_response) == 0:
if allow_null:
return None
else:
raise parser_exception("data did not contain any valid dacp elements")
if len(server_response) > 1:
raise parser_exception("data contained too many elements: " + len(server_response))
if assert_status:
if server_response[0].assert_child("mstt").content != 200:
raise parser_exception("dacp error: " + server_response[0].assert_child("mstt").content)
return server_response[0]
def _parse(self, data):
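        # Recursively parses the DACP TLV stream: each element is a 4-byte name,
        # a 4-byte big-endian length, then `length` bytes of payload. The payload
        # is decoded as child elements, a string, a number, or raw hex depending
        # on the element name and length.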
elements = []
remaining_data = data
while len(remaining_data) > 0:
data_length = len(remaining_data) - struct.calcsize("4sI")
element_name, element_length, temp_data = struct.unpack(">4sI" + str(data_length) + "s", remaining_data)
remaining_length = data_length - element_length
'''
print "DEBUG ------------------------------"
print "parsing data len: ", len(remaining_data)
print "data_length: ", data_length
print "element_name: ", element_name
print "element_length: ", element_length
print "temp_data len: ", len(temp_data)
print "remaining_length: ", remaining_length
'''
# if the element is a node type
if element_name in self.nodes:
element_data, remaining_data = struct.unpack(">" + str(element_length) + "s" + str(remaining_length) + "s", temp_data)
children = self._parse(element_data)
elements.append(parent_element(element_name, children))
continue
# if the element is a string type
if element_name in self.strings:
element_data, remaining_data = struct.unpack(">" + str(element_length) + "s" + str(remaining_length) + "s", temp_data)
elements.append(string_content_element(element_name, element_data))
continue
# if the element length matches one of the number types
if self.number_types_by_length.has_key(element_length):
number_type = self.number_types_by_length[element_length]
element_data, remaining_data = struct.unpack(">" + number_type + str(remaining_length) + "s", temp_data)
elements.append(number_content_element(element_name, element_data, number_type))
continue
# otherwise convert the data to hex
element_data, remaining_data = struct.unpack(">" + str(element_length) + "s" + str(remaining_length) + "s", temp_data)
hex = binascii.b2a_hex(element_data).upper()
elements.append(hex_content_element(element_name, hex))
return elements | StarcoderdataPython |
3392807 | # Generated by Django 3.2.2 on 2021-05-13 10:12
import ai.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ai', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='project',
name='data',
field=models.FileField(blank=True, null=True, upload_to=ai.models.user_directory_path),
),
]
| StarcoderdataPython |
3261001 | <reponame>vaalu/fxalpha2.0
import json
from kafka import KafkaConsumer
topic_name='3499'
consumer = KafkaConsumer( topic_name,
auto_offset_reset='latest',
bootstrap_servers=['localhost:9092'],
api_version=(0, 10),
consumer_timeout_ms=1000)
while True:
for msg in consumer:
print('Message from topic %s - %s'%(topic_name, msg.value))
if consumer is not None:
consumer.close()
print('Kafka subscription to topic - end.') | StarcoderdataPython |
3251140 | import os
import sys
from distutils.core import setup
if sys.version_info[:2] < (2, 7):
required = ['ordereddict']
else:
required = []
long_desc = open('enum/doc/enum.rst').read()
setup( name='enum34',
version='1.0.4',
url='https://pypi.python.org/pypi/enum34',
packages=['enum'],
package_data={
'enum' : [
'LICENSE',
'README',
'doc/enum.rst',
'doc/enum.pdf',
'test_enum.py',
]
},
license='BSD License',
description='Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4',
long_description=long_desc,
provides=['enum'],
install_requires=required,
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
)
| StarcoderdataPython |
1728063 | #!/usr/bin/env python3
# Programa simple para aprender a usar Qt
from __future__ import with_statement
import sys
import matplotlib
matplotlib.use('Qt4Agg')
from PyQt4 import QtGui, QtCore
from ellipse_plot import Ui_MplMainWindow
from polarization_routines import plot_ellipse, getAnglesFromEllipse, getAnglesFromJones
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from numpy import pi
import numpy as np
__version__ = '1.0'
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class MainWindow(QtGui.QMainWindow, Ui_MplMainWindow):
def __init__(self, parent = None ):
super(MainWindow, self).__init__(parent)
self.setupUi(self)
self.setWindowTitle("Assistant for polarization transformations")
self.action_About.triggered.connect(self.about)
self.action_Close.triggered.connect(self.fileQuit)
self.action_Documentation.triggered.connect(self.get_to_doc)
# Generate plots in case the button is clicked
self.generateButton.clicked.connect(self.GeneratePlots)
self.jonesGroupBox.clicked.connect(self.jonesChecked)
self.ellipticityGroupBox.clicked.connect(self.ellipticityChecked)
self.jonesTranslatePushButton.clicked.connect(self.TranslateJones)
self.ellipticityTranslatePushButton.clicked.connect(self.TranslateEllipse)
def TranslateJones(self):
""" This function takes the values of jones vector representation,
translates them into the ellipse and lab representations and updates those fields. """
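        # eval() lets the user type expressions such as pi/4 in the line edits
        # (pi is imported from numpy at module level); note that eval executes
        # arbitrary expressions, so this is only suitable for trusted input.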
psi = eval(self.PsiLineEdit.text())
delta = eval(self.deltaLineEdit.text())
angles = getAnglesFromJones(psi, delta )
self.qwpLineEdit.setText(_translate("MplMainWindow", str(angles['alpha']), None))
self.polLineEdit.setText(_translate("MplMainWindow", str(angles['psi']), None))
self.directionLineEdit.setText(_translate("MplMainWindow", str(angles['phi']), None))
self.eangleLineEdit.setText(_translate("MplMainWindow", str(angles['theta']), None))
def TranslateEllipse(self):
""" This function takes the values of ellipse parameters representation,
translates them into the jones vector and lab representations and updates those fields. """
phi = eval(self.directionLineEdit.text())
theta = eval(self.eangleLineEdit.text())
angles = getAnglesFromEllipse(phi, theta)
self.qwpLineEdit.setText(_translate("MplMainWindow", str(angles['alpha']), None))
self.polLineEdit.setText(_translate("MplMainWindow", str(angles['psi']), None))
self.PsiLineEdit.setText(_translate("MplMainWindow", str(angles['psi']), None))
self.deltaLineEdit.setText(_translate("MplMainWindow", str(angles['delta']), None))
def jonesChecked(self):
self.ellipticityGroupBox.setChecked(False)
def ellipticityChecked(self):
self.jonesGroupBox.setChecked(False)
def GeneratePlots(self):
""" Reads values of angles and sends them to the mplWidget """
polAngle = eval(self.polLineEdit.text())
qwpAngle = eval(self.qwpLineEdit.text())
self.mpl_1.plot_ellipses(polAngle, qwpAngle)
def fileQuit(self):
self.close()
def keyPressEvent(self, event):
key = event.key()
if key == QtCore.Qt.Key_Return:
self.GeneratePlots()
def about(self):
QtGui.QMessageBox.about(self, "About",
"""
<br>Polarization State Plotter
<br>Platform for plotting and transforming polarization ellipses
<br>Licensed under the terms of the MIT License
<p>This program is intended for plotting the polarization ellipse of
a state of polarization given by an arbitrary Jones vector. It is also
a tool for easily identifying the correct translation between different
representations of polarization states.
""" )
def get_to_doc(self):
from PyQt4.QtGui import QDesktopServices
from PyQt4.QtCore import QUrl
url = QUrl()
url.setUrl('https://github.com/RomaniukVadim/ellipsometric-calculator')
QDesktopServices.openUrl(url)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
frame = MainWindow()
frame.show()
sys.exit(app.exec_())
| StarcoderdataPython |
1720555 | from carla_utils import carla
cc = carla.ColorConverter
import re
import numpy as np
import collections
import pygame
def find_weather_presets():
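    # The regex splits CamelCase preset names into words, e.g. "ClearNoon"
    # becomes "Clear Noon"; presets are the capitalized attributes of
    # carla.WeatherParameters.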
rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))
presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]
return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]
def get_actor_display_name(actor, truncate=250):
name = ' '.join(actor.type_id.replace('_', '.').title().split('.')[1:])
return (name[:truncate - 1] + u'\u2026') if len(name) > truncate else name
def make_surface(carla_image):
carla_image.convert(cc.Raw)
array = np.frombuffer(carla_image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (carla_image.height, carla_image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
return pygame.surfarray.make_surface(array.swapaxes(0, 1))
def parse_collision_history(history):
history_dict = collections.defaultdict(int)
if history:
for frame, data, intensity in history:
history_dict[frame] += intensity
return history_dict
class Util(object):
@staticmethod
def blits(destination_surface, source_surfaces, rect=None, blend_mode=0):
"""Function that renders the all the source surfaces in a destination source"""
for surface in source_surfaces:
destination_surface.blit(surface[0], surface[1], rect, blend_mode)
@staticmethod
def length(v):
"""Returns the length of a vector"""
return np.sqrt(v.x**2 + v.y**2 + v.z**2)
@staticmethod
def get_bounding_box(actor):
"""Gets the bounding box corners of an actor in world space"""
bb = actor.trigger_volume.extent
corners = [carla.Location(x=-bb.x, y=-bb.y),
carla.Location(x=bb.x, y=-bb.y),
carla.Location(x=bb.x, y=bb.y),
carla.Location(x=-bb.x, y=bb.y),
carla.Location(x=-bb.x, y=-bb.y)]
corners = [x + actor.trigger_volume.location for x in corners]
t = actor.get_transform()
t.transform(corners)
return corners
COLOR_WHITE = pygame.Color(255, 255, 255)
COLOR_BLACK = pygame.Color(0, 0, 0)
class FadingText(object):
"""Renders texts that fades out after some seconds that the user specifies"""
def __init__(self, font, dim, pos):
"""Initializes variables such as text font, dimensions and position"""
self.font = font
self.dim = dim
self.pos = pos
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
def set_text(self, text, color=COLOR_WHITE, seconds=2.0):
"""Sets the text, color and seconds until fade out"""
text_texture = self.font.render(text, True, color)
self.surface = pygame.Surface(self.dim)
self.seconds_left = seconds
self.surface.fill(COLOR_BLACK)
self.surface.blit(text_texture, (10, 11))
def tick(self, clock):
"""Each frame, it shows the displayed text for some specified seconds, if any"""
delta_seconds = 1e-3 * clock.get_time()
self.seconds_left = max(0.0, self.seconds_left - delta_seconds)
self.surface.set_alpha(500.0 * self.seconds_left)
def render(self, display):
""" Renders the text in its surface and its position"""
display.blit(self.surface, self.pos)
class HelpText(object):
def __init__(self, font, width, height):
"""Renders the help text that shows the controls for using no rendering mode"""
        lines = (__doc__ or '').split('\n')  # this module may have no docstring
self.font = font
self.dim = (680, len(lines) * 22 + 12)
self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
self.surface.fill(COLOR_BLACK)
for n, line in enumerate(lines):
text_texture = self.font.render(line, True, COLOR_WHITE)
self.surface.blit(text_texture, (22, n * 22))
self._render = False
self.surface.set_alpha(220)
def toggle(self):
"""Toggles display of help text"""
self._render = not self._render
def render(self, display):
"""Renders the help text, if enabled"""
if self._render:
display.blit(self.surface, self.pos)
| StarcoderdataPython |
3308557 | from typing import List, Tuple
from abides_core import Message
from abides_markets.order_book import OrderBook
from abides_markets.orders import LimitOrder, Side
SYMBOL = "X"
TIME = 0
class FakeExchangeAgent:
def __init__(self):
self.messages = []
self.current_time = TIME
self.mkt_open = TIME
self.book_logging = None
self.stream_history = 10
def reset(self):
self.messages = []
def send_message(self, recipient_id: int, message: Message, _: int = 0):
self.messages.append((recipient_id, message))
def logEvent(self, *args, **kwargs):
pass
def setup_book_with_orders(
bids: List[Tuple[int, List[int]]] = [], asks: List[Tuple[int, List[int]]] = []
) -> Tuple[OrderBook, FakeExchangeAgent, List[LimitOrder]]:
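    """Builds an OrderBook pre-filled with limit orders.

    bids and asks are lists of (price, [quantities]) pairs; one LimitOrder is
    placed per quantity. The fake agent's message log is cleared before
    returning, so tests only observe messages produced after setup.
    """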
agent = FakeExchangeAgent()
book = OrderBook(agent, SYMBOL)
orders = []
for price, quantities in bids:
for quantity in quantities:
order = LimitOrder(1, TIME, SYMBOL, quantity, Side.BID, price)
book.handle_limit_order(order)
orders.append(order)
for price, quantities in asks:
for quantity in quantities:
order = LimitOrder(1, TIME, SYMBOL, quantity, Side.ASK, price)
book.handle_limit_order(order)
orders.append(order)
agent.reset()
return book, agent, orders
| StarcoderdataPython |
1748832 | r"""
ECEI2D
=======
Contains the 2D version of the synthetic Electron Cyclotron
Emission Imaging (ECEI) diagnostic.
Unit Conventions
-----------------
In ECEI2D, Gaussian unit is used by default. The units for common quantities
are:
length:
centi-meter
time:
second
mass:
gram
magnetic field:
Gauss
temperature:
erg (we use energy unit for particle temperature)
Usage
------
Preparation
************
A complete ECEI2D run requires knowledge of the plasma, and the receivers.
The former should be provided as an instance of
:py:class:`ECEI_Profile<FPSDP.Plasma.PlasmaProfile.ECEI_Profile>`, and the
latter a list of
:py:class:`Detector2D<FPSDP.Diagnostics.ECEI.ECEI2D.Detector2D.Detector2D>`.
We will assume these two objects have been created and named `plasma2d` and
`detectors`.
First, we import the ECEImagingSystem class::
>>> from sdp.diagnostic.ecei.ecei2d import ECEImagingSystem
Then, we initialize the ECEI with plasma2d and detectors::
>>> ecei = ECEImagingSystem(plasma2d, detectors)
Note that some additional parameters can be provided at initialization;
check the doc-string in :py:class:`ECEImagingSystem
<FPSDP.Diagnostics.ECEI.ECEI2D.Imaging.ECEImagingSystem>` for a detailed list
of these parameters.
The next step is to set up the calculation area. ECEI uses 3D Cartesian
coordinates and assumes rectangular cells, so three 1D arrays specifying
grids along the Z (local toroidal), Y (vertical), and X (radial) directions are needed.
The detector is always assumed to be on the low-field side, and in vacuum. The
calculation area needs to include part of the vacuum region, and be large enough
to include all of the resonant region. The X1D mesh also determines the calculation
start and end points, so it normally runs from larger X (vacuum region outside of
the plasma) to smaller X (inner plasma).
Let's say we choose a uniform XYZ grid; we can create it using
:py:mod:`numpy <numpy>` as::
>>> X1D = numpy.linspace(251, 216, 160)
>>> Y1D = numpy.linspace(-30, 30, 65)
>>> Z1D = numpy.linspace(-30, 30, 65)
and set ECEI calculation area::
>>> ecei.set_coords([Z1D, Y1D, X1D])
It is possible that different detectors need different initial meshes. This is
particularly important if these channels have very different resonance
locations. In this case, we can specify mesh for chosen channels only.
For example, we can set channel 0, and channel 3 only::
>>> ecei.set_coords([Z1D, Y1D, X1D], channelID=[0, 3])
Note that channelID is numbered from 0.
It is recommended to run the automatic mesh adjustment before diagnosing::
>>> ecei.auto_adjust_mesh(fine_coeff = 1)
This function runs the diagnostic on the preset mesh and optimizes its X grid by
making the mesh fine within the resonance region and coarse elsewhere. The
fine_coeff parameter controls the mesh density: the larger this parameter, the
finer the resulting mesh overall.
Diagnose
*********
We can now run ECEI and observe the result::
>>> ecei.diagnose(time=[0, 1, 2])
Running diagnose() without a `time` argument will diagnose the equilibrium
plasma, while a given `time` list will result in a series of diagnoses on the
perturbed plasma at the corresponding time snaps.
The measured electron temperature is stored in the `Te` attribute.
>>> ecei.Te
array([[ 1.47142490e-08, 1.46694915e-08, 1.46748651e-08],
[ 1.56084333e-08, 1.51977835e-08, 1.48657565e-08],
[ 1.69261271e-08, 1.65879854e-08, 1.61561885e-08],
[ 1.58508369e-08, 1.63720864e-08, 1.68176195e-08],
[ 1.46057450e-08, 1.47844442e-08, 1.50868828e-08],
[ 1.45398116e-08, 1.45283573e-08, 1.45292955e-08],
[ 1.49914189e-08, 1.48120112e-08, 1.47148505e-08],
[ 1.65238937e-08, 1.60221945e-08, 1.55572079e-08]])
>>> ecei.Te.shape
(8L, 3L)
The first dimension of Te corresponds to the detectors, and the second
dimension for time.
It is OK to run diagnose multiple times with different parameters, but the
result will be overwritten.
Post analysis
**************
ECEImagingSystem provides additional information about the diagnosing process.
The most useful one is `view_spots`. This attribute stores a list of detailed
emission spot information for each channel in the most recent time snap.
>>> vs = ecei.view_spots
>>> len(vs)
8
>>> vs[0].shape
(65L, 63L)
The shape of each view_spot is [NY, NX]; it contains the instrumental function
on the 2D plane, with the largest point normalized to 1. This means the
measured Te is just a weighted average of the Te on the 2D plane under this
weighting function.
More information can be obtained from the `channels` attribute, which is
literally the list of ECE2D objects that carry out the diagnostic.
The propagation and absorption of the probing waves can be found in the
`propagator` attribute of each channel.
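For example, accessing the first channel's propagator (a sketch following the
attribute layout described above)::

    >>> channel0 = ecei.channels[0]
    >>> propagator0 = channel0.propagator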
Modules
--------
CurrentCorrelationTensor:
Contains classes for calculating current correlation tensor. Mainly
includes non-relativistic and relativistic versions.
Detector2D:
Contains Detector class for ECEI2D. Now it has GaussianAntenna type
detector.
Reciprocity:
Main module carrying out 2D ECE calculation. ECE2D class is the main
class.
Imaging:
Contains multi-channel ECE Imaging class. ECEImagingSystem is the main
class.
"""
from .imaging import ECEImagingSystem
from .ece import ECE2D
from .detector2d import GaussianAntenna
| StarcoderdataPython |
1678425 | import argparse
import sys
from pathlib import Path
import day03
def main(*argv):
parser = argparse.ArgumentParser("Advent of Code - Day 3")
parser.add_argument("filename", type=str, help="The input filename")
args = parser.parse_args(argv)
with open(Path(args.filename), 'rt') as file:
lines = file.readlines()
lines = [l for l in lines if l != ""]
values = day03.input_to_array(lines)
most_common = day03.most_common_value(values, keep=1)
most_common_binary = day03.list_to_binary(most_common)
print(f"The gamma rate is {most_common_binary} ({int(most_common_binary, 2)}) ")
inverted = day03.invert(most_common)
inverted_binary = day03.list_to_binary(inverted)
print(f"The epsilon rate is {inverted_binary} ({int(inverted_binary, 2)}) ")
print(f"The power consumption is {int(most_common_binary, 2) * int(inverted_binary, 2)}")
oxygen_rating = day03.generator_rating(values, inverted=False)
print(oxygen_rating)
scrubber_rating = day03.generator_rating(values, inverted=True)
print(scrubber_rating)
print(f"The life support rating is "
f"{int(day03.list_to_binary(oxygen_rating), 2) * int(day03.list_to_binary(scrubber_rating), 2)}")
if __name__ == "__main__":
    main(*sys.argv[1:])
| StarcoderdataPython |
3349789 | import mango
from decimal import Decimal
from mango.marketmaking.orderreconciler import NullOrderReconciler
def test_nulloperation():
existing = [
mango.Order.from_basic_info(mango.Side.BUY, price=Decimal(1), quantity=Decimal(10)),
mango.Order.from_basic_info(mango.Side.SELL, price=Decimal(2), quantity=Decimal(20))
]
desired = [
mango.Order.from_basic_info(mango.Side.BUY, price=Decimal(3), quantity=Decimal(30)),
mango.Order.from_basic_info(mango.Side.SELL, price=Decimal(4), quantity=Decimal(40))
]
actual = NullOrderReconciler()
result = actual.reconcile(None, existing, desired)
assert result.to_keep == existing
assert result.to_ignore == desired
| StarcoderdataPython |
3309955 | import logging
import numpy as np
from openpnm.algorithms import ReactiveTransport
from openpnm.utils import Docorator, SettingsAttr
from openpnm.integrators import ScipyRK45
from openpnm.algorithms._solution import SolutionContainer
docstr = Docorator()
logger = logging.getLogger(__name__)
__all__ = ['TransientReactiveTransport']
@docstr.get_sections(base='TransientReactiveTransportSettings',
sections=['Parameters', 'Other Parameters'])
@docstr.dedent
class TransientReactiveTransportSettings:
r"""
Parameters
----------
%(ReactiveTransportSettings.parameters)s
"""
pore_volume = 'pore.volume'
class TransientReactiveTransport(ReactiveTransport):
r"""
A subclass of ReactiveTransport for transient simulations.
Parameters
----------
network : GenericNetwork
The Network with which this algorithm is associated.
Notes
-----
Either a Network or a Project must be specified.
"""
def __init__(self, phase, settings=None, **kwargs):
self.settings = SettingsAttr(TransientReactiveTransportSettings, settings)
super().__init__(phase=phase, settings=self.settings, **kwargs)
self.settings['phase'] = phase.name
self["pore.ic"] = np.nan
def run(self, x0, tspan, saveat=None, integrator=None):
"""
Runs the transient algorithm and returns the solution.
Parameters
----------
x0 : ndarray or float
Array (or scalar) containing initial condition values.
tspan : array_like
Tuple (or array) containing the integration time span.
saveat : array_like or float, optional
If an array is passed, it signifies the time points at which
the solution is to be stored, and if a scalar is passed, it
refers to the interval at which the solution is to be stored.
integrator : Integrator, optional
            Integrator object which will be used to do the time stepping.
Can be instantiated using openpnm.integrators module.
Returns
-------
TransientSolution
The solution object, which is basically a numpy array with
the added functionality that it can be called to return the
solution at intermediate times (i.e., those not stored in the
solution object).
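
        Examples
        --------
        A hypothetical sketch (``net`` and ``phase`` are assumed to already
        exist and the algorithm to be otherwise fully configured):

        >>> alg = TransientReactiveTransport(network=net, phase=phase)
        >>> soln = alg.run(x0=0.0, tspan=(0, 10), saveat=0.5)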
"""
logger.info('Running TransientTransport')
if np.isscalar(saveat):
saveat = np.arange(*tspan, saveat)
# FIXME: why do we forcibly add tspan[1] to saveat even if the user
# didn't want to?
if (saveat is not None) and (tspan[1] not in saveat):
saveat = np.hstack((saveat, [tspan[1]]))
integrator = ScipyRK45() if integrator is None else integrator
# Perform pre-solve validations
self._validate_settings()
self._validate_data_health()
        # Write x0 to the algorithm object (needed by _update_iterative_props)
self['pore.ic'] = x0 = np.ones(self.Np, dtype=float) * x0
        self._merge_initial_and_boundary_values()
# Build RHS (dx/dt = RHS), then integrate the system of ODEs
rhs = self._build_rhs()
# Integrate RHS using the given solver
soln = integrator.solve(rhs, x0, tspan, saveat)
# Return solution as dictionary
self.soln = SolutionContainer()
self.soln[self.settings['quantity']] = soln
return self.soln
def _run_special(self, x0): ...
def _build_rhs(self):
"""
Returns a function handle, which calculates dy/dt = rhs(y, t).
Notes
-----
``y`` is the variable that the algorithms solves for, e.g., for
``TransientFickianDiffusion``, it would be concentration.
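
        In matrix form, the system assembled below is ``V * dy/dt = -A @ y + b``;
        ``ode_func`` evaluates the right-hand side as ``(-A.dot(y) + b) / V``.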
"""
def ode_func(t, y):
# TODO: add a cache mechanism
self.x = y
self._update_A_and_b()
A = self.A.tocsc()
b = self.b
V = self.network[self.settings["pore_volume"]]
return (-A.dot(y) + b) / V # much faster than A*y
return ode_func
    def _merge_initial_and_boundary_values(self):
x0 = self['pore.ic']
bc_pores = ~np.isnan(self['pore.bc_value'])
x0[bc_pores] = self['pore.bc_value'][bc_pores]
quantity = self.settings['quantity']
self[quantity] = x0
| StarcoderdataPython |
3337981 | <filename>json_field.py
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
try:
import json # json module added in Python 2.6.
except ImportError:
from django.utils import simplejson as json
# https://bitbucket.org/offline/django-annoying
class JSONField(models.TextField):
"""
JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly.
Django snippet #1478
example:
class Page(models.Model):
data = JSONField(blank=True, null=True)
page = Page.objects.get(pk=5)
page.data = {'title': 'test', 'type': 3}
page.save()
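
        # reloading deserializes back to a dict (illustrative):
        page = Page.objects.get(pk=5)
        page.data['title']  # 'test'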
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if value == '':
return None
try:
if isinstance(value, basestring):
return json.loads(value)
except ValueError:
pass
return value
def get_db_prep_save(self, value, *args, **kwargs):
if value == '':
return None
value = json.dumps(value, cls=DjangoJSONEncoder, separators=(',', ':'))
return super(JSONField, self).get_db_prep_save(value, *args, **kwargs)
# South support.
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
    add_introspection_rules([], [r'^json_field\.JSONField'])
| StarcoderdataPython |
4813003 | <reponame>lilsweetcaligula/MIT6.00.1x
import operator
def ParsePolishExpression(exp):
if type(exp) != str:
raise TypeError("expression must be of type string")
try:
        supportedOperators = {'/': operator.div,  # Python 2 only; on Python 3 use operator.truediv
                              '*': operator.mul,
                              '+': operator.add,
                              '-': operator.sub,
                              '%': operator.mod}
operators = []
operands = []
expLen = len(exp)
result = 0.0
i = 0
c = exp[i]
while (i < expLen):
c = exp[i]
if c in supportedOperators:
op = supportedOperators[c]
operators.append(op)
break
i += 1
i += 1
while len(operators) > 0:
c = exp[i]
if c in supportedOperators:
op = supportedOperators[c]
operators.append(op)
elif c.isdigit() or c == '.':
start = i
while i < expLen:
c = exp[i]
if not c.isdigit() and c not in supportedOperators:
break
i += 1
end = i
i -= 1
lit = exp[start:end]
val = float(lit)
operands.append(val)
c = exp[i]
if c == ')' or i == expLen - 1:
op = operators.pop()
value = reduce(op, operands)
operands = []
operands.append(value)
i += 1
return operands.pop()
except ValueError as e:
raise e
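

# Usage sketch (Python 2; values traced through the parser above):
#     ParsePolishExpression("(+ 1 2)")    # -> 3.0
#     ParsePolishExpression("(+ 1 2 3)")  # -> 6.0
# One operator is applied per closing paren to all accumulated operands, so
# flat, space-delimited expressions work; nested sub-expressions are not
# handled correctly.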
| StarcoderdataPython |
3325860 | <reponame>brettkelly/Bible-kjv<filename>sqlGen.py
#!/usr/bin/python3
import json
import os
import os.path
from string import punctuation
BOOKFILE = './Books.json'
SRCDIR = './kjv-data'
bookData = json.load(open(BOOKFILE))
def getBookByLongName(bookName):
for b in books:
if b.longName == bookName:
return b
return None
class Book(object):
def __init__(self, sequence, name):
self.longName = name
self.shortName = name.strip(punctuation).replace(" ", "")[:4].upper()
self.sequence = sequence
self.verses = []
def __str__(self):
return f"{self.sequence} — {self.longName} ({self.shortName})"
def sqlInsert(self):
return f"INSERT into books(book_id, book_key_name, book_short_name, book_full_name) VALUES ({self.sequence},'{self.shortName}','{self.longName}','{self.longName}');"
def addVerse(self, verseObj):
self.verses.append(verseObj)
class Verse(object):
def __init__(self, book, chapter, verseNumber, verseText, translation):
        self.book = book  # this is the Book object
self.chapter = chapter
self.verseNumber = verseNumber
self.verseText = verseText
self.translation = translation
def __str__(self):
return f"{self.verseText} — {self.book.longName} {self.chapter}:{self.verseNumber}"
def sqlInsert(self):
return f"""INSERT into verses(book_id, translation_short_name, verse_chapter, verse_number, verse_text) VALUES ({self.book.sequence},'{self.translation}',{self.chapter},{self.verseNumber.strip()},\"{self.verseText.strip()}\");"""
books = []
bookNameIdMap = {}
for i in range(len(bookData)):
b = Book(i+1, bookData[i])
books.append(b)
bookNameIdMap[b.longName] = b.sequence
bookFiles = os.listdir(SRCDIR)
for f in bookFiles:
with open(os.path.join(SRCDIR, f)) as fd:
try:
versesFile = json.load(fd)
except:
print(f"Error loading {f}; skipping...")
continue
bookObj = getBookByLongName(versesFile['book'])
chapters = versesFile['chapters']
for chapter in chapters:
chapterNum = chapter['chapter']
for v in chapter['verses']:
verseObj = Verse(bookObj, chapterNum,
v['verse'], v['text'], 'KJV')
bookObj.addVerse(verseObj)
for b in books:
print(b.sqlInsert())
for b in books:
for v in b.verses:
print(v.sqlInsert())
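
# Illustrative first lines of the generated SQL (assuming Books.json starts
# with Genesis and the KJV data files are present):
#   INSERT into books(book_id, book_key_name, book_short_name, book_full_name) VALUES (1,'GENE','Genesis','Genesis');
#   INSERT into verses(book_id, translation_short_name, verse_chapter, verse_number, verse_text) VALUES (1,'KJV',1,1,"In the beginning God created the heaven and the earth.");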
| StarcoderdataPython |
3387846 | <reponame>leuder/interest_rate
import unittest
from pynterest_rate import Compound
class TestCompoundClass(unittest.TestCase):
def setUp(self):
self.compound = Compound(0.05, 1, 22.517)
def test_futurevaluecalculation(self):
self.assertEqual(
round(self.compound.calulate_future_value()), 3, 'calculated future value not as expected'
)
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
1736280 | # -*- coding: utf-8 -*-
"""
..
.. seealso:: `SPARQL Specification <http://www.w3.org/TR/rdf-sparql-query/>`_
Developers involved:
* <NAME> <http://www.ivan-herman.net>
* <NAME> <http://www.wikier.org>
* <NAME> <http://www.dayures.net>
* <NAME> <https://indeyets.ru/>
Organizations involved:
* `World Wide Web Consortium <http://www.w3.org>`_
* `Salzburg Research <http://www.salzburgresearch.at>`_
* `Foundation CTIC <http://www.fundacionctic.org/>`_
:license: `W3C® Software notice and license <http://www.w3.org/Consortium/Legal/copyright-software>`_
:requires: `RDFLib <https://rdflib.readthedocs.io>`_ package.
"""
import urllib
import urllib2
from urllib2 import urlopen as urlopener # don't change the name: tests override it
import base64
import re
import sys
import warnings
import json
from KeyCaseInsensitiveDict import KeyCaseInsensitiveDict
from SPARQLExceptions import QueryBadFormed, EndPointNotFound, EndPointInternalError, Unauthorized, URITooLong
from SPARQLWrapper import __agent__
# alias
XML = "xml"
"""to be used to set the return format to ``XML`` (``SPARQL Query Results XML`` format or ``RDF/XML``, depending on the query type). **This is the default**."""
JSON = "json"
"""to be used to set the return format to ``JSON``."""
JSONLD = "json-ld"
"""to be used to set the return format to ``JSON-LD``."""
TURTLE = "turtle"
"""to be used to set the return format to ``Turtle``."""
N3 = "n3"
"""to be used to set the return format to ``N3`` (for most of the SPARQL services this is equivalent to Turtle)."""
RDF = "rdf"
"""to be used to set the return ``RDF Graph``."""
RDFXML = "rdf+xml"
"""to be used to set the return format to ``RDF/XML`` explicitly."""
CSV = "csv"
"""to be used to set the return format to ``CSV``"""
TSV = "tsv"
"""to be used to set the return format to ``TSV``"""
_allowedFormats = [JSON, XML, TURTLE, N3, RDF, RDFXML, CSV, TSV]
# Possible HTTP methods
GET = "GET"
"""to be used to set HTTP method ``GET``. **This is the default**."""
POST = "POST"
"""to be used to set HTTP method ``POST``."""
_allowedRequests = [POST, GET]
# Possible HTTP Authentication methods
BASIC = "BASIC"
"""to be used to set ``BASIC`` HTTP Authentication method."""
DIGEST = "DIGEST"
"""to be used to set ``DIGEST`` HTTP Authentication method."""
_allowedAuth = [BASIC, DIGEST]
# Possible SPARQL/SPARUL query type (aka SPARQL Query forms)
SELECT = "SELECT"
"""to be used to set the query type to ``SELECT``. This is, usually, determined automatically."""
CONSTRUCT = "CONSTRUCT"
"""to be used to set the query type to ``CONSTRUCT``. This is, usually, determined automatically."""
ASK = "ASK"
"""to be used to set the query type to ``ASK``. This is, usually, determined automatically."""
DESCRIBE = "DESCRIBE"
"""to be used to set the query type to ``DESCRIBE``. This is, usually, determined automatically."""
INSERT = "INSERT"
"""to be used to set the query type to ``INSERT``. This is, usually, determined automatically."""
DELETE = "DELETE"
"""to be used to set the query type to ``DELETE``. This is, usually, determined automatically."""
CREATE = "CREATE"
"""to be used to set the query type to ``CREATE``. This is, usually, determined automatically."""
CLEAR = "CLEAR"
"""to be used to set the query type to ``CLEAR``. This is, usually, determined automatically."""
DROP = "DROP"
"""to be used to set the query type to ``DROP``. This is, usually, determined automatically."""
LOAD = "LOAD"
"""to be used to set the query type to ``LOAD``. This is, usually, determined automatically."""
COPY = "COPY"
"""to be used to set the query type to ``COPY``. This is, usually, determined automatically."""
MOVE = "MOVE"
"""to be used to set the query type to ``MOVE``. This is, usually, determined automatically."""
ADD = "ADD"
"""to be used to set the query type to ``ADD``. This is, usually, determined automatically."""
_allowedQueryTypes = [SELECT, CONSTRUCT, ASK, DESCRIBE, INSERT, DELETE, CREATE, CLEAR, DROP,
LOAD, COPY, MOVE, ADD]
# Possible methods to perform requests
URLENCODED = "urlencoded"
"""to be used to set **URL encode** as the encoding method for the request. This is, usually, determined automatically."""
POSTDIRECTLY = "postdirectly"
"""to be used to set **POST directly** as the encoding method for the request. This is, usually, determined automatically."""
_REQUEST_METHODS = [URLENCODED, POSTDIRECTLY]
# Possible output format (mime types) that can be converted by the local script. Unfortunately,
# it does not work by simply setting the return format, because there is still a certain level of confusion
# among implementations.
# For example, Joseki returns application/javascript and not the sparql-results+json thing that is required...
# Ie, alternatives should be given...
# <NAME> told me (June 2007) that the right return format is now added to his CVS, ie, future releases of
# joseki will be o.k., too. The situation with turtle and n3 is even more confusing because the text/n3 and text/turtle
# mime types have just been proposed and not yet widely used...
_SPARQL_DEFAULT = ["application/sparql-results+xml", "application/rdf+xml", "*/*"]
_SPARQL_XML = ["application/sparql-results+xml"]
_SPARQL_JSON = ["application/sparql-results+json", "application/json", "text/javascript", "application/javascript"] # VIVO server returns "application/javascript"
_RDF_XML = ["application/rdf+xml"]
_RDF_TURTLE = ["application/turtle", "text/turtle"]
_RDF_N3 = _RDF_TURTLE + ["text/rdf+n3", "application/n-triples", "application/n3", "text/n3"]
_RDF_JSONLD = ["application/ld+json", "application/x-json+ld"]
_CSV = ["text/csv"]
_TSV = ["text/tab-separated-values"]
_XML = ["application/xml"]
_ALL = ["*/*"]
_RDF_POSSIBLE = _RDF_XML + _RDF_N3 + _XML
_SPARQL_PARAMS = ["query"]
try:
import rdflib_jsonld
_allowedFormats.append(JSONLD)
_RDF_POSSIBLE = _RDF_POSSIBLE + _RDF_JSONLD
except ImportError:
# warnings.warn("JSON-LD disabled because no suitable support has been found", RuntimeWarning)
pass
# This is very ugly. The fact is that the key for the choice of the output format is not defined.
# Virtuoso uses 'format', joseki uses 'output', rasqual seems to use "results", etc. Lee Feigenbaum
# told me that virtuoso also understands 'output' these days, so I removed 'format'. I do not have
# info about the others yet, ie, for the time being I keep the general mechanism. Hopefully, in a
# future release, I can get rid of that. However, these processors are (hopefully) oblivious to the
# parameters they do not understand. So: just repeat all possibilities in the final URI. UGLY!!!!!!!
_returnFormatSetting = ["format", "output", "results"]
#######################################################################################################
class SPARQLWrapper(object):
"""
Wrapper around an online access to a SPARQL Web entry point.
The same class instance can be reused for subsequent queries. The values of the base Graph URI, return formats, etc,
are retained from one query to the next (in other words, only the query string changes). The instance can also be
reset to its initial values using the :meth:`resetQuery` method.
:ivar endpoint: SPARQL endpoint's URI.
:vartype endpoint: string
:ivar updateEndpoint: SPARQL endpoint's URI for SPARQL Update operations (if it's a different one). The **default** value is ``None``.
:vartype updateEndpoint: string
:ivar agent: The User-Agent for the HTTP request header. The **default** value is an autogenerated string using the SPARQLWrapper version code.
:vartype agent: string
:ivar _defaultGraph: URI for the default graph. The value can be set either via an explicit call :func:`addParameter("default-graph-uri", uri)<addParameter>` or as part of the query string. The **default** value is ``None``.
:vartype _defaultGraph: string
:ivar user: The username of the credentials for querying the current endpoint. The value can be set an explicit call :func:`setCredentials`. The **default** value is ``None``.
:vartype user: string
:ivar passwd: The password of the credentials for querying the current endpoint. The value can be set an explicit call :func:`setCredentials`. The **default** value is ``None``.
:vartype passwd: string
:ivar http_auth: HTTP Authentication type. The **default** value is :data:`BASIC`. Possible values are :data:`BASIC` or :data:`DIGEST`. It is used only in case the credentials are set.
:vartype http_auth: string
:ivar onlyConneg: Option for allowing (or not) **only** HTTP Content Negotiation (so dismiss the use of HTTP parameters). The default value is ``False``.
:vartype onlyConneg: boolean
:ivar customHttpHeaders: Custom HTTP Headers to be included in the request. It is a dictionary where keys are the header field and values are the header values. **Important**: These headers override previous values (including ``Content-Type``, ``User-Agent``, ``Accept`` and ``Authorization`` if they are present).
:vartype customHttpHeaders: dict
:ivar timeout: The timeout (in seconds) to use for querying the endpoint.
:vartype timeout: int
:ivar queryString: The SPARQL query text.
:vartype queryString: string
:ivar queryType: The type of SPARQL query (aka SPARQL query form), like :data:`CONSTRUCT`, :data:`SELECT`, :data:`ASK`, :data:`DESCRIBE`, :data:`INSERT`, :data:`DELETE`, :data:`CREATE`, :data:`CLEAR`, :data:`DROP`, :data:`LOAD`, :data:`COPY`, :data:`MOVE` or :data:`ADD` (constants in this module).
:vartype queryType: string
:ivar returnFormat: The return format.\
No local check is done, so the parameter is simply sent to the endpoint. Eg, if the value is set to :data:`JSON` and a construct query is issued, it is up to the endpoint to react or not, this wrapper does not check.\
The possible values are :data:`JSON`, :data:`XML`, :data:`TURTLE`, :data:`N3`, :data:`RDF`, :data:`RDFXML`, :data:`CSV`, :data:`TSV`, :data:`JSONLD` (constants in this module).\
The **default** value is :data:`XML`.
:vartype returnFormat: string
:ivar requestMethod: The request method for query or update operations. The possibles values are URL-encoded (:data:`URLENCODED`) or POST directly (:data:`POSTDIRECTLY`).
:vartype requestMethod: string
:ivar method: The invocation method (HTTP verb). The **default** value is :data:`GET`, but it can be set to :data:`POST`.
:vartype method: string
:ivar parameters: The parameters of the request (key/value pairs in a dictionary).
:vartype parameters: dict
:ivar _defaultReturnFormat: The default return format. It is used in case the same class instance is reused for subsequent queries.
:vartype _defaultReturnFormat: string
:cvar prefix_pattern: regular expression used to remove base/prefixes in the process of determining the query type.
:vartype prefix_pattern: :class:`re.RegexObject`, a compiled regular expression. See the :mod:`re` module of Python
:cvar pattern: regular expression used to determine whether a query (without base/prefixes) is of type :data:`CONSTRUCT`, :data:`SELECT`, :data:`ASK`, :data:`DESCRIBE`, :data:`INSERT`, :data:`DELETE`, :data:`CREATE`, :data:`CLEAR`, :data:`DROP`, :data:`LOAD`, :data:`COPY`, :data:`MOVE` or :data:`ADD`.
:vartype pattern: :class:`re.RegexObject`, a compiled regular expression. See the :mod:`re` module of Python
:cvar comments_pattern: regular expression used to remove comments from a query.
:vartype comments_pattern: :class:`re.RegexObject`, a compiled regular expression. See the :mod:`re` module of Python
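
    A minimal usage sketch (the endpoint URL is illustrative)::

        >>> sparql = SPARQLWrapper("http://example.org/sparql")
        >>> sparql.setQuery("SELECT * WHERE { ?s ?p ?o } LIMIT 5")
        >>> sparql.setReturnFormat(JSON)
        >>> results = sparql.query().convert()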
"""
prefix_pattern = re.compile(r"((?P<base>(\s*BASE\s*<.*?>)\s*)|(?P<prefixes>(\s*PREFIX\s+.+:\s*<.*?>)\s*))*")
# Maybe the future name could be queryType_pattern
pattern = re.compile(r"(?P<queryType>(CONSTRUCT|SELECT|ASK|DESCRIBE|INSERT|DELETE|CREATE|CLEAR|DROP|LOAD|COPY|MOVE|ADD))", re.VERBOSE | re.IGNORECASE)
comments_pattern = re.compile(r"(^|\n)\s*#.*?\n")
def __init__(self, endpoint, updateEndpoint=None, returnFormat=XML, defaultGraph=None, agent=__agent__):
"""
Class encapsulating a full SPARQL call.
:param endpoint: SPARQL endpoint's URI.
:type endpoint: string
:param updateEndpoint: SPARQL endpoint's URI for update operations (if it's a different one). The **default** value is ``None``.
:type updateEndpoint: string
:param returnFormat: The return format.\
No local check is done, so the parameter is simply sent to the endpoint. Eg, if the value is set to :data:`JSON` and a construct query is issued, it is up to the endpoint to react or not, this wrapper does not check.\
The possible values are :data:`JSON`, :data:`XML`, :data:`TURTLE`, :data:`N3`, :data:`RDF`, :data:`RDFXML`, :data:`CSV`, :data:`TSV`, :data:`JSONLD` (constants in this module).\
The **default** value is :data:`XML`.
:param defaultGraph: URI for the default graph. The value can be set either via an explicit call :func:`addParameter("default-graph-uri", uri)<addParameter>` or as part of the query string. The **default** value is ``None``.
:type defaultGraph: string
:param agent: The User-Agent for the HTTP request header. The **default** value is an autogenerated string using the SPARQLWrapper version number.
:type agent: string
"""
self.endpoint = endpoint
self.updateEndpoint = updateEndpoint if updateEndpoint else endpoint
self.agent = agent
self.user = None
self.passwd = None
self.http_auth = BASIC
self._defaultGraph = defaultGraph
self.onlyConneg = False # Only Content Negotiation
self.customHttpHeaders = {}
if returnFormat in _allowedFormats:
self._defaultReturnFormat = returnFormat
else:
self._defaultReturnFormat = XML
self.resetQuery()
def resetQuery(self):
"""Reset the query, ie, return format, method, query, default or named graph settings, etc,
are reset to their default values. This includes the default values for parameters, method, timeout or requestMethod.
"""
self.parameters = {}
if self._defaultGraph:
self.addParameter("default-graph-uri", self._defaultGraph)
self.returnFormat = self._defaultReturnFormat
self.method = GET
self.setQuery("""SELECT * WHERE{ ?s ?p ?o }""")
self.timeout = None
self.requestMethod = URLENCODED
def setReturnFormat(self, format):
"""Set the return format. If the one set is not an allowed value, the setting is ignored.
:param format: Possible values are :data:`JSON`, :data:`XML`, :data:`TURTLE`, :data:`N3`, :data:`RDF`, :data:`RDFXML`, :data:`CSV`, :data:`TSV`, :data:`JSONLD` (constants in this module). All other cases are ignored.
:type format: string
:raises ValueError: If :data:`JSONLD` is tried to set and the current instance does not support ``JSON-LD``.
"""
if format in _allowedFormats:
self.returnFormat = format
elif format == JSONLD:
raise ValueError("Current instance does not support JSON-LD; you might want to install the rdflib-jsonld package.")
else:
warnings.warn("Ignore format '%s'; current instance supports: %s." %(format, ", ".join(_allowedFormats)), SyntaxWarning)
def supportsReturnFormat(self, format):
"""Check if a return format is supported.
:param format: Possible values are :data:`JSON`, :data:`XML`, :data:`TURTLE`, :data:`N3`, :data:`RDF`, :data:`RDFXML`, :data:`CSV`, :data:`TSV`, :data:`JSONLD` (constants in this module). All other cases are ignored.
:type format: string
:return: Returns ``True`` if the return format is supported, otherwise ``False``.
:rtype: bool
"""
return (format in _allowedFormats)
def setTimeout(self, timeout):
"""Set the timeout (in seconds) to use for querying the endpoint.
:param timeout: Timeout in seconds.
:type timeout: int
"""
self.timeout = int(timeout)
def setOnlyConneg(self, onlyConneg):
"""Set this option for allowing (or not) only HTTP Content Negotiation (so dismiss the use of HTTP parameters).
.. versionadded:: 1.8.1
:param onlyConneg: ``True`` if **only** HTTP Content Negotiation is allowed; ``False`` if HTTP parameters are used.
:type onlyConneg: bool
"""
self.onlyConneg = onlyConneg
def setRequestMethod(self, method):
"""Set the internal method to use to perform the request for query or
update operations, either URL-encoded (:data:`URLENCODED`) or
POST directly (:data:`POSTDIRECTLY`).
Further details at `query operation in SPARQL <http://www.w3.org/TR/sparql11-protocol/#query-operation>`_
and `update operation in SPARQL Update <http://www.w3.org/TR/sparql11-protocol/#update-operation>`_.
:param method: Possible values are :data:`URLENCODED` (URL-encoded) or :data:`POSTDIRECTLY` (POST directly). All other cases are ignored.
:type method: string
"""
if method in _REQUEST_METHODS:
self.requestMethod = method
else:
warnings.warn("invalid update method '%s'" % method, RuntimeWarning)
def addDefaultGraph(self, uri):
"""
Add a default graph URI.
.. deprecated:: 1.6.0 Use :func:`addParameter("default-graph-uri", uri)<addParameter>` instead of this method.
:param uri: URI of the default graph.
:type uri: string
"""
self.addParameter("default-graph-uri", uri)
def addNamedGraph(self, uri):
"""
Add a named graph URI.
.. deprecated:: 1.6.0 Use :func:`addParameter("named-graph-uri", uri)<addParameter>` instead of this method.
:param uri: URI of the named graph.
:type uri: string
"""
self.addParameter("named-graph-uri", uri)
def addExtraURITag(self, key, value):
"""
Some SPARQL endpoints require extra key value pairs.
E.g., in virtuoso, one would add ``should-sponge=soft`` to the query forcing
virtuoso to retrieve graphs that are not stored in its local database.
Alias of :func:`addParameter` method.
.. deprecated:: 1.6.0 Use :func:`addParameter(key, value)<addParameter>` instead of this method
:param key: key of the query part.
:type key: string
:param value: value of the query part.
:type value: string
"""
self.addParameter(key, value)
def addCustomParameter(self, name, value):
"""
Method is kept for backwards compatibility. Historically, it "replaces" parameters instead of adding.
.. deprecated:: 1.6.0 Use :func:`addParameter(key, value)<addParameter>` instead of this method
:param name: name.
:type name: string
:param value: value.
:type value: string
:return: Returns ``True`` if the adding has been accomplished, otherwise ``False``.
:rtype: bool
"""
self.clearParameter(name)
return self.addParameter(name, value)
def addParameter(self, name, value):
"""
Some SPARQL endpoints allow extra key value pairs.
E.g., in virtuoso, one would add ``should-sponge=soft`` to the query forcing
virtuoso to retrieve graphs that are not stored in its local database.
If the parameter :attr:`query` is tried to be set, this intent is dismissed.
Returns a boolean indicating if the set has been accomplished.
:param name: name.
:type name: string
:param value: value.
:type value: string
:return: Returns ``True`` if the adding has been accomplished, otherwise ``False``.
:rtype: bool
"""
if name in _SPARQL_PARAMS:
return False
else:
if name not in self.parameters:
self.parameters[name] = []
self.parameters[name].append(value)
return True
def addCustomHttpHeader(self, httpHeaderName, httpHeaderValue):
"""
Add a custom HTTP header (this method can override all HTTP headers).
**Important**: Take into account that each previous value for the header field names
        ``Content-Type``, ``User-Agent``, ``Accept`` and ``Authorization`` would be overridden
if the header field name is present as value of the parameter :attr:`httpHeaderName`.
.. versionadded:: 1.8.2
:param httpHeaderName: The header field name.
:type httpHeaderName: string
:param httpHeaderValue: The header field value.
:type httpHeaderValue: string
"""
self.customHttpHeaders[httpHeaderName] = httpHeaderValue
def clearCustomHttpHeader(self, httpHeaderName):
"""
Clear the values of a custom HTTP Header previously set.
Returns a boolean indicating if the clearing has been accomplished.
.. versionadded:: 1.8.2
:param httpHeaderName: HTTP header name.
:type httpHeaderName: string
:return: Returns ``True`` if the clearing has been accomplished, otherwise ``False``.
:rtype: bool
"""
try:
del self.customHttpHeaders[httpHeaderName]
return True
except KeyError:
return False
def clearParameter(self, name):
"""
Clear the values of a concrete parameter.
Returns a boolean indicating if the clearing has been accomplished.
:param name: name
:type name: string
:return: Returns ``True`` if the clearing has been accomplished, otherwise ``False``.
:rtype: bool
"""
if name in _SPARQL_PARAMS:
return False
else:
try:
del self.parameters[name]
return True
except KeyError:
return False
def setCredentials(self, user, passwd, realm="SPARQL"):
"""
Set the credentials for querying the current endpoint.
:param user: username.
:type user: string
:param passwd: password.
:type passwd: string
:param realm: realm. Only used for :data:`DIGEST` authentication. The **default** value is ``SPARQL``
:type realm: string
.. versionchanged:: 1.8.3
Added :attr:`realm` parameter.
"""
self.user = user
        self.passwd = passwd
self.realm = realm
def setHTTPAuth(self, auth):
"""
Set the HTTP Authentication type. Possible values are :class:`BASIC` or :class:`DIGEST`.
:param auth: auth type.
:type auth: string
:raises TypeError: If the :attr:`auth` parameter is not an string.
:raises ValueError: If the :attr:`auth` parameter has not one of the valid values: :class:`BASIC` or :class:`DIGEST`.
"""
if not isinstance(auth, str):
raise TypeError('setHTTPAuth takes a string')
elif auth.upper() in _allowedAuth:
self.http_auth = auth.upper()
else:
valid_types = ", ".join(_allowedAuth)
raise ValueError("Value should be one of {0}".format(valid_types))
def setQuery(self, query):
"""
Set the SPARQL query text.
.. note::
No check is done on the validity of the query
(syntax or otherwise) by this module, except for testing the query type (SELECT,
ASK, etc). Syntax and validity checking is done by the SPARQL service itself.
:param query: query text.
:type query: string
:raises TypeError: If the :attr:`query` parameter is not an unicode-string or utf-8 encoded byte-string.
"""
if sys.version < '3': # have to write it like this, for 2to3 compatibility
if isinstance(query, unicode):
pass
elif isinstance(query, str):
query = query.decode('utf-8')
else:
raise TypeError('setQuery takes either unicode-strings or utf-8 encoded byte-strings')
else:
if isinstance(query, str):
pass
elif isinstance(query, bytes):
query = query.decode('utf-8')
else:
raise TypeError('setQuery takes either unicode-strings or utf-8 encoded byte-strings')
self.queryString = query
self.queryType = self._parseQueryType(query)
def _parseQueryType(self, query):
"""
Internal method for parsing the SPARQL query and return its type (ie, :data:`SELECT`, :data:`ASK`, etc).
.. note::
The method returns :data:`SELECT` if nothing is specified. This is just to get all other
methods running; in fact, this means that the query is erroneous, because the query must be,
according to the SPARQL specification. The
SPARQL endpoint should raise an exception (via :mod:`urllib`) for such syntax error.
:param query: query text.
:type query: string
:return: the type of SPARQL query (aka SPARQL query form).
:rtype: string
"""
try:
query = query if (isinstance(query, str)) else query.encode('ascii', 'ignore')
query = self._cleanComments(query)
query_for_queryType = re.sub(self.prefix_pattern, "", query.strip())
r_queryType = self.pattern.search(query_for_queryType).group("queryType").upper()
except AttributeError:
warnings.warn("not detected query type for query '%s'" % query.replace("\n", " "), RuntimeWarning)
r_queryType = None
if r_queryType in _allowedQueryTypes:
return r_queryType
else:
# raise Exception("Illegal SPARQL Query; must be one of SELECT, ASK, DESCRIBE, or CONSTRUCT")
warnings.warn("unknown query type '%s'" % r_queryType, RuntimeWarning)
return SELECT
def setMethod(self, method):
"""Set the invocation method. By default, this is :data:`GET`, but can be set to :data:`POST`.
:param method: should be either :data:`GET` or :data:`POST`. Other cases are ignored.
:type method: string
"""
if method in _allowedRequests:
self.method = method
def setUseKeepAlive(self):
"""Make :mod:`urllib2` use keep-alive.
:raises ImportError: when could not be imported ``keepalive.HTTPHandler``.
"""
try:
from keepalive import HTTPHandler
if urllib2._opener and any(isinstance(h, HTTPHandler) for h in urllib2._opener.handlers):
# already installed
return
keepalive_handler = HTTPHandler()
opener = urllib2.build_opener(keepalive_handler)
urllib2.install_opener(opener)
except ImportError:
warnings.warn("keepalive support not available, so the execution of this method has no effect")
def isSparqlUpdateRequest(self):
""" Returns ``True`` if SPARQLWrapper is configured for executing SPARQL Update request.
:return: Returns ``True`` if SPARQLWrapper is configured for executing SPARQL Update request.
:rtype: bool
"""
return self.queryType in [INSERT, DELETE, CREATE, CLEAR, DROP, LOAD, COPY, MOVE, ADD]
def isSparqlQueryRequest(self):
""" Returns ``True`` if SPARQLWrapper is configured for executing SPARQL Query request.
:return: Returns ``True`` if SPARQLWrapper is configured for executing SPARQL Query request.
:rtype: bool
"""
return not self.isSparqlUpdateRequest()
def _cleanComments(self, query):
""" Internal method for returning the query after all occurrence of singleline comments are removed (issues #32 and #77).
:param query: The query.
:type query: string
:return: the query after all occurrence of singleline comments are removed.
:rtype: string
"""
return re.sub(self.comments_pattern, "\n\n", query)
def _getRequestEncodedParameters(self, query=None):
""" Internal method for getting the request encoded parameters.
:param query: a tuple of two items. The first item can be the string \
``query`` (for :data:`SELECT`, :data:`DESCRIBE`, :data:`ASK`, :data:`CONSTRUCT` query) or the string ``update`` \
(for SPARQL Update queries, like :data:`DELETE` or :data:`INSERT`). The second item of the tuple \
is the query string itself.
:type query: tuple
:return: the request encoded parameters.
:rtype: string
"""
query_parameters = self.parameters.copy()
# in case of query = tuple("query"/"update", queryString)
if query and (isinstance(query, tuple)) and len(query) == 2:
query_parameters[query[0]] = [query[1]]
if not self.isSparqlUpdateRequest():
# This is very ugly. The fact is that the key for the choice of the output format is not defined.
# Virtuoso uses 'format',sparqler uses 'output'
# However, these processors are (hopefully) oblivious to the parameters they do not understand.
# So: just repeat all possibilities in the final URI. UGLY!!!!!!!
if not self.onlyConneg:
for f in _returnFormatSetting:
query_parameters[f] = [self.returnFormat]
# Virtuoso is not supporting a correct Accept header and an unexpected "output"/"format" parameter value. It returns a 406.
# "tsv", "rdf+xml" and "json-ld" are not supported as a correct "output"/"format" parameter value but "text/tab-separated-values" or "application/rdf+xml" are a valid values,
# and there is no problem to send both (4store does not support unexpected values).
if self.returnFormat in [TSV, JSONLD, RDFXML]:
acceptHeader = self._getAcceptHeader() # to obtain the mime-type "text/tab-separated-values" or "application/rdf+xml"
if "*/*" in acceptHeader:
acceptHeader = "" # clear the value in case of "*/*"
query_parameters[f] += [acceptHeader]
pairs = (
"%s=%s" % (
urllib.quote_plus(param.encode('UTF-8'), safe='/'),
urllib.quote_plus(value.encode('UTF-8'), safe='/')
)
for param, values in query_parameters.items() for value in values
)
return '&'.join(pairs)
def _getAcceptHeader(self):
""" Internal method for getting the HTTP Accept Header.
.. seealso:: `Hypertext Transfer Protocol -- HTTP/1.1 - Header Field Definitions <https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1>`_
"""
if self.queryType in [SELECT, ASK]:
if self.returnFormat == XML:
acceptHeader = ",".join(_SPARQL_XML)
elif self.returnFormat == JSON:
acceptHeader = ",".join(_SPARQL_JSON)
elif self.returnFormat == CSV: # Allowed for SELECT and ASK (https://www.w3.org/TR/2013/REC-sparql11-protocol-20130321/#query-success) but only described for SELECT (https://www.w3.org/TR/sparql11-results-csv-tsv/)
acceptHeader = ",".join(_CSV)
elif self.returnFormat == TSV: # Allowed for SELECT and ASK (https://www.w3.org/TR/2013/REC-sparql11-protocol-20130321/#query-success) but only described for SELECT (https://www.w3.org/TR/sparql11-results-csv-tsv/)
acceptHeader = ",".join(_TSV)
else:
acceptHeader = ",".join(_ALL)
warnings.warn("Sending Accept header '*/*' because unexpected returned format '%s' in a '%s' SPARQL query form" % (self.returnFormat, self.queryType), RuntimeWarning)
elif self.queryType in [CONSTRUCT, DESCRIBE]:
if self.returnFormat == TURTLE:
acceptHeader = ",".join(_RDF_TURTLE)
elif self.returnFormat == N3:
acceptHeader = ",".join(_RDF_N3)
elif self.returnFormat == XML or self.returnFormat == RDFXML:
acceptHeader = ",".join(_RDF_XML)
elif self.returnFormat == JSONLD and JSONLD in _allowedFormats:
acceptHeader = ",".join(_RDF_JSONLD)
else:
acceptHeader = ",".join(_ALL)
warnings.warn("Sending Accept header '*/*' because unexpected returned format '%s' in a '%s' SPARQL query form" % (self.returnFormat, self.queryType), RuntimeWarning)
elif self.queryType in [INSERT, DELETE, CREATE, CLEAR, DROP, LOAD, COPY, MOVE, ADD]:
if self.returnFormat == XML:
acceptHeader = ",".join(_SPARQL_XML)
elif self.returnFormat == JSON:
acceptHeader = ",".join(_SPARQL_JSON)
else:
acceptHeader = ",".join(_ALL)
else:
acceptHeader = "*/*"
return acceptHeader
def _createRequest(self):
"""Internal method to create request according a HTTP method. Returns a
:class:`urllib2.Request` object of the :mod:`urllib2` Python library
        :raises NotImplementedError: If the HTTP authentication method is not one of the valid values: :data:`BASIC` or :data:`DIGEST`.
:return: request a :class:`urllib2.Request` object of the :mod:`urllib2` Python library
"""
request = None
if self.isSparqlUpdateRequest():
# protocol details at http://www.w3.org/TR/sparql11-protocol/#update-operation
uri = self.updateEndpoint
if self.method != POST:
warnings.warn("update operations MUST be done by POST")
if self.requestMethod == POSTDIRECTLY:
request = urllib2.Request(uri + "?" + self._getRequestEncodedParameters())
request.add_header("Content-Type", "application/sparql-update")
request.data = self.queryString.encode('UTF-8')
else: # URL-encoded
request = urllib2.Request(uri)
request.add_header("Content-Type", "application/x-www-form-urlencoded")
request.data = self._getRequestEncodedParameters(("update", self.queryString)).encode('ascii')
else:
# protocol details at http://www.w3.org/TR/sparql11-protocol/#query-operation
uri = self.endpoint
if self.method == POST:
if self.requestMethod == POSTDIRECTLY:
request = urllib2.Request(uri + "?" + self._getRequestEncodedParameters())
request.add_header("Content-Type", "application/sparql-query")
request.data = self.queryString.encode('UTF-8')
else: # URL-encoded
request = urllib2.Request(uri)
request.add_header("Content-Type", "application/x-www-form-urlencoded")
request.data = self._getRequestEncodedParameters(("query", self.queryString)).encode('ascii')
else: # GET
request = urllib2.Request(uri + "?" + self._getRequestEncodedParameters(("query", self.queryString)))
request.add_header("User-Agent", self.agent)
request.add_header("Accept", self._getAcceptHeader())
if self.user and self.passwd:
if self.http_auth == BASIC:
credentials = "%s:%s" % (self.user, self.passwd)
request.add_header("Authorization", "Basic %s" % base64.b64encode(credentials.encode('utf-8')).decode('utf-8'))
elif self.http_auth == DIGEST:
realm = self.realm
pwd_mgr = urllib2.HTTPPasswordMgr()
pwd_mgr.add_password(realm, uri, self.user, self.passwd)
opener = urllib2.build_opener()
opener.add_handler(urllib2.HTTPDigestAuthHandler(pwd_mgr))
urllib2.install_opener(opener)
else:
valid_types = ", ".join(_allowedAuth)
raise NotImplementedError("Expecting one of: {0}, but received: {1}".format(valid_types,
self.http_auth))
# The header field name is capitalized in the request.add_header method.
for customHttpHeader in self.customHttpHeaders:
request.add_header(customHttpHeader, self.customHttpHeaders[customHttpHeader])
return request
def _query(self):
"""Internal method to execute the query. Returns the output of the
:func:`urllib2.urlopen` method of the :mod:`urllib2` Python library
:return: tuples with the raw request plus the expected format.
:raises QueryBadFormed: If the HTTP return code is ``400``.
:raises Unauthorized: If the HTTP return code is ``401``.
:raises EndPointNotFound: If the HTTP return code is ``404``.
:raises URITooLong: If the HTTP return code is ``414``.
:raises EndPointInternalError: If the HTTP return code is ``500``.
:raises urllib2.HTTPError: If the HTTP return code is different to ``400``, ``401``, ``404``, ``414``, ``500``.
"""
request = self._createRequest()
try:
if self.timeout:
response = urlopener(request, timeout=self.timeout)
else:
response = urlopener(request)
return response, self.returnFormat
except urllib2.HTTPError as e:
if e.code == 400:
raise QueryBadFormed(e.read())
elif e.code == 404:
raise EndPointNotFound(e.read())
elif e.code == 401:
raise Unauthorized(e.read())
elif e.code == 414:
raise URITooLong(e.read())
elif e.code == 500:
raise EndPointInternalError(e.read())
else:
raise e
def query(self):
"""
Execute the query.
Exceptions can be raised if either the URI is wrong or the HTTP sends back an error (this is also the
case when the query is syntactically incorrect, leading to an HTTP error sent back by the SPARQL endpoint).
The usual urllib2 exceptions are raised, which therefore cover possible SPARQL errors, too.
Note that some combinations of return formats and query types may not make sense. For example,
a SELECT query with Turtle response is meaningless (the output of a SELECT is not a Graph), or a CONSTRUCT
query with JSON output may be a problem because, at the moment, there is no accepted JSON serialization
of RDF (let alone one implemented by SPARQL endpoints). In such cases the returned media type of the result is
unpredictable and may differ from one SPARQL endpoint implementation to the other. (Endpoints usually fall
back to one of the "meaningful" formats, but it is up to the specific implementation to choose which
one that is.)
:return: query result
:rtype: :class:`QueryResult` instance
"""
return QueryResult(self._query())
def queryAndConvert(self):
"""Macro like method: issue a query and return the converted results.
        :return: the converted query result. See the conversion methods for more details.
        """
"""
res = self.query()
return res.convert()
def __str__(self):
"""This method returns the string representation of a :class:`SPARQLWrapper` object.
.. versionadded:: 1.8.3
:return: A human-readable string of the object.
:rtype: string
"""
fullname = self.__module__ + "." + self.__class__.__name__
items = ('"%s" : %r' % (k, v) for k, v in sorted(self.__dict__.items()))
str_dict_items = "{%s}" % (',\n'.join(items))
return "<%s object at 0x%016X>\n%s" % (fullname, id(self), str_dict_items)
#######################################################################################################
class QueryResult(object):
"""
Wrapper around an a query result. Users should not create instances of this class, it is
generated by a :func:`SPARQLWrapper.query` call. The results can be
converted to various formats, or used directly.
If used directly: the class gives access to the direct HTTP request results
``response`` obtained from the call to :func:`urllib.urlopen`.
It is a file-like object with two additional methods:
* ``geturl()`` to return the URL of the resource retrieved
* ``info()`` that returns the meta-information of the HTTP result as a dictionary-like object.
For convenience, these methods are also available on the :class:`QueryResult` instance.
The :func:`__iter__` and :func:`next` methods are also implemented (by mapping them to :attr:`response`). This means that the
common idiom ``for l in obj : do_something_with_line(l)`` would work, too.
:ivar response: the direct HTTP response; a file-like object, as return by the :func:`urllib2.urlopen` library call.
:ivar requestedFormat: The requested format. The possible values are: :data:`JSON`, :data:`XML`, :data:`RDFXML`, :data:`TURTLE`, :data:`N3`, :data:`RDF`, :data:`CSV`, :data:`TSV`, :data:`JSONLD`.
:type requestedFormat: string
"""
def __init__(self, result):
"""
:param result: HTTP response stemming from a :func:`SPARQLWrapper.query` call, or a tuple with the expected format: (response, format).
"""
if isinstance(result, tuple):
self.response = result[0]
self.requestedFormat = result[1]
else:
self.response = result
def geturl(self):
"""Return the URL of the original call.
:return: URL of the original call.
:rtype: string
"""
return self.response.geturl()
def info(self):
"""Return the meta-information of the HTTP result.
:return: meta-information of the HTTP result.
:rtype: dict
"""
return KeyCaseInsensitiveDict(self.response.info())
def __iter__(self):
"""Return an iterator object. This method is expected for the inclusion
of the object in a standard ``for`` loop.
"""
return self.response.__iter__()
def next(self):
"""Method for the standard iterator."""
return self.response.next()
def _convertJSON(self):
"""
Convert a JSON result into a Python dict. This method can be overwritten in a subclass
for a different conversion method.
:return: converted result.
:rtype: dict
"""
return json.loads(self.response.read().decode("utf-8"))
def _convertXML(self):
"""
Convert an XML result into a Python dom tree. This method can be overwritten in a
subclass for a different conversion method.
:return: converted result.
:rtype: :class:`xml.dom.minidom.Document`
"""
from xml.dom.minidom import parse
return parse(self.response)
def _convertRDF(self):
"""
Convert a RDF/XML result into an RDFLib Graph. This method can be overwritten
in a subclass for a different conversion method.
:return: converted result.
:rtype: :class:`rdflib.graph.Graph`
"""
try:
from rdflib.graph import ConjunctiveGraph
except ImportError:
from rdflib import ConjunctiveGraph
retval = ConjunctiveGraph()
# (DEPRECATED) this is a strange hack. If the publicID is not set, rdflib (or the underlying xml parser) makes a funny
# (DEPRECATED) (and, as far as I could see, meaningless) error message...
retval.load(self.response) # (DEPRECATED) publicID=' ')
return retval
def _convertN3(self):
"""
Convert a RDF Turtle/N3 result into a string. This method can be overwritten in a subclass
for a different conversion method.
:return: converted result.
:rtype: string
"""
return self.response.read()
def _convertCSV(self):
"""
Convert a CSV result into a string. This method can be overwritten in a subclass
for a different conversion method.
:return: converted result.
:rtype: string
"""
return self.response.read()
def _convertTSV(self):
"""
Convert a TSV result into a string. This method can be overwritten in a subclass
for a different conversion method.
:return: converted result.
:rtype: string
"""
return self.response.read()
def _convertJSONLD(self):
"""
Convert a RDF JSON-LD result into an RDFLib Graph. This method can be overwritten
in a subclass for a different conversion method.
:return: converted result
:rtype: :class:`rdflib.graph.Graph`
"""
from rdflib import ConjunctiveGraph
retval = ConjunctiveGraph()
retval.load(self.response, format='json-ld') # (DEPRECATED), publicID=' ')
return retval
def convert(self):
"""
Encode the return value depending on the return format:
* in the case of :data:`XML`, a DOM top element is returned
* in the case of :data:`JSON`, a json conversion will return a dictionary
* in the case of :data:`RDF/XML<RDFXML>`, the value is converted via RDFLib into a ``RDFLib Graph`` instance
* in the case of :data:`JSON-LD<JSONLD>`, the value is converted via RDFLib into a ``RDFLib Graph`` instance
* in the case of RDF :data:`Turtle<TURTLE>`/:data:`N3`, a string is returned
* in the case of :data:`CSV`/:data:`TSV`, a string is returned
        * In all other cases the input is simply returned.
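
        For example, a minimal sketch for the JSON case (names illustrative)::

            >>> res = sparql.query()
            >>> data = res.convert()   # dict parsed from the JSON response
            >>> data["head"]["vars"]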
        :return: the converted query result. See the conversion methods for more details.
        """
"""
def _content_type_in_list(real, expected):
""" Internal method for checking if the content-type header received matches any of the content types of the expected list.
:param real: The content-type header received.
:type real: string
:param expected: A list of expected content types.
:type expected: list
:return: Returns a boolean after checking if the content-type header received matches any of the content types of the expected list.
:rtype: boolean
"""
return True in [real.find(mime) != -1 for mime in expected]
def _validate_format(format_name, allowed, mime, requested):
""" Internal method for validating if the requested format is one of the allowed formats.
:param format_name: The format name (to be used in the warning message).
:type format_name: string
:param allowed: A list of allowed content types.
:type allowed: list
:param mime: The content-type header received (to be used in the warning message).
:type mime: string
:param requested: the requested format.
:type requested: string
"""
if requested not in allowed:
message = "Format requested was %s, but %s (%s) has been returned by the endpoint"
warnings.warn(message % (requested.upper(), format_name, mime), RuntimeWarning)
# TODO. In order to compare properly, the requested QueryType (SPARQL Query Form) is needed. For instance, the unexpected N3 requested for a SELECT would return XML
if "content-type" in self.info():
ct = self.info()["content-type"] # returned Content-Type value
if _content_type_in_list(ct, _SPARQL_XML):
_validate_format("XML", [XML], ct, self.requestedFormat)
return self._convertXML()
elif _content_type_in_list(ct, _XML):
_validate_format("XML", [XML], ct, self.requestedFormat)
return self._convertXML()
elif _content_type_in_list(ct, _SPARQL_JSON):
_validate_format("JSON", [JSON], ct, self.requestedFormat)
return self._convertJSON()
elif _content_type_in_list(ct, _RDF_XML):
_validate_format("RDF/XML", [RDF, XML, RDFXML], ct, self.requestedFormat)
return self._convertRDF()
elif _content_type_in_list(ct, _RDF_N3):
_validate_format("N3", [N3, TURTLE], ct, self.requestedFormat)
return self._convertN3()
elif _content_type_in_list(ct, _CSV):
_validate_format("CSV", [CSV], ct, self.requestedFormat)
return self._convertCSV()
elif _content_type_in_list(ct, _TSV):
_validate_format("TSV", [TSV], ct, self.requestedFormat)
return self._convertTSV()
elif _content_type_in_list(ct, _RDF_JSONLD):
_validate_format("JSON(-LD)", [JSONLD, JSON], ct, self.requestedFormat)
return self._convertJSONLD()
else:
warnings.warn("unknown response content type '%s' returning raw response..." %(ct), RuntimeWarning)
return self.response.read()
def _get_responseFormat(self):
"""
Get the response (return) format. The possible values are: :data:`JSON`, :data:`XML`, :data:`RDFXML`, :data:`TURTLE`, :data:`N3`, :data:`CSV`, :data:`TSV`, :data:`JSONLD`.
        In case there is no Content-Type, ``None`` is returned. In all other cases, the raw Content-Type is returned.
.. versionadded:: 1.8.3
:return: the response format. The possible values are: :data:`JSON`, :data:`XML`, :data:`RDFXML`, :data:`TURTLE`, :data:`N3`, :data:`CSV`, :data:`TSV`, :data:`JSONLD`.
:rtype: string
"""
def _content_type_in_list(real, expected):
""" Internal method for checking if the content-type header received matches any of the content types of the expected list.
:param real: The content-type header received.
:type real: string
:param expected: A list of expected content types.
:type expected: list
:return: Returns a boolean after checking if the content-type header received matches any of the content types of the expected list.
:rtype: boolean
"""
return True in [real.find(mime) != -1 for mime in expected]
if "content-type" in self.info():
ct = self.info()["content-type"] # returned Content-Type value
if _content_type_in_list(ct, _SPARQL_XML):
return XML
elif _content_type_in_list(ct, _XML):
return XML
elif _content_type_in_list(ct, _SPARQL_JSON):
return JSON
elif _content_type_in_list(ct, _RDF_XML):
return RDFXML
elif _content_type_in_list(ct, _RDF_TURTLE):
return TURTLE
elif _content_type_in_list(ct, _RDF_N3):
return N3
elif _content_type_in_list(ct, _CSV):
return CSV
elif _content_type_in_list(ct, _TSV):
return TSV
elif _content_type_in_list(ct, _RDF_JSONLD):
return JSONLD
else:
warnings.warn("Unknown response content type. Returning raw content-type ('%s')." %(ct), RuntimeWarning)
return ct
return None
    def print_results(self, minWidth=None):
        """This method prints a representation of a :class:`QueryResult` object whose response format MUST be :data:`JSON`.
        :param minWidth: The minimum width, counting in characters. The default value is ``None``.
        :type minWidth: integer
"""
# Check if the requested format was JSON. If not, exit.
responseFormat = self._get_responseFormat()
if responseFormat != JSON:
            message = "Format returned was %s, but JSON was expected. No printing."
warnings.warn(message % (responseFormat), RuntimeWarning)
return
results = self._convertJSON()
if minWidth:
width = self.__get_results_width(results, minWidth)
else:
width = self.__get_results_width(results)
index = 0
for var in results["head"]["vars"]:
            print(("?" + var).ljust(width[index]), "|", end=' ')
index += 1
print()
print("=" * (sum(width) + 3 * len(width)))
for result in results["results"]["bindings"]:
index = 0
for var in results["head"]["vars"]:
result_value = self.__get_prettyprint_string_sparql_var_result(result[var])
                print(result_value.ljust(width[index]), "|", end=' ')
index += 1
print()
def __get_results_width(self, results, minWidth=2):
width = []
for var in results["head"]["vars"]:
width.append(max(minWidth, len(var) + 1))
for result in results["results"]["bindings"]:
index = 0
for var in results["head"]["vars"]:
result_value = self.__get_prettyprint_string_sparql_var_result(result[var])
width[index] = max(width[index], len(result_value))
index += 1
return width
def __get_prettyprint_string_sparql_var_result(self, result):
value = result["value"]
lang = result.get("xml:lang", None)
datatype = result.get("datatype", None)
if lang is not None:
value += "@" + lang
if datatype is not None:
value += " [" + datatype + "]"
return value
def __str__(self):
"""This method returns the string representation of a :class:`QueryResult` object.
:return: A human-readable string of the object.
:rtype: string
.. versionadded:: 1.8.3
"""
fullname = self.__module__ + "." + self.__class__.__name__
str_requestedFormat = '"requestedFormat" : ' + repr(self.requestedFormat)
str_url = self.response.url
str_code = self.response.code
str_headers = self.response.info()
str_response = '"response (a file-like object, as return by the urllib2.urlopen library call)" : {\n\t"url" : "%s",\n\t"code" : "%s",\n\t"headers" : %s}' % (str_url, str_code, str_headers)
return "<%s object at 0x%016X>\n{%s,\n%s}" % (fullname, id(self), str_requestedFormat, str_response)
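# Hedged usage sketch (added; not part of the original module). It assumes this
# class is SPARQLWrapper's QueryResult and that the standard SPARQLWrapper
# public API is available; the endpoint URL below is just an example.
def _demo_print_results():
    from SPARQLWrapper import SPARQLWrapper, JSON
    sparql = SPARQLWrapper("http://dbpedia.org/sparql")
    sparql.setQuery("SELECT ?s WHERE { ?s ?p ?o } LIMIT 3")
    sparql.setReturnFormat(JSON)  # print_results() only accepts JSON results
    sparql.query().print_results(minWidth=20)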
| StarcoderdataPython |
3367269 | <gh_stars>1-10
import numpy as np
from scipy import sparse
from .topology import get_mesh_edges
def barycentric_matrix(uv, tris, num_verts):
"""
Return the barycentric coordinate matrix B such that
B * verts = verts_new
where verts_new yield the barycentric interpolation according to uv
of the triangles given as n*3 index array in tris.
Given the barycentric coordinates u and v of a triangle
with points p1, p2, p3 (given by indices i1, i2, i3),
barycentric interpolation yields the new point pb
pb = p1 + u * (p2 - p1) + v * (p3 - p1)
"""
uvw = np.column_stack((1 - uv[:,0] - uv[:,1], uv[:,0], uv[:,1]))
    return sparse.csr_matrix(
        (uvw.ravel(),
         (np.mgrid[:len(uvw), :3][0].ravel(), tris.ravel())),
        shape=(len(uvw), num_verts))
def barycentric_interpolate(verts, tris, uv):
"""
Compute 3d points from a given set of barycentric coordinates
Given the barycentric coordinates u and v of a triangle in uv
with points p1, p2, p3 (given by indices i1, i2, i3 in tris),
barycentric interpolation yields the new point pb
pb = p1 + u * (p2 - p1) + v * (p3 - p1)
"""
edge1, edge2 = get_mesh_edges(verts, tris)
return verts[tris[:,0]] + \
uv[:,0][:,np.newaxis] * edge1 + \
uv[:,1][:,np.newaxis] * edge2
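# Added self-check sketch (not in the original module): interpolating the
# centroid (u = v = 1/3) of a single triangle with both helpers above should
# agree with the mean of its vertices. This assumes get_mesh_edges returns
# (p2 - p1, p3 - p1) per triangle, as the docstrings above imply.
def _demo_barycentric():
    verts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    tris = np.array([[0, 1, 2]])
    uv = np.array([[1.0 / 3.0, 1.0 / 3.0]])
    p_interp = barycentric_interpolate(verts, tris, uv)
    p_matrix = barycentric_matrix(uv, tris, len(verts)).dot(verts)
    assert np.allclose(p_interp, p_matrix)
    assert np.allclose(p_interp, verts.mean(axis=0))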
| StarcoderdataPython |
1694543 | # -*- coding: utf-8 -*-
import json
import os
import sys
try:
# For Python 3.0 and later
from urllib import parse
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError, URLError
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen, HTTPError, URLError, Request
import pkg_resources
def try_address(fqdn):
"""
Check if the fqdn is valid
Args:
fqdn (str): fully qualified domain name
"""
import socket
try:
socket.gethostbyname_ex(fqdn)
except (socket.gaierror, UnicodeEncodeError):
return False
else:
return True
def validate_uri(uri):
"""
Args:
uri (str): MongoDB URI
"""
parsed = parse_mongo_uri(uri)
if parsed and try_address(parsed['nodelist'][0][0]):
return parsed
else:
return None
def validate_email(email):
import re
valid = re.compile(r"^[^@]+@[^@]+\.[^@]+$")
return valid.match(email.strip())
def parse_mongo_uri(conn):
"""
Args:
conn (str): MongoDB URI
Returns:
dict(str: str) or None: parsed MongoDB URI
{
'nodelist': <list of (host, port) tuples>,
'username': <username> or None,
'password': <password> or None,
'database': <database name> or None,
'collection': <collection name> or None,
'options': <dict of MongoDB URI options>
}
"""
from pymongo import uri_parser
conn = conn.split('://')[-1]
try:
uri = uri_parser.parse_uri("mongodb://" + conn)
except (uri_parser.InvalidURI, ValueError, uri_parser.ConfigurationError):
return None
else:
return uri
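# Added illustration (the host and credentials below are made up): what
# parse_mongo_uri returns for a typical connection string.
def _demo_parse_mongo_uri():
    parsed = parse_mongo_uri("mongodb://user:pw@db.example.com:27017/mydb")
    if parsed:
        print(parsed["nodelist"])  # [('db.example.com', 27017)]
        print(parsed["database"])  # 'mydb'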
def send_result(email, result, title, urn):
"""
Args:
email (str): address to send the results
result (obj): results to send
title (str):
urn (str): uniform resource name
Returns:
str: response from endpoint
"""
url = 'https://mongoaud.it/results'
headers = {'Content-type': 'application/json',
'Accept': 'application/json'}
values = {'email': email, 'result': result, 'title': title, 'urn': urn, 'date': get_date()}
try:
try:
req = Request(url, json.dumps(values), headers)
response = urlopen(req)
except TypeError:
# Python 3 compatibility
req = Request(url, json.dumps(values).encode('utf-8'), headers)
response = urlopen(req)
return response.read()
except (HTTPError, URLError) as exc:
return "Sadly enough, we are having technical difficulties at the moment, " \
"please try again later.\n\n%s" % str(exc)
def load_test(filename):
path = getattr(sys, '_MEIPASS', None)
if path:
path = os.path.join(path, 'data/%s' % filename)
else:
path = pkg_resources.resource_filename(__name__, 'data/%s' % filename)
with open(path) as json_data:
return json.load(json_data)
def get_date():
import time
import calendar
local = time.localtime(time.time())
    # days 11-13 always take "th"; otherwise the suffix follows the last digit
    if 11 <= local.tm_mday % 100 <= 13:
        nth = "th"
    else:
        nth = {1: "st", 2: "nd", 3: "rd"}.get(local.tm_mday % 10, "th")
return "%s %d%s %d @ %02d:%02d" % (
calendar.month_abbr[local.tm_mon], local.tm_mday,
nth, local.tm_year, local.tm_hour, local.tm_min)
def check_version(version):
# if application is binary then check for latest version
if getattr(sys, 'frozen', False):
try:
url = "https://api.github.com/repos/stampery/mongoaudit/releases/latest"
req = urlopen(url)
releases = json.loads(req.read())
latest = releases["tag_name"]
if version < latest:
print("mongoaudit version " + version)
print("There's a new version " + latest)
_upgrade(releases)
except (HTTPError, URLError):
print("Couldn't check for upgrades")
except os.error:
print("Couldn't write mongoaudit binary")
def _check_md5(file_path, md5):
import hashlib
    # hash the raw bytes so this behaves the same on Python 2 and Python 3
    with open(file_path, "rb") as mongoaudit_bin:
binary_md5 = hashlib.md5(mongoaudit_bin.read()).hexdigest()
return binary_md5 == md5
def _clean_upgrade(binary_ok, binary_path, path, temp_path):
if binary_ok:
import stat
# save the permissions from the current binary
old_stat = os.stat(binary_path)
# rename the current binary in order to replace it with the latest
os.rename(binary_path, path + "/old")
os.rename(temp_path, binary_path)
# set the same permissions that had the previous binary
os.chmod(binary_path, old_stat.st_mode | stat.S_IEXEC)
# delete the old binary
os.remove(path + "/old")
print("mongoaudit updated, restarting...")
os.execl(binary_path, binary_path, *sys.argv)
else:
os.remove(temp_path)
print("couldn't download the latest binary")
def _download_binary(release, temp_path):
req = urlopen(release["binary"])
binary_ok = False
attempts = 0
while not binary_ok and attempts < 3:
with open(temp_path, "wb+") as mongoaudit_bin:
mongoaudit_bin.write(req.read())
# verify integrity of downloaded file
print("Verifing mongoaudit integrity")
if _check_md5(temp_path, release["md5"]):
binary_ok = True
print("Integrity check passed")
attempts += 1
return binary_ok
def _upgrade(releases):
release = _get_release_link(releases["assets"])
if release:
print("Upgrading to latest version")
binary_path = sys.executable
path = os.path.dirname(binary_path)
temp_path = path + "/temp"
binary_ok = _download_binary(release, temp_path)
_clean_upgrade(binary_ok, binary_path, path, temp_path)
else:
print("There's no binary for this platform")
def _get_md5(link, uname):
    md5 = urlopen(link).read().decode("utf-8").split("\n")
for line in md5:
if uname in line:
return line.split()[0]
return None
def _get_release_link(assets):
uname = get_platform()
release = {}
for asset in assets:
download_url = asset["browser_download_url"]
release_platform = download_url.rsplit('-', 1)[1]
if release_platform == uname:
release["binary"] = download_url
elif release_platform == "checksums.txt":
release["md5"] = _get_md5(download_url, uname)
if len(release) == 2:
return release
return None
def get_platform():
import platform
platform_system = platform.system().lower()
return "macosx" if platform_system == "darwin" else platform_system
| StarcoderdataPython |
3390404 | <gh_stars>1-10
"""Logarithm of another distribution."""
import numpy
import chaospy
from ..baseclass import Distribution, OperatorDistribution
class Logn(OperatorDistribution):
"""
Logarithm with base N.
Args:
dist (Distribution):
Distribution to perform transformation on.
base (int, float):
the logarithm base.
Example:
>>> distribution = chaospy.Logn(chaospy.Uniform(1, 2), 3)
>>> distribution
Logn(Uniform(lower=1, upper=2), 3)
>>> q = numpy.linspace(0,1,6)[1:-1]
>>> distribution.inv(q).round(4)
array([0.166 , 0.3063, 0.4278, 0.535 ])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0.2, 0.4, 0.6, 0.8])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([1.3183, 1.5381, 1.7578, 1.9775])
>>> distribution.sample(4).round(4)
array([0.4578, 0.0991, 0.608 , 0.3582])
>>> distribution.mom(1).round(4)
0.3516
"""
def __init__(self, dist, base=2):
assert isinstance(dist, Distribution)
assert numpy.all(dist.lower > 0)
assert base > 0 and base != 1
super(Logn, self).__init__(
left=dist,
right=base,
repr_args=[dist, base],
)
def _lower(self, idx, left, right, cache):
return numpy.log(left._get_lower(idx, cache))/numpy.log(right)
def _upper(self, idx, left, right, cache):
return numpy.log(left._get_upper(idx, cache))/numpy.log(right)
def _pdf(self, xloc, idx, left, right, cache):
return left._get_pdf(right**xloc, idx, cache)*right**xloc*numpy.log(right)
def _cdf(self, xloc, idx, left, right, cache):
return left._get_fwd(right.item(0)**xloc, idx, cache)
def _ppf(self, uloc, idx, left, right, cache):
return numpy.log(left._get_inv(uloc, idx, cache))/numpy.log(right)
def _mom(self, kloc, left, right, cache):
        raise chaospy.UnsupportedFeature("%s: Analytical moments for logarithm not supported" % self)
def _ttr(self, kloc, idx, left, right, cache):
        raise chaospy.UnsupportedFeature("%s: Analytical TTR for logarithm not supported" % self)
class Log(Logn):
"""
Logarithm with base Euler's constant.
Args:
dist (Distribution):
Distribution to perform transformation on.
Example:
>>> distribution = chaospy.Log(chaospy.Uniform(1, 2))
>>> distribution
Log(Uniform(lower=1, upper=2))
"""
def __init__(self, dist):
super(Log, self).__init__(dist=dist, base=numpy.e)
self._repr_args = [dist]
class Log10(Logn):
"""
Logarithm with base 10.
Args:
dist (Distribution): Distribution to perform transformation on.
Example:
>>> distribution = chaospy.Log10(chaospy.Uniform(1, 2))
>>> distribution
Log10(Uniform(lower=1, upper=2))
"""
def __init__(self, dist):
super(Log10, self).__init__(dist=dist, base=10)
self._repr_args = [dist]
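# Added sanity sketch (not in the original file): for Log of Uniform(1, 2) the
# forward transform of the inverse should reproduce the input probabilities,
# mirroring the Logn doctest above.
def _demo_log_roundtrip():
    dist = Log(chaospy.Uniform(1, 2))
    q = numpy.linspace(0.1, 0.9, 5)
    assert numpy.allclose(dist.fwd(dist.inv(q)), q)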
| StarcoderdataPython |
1669487 | <filename>801-900/804.UniqueMorseCodeWords.py
#
# 804. Unique Morse Code Words
#
# International Morse Code defines a standard encoding where each letter is
# mapped to a series of dots and dashes, as follows: "a" maps to ".-", "b" maps
# to "-...", "c" maps to "-.-.", and so on.
#
# For convenience, the full table for the 26 letters of the English alphabet is
# given below:
#
# [
# ".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---",
# "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-",
# "..-", "...-", ".--", "-..-", "-.--", "--.."
# ]
#
# Now, given a list of words, each word can be written as a concatenation of
# the Morse code of each letter. For example, "cba" can be written as
# "-.-..--...", (which is the concatenation "-.-." + "-..." + ".-"). We'll call
# such a concatenation, the transformation of a word.
#
# Return the number of different transformations among all words we have.
#
# What I think..
class Solution:
morse = [
".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..",
".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.",
"...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."
]
def convert(self, word):
res = ''
for letter in word:
res += self.morse[ord(letter) - 97]
return res
def uniqueMorseRepresentations(self, words):
answer = set()
for word in words:
answer.add(self.convert(word))
return len(answer)
# return len({self.convert(word) for word in words})
# What others think...
class Solution2:
def uniqueMorseRepresentations(self, words):
morse = [
".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..",
".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.",
"...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."
]
        return len({''.join(morse[ord(l) - 97] for l in word) for word in words})
s = Solution()
print(s.uniqueMorseRepresentations(["gin", "zen", "gig", "msg"]))
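# The driver above prints 2: "gin"/"zen" and "gig"/"msg" collapse to the same
# two transformations. Equivalent one-liner, added as a sketch; it assumes
# lowercase input, as in the original problem statement.
def unique_morse_count(words, morse=Solution.morse):
    return len({''.join(morse[ord(c) - ord('a')] for c in w) for w in words})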
| StarcoderdataPython |
1749396 | import numpy as np
import cv2
import os
import pafy
import argparse
from tensorflow.keras.models import load_model
from collections import deque
output_directory = 'Youtube_Videos'
os.makedirs(output_directory, exist_ok = True)
Activities = ["Biking", "Drumming", "Basketball", "Diving","Billiards","HorseRiding","Mixing","PushUps","Skiing","Swing"]
image_height = 64
image_width = 64
def predict_video_frames(video_file_path, output_file_path, window_size,model):
    ''' Predict the activity in a user-supplied video and write the annotated
    result to the output folder. A deque sized to the number of frames we want
    to average over is used, because predicting from every single frame in
    isolation makes the label flicker; window_size=25 means the prediction is
    averaged over every 25 frames. '''
# Initialize a Deque Object with a fixed size
predicted_labels_probabilities_deque = deque(maxlen = window_size)
video_reader = cv2.VideoCapture(video_file_path)
original_video_width = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
original_video_height = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_writer = cv2.VideoWriter(output_file_path, cv2.VideoWriter_fourcc('M', 'P', '4', 'V'), 24, (original_video_width, original_video_height))
while True:
status, frame = video_reader.read()
if not status:
break
#resize frame
resized_frame = cv2.resize(frame, (image_height, image_width))
# Normalize the resized frame
normalized_frame = resized_frame / 255
# make predictions
predicted_labels_probabilities = model.predict(np.expand_dims(normalized_frame, axis = 0))[0]
# Appending predicted label probabilities to the deque object
predicted_labels_probabilities_deque.append(predicted_labels_probabilities)
# Assuring that the Deque is completely filled before starting the averaging process
if len(predicted_labels_probabilities_deque) == window_size:
# Converting Predicted Labels Probabilities Deque into Numpy array
predicted_labels_probabilities_np = np.array(predicted_labels_probabilities_deque)
# Calculating Average of Predicted Labels Probabilities Column Wise
predicted_labels_probabilities_averaged = predicted_labels_probabilities_np.mean(axis = 0)
# Converting the predicted probabilities into labels by returning the index of the maximum value.
predicted_label = np.argmax(predicted_labels_probabilities_averaged)
activity = Activities[predicted_label]
# Overlaying activity Text Ontop of the Frame
cv2.putText(frame, activity, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
# Writing The Frame
video_writer.write(frame)
cv2.imshow('Predicted Frames', frame)
key_pressed = cv2.waitKey(10)
if key_pressed == ord('q'):
break
cv2.destroyAllWindows()
video_reader.release()
video_writer.release()
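# Minimal sketch (added) of the smoothing idea used in predict_video_frames:
# average the last `window_size` probability vectors before taking argmax,
# which is what suppresses frame-to-frame label flicker.
def smooth_prediction(prob_history, window_size=25):
    window = np.array(prob_history[-window_size:])
    return int(np.argmax(window.mean(axis=0)))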
if __name__ == '__main__':
model = load_model("model_VGG16_CNN_LSTM.h5")
#number of the frames that we will average the prediction on
window_size = 25
    parser = argparse.ArgumentParser(description='This program predicts the human activity in a video \n current list of Activities: \n [Biking, Drumming, Basketball, Diving, Billiards, HorseRiding, Mixing, PushUps, Skiing, Swing] ')
    parser.add_argument('path_toVideo', help='give a video with one of those activities \n please give the path to a short, good-quality video on your local disk that contains one person doing that activity')
args=parser.parse_args()
    output_video_file_path = f'{output_directory}/video_HAR_CNN_{window_size}.mp4'
predict_video_frames(args.path_toVideo,output_video_file_path, window_size,model)
| StarcoderdataPython |
3351780 | <gh_stars>1-10
"""
Django settings for oscardropship project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import json
import os
import secrets
from django.utils.translation import ugettext_lazy as _
from oscar.defaults import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
APPS_DIR = os.path.join(BASE_DIR, "oscardropship")  # assumed: the app package lives under BASE_DIR
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.setdefault('DJANGO_SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'system.User'
ADMINS = os.environ.setdefault("ADMINS", '[["Hanz", "<EMAIL>"]]')
# convert admins (in json) to object
try:
ADMINS = json.loads(ADMINS)
ADMINS = [tuple(admin) for admin in ADMINS]
except Exception as e:
ADMINS = []
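# Example of the expected ADMINS environment value (added for illustration;
# the address is made up): a JSON list of [name, email] pairs, e.g.
#   export ADMINS='[["Ops", "ops@example.com"]]'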
RANDOM_ADMIN_URL = '{}/'.format(secrets.token_urlsafe(6))
DJANGO_ADMIN_URL = os.environ.setdefault(
'DJANGO_ADMIN_URL', RANDOM_ADMIN_URL)
DEFAULT_FROM_EMAIL = os.environ.setdefault(
'DEFAULT_FROM_EMAIL', '<EMAIL>')
SERVER_EMAIL = os.environ.setdefault(
'SERVER_EMAIL', '<EMAIL>')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'system.apps.SystemConfig',
'django.contrib.sites',
'django.contrib.flatpages',
'custom_oscar.config.CustomShop',
'oscar.apps.analytics.apps.AnalyticsConfig',
# 'oscar.apps.checkout.apps.CheckoutConfig',
'oscardropship.custom_oscar.checkout.apps.CheckoutConfig',
'oscar.apps.address.apps.AddressConfig',
'oscar.apps.shipping.apps.ShippingConfig',
# 'oscar.apps.catalogue.apps.CatalogueConfig',
# 'oscar.apps.catalogue.reviews.apps.CatalogueReviewsConfig',
'oscardropship.custom_oscar.catalogue.apps.CatalogueConfig',
'oscardropship.custom_oscar.catalogue.reviews.apps.CatalogueReviewsConfig',
'oscar.apps.communication.apps.CommunicationConfig',
# 'oscar.apps.partner.apps.PartnerConfig',
'custom_oscar.partner.apps.PartnerConfig',
'oscar.apps.basket.apps.BasketConfig',
# 'oscar.apps.payment.apps.PaymentConfig',
'oscardropship.custom_oscar.payment.apps.PaymentConfig',
'oscar.apps.offer.apps.OfferConfig',
# 'oscar.apps.order.apps.OrderConfig',
'custom_oscar.order.apps.OrderConfig',
'oscar.apps.customer.apps.CustomerConfig',
'oscar.apps.search.apps.SearchConfig',
'oscar.apps.voucher.apps.VoucherConfig',
'oscar.apps.wishlists.apps.WishlistsConfig',
'oscar.apps.dashboard.apps.DashboardConfig',
'oscar.apps.dashboard.reports.apps.ReportsDashboardConfig',
'oscar.apps.dashboard.users.apps.UsersDashboardConfig',
'oscar.apps.dashboard.orders.apps.OrdersDashboardConfig',
'oscar.apps.dashboard.catalogue.apps.CatalogueDashboardConfig',
'oscar.apps.dashboard.offers.apps.OffersDashboardConfig',
'oscar.apps.dashboard.partners.apps.PartnersDashboardConfig',
'oscar.apps.dashboard.pages.apps.PagesDashboardConfig',
'oscar.apps.dashboard.ranges.apps.RangesDashboardConfig',
'oscar.apps.dashboard.reviews.apps.ReviewsDashboardConfig',
'oscar.apps.dashboard.vouchers.apps.VouchersDashboardConfig',
'oscar.apps.dashboard.communications.apps.CommunicationsDashboardConfig',
'oscar.apps.dashboard.shipping.apps.ShippingDashboardConfig',
# 3rd-party apps that oscar depends on
'widget_tweaks',
'haystack',
'treebeard',
'django_tables2',
'oscar_accounts.apps.AccountsConfig',
'oscar_accounts.dashboard.apps.AccountsDashboardConfig',
# must haves
'django_extensions',
'easy_thumbnails',
'sorl.thumbnail',
# wagtail
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'wagtail.contrib.sitemaps',
'wagtail.contrib.routable_page',
'wagtail.contrib.settings',
'modelcluster',
'taggit',
# puput
'django_social_share',
'puput',
'colorful',
# other third parties
'tinymce',
'newsletter',
'colorfield',
# project
'wagtail_pages',
'multisite',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(APPS_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'oscar.apps.search.context_processors.search_form',
'oscar.apps.checkout.context_processors.checkout',
'oscar.apps.communication.notifications.context_processors.notifications',
'oscar.core.context_processors.metadata',
'wagtail.contrib.settings.context_processors.settings',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.setdefault('OSCARDROPSHIP_DATABASE_NAME', ''),
'USER': os.environ.setdefault('OSCARDROPSHIP_DATABASE_USER', ''),
'PASSWORD': os.environ.setdefault('OSCARDROPSHIP_DATABASE_PASSWORD', ''),
'HOST': os.environ.setdefault('OSCARDROPSHIP_DATABASE_HOST', ''),
'PORT': os.environ.setdefault('OSCARDROPSHIP_DATABASE_PORT', ''),
'ATOMIC_REQUESTS': True,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend',
)
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = True
USE_TZ = True
SITE_ID = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
GOOGLE_ANALYTICS_PROPERTY_ID = os.environ.setdefault('GOOGLE_ANALYTICS_PROPERTY_ID', '')
ENVIRONMENT_FLOAT = True
ENVIRONMENT_NAME = "Production server"
ENVIRONMENT_COLOR = "#E74C3C"
# other django oscar settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
OSCAR_SHOP_NAME = "My Novelty Shop"
OSCAR_INITIAL_ORDER_STATUS = 'Pending'
OSCAR_INITIAL_LINE_STATUS = 'Pending'
OSCAR_ORDER_STATUS_PIPELINE = {
'Pending': (
'Being processed',
'Cancelled',
),
'Being processed': (
'Shipped',
'Cancelled',
),
'Shipped': (
'Received',
'Cancelled',
),
'Received': (),
'Cancelled': (),
}
OSCAR_ORDER_STATUS_CASCADE = {
'Being processed': 'Being processed',
'Shipped': 'Shipped',
'Received': 'Received',
'Cancelled': 'Cancelled',
}
OSCAR_LINE_STATUS_PIPELINE = {
'Pending': (
'Being processed',
'Cancelled',
),
'Being processed': (
'Shipped',
'Cancelled',
),
'Shipped': (
'Received',
'Cancelled',
),
'Received': (),
'Cancelled': (),
}
OSCAR_ALLOW_ANON_CHECKOUT = True
OSCAR_FROM_EMAIL = os.environ.setdefault("OSCAR_FROM_EMAIL", "<EMAIL>")
OSCAR_DEFAULT_CURRENCY = "USD"
OSCAR_USE_LESS = False
OSCAR_HOMEPAGE = reverse_lazy('wagtail_serve', args=[''])
# paypal
PAYPAL_SANDBOX_MODE = False
PAYPAL_CALLBACK_HTTPS = True
PAYPAL_API_ACCOUNT = os.environ.setdefault('PAYPAL_API_ACCOUNT', '')
PAYPAL_API_CLIENT_ID = os.environ.setdefault('PAYPAL_API_CLIENT_ID', '')
PAYPAL_API_SECRET = os.environ.setdefault('PAYPAL_API_SECRET', '')
PAYPAL_CURRENCY = PAYPAL_PAYFLOW_CURRENCY = 'USD'
PAYPAL_PAYFLOW_DASHBOARD_FORMS = True
# oscar-accounts
OSCAR_DASHBOARD_NAVIGATION.append(
{
'label': 'Accounts',
'icon': 'icon-globe',
'children': [
{
'label': 'Accounts',
'url_name': 'accounts_dashboard:accounts-list',
},
{
'label': 'Transfers',
'url_name': 'accounts_dashboard:transfers-list',
},
{
'label': 'Deferred income report',
'url_name': 'accounts_dashboard:report-deferred-income',
},
{
'label': 'Profit/loss report',
'url_name': 'accounts_dashboard:report-profit-loss',
},
]
})
# wagtail
WAGTAIL_SITE_NAME = 'My Example Site'
# puput
PUPUT_AS_PLUGIN = True
# django-newesletter
NEWSLETTER_CONFIRM_EMAIL = False
NEWSLETTER_RICHTEXT_WIDGET = "tinymce.widgets.TinyMCE"
# Amount of seconds to wait between each email. Here 100ms is used.
NEWSLETTER_EMAIL_DELAY = 0.1
# Amount of seconds to wait between each batch. Here one minute is used.
NEWSLETTER_BATCH_DELAY = 60
# Number of emails in one batch
NEWSLETTER_BATCH_SIZE = 100
| StarcoderdataPython |
3361439 | # -*- coding: utf-8 -*-
import itertools
from prompt_smart_menu.helpers import InvalidArgError, Kwarg, NestedDict
from prompt_smart_menu.smart_menu import MenuNode
import pytest
def dummy(*args, **kwargs):
pass
class TestMenuNode:
_nest = {'prompt': {'toolkit', 'menu'}, 'exit': None}
def test_init_children_mismatch_type(self):
mismatch_1 = ['string', {'k': 'v'}]
mismatch_2 = [NestedDict, 'string']
node_1 = {'command': 'test', 'function': None, 'children': mismatch_1}
node_2 = {'command': 'test', 'function': None, 'children': mismatch_2}
with pytest.raises(TypeError):
MenuNode(**node_1)
with pytest.raises(TypeError):
MenuNode(**node_2)
def test_init_missing_kwargs(self):
no_command = {'function': None, 'children': [1]}
no_child = {'command': 'test', 'function': dummy}
no_func = {'command': 'test', 'children': [no_child]}
        with pytest.raises(TypeError):
            MenuNode(**no_command)
        # these constructions should be valid and must not raise (statements
        # after the first inside a pytest.raises block would never run anyway)
        MenuNode(**no_func)
        MenuNode(**no_child)
def test_init_children_none_no_function_raises(self):
node = {'command': 'test', 'function': None, 'children': None}
with pytest.raises(TypeError):
MenuNode(**node)
def test_init_children_none_with_function_successful(self):
node = {'command': 'test', 'function': dummy, 'children': None}
MenuNode(**node)
def test_init_children_empty_list_no_function_raises(self):
node = {'command': 'test', 'function': None, 'children': []}
with pytest.raises(TypeError):
MenuNode(**node)
def test_init_children_empty_list_with_function_successful(self):
node = {'command': 'test', 'function': dummy, 'children': []}
MenuNode(**node)
def test_init_children_nested_dict_no_function_raises(self):
nested_dict = NestedDict(self._nest)
node = {'command': 'test', 'function': None, 'children': nested_dict}
with pytest.raises(TypeError):
MenuNode(**node)
def test_init_children_nested_dict_with_function_successful(self):
nested_dict = NestedDict(self._nest)
node = {'command': 'test', 'function': dummy, 'children': nested_dict}
MenuNode(**node)
def test_init_children_list_string_no_function_raises(self):
node = {'command': 'test', 'function': None,
'children': ['prompt', 'menu']}
with pytest.raises(TypeError):
MenuNode(**node)
def test_init_children_list_string_with_function_successful(self):
node = {'command': 'test', 'function': dummy,
'children': ['prompt', 'menu']}
MenuNode(**node)
def test_init_children_list_dict_no_function_successful(self):
child = {'command': 'bob', 'function': dummy, 'children': None}
node = {'command': 'test', 'function': None, 'children': [child]}
MenuNode(**node)
def test_init_children_list_dict_with_function_raises(self):
child = {'command': 'bob', 'function': dummy, 'children': None}
node = {'command': 'test', 'function': dummy, 'children': [child]}
with pytest.raises(TypeError):
MenuNode(**node)
def test_init_duplicate_child_commands(self):
child_1 = {'command': 'bob', 'function': dummy, 'children': None}
child_2 = child_1
node = {'command': 'test', 'function': None,
'children': [child_1, child_2]}
with pytest.raises(TypeError):
MenuNode(**node)
def test_get_menu_children_none(self):
expected = {'test': None}
node_1 = {'command': 'test', 'function': dummy, 'children': None}
node_2 = {'command': 'test', 'function': dummy, 'children': []}
assert MenuNode(**node_1).get_menu() == expected
assert MenuNode(**node_2).get_menu() == expected
def test_get_menu_children_nested_dict(self):
expected = {'test': self._nest}
nested_dict = NestedDict(self._nest)
node = {'command': 'test', 'function': dummy, 'children': nested_dict}
assert MenuNode(**node).get_menu() == expected
def test_get_menu_children_list_strings(self):
expected = {'test': {'prompt', 'menu'}}
node = {'command': 'test', 'function': dummy,
'children': ['prompt', 'menu']}
assert MenuNode(**node).get_menu() == expected
def test_get_menu_children_list_nodes(self):
expected = {'test': {'prompt': {'toolkit', 'menu'}, 'exit': None}}
children = [{'command': 'prompt', 'function': dummy,
'children': ['toolkit', 'menu']},
{'command': 'exit', 'function': dummy, 'children': None}]
node = {'command': 'test', 'function': None, 'children': children}
assert MenuNode(**node).get_menu() == expected
class TestMenuNodeProcessArg:
def test_no_function_no_args(self):
child = {'command': 'child', 'function': dummy}
node = {'command': 'test', 'children': [child]}
menu_node = MenuNode(**node)
with pytest.raises(InvalidArgError):
menu_node.process_arg('')
def test_no_function_no_matching_child(self):
child = {'command': 'child', 'function': dummy}
node = {'command': 'test', 'children': [child]}
menu_node = MenuNode(**node)
with pytest.raises(InvalidArgError):
menu_node.process_arg('prompt')
def test_calls_function(self):
node = {'command': 'test', 'function': lambda: 42}
menu_node = MenuNode(**node)
assert menu_node.process_arg('') == 42
def test_calls_child_function(self):
child = {'command': 'child', 'function': lambda: 42}
node = {'command': 'test', 'children': [child]}
menu_node = MenuNode(**node)
assert menu_node.process_arg('child') == 42
class TestMenuNodeSplitKwargs:
def test_empty(self):
expected = ([], [])
assert MenuNode._split_kwargs([]) == expected
def test_only_args(self):
args = [1, 'test']
expected = (args, [])
assert MenuNode._split_kwargs(args) == expected
def test_only_kwargs(self):
kwargs = [Kwarg('test', 1), Kwarg('test', 2)]
expected = ([], kwargs)
assert MenuNode._split_kwargs(kwargs) == expected
def test_args_first(self):
args = [1, 'test']
kwargs = [Kwarg('test', 1), Kwarg('test', 2)]
expected = (args, kwargs)
assert MenuNode._split_kwargs([*args, *kwargs]) == expected
def test_kwargs_first(self):
args = [1, 'test']
kwargs = [Kwarg('test', 1), Kwarg('test', 2)]
with pytest.raises(SyntaxError):
MenuNode._split_kwargs([*kwargs, *args])
def test_mixed(self):
li = [1, Kwarg('test', 1), 'test']
with pytest.raises(SyntaxError):
MenuNode._split_kwargs(li)
def no_args_func():
pass
# # Can only test with python 3.8
# def positional_only(a, /):
# pass
def positional_or_keyword(b, c='c'):
pass
def var_positional(*d):
pass
def keyword_only(*, e, f='f'):
pass
def var_keyword(**g):
pass
# def validate_dummy(a, /, b, c='c', *d, e, f='f', **g):
# pass
def pack_args(letters: str, number_args: int):
args = [a for a in range(number_args)]
for k in letters:
args.append(Kwarg(k, k))
return args
def get_arg_combos(
letters: str,
number_args_min: int = 0,
number_args_max: int = 1
):
results = []
combinations = ['']
for r in range(1, len(letters) + 1):
for combo in itertools.combinations(letters, r):
combinations.append(''.join(combo))
for combo in combinations:
for number_args in range(number_args_min, number_args_max):
results.append((combo, number_args))
return results
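# Added illustration: every keyword subset is crossed with every positional
# argument count, starting from the empty subset.
def _demo_get_arg_combos():
    assert get_arg_combos('b', 0, 2) == [('', 0), ('', 1), ('b', 0), ('b', 1)]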
class TestValidateFunctionArgs:
def template_test(self, func, letters, number_args):
args_packed = pack_args(letters, number_args)
args, kwarg_objects = MenuNode._split_kwargs(args_packed)
kwargs = {kw.key(): kw.value() for kw in kwarg_objects}
print(kwargs)
menu = {'command': 'test', 'function': func}
mn = MenuNode(**menu)
try:
mn._function(*args, **kwargs)
except TypeError:
with pytest.raises(InvalidArgError):
mn._validate_function_args(args_packed)
else:
# assert anything here?
mn._validate_function_args(args_packed)
# @pytest.mark.parametrize('letters,number_args',
# get_arg_combos('bcefg', 1,5))
# def test_validate_function_args(self, letters, number_args):
# self.template_test(validate_dummy, letters, number_args)
@pytest.mark.parametrize('letters,number_args', get_arg_combos('g', 0, 2))
def test_no_args_func(self, letters, number_args):
self.template_test(no_args_func, letters, number_args)
# # Can only test with python 3.8
# @pytest.mark.parametrize('letters,number_args', get_arg_combos('', 0, 3))
# def test_positional_only(self, letters, number_args):
# self.template_test(positional_only, letters, number_args)
@pytest.mark.parametrize('letters,number_args', get_arg_combos('bc', 0, 2))
def test_positional_or_keyword(self, letters, number_args):
self.template_test(positional_or_keyword, letters, number_args)
@pytest.mark.parametrize('letters,number_args', get_arg_combos('bc', 0, 2))
def test_var_positional(self, letters, number_args):
self.template_test(var_positional, letters, number_args)
@pytest.mark.parametrize('letters,number_args', get_arg_combos('ef', 0, 1))
def test_keyword_only(self, letters, number_args):
self.template_test(keyword_only, letters, number_args)
@pytest.mark.parametrize('letters,number_args', get_arg_combos('g', 0, 1))
def test_var_keyword(self, letters, number_args):
self.template_test(var_keyword, letters, number_args)
@pytest.mark.parametrize('func,exception',
[(var_keyword, InvalidArgError),
(keyword_only, InvalidArgError)])
def test_duplicate_keyword(self, func, exception):
menu = {'command': 'test', 'function': func}
mn = MenuNode(**menu)
kwargs = [Kwarg('e', 'prompt'), Kwarg('e', 'prompt')]
with pytest.raises(exception):
mn._validate_function_args(kwargs)
| StarcoderdataPython |
3359468 | <gh_stars>0
from flask import Flask, render_template, request
import numpy as np
import pandas as pd
import joblib
app = Flask(__name__)
@app.route('/a')
def test():
return "Flask is being used for Development today"
@app.route('/')
def home():
return render_template('home.html')
@app.route('/predict', methods=['GET', 'POST'])
def predict():
if request.method == 'POST':
try:
            # Read the 30 model inputs (Time, V1..V28, Amount) from the form,
            # in the exact column order the model expects.
            feature_names = ['Time'] + ['V{}'.format(i) for i in range(1, 29)] + ['Amount']
            pred_args = [float(request.form[name]) for name in feature_names]
            pred_args_arr = np.array(pred_args).reshape(1, -1)
            # joblib.load accepts a file path directly, so no dangling handle
            ml_model = joblib.load("Random_forest.pkl")
            model_prediction = ml_model.predict(pred_args_arr)
model_prediction = int(model_prediction)
except ValueError:
return "Please check if the values are entered correctly"
return render_template('predict.html', prediction = model_prediction)
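# Hedged usage sketch (added, not part of the original app): how a client
# could call /predict once the server is running locally. The zeros are
# placeholder feature values, and `requests` is assumed to be installed.
def _demo_predict_request():
    import requests
    payload = {name: 0.0 for name in
               ['Time'] + ['V{}'.format(i) for i in range(1, 29)] + ['Amount']}
    return requests.post('http://127.0.0.1:5000/predict', data=payload)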
if __name__ == '__main__':
app.run()
| StarcoderdataPython |
17016 | <filename>tests/test_get_value.py
#!/usr/bin/env python
from numpy.testing import assert_array_almost_equal, assert_array_less
import numpy as np
from heat import BmiHeat
def test_get_initial_value():
model = BmiHeat()
model.initialize()
z0 = model.get_value_ptr("plate_surface__temperature")
assert_array_less(z0, 1.0)
assert_array_less(0.0, z0)
def test_get_value_copy():
model = BmiHeat()
model.initialize()
dest0 = np.empty(model.get_grid_size(0), dtype=float)
dest1 = np.empty(model.get_grid_size(0), dtype=float)
z0 = model.get_value("plate_surface__temperature", dest0)
z1 = model.get_value("plate_surface__temperature", dest1)
assert z0 is not z1
assert_array_almost_equal(z0, z1)
def test_get_value_pointer():
model = BmiHeat()
model.initialize()
dest1 = np.empty(model.get_grid_size(0), dtype=float)
z0 = model.get_value_ptr("plate_surface__temperature")
z1 = model.get_value("plate_surface__temperature", dest1)
assert z0 is not z1
assert_array_almost_equal(z0.flatten(), z1)
for _ in range(10):
model.update()
assert z0 is model.get_value_ptr("plate_surface__temperature")
def test_get_value_at_indices():
model = BmiHeat()
model.initialize()
dest = np.empty(3, dtype=float)
z0 = model.get_value_ptr("plate_surface__temperature")
z1 = model.get_value_at_indices("plate_surface__temperature", dest, [0, 2, 4])
assert_array_almost_equal(z0.take((0, 2, 4)), z1)
def test_value_size():
model = BmiHeat()
model.initialize()
z = model.get_value_ptr("plate_surface__temperature")
assert model.get_grid_size(0) == z.size
def test_value_nbytes():
model = BmiHeat()
model.initialize()
z = model.get_value_ptr("plate_surface__temperature")
assert model.get_var_nbytes("plate_surface__temperature") == z.nbytes
| StarcoderdataPython |
108532 | import unittest, penmon as pm
class Test(unittest.TestCase):
def test_daylight_hours(self):
station = pm.Station(41.42, 109)
day = station.day_entry(135)
day.temp_min = 19.5
day.temp_max = 28
self.assertEqual(day.daylight_hours(), 14.3, "daylighth_hours")
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
159649 | import frameworks.tc_scikit.features.bag_of_words as bag_of_words
import frameworks.tc_scikit.features.character_embeddings as character_embeddings
import frameworks.tc_scikit.features.character_ngrams as character_ngrams
import frameworks.tc_scikit.features.dependency_distribution_spacy as dependency_distribution_spacy
import frameworks.tc_scikit.features.embedding_centroid as embedding_centroid
import frameworks.tc_scikit.features.long_word_count as long_word_count
import frameworks.tc_scikit.features.pos_distribution_spacy as pos_distribution_spacy
import frameworks.tc_scikit.features.sentiws_average_polarity_feature as sentiws_average_polarity_feature
import frameworks.tc_scikit.features.sentiws_polarity_bearing_tokens_feature as sentiws_polarity_bearing_tokens_feature
import frameworks.tc_scikit.features.sentiws_polarity_distribution as sentiws_polarity_distribution
import frameworks.tc_scikit.features.structural_features_spacy as structural_features_spacy
import frameworks.tc_scikit.features.textdepth_feature as textdepth_feature
import frameworks.tc_scikit.representations.stopwords as stopwords
from frameworks.tc_scikit.features.tfidf import build_tfidf
import frameworks.tc_scikit.features.lda_distribution as lda_distribution
STRATEGIES = {'unigram': [bag_of_words.build(ngram_range=(1, 1))],
'unigram_stopwords': [bag_of_words.build(ngram_range=(1, 1), stopwords=stopwords.german_stopwords_nltk())],
'unigram_lowercase': [bag_of_words.build(ngram_range=(1, 1), lowercase=True)],
'unigram_iwnlp': [bag_of_words.build(ngram_range=(1, 1), token_form='IWNLP_lemma')],
'unigram_iwnlp_lowercase': [bag_of_words.build(ngram_range=(1, 1), token_form='IWNLP_lemma', lowercase=True)],
              'unigram_frequency_test': [bag_of_words.build(ngram_range=(1, 1), min_df=3, max_features=None,
                                                            stopwords=stopwords.german_stopwords_nltk())],
'character_ngrams': [character_ngrams.build(ngram_range=(3, 5), min_df=20)],
'n_unigram': [bag_of_words.build(ngram_range=(1, 1), normalize=True)],
'n_unigram_lowercase': [bag_of_words.build(ngram_range=(1, 1), lowercase=True, normalize=True)],
'n_unigram_iwnlp': [bag_of_words.build(ngram_range=(1, 1), token_form='IWNLP_lemma', normalize=True)],
'n_unigram_iwnlp_lowercase': [
bag_of_words.build(ngram_range=(1, 1), token_form='IWNLP_lemma', lowercase=True, normalize=True)],
'unigram_lowercase_tfidf': [
build_tfidf(ngram_range=(1, 1))],
'bigram': [bag_of_words.build(ngram_range=(2, 2))],
'bigram_lowercase': [bag_of_words.build(ngram_range=(2, 2), lowercase=True)],
'bigram_iwnlp': [bag_of_words.build(ngram_range=(2, 2), token_form='IWNLP_lemma')],
'bigram_iwnlp_lowercase': [bag_of_words.build(ngram_range=(2, 2), token_form='IWNLP_lemma', lowercase=True)],
'n_bigram': [bag_of_words.build(ngram_range=(2, 2), normalize=True)],
'n_bigram_lowercase': [bag_of_words.build(ngram_range=(2, 2), lowercase=True, normalize=True)],
'n_bigram_iwnlp': [bag_of_words.build(ngram_range=(2, 2), token_form='IWNLP_lemma', normalize=True)],
'n_bigram_iwnlp_lowercase': [
bag_of_words.build(ngram_range=(2, 2), token_form='IWNLP_lemma', lowercase=True, normalize=True)],
'unigram_bigram': [bag_of_words.build(ngram_range=(1, 1), feature_name='unigram'),
bag_of_words.build(ngram_range=(2, 2), feature_name='bigram')],
'dependency_distribution_spacy': [dependency_distribution_spacy.build()],
'structural_spacy': [structural_features_spacy.build()],
'structural_spacy_without_token_length': [structural_features_spacy.build(use_sentence_length=False)],
'sentiws_polarity': [sentiws_average_polarity_feature.build(),
sentiws_polarity_bearing_tokens_feature.build()],
'sentiws_distribution': [sentiws_polarity_distribution.build(bins='auto')],
'character_embeddings_centroid_100': [character_embeddings.build(embedding_length=100)],
'embedding_centroid_100': [embedding_centroid.build(embedding_length=100)],
'embedding_centroid_stopwords_100': [
embedding_centroid.build(embedding_length=100, stopwords=stopwords.german_stopwords_nltk())],
'n_unigram+pos_distribution+embedding_centroid': [
bag_of_words.build(ngram_range=(1, 1), normalize=True, stopwords=stopwords.german_stopwords_nltk()),
embedding_centroid.build(embedding_length=100, stopwords=stopwords.german_stopwords_nltk())],
'pos_distribution_spacy': [pos_distribution_spacy.build()],
'pos_distribution_spacy_universal': [pos_distribution_spacy.build(coarse_grained=False)],
'textdepth_feature': [textdepth_feature.build()],
'lda_distribution': [lda_distribution.build()],
'n_unigram+lda_distribution': [
bag_of_words.build(ngram_range=(1, 1), normalize=True, stopwords=stopwords.german_stopwords_nltk()),
lda_distribution.build()],
'n_unigram_shape': [bag_of_words.build(ngram_range=(1, 1), token_form='shape', normalize=True)],
'n_unigram_shape_lemma': [bag_of_words.build(ngram_range=(1, 1), token_form='shape_lemma', normalize=True)],
'long_word_count': [long_word_count.build(length=3)],
}
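# Hedged sketch (added): one plausible way a STRATEGIES entry could be turned
# into a single transformer. This assumes each build() call above returns a
# scikit-learn compatible transformer, which this module does not guarantee.
def build_feature_union(strategy_name):
    from sklearn.pipeline import make_union
    return make_union(*STRATEGIES[strategy_name])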
| StarcoderdataPython |
4808984 | <reponame>dysposin/python-ircbot
#!/usr/bin/python3
from vote import vote
class Elections:
def __init__(self):
self.elections = {}
def add_election(self, name):
self.elections[name] = vote.Vote(name)
def close_election(self, name):
self.elections[name].close_voting()
def vote(self, name, points, voter_id):
self.elections[name].cast_vote(points, voter_id)
    def results(self, name=None):
        if name is None:
            # collect results for every election instead of returning after
            # the first one, which the old loop did by mistake
            return [(name, vote.list_results())
                    for name, vote in self.elections.items()]
        else:
            return name, self.elections[name].list_results() | StarcoderdataPython
3273249 | <reponame>acc-cosc-1336/cosc-1336-spring-2018-artgonzalezacc<gh_stars>0
import unittest
from src.homework.homework3 import sum_odd_numbers
from src.homework.homework3 import list_of_even_numbers
class TestHomework3(unittest.TestCase):
def test_sum_odd_numbers_w_value_11(self):
self.assertEqual(36, sum_odd_numbers(11))
def test_sum_odd_numbers_w_value_20(self):
self.assertEqual(100, sum_odd_numbers(20))
def test_sum_odd_numbers_w_value_100(self):
self.assertEqual(2500, sum_odd_numbers(100))
def test_list_even_numbers_w_value_1(self):
self.assertEqual('', list_of_even_numbers(1))
def test_list_even_numbers_w_value_11(self):
self.assertEqual('2,4,6,8,10,', list_of_even_numbers(11))
def test_list_even_numbers_w_value_10(self):
self.assertEqual('2,4,6,8,10,', list_of_even_numbers(10))
def test_list_even_numbers_w_value_20(self):
self.assertEqual('2,4,6,8,10,12,14,16,18,20,', list_of_even_numbers(20))
| StarcoderdataPython |
191409 | <reponame>dcdanko/AriesK<gh_stars>0
import sqlite3
from os.path import join, dirname
from unittest import TestCase
from ariesk.ram import RotatingRamifier
from ariesk.grid_builder import GridCoverBuilder
from ariesk.dbs.kmer_db import GridCoverDB
from ariesk.pre_db import PreDB
from ariesk.utils.parallel_build import coordinate_parallel_build
KMER_TABLE = join(dirname(__file__), 'small_31mer_table.csv')
KMER_FASTA = join(dirname(__file__), 'small_fasta.fa')
KMER_ROTATION = join(dirname(__file__), '../data/rotation_minikraken.json')
GRID_COVER = join(dirname(__file__), 'small_grid_cover.sqlite')
PRE_DB = join(dirname(__file__), 'small_pre_grid.sqlite')
KMER_31 = 'AATACGTCCGGAGTATCGACGCACACATGGT'
class TestBuildGridCover(TestCase):
def test_build_grid_cover(self):
ramifier = RotatingRamifier.from_file(4, KMER_ROTATION)
db = GridCoverDB(sqlite3.connect(':memory:'), ramifier=ramifier, box_side_len=0.5)
grid = GridCoverBuilder(db)
grid.fast_add_kmers_from_file(KMER_TABLE)
grid.commit()
n_centers = grid.db.centroids().shape[0]
n_points = len(grid.db.get_kmers())
self.assertGreater(n_centers, 0)
self.assertLess(n_centers, 100)
self.assertEqual(n_points, 100)
def test_fast_build_grid_cover(self):
ramifier = RotatingRamifier.from_file(4, KMER_ROTATION)
db = GridCoverDB(sqlite3.connect(':memory:'), ramifier=ramifier, box_side_len=0.5)
grid = GridCoverBuilder(db)
grid.fast_add_kmers_from_file(KMER_TABLE)
grid.commit()
n_centers = grid.db.centroids().shape[0]
n_points = len(grid.db.get_kmers())
self.assertGreater(n_centers, 0)
self.assertLess(n_centers, 100)
self.assertEqual(n_points, 100)
def test_build_grid_cover_from_fasta(self):
ramifier = RotatingRamifier.from_file(4, KMER_ROTATION)
db = GridCoverDB(sqlite3.connect(':memory:'), ramifier=ramifier, box_side_len=0.5)
grid = GridCoverBuilder(db)
grid.fast_add_kmers_from_fasta(KMER_FASTA)
grid.commit()
n_centers = grid.db.centroids().shape[0]
n_points = len(grid.db.get_kmers())
self.assertGreater(n_centers, 0)
self.assertLess(n_centers, 98)
self.assertEqual(n_points, 98)
def test_build_grid_cover_from_pre(self):
predb = PreDB.load_from_filepath(PRE_DB)
grid = GridCoverBuilder.build_from_predb(':memory:', predb, 0.5)
grid.db._build_indices()
grid.commit()
n_centers = grid.db.centroids().shape[0]
n_points = len(grid.db.get_kmers())
self.assertGreater(n_centers, 0)
self.assertLess(n_centers, 98)
self.assertEqual(n_points, 98)
''' Test is slow, not really that useful
def test_build_parallel(self):
out_name = 'temp.test_parallel_build.sqlite'
coordinate_parallel_build(out_name, KMER_TABLE, KMER_ROTATION, 2, 0, 100, 0.5, 8, chunk_size=25)
db = GridCoverDB.load_from_filepath(out_name)
n_centers = db.centroids().shape[0]
n_points = len(db.get_kmers())
self.assertGreater(n_centers, 0)
self.assertLess(n_centers, 100)
self.assertEqual(n_points, 100)
os.remove(out_name)
'''
| StarcoderdataPython |
1731211 | <reponame>antopen/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SpiDetectionDetail(object):
def __init__(self):
self._code = None
self._content = None
self._data_id = None
self._details = None
self._label = None
self._msg = None
self._rate = None
self._scene = None
self._suggestion = None
self._task_id = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def content(self):
return self._content
@content.setter
def content(self, value):
self._content = value
@property
def data_id(self):
return self._data_id
@data_id.setter
def data_id(self, value):
self._data_id = value
@property
def details(self):
return self._details
@details.setter
def details(self, value):
if isinstance(value, list):
self._details = list()
for i in value:
self._details.append(i)
@property
def label(self):
return self._label
@label.setter
def label(self, value):
self._label = value
@property
def msg(self):
return self._msg
@msg.setter
def msg(self, value):
self._msg = value
@property
def rate(self):
return self._rate
@rate.setter
def rate(self, value):
self._rate = value
@property
def scene(self):
return self._scene
@scene.setter
def scene(self, value):
self._scene = value
@property
def suggestion(self):
return self._suggestion
@suggestion.setter
def suggestion(self, value):
self._suggestion = value
@property
def task_id(self):
return self._task_id
@task_id.setter
def task_id(self, value):
self._task_id = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.content:
if hasattr(self.content, 'to_alipay_dict'):
params['content'] = self.content.to_alipay_dict()
else:
params['content'] = self.content
if self.data_id:
if hasattr(self.data_id, 'to_alipay_dict'):
params['data_id'] = self.data_id.to_alipay_dict()
else:
params['data_id'] = self.data_id
if self.details:
if isinstance(self.details, list):
for i in range(0, len(self.details)):
element = self.details[i]
if hasattr(element, 'to_alipay_dict'):
self.details[i] = element.to_alipay_dict()
if hasattr(self.details, 'to_alipay_dict'):
params['details'] = self.details.to_alipay_dict()
else:
params['details'] = self.details
if self.label:
if hasattr(self.label, 'to_alipay_dict'):
params['label'] = self.label.to_alipay_dict()
else:
params['label'] = self.label
if self.msg:
if hasattr(self.msg, 'to_alipay_dict'):
params['msg'] = self.msg.to_alipay_dict()
else:
params['msg'] = self.msg
if self.rate:
if hasattr(self.rate, 'to_alipay_dict'):
params['rate'] = self.rate.to_alipay_dict()
else:
params['rate'] = self.rate
if self.scene:
if hasattr(self.scene, 'to_alipay_dict'):
params['scene'] = self.scene.to_alipay_dict()
else:
params['scene'] = self.scene
if self.suggestion:
if hasattr(self.suggestion, 'to_alipay_dict'):
params['suggestion'] = self.suggestion.to_alipay_dict()
else:
params['suggestion'] = self.suggestion
if self.task_id:
if hasattr(self.task_id, 'to_alipay_dict'):
params['task_id'] = self.task_id.to_alipay_dict()
else:
params['task_id'] = self.task_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SpiDetectionDetail()
if 'code' in d:
o.code = d['code']
if 'content' in d:
o.content = d['content']
if 'data_id' in d:
o.data_id = d['data_id']
if 'details' in d:
o.details = d['details']
if 'label' in d:
o.label = d['label']
if 'msg' in d:
o.msg = d['msg']
if 'rate' in d:
o.rate = d['rate']
if 'scene' in d:
o.scene = d['scene']
if 'suggestion' in d:
o.suggestion = d['suggestion']
if 'task_id' in d:
o.task_id = d['task_id']
return o
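# Added round-trip sketch: construct the model from a dict and serialize it
# back; the values are illustrative only.
def _demo_roundtrip():
    detail = SpiDetectionDetail.from_alipay_dict(
        {"code": "200", "label": "normal", "suggestion": "pass"})
    return detail.to_alipay_dict()  # {"code": "200", "label": "normal", ...}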
| StarcoderdataPython |
1746182 | '''Write a program that reads an integer N and a sequence of N
integers, and prints the sum of the even numbers in the sequence read.'''
soma = 0
n = int(input('How many numbers do you want to read? '))
for valores in range(1, n + 1):
    valores = int(input(f'enter value #{valores}: '))
    if valores % 2 == 0:
        soma += valores
print(f'Sum of the even numbers: {soma}') | StarcoderdataPython
4834024 | <reponame>softformance/django-social-photostream<filename>tests/urls.py
# -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from django_social_photostream.urls import urlpatterns as django_social_photostream_urls
urlpatterns = [
url(r'^', include(django_social_photostream_urls, namespace='django_social_photostream')),
]
| StarcoderdataPython |
3322193 | import numpy as np
from c4.evaldiff import evaldiff
from c4.engine.base import Engine
from c4.evaluate import Evaluator, INF
class GreedyEngine(Engine):
def __init__(self):
self._evaluator = Evaluator()
self.evaluate = self._evaluator.evaluate
def choose(self, board):
moves = board.moves()
m = moves[0]
moves = moves[1:]
bestmove = m
bestscore = -self.evaluate(board.move(m))
for m in moves:
score = -self.evaluate(board.move(m))
if score > bestscore:
bestmove = m
bestscore = score
print('Bestscore:', bestscore)
return bestmove
def __str__(self):
return 'Greedy'
class WeightedGreedyEngine(Engine):
"""Same as GreedyEngine but move randomly using scores as weights
"""
def __init__(self, verbose=True):
self._evaluator = Evaluator()
self._verbose = verbose
self.evaluate = self._evaluator.evaluate
def choose(self, board):
moves = board.moves()
# forced move?
if len(moves) < 2:
return moves[0]
# winning move or threat blocking?
scores = [evaldiff(board, m) for m in moves]
if max(scores) >= INF - 1:
return max(zip(scores, moves))[1]
weights = np.array(scores, dtype=float) + 1
if weights.sum() == 0:
weights = np.array([1 / len(moves)] * len(moves), dtype=float)
else:
weights /= weights.sum()
selected_move = np.random.choice(moves, p=weights)
if self._verbose:
selected_score = scores[list(moves).index(selected_move)]
print('Selected move %d with score %s' % (selected_move,
selected_score))
return selected_move
def __str__(self):
return 'Weighted Greedy'
| StarcoderdataPython |
1642689 | from django.urls import path
from . import views
urlpatterns = [
path('get_products', views.GetProductsInfo.as_view(), name="get_products"),
path('get_orders', views.GetOrdersInfo.as_view(), name="get_orders"),
path('get_customers', views.GetCustomersInfo.as_view(), name="get_customers"),
path('get_all_info', views.GetAllInfo.as_view(), name="get_all_info"),
path('check_status', views.CheckTaskStatus.as_view(), name="check_status"),
] | StarcoderdataPython |
13474 | ##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
Sra.py - Methods for dealing with short read archive files
==========================================================
Utility functions for dealing with :term:`SRA` formatted files from
the Short Read Archive.
Requirements:
* fastq-dump >= 2.1.7
Code
----
'''
import os
import glob
import tempfile
import shutil
import CGAT.Experiment as E
import CGAT.Fastq as Fastq
import CGAT.IOTools as IOTools
def peek(sra, outdir=None):
"""return the full file names for all files which will be extracted
Parameters
----------
outdir : path
perform extraction in outdir. If outdir is None, the extraction
will take place in a temporary directory, which will be deleted
afterwards.
Returns
-------
files : list
A list of fastq formatted files that are contained in the archive.
format : string
The quality score format in the :term:`fastq` formatted files.
"""
if outdir is None:
workdir = tempfile.mkdtemp()
else:
workdir = outdir
# --split-files creates files called prefix_#.fastq.gz,
# where # is the read number.
    # If file contains paired end data:
# output = prefix_1.fastq.gz, prefix_2.fastq.gz
# *special case: unpaired reads in a paired end --> prefix.fastq.gz
# *special case: if paired reads are stored in a single read,
# fastq-dump will split. There might be a joining
# sequence. The output would thus be:
# prefix_1.fastq.gz, prefix_2.fastq.gz, prefix_3.fastq.gz
# You want files 1 and 3.
E.run("""fastq-dump --split-files --gzip -X 1000
--outdir %(workdir)s %(sra)s""" % locals())
f = sorted(glob.glob(os.path.join(workdir, "*.fastq.gz")))
ff = [os.path.basename(x) for x in f]
if len(f) == 1:
# sra file contains one read: output = prefix.fastq.gz
pass
elif len(f) == 2:
# sra file contains read pairs:
# output = prefix_1.fastq.gz, prefix_2.fastq.gz
assert ff[0].endswith(
"_1.fastq.gz") and ff[1].endswith("_2.fastq.gz")
    elif len(f) == 3:
        if ff[2].endswith("_3.fastq.gz"):
            f = glob.glob(os.path.join(workdir, "*_[13].fastq.gz"))
        else:
            # No "_3" file: orphan reads plus a pair, so keep files 1 and 2.
            f = glob.glob(os.path.join(workdir, "*_[12].fastq.gz"))
# check format of fastqs in .sra
fastq_format = Fastq.guessFormat(IOTools.openFile(f[0], "r"), raises=False)
fastq_datatype = Fastq.guessDataType(IOTools.openFile(f[0], "r"), raises=True)
if outdir is None:
shutil.rmtree(workdir)
return f, fastq_format, fastq_datatype
def extract(sra, outdir, tool="fastq-dump"):
"""return statement for extracting the SRA file in `outdir`.
possible tools are fastq-dump and abi-dump. Use abi-dump for colorspace"""
if tool == "fastq-dump":
tool += " --split-files"
statement = """%(tool)s --gzip --outdir %(outdir)s %(sra)s""" % locals()
return statement
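

if __name__ == "__main__":
    # Usage sketch ("example.sra" is a placeholder). peek() shells out to
    # fastq-dump, so it must be on the PATH; extract() only builds the command.
    files, fastq_format, fastq_datatype = peek("example.sra", outdir="extracted")
    print(files, fastq_format, fastq_datatype)
    print(extract("example.sra", "extracted"))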
| StarcoderdataPython |
3363614 | <reponame>pyvec/arca<filename>arca/__init__.py
from ._arca import Arca
from .backend import BaseBackend, VenvBackend, DockerBackend, CurrentEnvironmentBackend, VagrantBackend
from .result import Result
from .task import Task
__all__ = ["Arca", "BaseBackend", "VenvBackend", "DockerBackend", "Result", "Task", "CurrentEnvironmentBackend",
"VagrantBackend"]
__version__ = "0.3.3"
| StarcoderdataPython |
46325 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import rospy
import message_filters
from std_msgs.msg import Int32, Float32
from pololu_drv8835_rpi import motors
rospy.init_node('message_sync', anonymous=False)
speed_desired = 0.5 # desired wheel speed in rpm
angle_desired = 0.0 # desired angle - 0
k_p_angle = 4*480.0/90.0 # proportional gain for angle control
k_i_angle = k_p_angle/4. # integral gain for angle control
k_d_angle = 1 # derivative gain for angle control
k_p_speed = 15 # proportional gain for speed control (60)
k_i_speed = 35 # integral gain for speed control(30)
def drive_motor(speed): # send speed command to motor
max_speed = 380
if speed > max_speed:
drive_speed = max_speed
elif speed < -max_speed:
drive_speed = -max_speed
else:
drive_speed = round(speed)
motors.motor1.setSpeed(int(drive_speed))
return drive_speed
def PID_control(IMU_message,Encoder_message):
global current_wheel_speed
global current_imu_angle
global speed_error_cum
global angle_error_cum
global time_current
current_wheel_speed = Encoder_message.data
angle_prev = current_imu_angle
current_imu_angle = IMU_message.data
#### time update
time_old = time_current # set previous time reading
time_current = rospy.get_rostime() # set current time reading
    dt = (time_current - time_old).to_sec() # time step in seconds (the raw subtraction yields a rospy Duration)
# P
speed_error = speed_desired - current_wheel_speed
# I
speed_error_cum += speed_error * dt
# Effort
# not sure what speed_direction_comp is used for
angle_desired = 1 * (k_p_speed * speed_error + k_i_speed * speed_error_cum)
if current_imu_angle <= 90 and current_imu_angle >= -90:
# P
angle_error = angle_desired - current_imu_angle
# I
angle_error_cum += angle_error*dt
# D
angle_diff = (angle_prev - current_imu_angle)/dt
# Effort
motor_output = -1*(k_p_angle*angle_error + k_i_angle*angle_error_cum + k_d_angle*angle_diff)
drive_motor(motor_output)
def message_sync():
global speed_error_cum
global angle_error_cum
global time_current
speed_error_cum = 0.0
angle_error_cum = 0.0
time_current = rospy.get_rostime()
IMU_message = message_filters.Subscriber('/IMU_angle', Float32)
Encoder_message = message_filters.Subscriber('/Encoder_data', Float32)
    # Float32 messages carry no header, so headerless synchronization must be enabled.
    # Open questions from the original author: what happens if queue size is not one, and what should the slop be?
    sync = message_filters.ApproximateTimeSynchronizer([IMU_message, Encoder_message], queue_size=1, slop=0.05, allow_headerless=True)
sync.registerCallback(PID_control)
rospy.spin()
if __name__ == "__main__":
try:
message_sync()
except rospy.ROSInterruptException:
pass | StarcoderdataPython |
1703819 | <reponame>SharadRawat/AV-Robo
#!/usr/bin/python
import numpy as np
class MotorController(object):
def __init__(self, max_speed, max_omega):
# These params are to be tuned.
self.kp = 3
self.ka = 8
self.kb = 0
self.max_speed = max_speed
self.max_omega = max_omega
def compute_vel(self, state, goal):
delx = goal[0] - state[0]
dely = goal[1] - state[1]
theta = state[2]
p = np.sqrt((delx ** 2) + (dely ** 2))
alpha = -theta + np.arctan2(dely, delx)
beta = -theta - alpha
v = p * self.kp
omega = (alpha * self.ka) + (beta * self.kb)
if np.all(v > self.max_speed):
v = self.max_speed
if np.all(omega > self.max_omega):
omega = self.max_omega
if np.all(p < 0.15):
done = True
else:
done = False
vw = (v, omega, done)
return vw
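

if __name__ == '__main__':
    # Quick sanity check with illustrative limits: drive from the origin
    # toward a goal one metre ahead.
    controller = MotorController(max_speed=1.0, max_omega=2.0)
    state = (0.0, 0.0, 0.0)  # x, y, heading theta
    goal = (1.0, 0.0)
    v, omega, done = controller.compute_vel(state, goal)
    print(v, omega, done)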
| StarcoderdataPython |
49375 | #!python3.6
import math
import MusicTheory.NaturalTone
import MusicTheory.Accidental
# Returns values and names for a NaturalTone with accidental symbols (+, -) applied
class ToneAccidentaler:
def __init__(self):
self.__NatulasTone = MusicTheory.NaturalTone.NaturalTone()
self.__Accidental = MusicTheory.Accidental.Accidental()
    # tone: a string such as C+ or B-. Multiple accidentals (e.g. G++) may be attached.
    #   1st character: NaturalTone
    #   2nd character onward: optional Accidental characters; any further characters repeat the 2nd
    # pitch: pitch height, an integer from -1 to 9.
def ToValue(self, tone:str, pitch:int=4) -> int:
        if pitch < -1 or 9 < pitch: raise Exception(f'pitch must be an integer from -1 to 9: {pitch}')
return self.CycleTone(
self.__NatulasTone.ToValue(tone[0]) + sum([self.__Accidental.ToValue(a) for a in tone[1:]]),
pitch)
    # Compute tone and pitch.
    # When the tone value leaves the 0-11 range, adjust the pitch so the tone wraps back into 0-11.
def CycleTone(self, toneValue, pitch):
# value: tone, pitch, pitchName
# -12 0, -2 B2
# -11 1, -1 C3
# - 2: 10, -1 A#3
# - 1: 11, -1 B3
# 0: 0, 0 C4
# 1: 1, 0 C#4
# 11: 11, 0 B4
# 12: 0, +1 C5
# if 0 <= toneValue: return toneValue % 12, pitch + (toneValue // 12)
# elif toneValue < 0: return (12 + toneValue) % 12, pitch + (toneValue // 12)
p = pitch + (toneValue // 12)
if 0 <= toneValue:
            if 9 < p: raise Exception('pitch would exceed its maximum of 9; keep the result at 9 or below.')
return toneValue % 12, p
elif toneValue < 0:
            if p < -1: raise Exception('pitch would fall below its minimum of -1; keep the result at -1 or above.')
return (12 + toneValue) % 12, p
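

if __name__ == '__main__':
    # Illustrative use; relies on the MusicTheory package this module imports,
    # so the exact return value depends on that package's tone tables.
    ta = ToneAccidentaler()
    print(ta.ToValue('C+', 4))  # expected: the (tone, pitch) pair for C sharp in octave 4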
| StarcoderdataPython |
1625249 | <reponame>joshiaj7/CodingChallenges
from typing import List


class Solution:
def findWords(self, words: List[str]) -> List[str]:
ans = []
        # Keyboard rows mapped to 1 (top), 2 (home), 3 (bottom).
        truth = {}
        for row, keys in enumerate(('qwertyuiop', 'asdfghjkl', 'zxcvbnm'), start=1):
            for ch in keys:
                truth[ch] = row
for item in words:
oneline = True
check = item.lower()
line = truth[check[0]]
for i in range(1, len(check)):
if truth[check[i]] != line:
oneline = False
if oneline:
ans.append(item)
return ans
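

if __name__ == '__main__':
    # Quick check: only words typable on a single keyboard row are returned.
    print(Solution().findWords(["Hello", "Alaska", "Dad", "Peace"]))  # ['Alaska', 'Dad']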
| StarcoderdataPython |
3336342 | <reponame>HsOjo/OjoPyADB<gh_stars>1-10
import re
from pyadb import common
from pyadb.utils import ShellLib
from .sub_command import *
class ADB(ShellLib):
MODE_BOOTLOADER = 'bootloader'
MODE_RECOVERY = 'recovery'
MODE_SIDELOAD = 'sideload'
MODE_SIDELOAD_AUTO_REBOOT = 'sideload-auto-reboot'
STATE_DEVICE = 'device'
STATE_BOOTLOADER = 'bootloader'
STATE_OFFLINE = 'offline'
def __init__(self, path=None):
if path is None:
[_, path, _] = common.execute(['which', 'adb'])
if path == '':
raise FileNotFoundError("Couldn't find ADB.")
super().__init__(path)
self._current_sn = None
self._forward = Forward(self)
self._reverse = Reverse(self)
self._logcat = Logcat(self)
@property
def devices(self):
out = self.execute_out('devices')
devices = {}
for line in out.splitlines()[1:]:
device = re.match(r'(?P<device>.+)\s+(?P<state>.+)', line)
if device is not None:
device = device.groupdict()
devices[device['device']] = device['state']
return devices
@property
def version(self):
out = self.execute_out('version')
version = re.match(r'Android Debug Bridge version (?P<adb_version>.+)\nVersion (?P<sdk_version>.+)', out)
if version is not None:
version = version.groupdict()
return version
@property
def _device_args(self):
args = []
if self._current_sn is not None:
args += ['-s', self._current_sn]
return args
@property
def current_sn(self):
return self._current_sn
def set_current_sn(self, sn):
if sn in self.devices:
self._current_sn = sn
def device_execute(self, *args, **kwargs):
p_args = self._device_args + list(args)
return self.execute(*p_args, **kwargs)
def device_execute_out(self, *args, **kwargs):
p_args = self._device_args + list(args)
return self.execute_out(*p_args, **kwargs)
def connect(self, host, port=None):
if port is not None:
target = '%s:%s' % (host, port)
else:
target = host
out = self.execute_out('connect', target)
return 'connected' in out
def disconnect(self, host, port=None):
if port is not None:
target = '%s:%s' % (host, port)
else:
target = host
out = self.execute_out('disconnect', target)
return 'disconnected' in out
def reconnect(self, host, port=None):
if port is not None:
target = '%s:%s' % (host, port)
else:
target = host
self.execute('reconnect', target)
def push(self, *local, remote='/sdcard/', sync: bool = False):
out = self.device_execute_out('push', *local, remote, '--sync' if sync else None)
result = re.findall(r'(\d+) files? pushed', out)
if len(result) == 1:
[num] = result
return int(num) == len(local)
return False
def pull(self, *remote, local='./', preserve: bool = True):
out = self.device_execute_out('pull', *remote, local, '-a' if preserve else None)
result = re.findall(r'(\d+) files? pulled', out)
if len(result) == 1:
[num] = result
return int(num) == len(local)
return False
def shell(self, *command, escape: str = None, no_stdin=False, disable_pty_alloc=False, force_pty_alloc=False,
disable_exec_separation=False, **kwargs):
return self.device_execute('shell',
'-e %s' % escape if escape is not None else None,
'-n' if no_stdin else None,
'-T' if disable_pty_alloc else None,
'-t' if force_pty_alloc else None,
'-x' if disable_exec_separation else None,
*command, **kwargs)
def exec_out(self, *command, **kwargs):
return self.device_execute('exec-out', *command, **kwargs)
def install(self, package: str):
return 'Success' in self.device_execute_out(
'install', package,
)
def uninstall(self, package: str, keep_data=False):
return 'Success' in self.device_execute_out(
'uninstall', package,
'-k' if keep_data else None
)
def reboot(self, mode: str = ''):
return self.device_execute('reboot', mode, timeout=1)
def tcpip(self, port: int = 5555):
return self.device_execute('tcpip', port)
def usb(self):
return self.device_execute('usb')
def start_server(self):
return self.execute('start-server')
def kill_server(self):
return self.execute('kill-server')
def root(self):
return self.device_execute('root')
def unroot(self):
return self.device_execute('unroot')
def sideload(self, ota_package: str):
return self.device_execute('sideload', ota_package)
def get_state(self):
return self.device_execute_out('get-state')
def copy(self, sn=None):
obj = self.__class__(path=self.path)
obj._current_sn = self._current_sn if sn is None else sn
return obj
@property
def forward(self):
return self._forward
@property
def reverse(self):
return self._reverse
@property
def logcat(self):
return self._logcat
| StarcoderdataPython |
16938 | from ubuntui.utils import Padding
from ubuntui.widgets.hr import HR
from conjureup.app_config import app
from conjureup.ui.views.base import BaseView, SchemaFormView
from conjureup.ui.widgets.selectors import MenuSelectButtonList
class NewCredentialView(SchemaFormView):
title = "New Credential Creation"
def __init__(self, *args, **kwargs):
cloud_type = app.provider.cloud_type.upper()
self.subtitle = "Enter your {} credentials".format(cloud_type)
super().__init__(*args, **kwargs)
class CredentialPickerView(BaseView):
title = "Choose a Credential"
subtitle = "Please select an existing credential, " \
"or choose to add a new one."
footer = 'Please press [ENTER] on highlighted credential to proceed.'
def __init__(self, credentials, default, submit_cb, back_cb):
self.credentials = credentials
self.default = default
self.submit_cb = submit_cb
self.prev_screen = back_cb
super().__init__()
def build_widget(self):
widget = MenuSelectButtonList(self.credentials, self.default)
widget.append(Padding.line_break(""))
widget.append(HR())
widget.append_option("Add a new credential", None)
return widget
def submit(self):
self.submit_cb(self.widget.selected)
| StarcoderdataPython |
1674992 | <gh_stars>1-10
#!/usr/bin/env python
"""
WordAPI.py
Copyright 2014 Wordnik, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
import sys
import os
from .models import *
class WordApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getExamples(self, word, **kwargs):
"""Returns examples for a word
Args:
word, str: Word to return examples for (required)
includeDuplicates, str: Show duplicate examples from different sources (optional)
useCanonical, str: If true will try to return the correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
skip, int: Results to skip (optional)
limit, int: Maximum number of results to return (optional)
Returns: ExampleSearchResults
"""
allParams = ['word', 'includeDuplicates', 'useCanonical', 'skip', 'limit']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getExamples" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/examples'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('includeDuplicates' in params):
queryParams['includeDuplicates'] = self.apiClient.toPathValue(params['includeDuplicates'])
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('skip' in params):
queryParams['skip'] = self.apiClient.toPathValue(params['skip'])
if ('limit' in params):
queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'ExampleSearchResults')
return responseObject
def getWord(self, word, **kwargs):
"""Given a word as a string, returns the WordObject that represents it
Args:
word, str: String value of WordObject to return (required)
useCanonical, str: If true will try to return the correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
includeSuggestions, str: Return suggestions (for correct spelling, case variants, etc.) (optional)
Returns: WordObject
"""
allParams = ['word', 'useCanonical', 'includeSuggestions']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getWord" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('includeSuggestions' in params):
queryParams['includeSuggestions'] = self.apiClient.toPathValue(params['includeSuggestions'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'WordObject')
return responseObject
def getDefinitions(self, word, **kwargs):
"""Return definitions for a word
Args:
word, str: Word to return definitions for (required)
partOfSpeech, str: CSV list of part-of-speech types (optional)
sourceDictionaries, str: Source dictionary to return definitions from. If 'all' is received, results are returned from all sources. If multiple values are received (e.g. 'century,wiktionary'), results are returned from the first specified dictionary that has definitions. If left blank, results are returned from the first dictionary that has definitions. By default, dictionaries are searched in this order: ahd, wiktionary, webster, century, wordnet (optional)
limit, int: Maximum number of results to return (optional)
includeRelated, str: Return related words with definitions (optional)
useCanonical, str: If true will try to return the correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
includeTags, str: Return a closed set of XML tags in response (optional)
Returns: list[Definition]
"""
allParams = ['word', 'partOfSpeech', 'sourceDictionaries', 'limit', 'includeRelated', 'useCanonical', 'includeTags']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getDefinitions" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/definitions'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('limit' in params):
queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
if ('partOfSpeech' in params):
queryParams['partOfSpeech'] = self.apiClient.toPathValue(params['partOfSpeech'])
if ('includeRelated' in params):
queryParams['includeRelated'] = self.apiClient.toPathValue(params['includeRelated'])
if ('sourceDictionaries' in params):
queryParams['sourceDictionaries'] = self.apiClient.toPathValue(params['sourceDictionaries'])
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('includeTags' in params):
queryParams['includeTags'] = self.apiClient.toPathValue(params['includeTags'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'list[Definition]')
return responseObject
def getTopExample(self, word, **kwargs):
"""Returns a top example for a word
Args:
word, str: Word to fetch examples for (required)
useCanonical, str: If true will try to return the correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
Returns: Example
"""
allParams = ['word', 'useCanonical']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getTopExample" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/topExample'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'Example')
return responseObject
def getRelatedWords(self, word, **kwargs):
"""Given a word as a string, returns relationships from the Word Graph
Args:
word, str: Word to fetch relationships for (required)
            relationshipTypes, str: Restrict to the supplied relationship types (optional)
useCanonical, str: If true will try to return the correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
            limitPerRelationshipType, int: Limits the total results per relationship type (optional)
Returns: list[Related]
"""
allParams = ['word', 'relationshipTypes', 'useCanonical', 'limitPerRelationshipType']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getRelatedWords" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/relatedWords'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('relationshipTypes' in params):
queryParams['relationshipTypes'] = self.apiClient.toPathValue(params['relationshipTypes'])
if ('limitPerRelationshipType' in params):
queryParams['limitPerRelationshipType'] = self.apiClient.toPathValue(params['limitPerRelationshipType'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'list[Related]')
return responseObject
def getTextPronunciations(self, word, **kwargs):
"""Returns text pronunciations for a given word
Args:
word, str: Word to get pronunciations for (required)
sourceDictionary, str: Get from a single dictionary (optional)
typeFormat, str: Text pronunciation type (optional)
useCanonical, str: If true will try to return a correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
limit, int: Maximum number of results to return (optional)
Returns: list[TextPron]
"""
allParams = ['word', 'sourceDictionary', 'typeFormat', 'useCanonical', 'limit']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getTextPronunciations" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/pronunciations'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('sourceDictionary' in params):
queryParams['sourceDictionary'] = self.apiClient.toPathValue(params['sourceDictionary'])
if ('typeFormat' in params):
queryParams['typeFormat'] = self.apiClient.toPathValue(params['typeFormat'])
if ('limit' in params):
queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'list[TextPron]')
return responseObject
def getHyphenation(self, word, **kwargs):
"""Returns syllable information for a word
Args:
word, str: Word to get syllables for (required)
sourceDictionary, str: Get from a single dictionary. Valid options: ahd, century, wiktionary, webster, and wordnet. (optional)
useCanonical, str: If true will try to return a correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
limit, int: Maximum number of results to return (optional)
Returns: list[Syllable]
"""
allParams = ['word', 'sourceDictionary', 'useCanonical', 'limit']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getHyphenation" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/hyphenation'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('sourceDictionary' in params):
queryParams['sourceDictionary'] = self.apiClient.toPathValue(params['sourceDictionary'])
if ('limit' in params):
queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'list[Syllable]')
return responseObject
def getWordFrequency(self, word, **kwargs):
"""Returns word usage over time
Args:
word, str: Word to return (required)
useCanonical, str: If true will try to return the correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
startYear, int: Starting Year (optional)
endYear, int: Ending Year (optional)
Returns: FrequencySummary
"""
allParams = ['word', 'useCanonical', 'startYear', 'endYear']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getWordFrequency" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/frequency'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('startYear' in params):
queryParams['startYear'] = self.apiClient.toPathValue(params['startYear'])
if ('endYear' in params):
queryParams['endYear'] = self.apiClient.toPathValue(params['endYear'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'FrequencySummary')
return responseObject
def getPhrases(self, word, **kwargs):
"""Fetches bi-gram phrases for a word
Args:
word, str: Word to fetch phrases for (required)
limit, int: Maximum number of results to return (optional)
wlmi, int: Minimum WLMI for the phrase (optional)
useCanonical, str: If true will try to return the correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
Returns: list[Bigram]
"""
allParams = ['word', 'limit', 'wlmi', 'useCanonical']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getPhrases" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/phrases'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('limit' in params):
queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
if ('wlmi' in params):
queryParams['wlmi'] = self.apiClient.toPathValue(params['wlmi'])
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'list[Bigram]')
return responseObject
def getEtymologies(self, word, **kwargs):
"""Fetches etymology data
Args:
word, str: Word to return (required)
useCanonical, str: If true will try to return the correct word root ('cats' -> 'cat'). If false returns exactly what was requested. (optional)
Returns: list[str]
"""
allParams = ['word', 'useCanonical']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getEtymologies" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/etymologies'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'list[str]')
return responseObject
def getAudio(self, word, **kwargs):
"""Fetches audio metadata for a word.
Args:
word, str: Word to get audio for. (required)
useCanonical, str: Use the canonical form of the word (optional)
limit, int: Maximum number of results to return (optional)
Returns: list[AudioFile]
"""
allParams = ['word', 'useCanonical', 'limit']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getAudio" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/audio'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('useCanonical' in params):
queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
if ('limit' in params):
queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'list[AudioFile]')
return responseObject
def getScrabbleScore(self, word, **kwargs):
"""Returns the Scrabble score for a word
Args:
word, str: Word to get scrabble score for. (required)
Returns: ScrabbleScoreResult
"""
allParams = ['word']
params = locals()
for (key, val) in params['kwargs'].items():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getScrabbleScore" % key)
params[key] = val
del params['kwargs']
resourcePath = '/word.{format}/{word}/scrabbleScore'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('word' in params):
replacement = str(self.apiClient.toPathValue(params['word']))
resourcePath = resourcePath.replace('{' + 'word' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'ScrabbleScoreResult')
return responseObject
| StarcoderdataPython |
4116 | <reponame>JohnnySn0w/BabbleBot
import random
prefix = [
'Look at you! ',
'Bless ',
'Bless! ',
'I heard about that! ',
    'Amen! ',
    'You and the kids doing alright? ',
    'Miss ya\'ll! '
]
suffix = [
'. Amen!',
'. God bless america',
'. God bless!',
' haha',
'. love ya!',
'. love ya\'ll!',
]
def add_pre_suf(sentence):
if random.randint(1,10) <= 6:
if random.randint(1,10) <= 5:
sentence = prefix[random.randint(0, len(prefix) - 1)] + sentence
else:
sentence += suffix[random.randint(0, len(suffix) - 1)]
return sentence
def add_elipses(sentence):
words = sentence.split()
for i in range(4, len(words), 5):
if random.randint(1,10) <= 7:
words[i] += "..."
return " ".join(words)
def boomer_caps(sentence):
seed = random.randint(1, 10)
sent_array = sentence.split()
if seed in (1, 2, 3):
return sentence
elif seed in (4, 5):
temp_sent = []
for x in sent_array:
if random.random() < 0.25:
x = x.upper()
temp_sent.append(x)
return " ".join(temp_sent)
elif seed in (6, 7):
temp_sent = []
for x in sent_array:
if random.random() < 0.5:
x = x.upper()
temp_sent.append(x)
return " ".join(temp_sent)
elif seed in (8, 9):
return sentence.title()
elif seed == 10:
return sentence.upper()
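

if __name__ == '__main__':
    # Illustrative round trip through the three transforms (output is random).
    text = 'see you at church on sunday'
    print(boomer_caps(add_elipses(add_pre_suf(text))))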
| StarcoderdataPython |
3237153 | #!/usr/bin/env python
from flask import Flask, jsonify, abort, request, make_response
from flask_script import Manager, Server
import requests
import json
import os
import time
import yaml
import config
import issuer
# Load application settings (environment)
config_root = os.environ.get('CONFIG_ROOT', '../config')
ENV = config.load_settings(config_root=config_root)
# custom server class to inject startup initialization routing
class CustomServer(Server):
def __call__(self, app, *args, **kwargs):
issuer.startup_init(ENV)
#Hint: Here you could manipulate app
return Server.__call__(self, app, *args, **kwargs)
app = Flask(__name__)
manager = Manager(app)
# Remember to add the command to your Manager instance
manager.add_command('runserver', CustomServer())
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.route('/issue-credential', methods=['POST'])
def submit_credential():
"""
Exposed method to proxy credential issuance requests.
"""
if not request.json:
abort(400)
cred_input = request.json
return issuer.handle_send_credential(cred_input)
@app.route('/api/agentcb/topic/<topic>/', methods=['POST'])
def agent_callback(topic):
"""
Main callback for aries agent. Dispatches calls based on the supplied topic.
"""
if not request.json:
abort(400)
message = request.json
# dispatch based on the topic type
if topic == issuer.TOPIC_CONNECTIONS:
if "state" in message:
return issuer.handle_connections(message["state"], message)
return jsonify({})
elif topic == issuer.TOPIC_CREDENTIALS:
if "state" in message:
return issuer.handle_credentials(message["state"], message)
return jsonify({})
elif topic == issuer.TOPIC_PRESENTATIONS:
if "state" in message:
return issuer.handle_presentations(message["state"], message)
return jsonify({})
elif topic == issuer.TOPIC_GET_ACTIVE_MENU:
return issuer.handle_get_active_menu(message)
elif topic == issuer.TOPIC_PERFORM_MENU_ACTION:
return issuer.handle_perform_menu_action(message)
elif topic == issuer.TOPIC_ISSUER_REGISTRATION:
return issuer.handle_register_issuer(message)
elif topic == issuer.TOPIC_PROBLEM_REPORT:
return issuer.handle_problem_report(message)
else:
print("Callback: topic=", topic, ", message=", message)
abort(400, {'message': 'Invalid topic: ' + topic})
if __name__ == '__main__':
manager.run()
| StarcoderdataPython |
1675022 | <reponame>NextThought/pypy-numpy
from __future__ import division, print_function
from os.path import join, split, dirname
import os
import sys
from distutils.dep_util import newer
from distutils.msvccompiler import get_build_version as get_msvc_build_version
def needs_mingw_ftime_workaround():
# We need the mingw workaround for _ftime if the msvc runtime version is
# 7.1 or above and we build with mingw ...
# ... but we can't easily detect compiler version outside distutils command
# context, so we will need to detect in randomkit whether we build with gcc
msver = get_msvc_build_version()
if msver and msver >= 8:
return True
return False
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
def generate_libraries(ext, build_dir):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
tc = testcode_wincrypt()
if config_cmd.try_run(tc):
libs.append('Advapi32')
ext.libraries.extend(libs)
return None
# enable unix large file support on 32 bit systems
# (64 bit off_t, lseek -> lseek64 etc.)
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1'),
]
if needs_mingw_ftime_workaround():
defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
libs = []
# Configure mtrand
try:
import cffi
have_cffi = True
except ImportError:
have_cffi = False
if have_cffi:
#create the dll/so for the cffi version
if sys.platform == 'win32':
libs.append('Advapi32')
defs.append(('_MTRAND_DLL',None))
config.add_shared_library('_mtrand',
sources=[join('mtrand', x) for x in
['randomkit.c', 'distributions.c', 'initarray.c']],
build_info = {
'libraries': libs,
'depends': [join('mtrand', '*.h'),
],
'macros': defs,
}
)
else:
config.add_extension('mtrand',
sources=[join('mtrand', x) for x in
['mtrand.c', 'randomkit.c', 'initarray.c',
'distributions.c']]+[generate_libraries],
libraries=libs,
depends=[join('mtrand', '*.h'),
join('mtrand', '*.pyx'),
join('mtrand', '*.pxi'),],
define_macros=defs,
)
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
return config
def testcode_wincrypt():
return """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
return 0;
#else
return 1;
#endif
}
"""
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| StarcoderdataPython |
1666466 | from enum import Enum
Stage = Enum("Stage", "Interphase Mitosis")
# class DNA(object):
class Cell(object):
def __init__(self, cell_id, non_mitosis_len, no_food):
self.is_mitosis = False
self.cell_id = int(cell_id)
self.mitosis_countdown = int(non_mitosis_len)
        self.mitosis_countdown_length = self.mitosis_countdown
self.no_food_alive = int(no_food)
self.no_food_alive_backup = self.no_food_alive
self.alive = True
def __resetNFA(self):
self.no_food_alive = self.no_food_alive_backup
def progress(self, tick, food, isRoom):
# print("Cell: " + str(self.cell_id) + " Simulated progression on tick " + str(tick) + " with food "+ str(food))
food_required = 2
food_gen = 0.1
if food - food_required >=0:
if self.mitosis_countdown < 1 and isRoom:
self.__resetNFA()
                self.mitosis_countdown = self.mitosis_countdown_length
return [food - food_required, self.alive, True]
else:
                # print(self.mitosis_countdown_length)
self.mitosis_countdown = self.mitosis_countdown - 1
return [food + food_gen, self.alive, False]
else:
if self.no_food_alive < 0:
self.alive = False
else:
self.no_food_alive -= 1
return [food, self.alive, False]
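

if __name__ == '__main__':
    # Illustrative run: one cell over a few ticks (all numbers are arbitrary).
    cell = Cell(cell_id=1, non_mitosis_len=3, no_food=2)
    food = 10.0
    for tick in range(5):
        food, alive, divided = cell.progress(tick, food, isRoom=True)
        print(tick, round(food, 1), alive, divided)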
| StarcoderdataPython |
4819159 | <reponame>tbsschroeder/dbas
from nose.tools import *
from splinter import Browser
import logging
from selenium.webdriver.remote.remote_connection import LOGGER
LOGGER.setLevel(logging.WARNING)
_multiprocess_can_split_ = True # if the pipeline crashes please disable the multiprocess
ROOT = 'http://localhost:4284'
BROWSER = 'phantomjs'
'''
.. codeauthor:: <NAME> <<EMAIL>>
'''
PATH = '/switch_language?_LOCALE_='
LANGUAGE = {
'GERMAN': 'de',
'ENGLISH': 'en'
} # to check if the cookies had changed
TEST_STRING = {
'GERMAN': 'Teil des Graduierten-Kollegs',
'ENGLISH': 'part of the graduate'
} # to check if the content of the source_page(html) had changed (ugly)
# english page has a german flag and vice versa
TEST_IMG = {
'GERMAN': 'flags/us-gb',
'ENGLISH': 'flags/de'
} # to check if the flag in the source_page(html) had changed
TIME_TO_PREPARE = 1 # used in setup() of test_LanguageSwitch.py to make sure everything had loaded (important but ugly)
| StarcoderdataPython |
63020 | <reponame>Lznah/SrealityAdresarScrapper<gh_stars>0
import re
from classes.page import Page
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from unidecode import unidecode
class AgentsPage(Page):
def __init__(self, url, agent_arr):
Page.__init__(self, url)
self._agent_arr = agent_arr
def scrape(self):
try:
if self.isNot404():
agents = self._driver.find_elements_by_css_selector(".seller-contact")
for agent in agents:
title = agent.find_element_by_css_selector("a.link.ng-binding")
name = title.text
try:
ic = agent.find_element_by_css_selector(".contact-item[ng-if='data.brokerIco'] .value").text
except NoSuchElementException:
ic_replacement = name.replace(' ', "-").replace(',','').replace('.','')
# remove accents
ic = unidecode(ic_replacement)
ic = ic.lower()
self._agent_arr.append({
'ic': ic,
'name': name,
'estates_count': 0 #todo
})
if not re.match(".*strana=[0-9]*", self._url):
pagination_elements = Page._driver.find_elements_by_css_selector(".numero.ng-binding")
if len(pagination_elements) > 0:
delimiter = int(pagination_elements[0].text.split('–')[1])
total = int(pagination_elements[1].text.replace(" ", ""))
num_pages = int(total/delimiter)+1
for p in range(2,num_pages+1):
Page._stack.append(AgentsPage(self._url+"?strana="+str(p), self._agent_arr))
except NoSuchElementException:
print("Element not found!")
except TimeoutException:
print("Loading took too much time!") | StarcoderdataPython |
74855 | <filename>test/core/end2end/fuzzers/generate_client_examples_of_bad_closing_streams.py
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
os.chdir(os.path.dirname(sys.argv[0]))
streams = {
'server_hanging_response_1_header': (
[0,0,0,4,0,0,0,0,0] + # settings frame
[0,0,0,1,5,0,0,0,1] # trailers
),
'server_hanging_response_2_header2': (
[0,0,0,4,0,0,0,0,0] + # settings frame
[0,0,0,1,4,0,0,0,1] + # headers
[0,0,0,1,5,0,0,0,1] # trailers
),
}
for name, stream in streams.items():
open('client_fuzzer_corpus/%s' % name, 'w').write(bytearray(stream))
| StarcoderdataPython |
3384957 | # -*- coding: utf-8 -*-
from collections import deque
import gym
import numpy as np
import sys
from RL.PPO import PPO
class Chief(object):
def __init__(self, scope, parameter_dict, SESS, MEMORY_DICT, COORD, workers):
env = gym.make(parameter_dict['GAME'])
self.ppo = PPO(scope, parameter_dict, env, workers)
self.sess = SESS
self.MEMORY_DICT = MEMORY_DICT
self.workers = workers
self.UPDATE_STEPS = parameter_dict['UPDATE_STEPS']
self.COORD = COORD
self.NUM_WORKERS = parameter_dict['NUM_WORKERS']
def check(self, PUSH_EVENT, UPDATE_EVENT):
while not self.COORD.should_stop():
UPDATE_EVENT.wait()
min_data_size, _ = self._get_data_size()
if min_data_size >= 1:
PUSH_EVENT.clear()
self._train()
self._update_local_workers_weight()
PUSH_EVENT.set()
UPDATE_EVENT.clear()
def _train(self):
data_stack = deque()
_, max_data_size= self._get_data_size()
while max_data_size > 0:
for key, value in self.MEMORY_DICT.items():
if len(value) > 0:
value = list(value)
tmp = deque()
tmp.append(value[0][0]) # buffer_states
tmp.append(value[0][1]) # buffer_actions
tmp.append(value[0][2]) # buffer_advantage
tmp.append(value[0][3]) # buffer_estimatedReturn
tmp.append(value[0][4]) # current_learningRate
tmp.append(value[0][5]) # buffer_score
self.MEMORY_DICT[key].popleft()
tmp = list(tmp)
data_stack.append(tmp)
_, max_data_size = self._get_data_size()
data_stack = list(data_stack)
data_stack = reversed(sorted(data_stack,key=lambda x: x[5][2]))
data_stack = list(data_stack)
feed_dict = {}
l_mul = data_stack[0][4]
feed_dict[self.ppo.l_mul] = l_mul
for i in range(self.NUM_WORKERS):
feed_dict[self.workers[i].ppo.s] = data_stack[i][0]
feed_dict[self.workers[i].ppo.act] = data_stack[i][1]
feed_dict[self.workers[i].ppo.adv] = data_stack[i][2]
feed_dict[self.workers[i].ppo.str] = data_stack[i][3]
feed_dict[self.workers[i].ppo.l_mul] = l_mul
[self.sess.run(self.ppo.train, feed_dict=feed_dict) for _ in range(self.UPDATE_STEPS)]
self._logs_writer(data_stack)
def _update_local_workers_weight(self):
for worker in self.workers:
update_weight = [localp.assign(chiefp) for chiefp, localp in zip(self.ppo.pipara, worker.ppo.pipara)]
self.sess.run(update_weight)
def _get_data_size(self):
        min_data_size = sys.maxsize
max_data_size = -1
for key, value in self.MEMORY_DICT.items():
min_data_size = min(min_data_size, len(value))
max_data_size = max(max_data_size, len(value))
return min_data_size, max_data_size
def _logs_writer(self, data_stack):
logs = []
for item in data_stack:
logs.append(item[5])
localsteps = logs[0][8]
if localsteps >= 500:
self.COORD.request_stop()
def act(self, s):
s = np.array([s])
action = self.sess.run(self.ppo.ca, {self.ppo.s: s})
return action[0][0]
| StarcoderdataPython |
1653333 | <reponame>adonayab/python_proj1_manager_app
from flask import redirect, render_template, session, flash, request
from models import User, Message
from app import db
from messages.forms import TaskForm
from utils.helpers import badge_general, badge_urgent
from flask import Blueprint
tasks = Blueprint('tasks', __name__)
@tasks.route('/daily-tasks')
def daily_task():
    new_u = bool(badge_urgent())  # This is for the urgent note badge
    new_g = bool(badge_general())  # This is for the general note badge
form = TaskForm()
# mornings = Message.query.order_by(Message.status).filter_by(
# category='high').filter_by(shift='Morning').all()
afternoons = Message.query.order_by(Message.status).filter(
(Message.category == 'high') | (Message.category == 'low')).filter_by(shift='Afternoon').all()
evenings = Message.query.order_by(Message.status).filter(
(Message.category == 'high') | (Message.category == 'low')).filter_by(shift='Evening').all()
mornings = Message.query.order_by(Message.status).filter(
(Message.category == 'high') | (Message.category == 'low')).filter_by(shift='Morning').all()
return render_template('messages/tasks.html',
title="Daily Tasks",
mornings=mornings,
afternoons=afternoons,
evenings=evenings,
form=form,
new_g=new_g,
new_u=new_u)
@tasks.route('/daily-tasks/add', methods=['POST'])
def daily_task_add():
form = TaskForm()
if 'email' not in session:
flash('Login to Add a Task', 'danger')
return redirect('/login')
owner = User.query.filter_by(email=session['email']).first()
if not owner.admin:
flash("Log in as Admin to create a Task", 'danger')
return redirect('/daily-tasks')
if form.validate_on_submit():
message = Message(title='daily-task',
content=form.content.data,
category=form.category.data,
shift=form.shift.data,
owner=owner)
db.session.add(message)
db.session.commit()
flash("Task added Successfully", 'success')
return redirect('/daily-tasks')
return render_template('messages/tasks.html', form=form)
@tasks.route('/daily-tasks/', defaults={'id': ''})
@tasks.route('/daily-tasks/<int:id>/delete')
def delete_task(id):
if 'email' not in session:
flash('Login to Delete this Note', 'danger')
return redirect('/login')
owner = User.query.filter_by(email=session['email']).first()
if not owner.admin:
flash("Log in as admin to delete a Task", 'danger')
return redirect('/daily-tasks')
message = Message.query.filter_by(id=id).first()
db.session.delete(message)
db.session.commit()
flash('Task deleted successfully', 'success')
return redirect('/daily-tasks')
@tasks.route('/daily-tasks/', defaults={'id': ''})
@tasks.route('/daily-tasks/<int:id>/mark')
def mark_task(id):
if 'email' not in session:
flash('Login to Mark this Note', 'danger')
return redirect('/login')
task = Message.query.filter_by(id=id).first()
user = User.query.filter_by(email=session['email']).first()
if task.status == 1:
task.status = 0
task.completed_by = ''
db.session.commit()
flash(f"Marked not complete", 'warning')
return redirect('/daily-tasks')
else:
task.status = 1
task.completed_by = user.name
db.session.commit()
flash(f"Marked complete", 'success')
return redirect('/daily-tasks')
| StarcoderdataPython |
1766026 | <gh_stars>1-10
__author__ = 'tomaszroszko'
| StarcoderdataPython |
1712063 | <reponame>julienc91/utools<filename>utools/math.py
# -*- coding: utf-8 -*-
""" Useful mathematical functions.
"""
from math import factorial
try:
from math import gcd # python 3.5
except ImportError:
from fractions import gcd
def is_prime(n):
""" Miller-Rabin primality test. Keep in mind that this is not a deterministic algorithm: if it return True,
it means that n is probably a prime.
Args:
n (int): the integer to check
Returns:
True if n is probably a prime number, False if it is not
Raises:
TypeError: if n is not an integer
Note:
Adapted from https://rosettacode.org/wiki/Miller%E2%80%93Rabin_primality_test#Python
"""
if not isinstance(n, int):
raise TypeError("Expecting an integer")
if n < 2:
return False
if n in __known_primes:
return True
if any((n % p) == 0 for p in __known_primes):
return False
d, s = n - 1, 0
while not d % 2:
d, s = d >> 1, s + 1
def try_composite(a):
if pow(a, d, n) == 1:
return False
for i in range(s):
if pow(a, 2 ** i * d, n) == n - 1:
return False
return True
return not any(try_composite(a) for a in __known_primes[:16])
__known_primes = [2, 3]
__known_primes += [x for x in range(5, 1000, 2) if is_prime(x)]
def find_divisors(n):
""" Find all the positive divisors of the given integer n.
Args:
n (int): strictly positive integer
Returns:
A generator of all the positive divisors of n
Raises:
TypeError: if n is not an integer
ValueError: if n is negative
"""
if not isinstance(n, int):
raise TypeError("Expecting a strictly positive integer")
if n <= 0:
raise ValueError("Expecting a strictly positive integer")
for i in range(1, int(n**0.5) + 1):
if n % i == 0:
divisors = {i, n//i}
for divisor in divisors:
yield divisor
def count_divisors(n):
""" Count the number of divisors of an integer n
Args:
n (int): strictly positive integer
Returns:
The number of distinct divisors of n
Raises:
TypeError: if n is not an integer
ValueError: if n is negative
"""
if not isinstance(n, int):
raise TypeError("Expecting a strictly positive integer")
if n <= 0:
raise ValueError("Expecting a strictly positive integer")
number_of_divisors = 1
remain = n
for p in prime_generator():
if p > n:
return number_of_divisors
exponent = 1
while remain % p == 0:
remain = remain // p
exponent += 1
number_of_divisors *= exponent
if remain == 1:
return number_of_divisors
def prime_generator(p_min=2, p_max=None):
""" Generator of prime numbers using the sieve of Eratosthenes.
Args:
p_min (int): prime numbers lower than p_min will not be in the resulting primes
p_max (int): the generator will stop when this value is reached, it means that there
will be no prime bigger than this number in the resulting primes. If p_max
is None, there will not be any upper limit
Returns:
A generator of all the consecutive primes between p_min and p_max
Raises:
TypeError: if p_min or p_max is not an integer
"""
if not isinstance(p_min, int):
raise TypeError("Expecting an integer")
if p_max is not None and not isinstance(p_max, int):
raise TypeError("Expecting an integer")
q = max(p_min, 3)
if q % 2 == 0:
q += 1
if p_min <= 2 and (p_max is None or p_max >= 2):
yield 2 # outside the while block to make the double increment optimization work
while p_max is None or q <= p_max:
if is_prime(q):
yield q
q += 2 # avoid losing time in checking primality of even numbers
def sieve_of_eratosthenes(p_min=2, p_max=None):
""" Generator of prime numbers using the sieve of Eratosthenes.
Note:
Adapted from http://code.activestate.com/recipes/117119/
Args:
p_min (int): prime numbers lower than p_min will not be in the resulting primes
p_max (int): the generator will stop when this value is reached, it means that there
will be no prime bigger than this number in the resulting primes. If p_max
is None, there will not be any upper limit
Returns:
A generator of all the consecutive primes between p_min and p_max
Raises:
TypeError: if p_min or p_max is not an integer
"""
if not isinstance(p_min, int):
raise TypeError("Expecting an integer")
if p_max is not None and not isinstance(p_max, int):
raise TypeError("Expecting an integer")
sieve = {}
q = 2
while p_max is None or q <= p_max:
if q not in sieve:
if q >= p_min:
yield q
sieve[q * q] = [q]
else:
for p in sieve[q]:
sieve.setdefault(p + q, []).append(p)
del sieve[q]
q += 1
def binomial_coefficient(n, k):
""" Calculate the binomial coefficient indexed by n and k.
Args:
n (int): positive integer
k (int): positive integer
Returns:
The binomial coefficient indexed by n and k
Raises:
TypeError: If either n or k is not an integer
ValueError: If either n or k is negative, or if k is strictly greater than n
"""
if not isinstance(k, int) or not isinstance(n, int):
raise TypeError("Expecting positive integers")
if k > n:
raise ValueError("k must be lower or equal than n")
if k < 0 or n < 0:
raise ValueError("Expecting positive integers")
return factorial(n) // (factorial(k) * factorial(n - k))
def eulers_totient(n):
""" Calculate the value of Euler's totient for a given integer
Args:
n (int): strictly positive integer
Returns:
The value of Euler's totient for n
Raises:
        TypeError: if n is not an integer
        ValueError: if n is not strictly positive
"""
if not isinstance(n, int):
raise TypeError("Expecting a strictly positive integer")
if n <= 0:
raise ValueError("Expecting a strictly positive integer")
if n == 1:
return 1
result = 0
for i in range(1, n):
if gcd(i, n) == 1:
result += 1
return result
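

if __name__ == '__main__':
    # Quick demonstration of the helpers above.
    print(is_prime(97))                   # True
    print(sorted(find_divisors(28)))      # [1, 2, 4, 7, 14, 28]
    print(count_divisors(28))             # 6
    print(list(prime_generator(10, 30)))  # [11, 13, 17, 19, 23, 29]
    print(binomial_coefficient(5, 2))     # 10
    print(eulers_totient(12))             # 4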
| StarcoderdataPython |
3295492 | # Copyright 2017 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logical sessions for ordering sequential operations.
Requires MongoDB 3.6.
.. versionadded:: 3.6
Causally Consistent Reads
=========================
.. code-block:: python
with client.start_session(causal_consistency=True) as session:
collection = client.db.collection
collection.update_one({'_id': 1}, {'$set': {'x': 10}}, session=session)
secondary_c = collection.with_options(
read_preference=ReadPreference.SECONDARY)
# A secondary read waits for replication of the write.
secondary_c.find_one({'_id': 1}, session=session)
If `causal_consistency` is True (the default), read operations that use
the session are causally after previous read and write operations. Using a
causally consistent session, an application can read its own writes and is
guaranteed monotonic reads, even when reading from replica set secondaries.
.. mongodoc:: causal-consistency
.. _transactions-ref:
Transactions
============
MongoDB 4.0 adds support for transactions on replica set primaries. A
transaction is associated with a :class:`ClientSession`. To start a transaction
on a session, use :meth:`ClientSession.start_transaction` in a with-statement.
Then, execute an operation within the transaction by passing the session to the
operation:
.. code-block:: python
orders = client.db.orders
inventory = client.db.inventory
with client.start_session() as session:
with session.start_transaction():
orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}},
{"$inc": {"qty": -100}}, session=session)
Upon normal completion of the ``with session.start_transaction()`` block, the
transaction automatically calls :meth:`ClientSession.commit_transaction`.
If the block exits with an exception, the transaction automatically calls
:meth:`ClientSession.abort_transaction`.
For multi-document transactions, you can only specify read/write (CRUD)
operations on existing collections. For example, a multi-document transaction
cannot include create or drop collection/index operations, including an
insert operation that would result in the creation of a new collection.
A session may only have a single active transaction at a time; multiple
transactions on the same session can be executed in sequence.
.. versionadded:: 3.7
.. seealso:: The MongoDB beta documentation for
`transactions <https://docs-beta-transactions.mongodb.com/transactions/>`_
Classes
=======
"""
import collections
import uuid
from bson.binary import Binary
from bson.int64 import Int64
from bson.py3compat import abc
from bson.timestamp import Timestamp
from pymongo import monotonic
from pymongo.errors import (ConfigurationError,
ConnectionFailure,
InvalidOperation,
OperationFailure)
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference, _ServerMode
from pymongo.write_concern import WriteConcern
class SessionOptions(object):
"""Options for a new :class:`ClientSession`.
:Parameters:
- `causal_consistency` (optional): If True (the default), read
operations are causally ordered within the session.
- `auto_start_transaction` (optional): If True, any operation using
the session automatically starts a transaction.
- `default_transaction_options` (optional): The default
TransactionOptions to use for transactions started on this session.
"""
def __init__(self,
causal_consistency=True,
auto_start_transaction=False,
default_transaction_options=None):
self._causal_consistency = causal_consistency
self._auto_start_transaction = auto_start_transaction
if default_transaction_options is not None:
if not isinstance(default_transaction_options, TransactionOptions):
raise TypeError(
"default_transaction_options must be an instance of "
"pymongo.client_session.TransactionOptions, not: %r" %
(default_transaction_options,))
self._default_transaction_options = default_transaction_options
@property
def causal_consistency(self):
"""Whether causal consistency is configured."""
return self._causal_consistency
@property
def auto_start_transaction(self):
"""Whether any operation using the session automatically starts a
transaction.
.. versionadded:: 3.7
"""
return self._auto_start_transaction
@property
def default_transaction_options(self):
"""The default TransactionOptions to use for transactions started on
this session.
.. versionadded:: 3.7
"""
return self._default_transaction_options
class TransactionOptions(object):
"""Options for :meth:`ClientSession.start_transaction`.
:Parameters:
- `read_concern`: The :class:`~read_concern.ReadConcern` to use for this
transaction.
- `write_concern`: The :class:`~write_concern.WriteConcern` to use for
this transaction.
.. versionadded:: 3.7
"""
def __init__(self, read_concern=None, write_concern=None,
read_preference=None):
self._read_concern = read_concern
self._write_concern = write_concern
self._read_preference = read_preference
if read_concern is not None:
if not isinstance(read_concern, ReadConcern):
raise TypeError("read_concern must be an instance of "
"pymongo.read_concern.ReadConcern, not: %r" %
(read_concern,))
if write_concern is not None:
if not isinstance(write_concern, WriteConcern):
raise TypeError("write_concern must be an instance of "
"pymongo.write_concern.WriteConcern, not: %r" %
(write_concern,))
if not write_concern.acknowledged:
raise ConfigurationError(
"transactions must use an acknowledged write concern, "
"not: %r" % (write_concern,))
if read_preference is not None:
if not isinstance(read_preference, _ServerMode):
raise TypeError("%r is not valid for read_preference. See "
"pymongo.read_preferences for valid "
"options." % (read_preference,))
@property
def read_concern(self):
"""This transaction's :class:`~read_concern.ReadConcern`."""
return self._read_concern
@property
def write_concern(self):
"""This transaction's :class:`~write_concern.WriteConcern`."""
return self._write_concern
@property
def read_preference(self):
"""This transaction's :class:`~read_preference.ReadPreference`."""
return self._read_preference
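# A usage sketch (added; not from the upstream docs): build validated
# transaction options and install them as the session default.
#
#   txn_opts = TransactionOptions(
#       read_concern=ReadConcern("majority"),
#       write_concern=WriteConcern(w="majority"),
#       read_preference=ReadPreference.PRIMARY)
#   opts = SessionOptions(default_transaction_options=txn_opts)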
class _TransactionContext(object):
"""Internal transaction context manager for start_transaction."""
def __init__(self, session):
self.__session = session
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.__session._in_transaction:
if exc_val is None:
self.__session.commit_transaction()
else:
self.__session.abort_transaction()
class _Transaction(object):
"""Internal class to hold transaction information in a ClientSession."""
def __init__(self, opts):
self.opts = opts
self.sent_command = False
class ClientSession(object):
"""A session for ordering sequential operations."""
def __init__(self, client, server_session, options, authset):
# A MongoClient, a _ServerSession, a SessionOptions, and a set.
self._client = client
self._server_session = server_session
self._options = options
self._authset = authset
self._cluster_time = None
self._operation_time = None
self._transaction = None
def end_session(self):
"""Finish this session. If a transaction has started, abort it.
It is an error to use the session or any derived
:class:`~pymongo.database.Database`,
:class:`~pymongo.collection.Collection`, or
:class:`~pymongo.cursor.Cursor` after the session has ended.
"""
self._end_session(lock=True)
def _end_session(self, lock):
if self._server_session is not None:
try:
if self._in_transaction:
self.abort_transaction()
finally:
self._client._return_server_session(self._server_session, lock)
self._server_session = None
def _check_ended(self):
if self._server_session is None:
raise InvalidOperation("Cannot use ended session")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._end_session(lock=True)
@property
def client(self):
"""The :class:`~pymongo.mongo_client.MongoClient` this session was
created from.
"""
return self._client
@property
def options(self):
"""The :class:`SessionOptions` this session was created with."""
return self._options
@property
def session_id(self):
"""A BSON document, the opaque server session identifier."""
self._check_ended()
return self._server_session.session_id
@property
def cluster_time(self):
"""The cluster time returned by the last operation executed
in this session.
"""
return self._cluster_time
@property
def operation_time(self):
"""The operation time returned by the last operation executed
in this session.
"""
return self._operation_time
def _inherit_option(self, name, val):
"""Return the inherited TransactionOption value."""
if val:
return val
txn_opts = self.options.default_transaction_options
val = txn_opts and getattr(txn_opts, name)
if val:
return val
return getattr(self.client, name)
def start_transaction(self, read_concern=None, write_concern=None,
read_preference=None):
"""Start a multi-statement transaction.
Takes the same arguments as :class:`TransactionOptions`.
.. versionadded:: 3.7
"""
self._check_ended()
if self._in_transaction:
raise InvalidOperation("Transaction already in progress")
read_concern = self._inherit_option("read_concern", read_concern)
write_concern = self._inherit_option("write_concern", write_concern)
read_preference = self._inherit_option(
"read_preference", read_preference)
self._transaction = _Transaction(TransactionOptions(
read_concern, write_concern, read_preference))
self._server_session._transaction_id += 1
return _TransactionContext(self)
def commit_transaction(self):
"""Commit a multi-statement transaction.
.. versionadded:: 3.7
"""
self._finish_transaction("commitTransaction")
def abort_transaction(self):
"""Abort a multi-statement transaction.
.. versionadded:: 3.7
"""
try:
self._finish_transaction("abortTransaction")
except (OperationFailure, ConnectionFailure):
pass
def _finish_transaction(self, command_name):
self._check_ended()
if not self._in_transaction_or_auto_start():
raise InvalidOperation("No transaction started")
try:
if not self._transaction.sent_command:
# Not really started.
return
# TODO: commitTransaction should be a retryable write.
# Use _command directly because commit/abort are writes and must
# always go to the primary.
with self._client._socket_for_writes() as sock_info:
return self._client.admin._command(
sock_info,
command_name,
session=self,
write_concern=self._transaction.opts.write_concern,
parse_write_concern_error=True)
finally:
self._transaction = None
def _advance_cluster_time(self, cluster_time):
"""Internal cluster time helper."""
if self._cluster_time is None:
self._cluster_time = cluster_time
elif cluster_time is not None:
if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]:
self._cluster_time = cluster_time
def advance_cluster_time(self, cluster_time):
"""Update the cluster time for this session.
:Parameters:
- `cluster_time`: The
:data:`~pymongo.client_session.ClientSession.cluster_time` from
another `ClientSession` instance.
"""
if not isinstance(cluster_time, abc.Mapping):
raise TypeError(
"cluster_time must be a subclass of collections.Mapping")
if not isinstance(cluster_time.get("clusterTime"), Timestamp):
raise ValueError("Invalid cluster_time")
self._advance_cluster_time(cluster_time)
def _advance_operation_time(self, operation_time):
"""Internal operation time helper."""
if self._operation_time is None:
self._operation_time = operation_time
elif operation_time is not None:
if operation_time > self._operation_time:
self._operation_time = operation_time
def advance_operation_time(self, operation_time):
"""Update the operation time for this session.
:Parameters:
- `operation_time`: The
:data:`~pymongo.client_session.ClientSession.operation_time` from
another `ClientSession` instance.
"""
if not isinstance(operation_time, Timestamp):
raise TypeError("operation_time must be an instance "
"of bson.timestamp.Timestamp")
self._advance_operation_time(operation_time)
@property
def has_ended(self):
"""True if this session is finished."""
return self._server_session is None
@property
def _in_transaction(self):
"""True if this session has an active multi-statement transaction."""
return self._transaction is not None
def _in_transaction_or_auto_start(self):
"""True if this session has an active transaction or will have one."""
if self._in_transaction:
return True
if self.options.auto_start_transaction:
self.start_transaction()
return True
return False
def _txn_read_preference(self):
"""Return read preference of this transaction or None."""
if self._in_transaction_or_auto_start():
return self._transaction.opts.read_preference
return None
def _apply_to(self, command, is_retryable, read_preference):
self._check_ended()
self._in_transaction_or_auto_start()
self._server_session.last_use = monotonic.time()
command['lsid'] = self._server_session.session_id
if is_retryable:
self._server_session._transaction_id += 1
command['txnNumber'] = self._server_session.transaction_id
return
if self._in_transaction:
# TODO: hack
name = next(iter(command))
if name not in ('commitTransaction', 'abortTransaction'):
command.pop('writeConcern', None)
if read_preference != ReadPreference.PRIMARY:
raise InvalidOperation(
'read preference in a transaction must be primary, not: '
'%r' % (read_preference,))
if not self._transaction.sent_command:
# First command begins a new transaction.
self._transaction.sent_command = True
command['startTransaction'] = True
if self._transaction.opts.read_concern:
rc = self._transaction.opts.read_concern.document
else:
rc = {}
if (self.options.causal_consistency
and self.operation_time is not None):
rc['afterClusterTime'] = self.operation_time
if rc:
command['readConcern'] = rc
command['txnNumber'] = self._server_session.transaction_id
command['autocommit'] = False
def _retry_transaction_id(self):
self._check_ended()
self._server_session.retry_transaction_id()
class _ServerSession(object):
def __init__(self):
# Ensure id is type 4, regardless of CodecOptions.uuid_representation.
self.session_id = {'id': Binary(uuid.uuid4().bytes, 4)}
self.last_use = monotonic.time()
self._transaction_id = 0
def timed_out(self, session_timeout_minutes):
idle_seconds = monotonic.time() - self.last_use
# Timed out if we have less than a minute to live.
return idle_seconds > (session_timeout_minutes - 1) * 60
@property
def transaction_id(self):
"""Positive 64-bit integer."""
return Int64(self._transaction_id)
def retry_transaction_id(self):
self._transaction_id -= 1
class _ServerSessionPool(collections.deque):
"""Pool of _ServerSession objects.
This class is not thread-safe, access it while holding the Topology lock.
"""
def pop_all(self):
ids = []
while self:
ids.append(self.pop().session_id)
return ids
def get_server_session(self, session_timeout_minutes):
# Although the Driver Sessions Spec says we only clear stale sessions
# in return_server_session, PyMongo can't take a lock when returning
# sessions from a __del__ method (like in Cursor.__die), so it can't
# clear stale sessions there. In case many sessions were returned via
# __del__, check for stale sessions here too.
self._clear_stale(session_timeout_minutes)
# The most recently used sessions are on the left.
while self:
s = self.popleft()
if not s.timed_out(session_timeout_minutes):
return s
return _ServerSession()
def return_server_session(self, server_session, session_timeout_minutes):
self._clear_stale(session_timeout_minutes)
if not server_session.timed_out(session_timeout_minutes):
self.appendleft(server_session)
def return_server_session_no_lock(self, server_session):
self.appendleft(server_session)
def _clear_stale(self, session_timeout_minutes):
# Clear stale sessions. The least recently used are on the right.
while self:
if self[-1].timed_out(session_timeout_minutes):
self.pop()
else:
# The remaining sessions also haven't timed out.
break
| StarcoderdataPython |
197110 | # -*- coding: utf-8 -*-
# @Time : 2018/05/18
# @Author : <NAME>
import datetime
import json
import cv2
import numpy as np
import time
import core
import os
from PIL import Image, ImageDraw
def transformation_points(src_img, src_points, dst_img, dst_points):
src_points = src_points.astype(np.float64)
dst_points = dst_points.astype(np.float64)
# print(src_points.shape)
# print(dst_points)
c1 = np.mean(src_points, axis=0)
c2 = np.mean(dst_points, axis=0)
src_points -= c1
dst_points -= c2
s1 = np.std(src_points)
s2 = np.std(dst_points)
src_points /= s1
dst_points /= s2
u, s, vt = np.linalg.svd(src_points.T * dst_points)
r = (u * vt).T
m = np.vstack([np.hstack(((s2 / s1) * r, c2.T - (s2 / s1) * r * c1.T)), np.matrix([0., 0., 1.])])
output = cv2.warpAffine(dst_img, m[:2],
(src_img.shape[1], src_img.shape[0]),
borderMode=cv2.BORDER_TRANSPARENT,
flags=cv2.WARP_INVERSE_MAP)
return output
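# Note (added): transformation_points solves a least-squares similarity
# alignment (Procrustes analysis): both landmark sets are centered and scaled
# to unit variance, an SVD gives the optimal rotation, and the pieces are
# assembled into the 2x3 affine matrix applied by cv2.warpAffine.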
def tran_matrix(src_img, src_points, dst_img, dst_points):
h = cv2.findHomography(dst_points, src_points)
output = cv2.warpAffine(dst_img, h[0][:2], (src_img.shape[1], src_img.shape[0]),
borderMode=cv2.BORDER_TRANSPARENT,
flags=cv2.WARP_INVERSE_MAP)
return output
def correct_color(img1, img2, landmark):
blur_amount = 0.4 * np.linalg.norm(
np.mean(landmark[core.LEFT_EYE_POINTS], axis=0)
- np.mean(landmark[core.RIGHT_EYE_POINTS], axis=0)
)
blur_amount = int(blur_amount)
if blur_amount % 2 == 0:
blur_amount += 1
img1_blur = cv2.GaussianBlur(img1, (blur_amount, blur_amount), 0)
img2_blur = cv2.GaussianBlur(img2, (blur_amount, blur_amount), 0)
img2_blur += (128 * (img2_blur <= 1.0)).astype(img2_blur.dtype)
return img2.astype(np.float64) * img1_blur.astype(np.float64) / img2_blur.astype(np.float64)
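# Note (added): correct_color transfers img1's colour onto img2 by scaling
# img2 with the ratio of the two Gaussian-blurred images; the kernel size is
# derived from the inter-ocular distance so the correction is scale-aware.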
def tran_src(src_img, src_points, dst_points, face_area=None):
    # print(1111111)
    # print(src_img.shape)  # debug output, disabled
jaw = core.JAW_END
dst_list = dst_points \
+ core.matrix_rectangle(face_area[0], face_area[1], face_area[2], face_area[3]) \
+ core.matrix_rectangle(0, 0, src_img.shape[1], src_img.shape[0])
src_list = src_points \
+ core.matrix_rectangle(face_area[0], face_area[1], face_area[2], face_area[3]) \
+ core.matrix_rectangle(0, 0, src_img.shape[1], src_img.shape[0])
jaw_points = []
for i in range(0, jaw):
# print(i)
jaw_points.append(dst_list[i])
jaw_points.append(src_list[i])
warp_jaw = cv2.convexHull(np.array(jaw_points), returnPoints=False)
warp_jaw = warp_jaw.tolist()
for i in range(0, len(warp_jaw)):
warp_jaw[i] = warp_jaw[i][0]
warp_jaw.sort()
if len(warp_jaw) <= jaw:
dst_list = dst_list[jaw - len(warp_jaw):]
src_list = src_list[jaw - len(warp_jaw):]
for i in range(0, len(warp_jaw)):
dst_list[i] = jaw_points[int(warp_jaw[i])]
src_list[i] = jaw_points[int(warp_jaw[i])]
else:
for i in range(0, jaw):
if len(warp_jaw) > jaw and warp_jaw[i] == 2 * i and warp_jaw[i + 1] == 2 * i + 1:
warp_jaw.remove(2 * i)
dst_list[i] = jaw_points[int(warp_jaw[i])]
    dt = core.measure_triangle(src_img, dst_list, src_points, dst_points)
res_img = np.zeros(src_img.shape, dtype=src_img.dtype)
for i in range(0, len(dt)):
t_src = []
t_dst = []
for j in range(0, 3):
t_src.append(src_list[dt[i][j]])
t_dst.append(dst_list[dt[i][j]])
if(checkLine(t_src) or checkLine(t_dst)):
# print("not checked")
continue
else:
core.affine_triangle(src_img, res_img, t_src, t_dst)
return res_img
def merge_img(src_img, dst_img, dst_matrix, dst_points, k_size=None, mat_multiple=None):
face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)
for group in core.OVERLAY_POINTS:
cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))
r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))
center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))
if mat_multiple:
mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))
if k_size:
face_mask = cv2.blur(face_mask, k_size, center)
return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE)
def drawLine(src_img,points):
src_img = src_img.astype(np.uint8)
im = Image.fromarray(src_img)
draw = ImageDraw.Draw(im)
# for i in points:
draw.line(points,width = 5,fill = (255, 0, 0))
return im
def morph_img(src_img, src_points, dst_img, dst_points, alpha=0.5):
morph_points = []
src_img = src_img.astype(np.float32)
dst_img = dst_img.astype(np.float32)
res_img = np.zeros(src_img.shape, src_img.dtype)
# for i in src_points:
# print(i)
    # The goal of this step is to adjust the face shape: for each landmark,
    # take an intermediate point between the source and the target keypoint,
    # weighted by alpha.
for i in range(0, len(src_points)):
x = (1 - alpha) * src_points[i][0] + alpha * dst_points[i][0]
y = (1 - alpha) * src_points[i][1] + alpha * dst_points[i][1]
morph_points.append((x, y))
    dt = core.measure_triangle(src_img, morph_points, src_points, dst_points)
for i in range(0, len(dt)):
t1 = []
t2 = []
t = []
for j in range(0, 3):
t1.append(src_points[dt[i][j]])
t2.append(dst_points[dt[i][j]])
t.append(morph_points[dt[i][j]])
if(checkLine(t) or checkLine(t1) or checkLine(t2)):
continue
core.morph_triangle(src_img, dst_img, res_img, t1, t2, t, alpha,i)
return res_img
def checkLine(t):
if(len(t) != 3):
return True
if(t[0] == t[1] or t[1] == t[2] or t[0] == t[2]) :
return True
return False
def face_merge(
src_img,
dst_img,
out_img,
alpha=0.75,
k_size=(10,5),
mat_multiple=0.5
):
src_matrix, src_points, src_faces,err = core.face_points(src_img)
    ## Pass in the face data read when the target person was first located,
    ## to cut down on calls to the face-recognition API
dst_matrix, dst_points, dst_faces,err = core.face_points(dst_img)
    if not isinstance(src_img, np.ndarray):
        src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
    if not isinstance(dst_img, np.ndarray):
        dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)
dst_img = transformation_points(src_img=src_img, src_points=src_matrix[core.FACE_POINTS],
dst_img=dst_img, dst_points=dst_matrix[core.FACE_POINTS])
    # transform
trans_file = 'images/' + "trans"+ '.jpg'
cv2.imwrite(trans_file, dst_img)
_, dst_points, trans_faces, err = core.face_points(dst_img)
dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)
    # blend
# morph_file = 'images/' + "merge" + '.jpg'
# cv2.imwrite(morph_file, dst_img)
dst_matrix, dst_points, morph_faces,err = core.face_points(dst_img)
if isinstance(src_faces,dict):
src_img = tran_src(src_img, src_points, dst_points,
[int(src_faces['x']), int(src_faces['y']), int(src_faces['width']),
int(src_faces['height'])])
else:
src_img = tran_src(src_img, src_points, dst_points, [int(src_faces[-1][0]),int(src_faces[-1][1]),int(src_faces[-1][2]),int(src_faces[-1][3])])
# cv2.imwrite('images/' + "tran_src" + '.jpg',src_img)
dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points, k_size, mat_multiple)
    # remove the temporarily generated files
# os.remove(trans_file)
# os.remove(morph_file)
cv2.imwrite(out_img, dst_img)
return err
def face_merge_ret(
src_img,
dst_img,
out_img,
alpha=0.75,
k_size=(10,5),
mat_multiple=0.5
):
src_matrix, src_points, src_faces,err = core.face_points(src_img)
if(err != 0 or len(src_points) == 0):
return src_img
    ## Pass in the face data read when the target person was first located,
    ## to cut down on calls to the face-recognition API
dst_matrix, dst_points, dst_faces,err = core.face_points(dst_img)
if(err != 0 or len(dst_points) == 0):
return src_img
if not (isinstance(src_img,np.ndarray)):
print("read")
src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
if not (isinstance(dst_img,np.ndarray)):
dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)
dst_img = transformation_points(src_img=src_img, src_points=src_matrix[core.FACE_POINTS],
dst_img=dst_img, dst_points=dst_matrix[core.FACE_POINTS])
    # transform
trans_file = 'images/' + "trans"+ '.jpg'
cv2.imwrite(trans_file, dst_img)
_, dst_points, trans_faces, err = core.face_points(dst_img)
dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)
    # blend
morph_file = 'images/' + "merge" + '.jpg'
cv2.imwrite(morph_file, dst_img)
dst_matrix, dst_points, morph_faces,err = core.face_points(dst_img)
if isinstance(src_faces,dict):
src_img = tran_src(src_img, src_points, dst_points,
[int(src_faces['x']), int(src_faces['y']), int(src_faces['width']),
int(src_faces['height'])])
else:
src_img = tran_src(src_img, src_points, dst_points, [int(src_faces[-1][0]),int(src_faces[-1][1]),int(src_faces[-1][2]),int(src_faces[-1][3])])
cv2.imwrite('images/' + "tran_src" + '.jpg',src_img)
dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points, k_size, mat_multiple)
cv2.imwrite(out_img,dst_img)
return dst_img | StarcoderdataPython |
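if __name__ == '__main__':
    # Minimal demo sketch (file paths are placeholders, not from the original
    # project): blend the face from dst.jpg into src.jpg and save the result.
    face_merge('images/src.jpg', 'images/dst.jpg', 'images/out.jpg', alpha=0.75)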
4833834 | import os
from amitools.vamos.path import VolumeManager, resolve_sys_path
from amitools.vamos.cfgcore import ConfigDict
def path_volume_resolve_sys_path_test(tmpdir):
rsp = resolve_sys_path
p = str(tmpdir)
assert rsp(p) == p
# user home
assert rsp("~") == os.path.expanduser("~")
# env var
os.environ["TEST_PATH"] = p
assert rsp("${TEST_PATH}") == p
assert rsp("${TEST_PATH}/bla") == os.path.join(p, "bla")
def path_volume_add_del_test(tmpdir):
v = VolumeManager()
assert v.setup()
assert v.get_all_names() == []
my_path = str(tmpdir.mkdir("bla"))
no_path = str(tmpdir.join("hugo"))
# ok
vol = v.add_volume("My:" + my_path)
assert vol
assert v.get_all_names() == ["My"]
assert v.is_volume("MY")
assert vol.is_setup
assert vol.get_path() == my_path
assert v.add_volume("foo:" + my_path)
assert v.get_all_names() == ["My", "foo"]
# duplicate path mapping
assert not v.add_volume("foo:" + my_path)
# duplicate path name
assert not v.add_volume("my:" + no_path)
# invalid path
assert not v.add_volume("foo:" + no_path)
# ok
assert v.del_volume("my")
assert not vol.is_setup
assert v.get_all_names() == ["foo"]
# invalid name
assert not v.del_volume("baz")
# shutdown
v.shutdown()
def path_volume_add_local_test(tmpdir):
vols_dir = str(tmpdir.join("volumes"))
v = VolumeManager(vols_base_dir=vols_dir)
v.setup()
# without create
assert not v.add_volume("My")
# with create
vol = v.add_volume("My?create")
assert vol
# check for vol dir
vol_path = os.path.join(vols_dir, "My")
assert os.path.isdir(vol_path)
assert vol.get_path() == vol_path
# create multiple
vols = v.add_volumes(["foo?create", "bar?create"])
assert vols
for vol in vols:
vol_path = os.path.join(vols_dir, vol.get_name())
assert os.path.isdir(vol_path)
assert vol.get_path() == vol_path
# shutdown
v.shutdown()
def path_volume_create_rel_sys_path_test(tmpdir):
v = VolumeManager()
org = tmpdir.mkdir("bla")
my_path = str(org)
# ok
vol = v.add_volume("My:" + my_path)
assert vol
# single path
path = vol.create_rel_sys_path("bla")
assert path == str(org.join("bla"))
assert os.path.isdir(path)
# multi path
path = vol.create_rel_sys_path(["foo", "bar"])
assert path == str(org.join("foo").join("bar"))
assert os.path.isdir(path)
def path_volume_sys_to_ami_test(tmpdir):
v = VolumeManager()
mp = tmpdir.mkdir("bla")
my_path = str(mp)
no_path = str(tmpdir.join("hugo"))
mp2 = mp.mkdir("blub")
my_path2 = str(mp2)
assert v.add_volume("My:" + my_path)
assert v.add_volume("nested:" + my_path2)
    # existing path
s2a = v.sys_to_ami_path
assert s2a(my_path) == "My:"
assert s2a(str(mp.join("foo"))) == "My:foo"
# expect nested path
assert s2a(my_path2) == "nested:"
assert s2a(str(mp2.join("bla/blub"))) == "nested:bla/blub"
# non existing
assert s2a(str(tmpdir)) is None
    # not absolute
assert s2a("bla") is None
def path_volume_ami_to_sys_test(tmpdir):
v = VolumeManager()
mp = tmpdir.mkdir("bla")
my_path = str(mp)
mp2 = mp.mkdir("Foo").mkdir("BAR").mkdir("baZ")
# case insensitive file system?
ci_fs = os.path.exists(os.path.join(my_path, "foo"))
sub_path = str(mp2)
assert v.add_volume("My:" + my_path)
# base path
a2s = v.ami_to_sys_path
assert a2s("my:") == my_path
assert a2s("my:unkown/PATH") == os.path.join(my_path, "unkown", "PATH")
# follow along case of path in sys fs
assert a2s("my:foo/bar/baz") == sub_path
# fast mode on case insensitive fs does not adjust ami path
if ci_fs:
assert a2s("my:foo", True) == os.path.join(my_path, "foo")
else:
assert a2s("my:foo", True) == os.path.join(my_path, "Foo")
def path_volume_cfg_test(tmpdir):
my_path = str(tmpdir.mkdir("bla"))
v = VolumeManager()
cfg = ConfigDict({"volumes": ["my:" + my_path]})
assert v.parse_config(cfg)
assert v.get_all_names() == ["my"]
assert v.is_volume("MY")
def path_volume_create_test(tmpdir):
v = VolumeManager(str(tmpdir))
assert v.setup()
spec = "my:" + str(tmpdir) + "/bla"
# dir does not exist -> can't create
assert not v.add_volume(spec)
# create
assert v.add_volume(spec + "?create")
# check
assert tmpdir.join("bla").check(dir=1)
# shutdown
v.shutdown()
def path_volume_temp_test(tmpdir):
v = VolumeManager(str(tmpdir))
assert v.setup()
spec = "my:" + str(tmpdir)
# dir does exist -> no temp possible
assert not v.add_volume(spec + "?temp")
# create temp
spec += "/bla"
assert v.add_volume(spec + "?temp")
# check that temp dir exists
assert tmpdir.join("bla").check(dir=1)
# shutdown
v.shutdown()
# now temp is gone
assert not tmpdir.join("bla").check()
| StarcoderdataPython |
119100 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# main_app.py
import sys
import logging
import trans as tr
logger = logging.getLogger(__name__)
opt = None
text = None
try:
    opt = sys.argv[1]
    text = " ".join(sys.argv[2:])
except IndexError:
    logger.critical('The command-line format may be incorrect.')
    exit()
opt = opt.strip("-")
print(tr.convert(text=text, lang=opt))
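# Usage sketch (hypothetical arguments; the leading option is the language
# code handed to trans.convert after its dashes are stripped):
#   python main_app.py -en hello world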
| StarcoderdataPython |
1710317 | # -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the cache.py module."""
from __future__ import print_function
import datetime
import os
import mock
from chromite.lib import cache
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import gs_unittest
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import retry_util
class CacheReferenceTest(cros_test_lib.TestCase):
"""Tests for CacheReference.
Largely focused on exercising the API other objects expect from it.
"""
# pylint: disable=protected-access
def setUp(self):
# These are the funcs CacheReference expects the cache object to have.
spec = (
'GetKeyPath',
'_Insert',
'_InsertText',
'_KeyExists',
'_LockForKey',
'_Remove',
)
self.cache = mock.Mock(spec=spec)
self.lock = mock.MagicMock()
self.lock.path = 'some/path'
self.cache._LockForKey.return_value = self.lock
def testContext(self):
"""Verify we can use it as a context manager."""
# We should set the acquire member and grab/release the lock.
ref = cache.CacheReference(self.cache, 'key')
self.assertFalse(ref.acquired)
self.assertFalse(self.lock.__enter__.called)
with ref as newref:
self.assertEqual(ref, newref)
self.assertTrue(ref.acquired)
self.assertTrue(self.lock.__enter__.called)
self.assertFalse(self.lock.__exit__.called)
self.assertFalse(ref.acquired)
self.assertTrue(self.lock.__exit__.called)
def testPath(self):
"""Verify we get a file path for the ref."""
self.cache.GetKeyPath.return_value = '/foo/bar'
ref = cache.CacheReference(self.cache, 'key')
self.assertEqual(ref.path, '/foo/bar')
self.cache.GetKeyPath.assert_called_once_with('key')
def testLocking(self):
"""Verify Acquire & Release work as expected."""
ref = cache.CacheReference(self.cache, 'key')
# Check behavior when the lock is free.
self.assertRaises(AssertionError, ref.Release)
self.assertFalse(ref.acquired)
# Check behavior when the lock is held.
self.assertEqual(ref.Acquire(), None)
self.assertRaises(AssertionError, ref.Acquire)
self.assertTrue(ref.acquired)
# Check behavior after the lock is freed.
self.assertEqual(ref.Release(), None)
self.assertFalse(ref.acquired)
def testExists(self):
"""Verify Exists works when the entry is not in the cache."""
ref = cache.CacheReference(self.cache, 'key')
self.cache._KeyExists.return_value = False
self.assertFalse(ref.Exists())
def testExistsMissing(self):
"""Verify Exists works when the entry is in the cache."""
ref = cache.CacheReference(self.cache, 'key')
self.cache._KeyExists.return_value = True
self.assertTrue(ref.Exists())
def testAssign(self):
"""Verify Assign works as expected."""
ref = cache.CacheReference(self.cache, 'key')
ref.Assign('/foo')
self.cache._Insert.assert_called_once_with('key', '/foo')
def testAssignText(self):
"""Verify AssignText works as expected."""
ref = cache.CacheReference(self.cache, 'key')
ref.AssignText('text!')
self.cache._InsertText.assert_called_once_with('key', 'text!')
def testRemove(self):
"""Verify Remove works as expected."""
ref = cache.CacheReference(self.cache, 'key')
ref.Remove()
self.cache._Remove.assert_called_once_with('key')
def testSetDefault(self):
"""Verify SetDefault works when the entry is not in the cache."""
ref = cache.CacheReference(self.cache, 'key')
self.cache._KeyExists.return_value = False
ref.SetDefault('/foo')
self.cache._Insert.assert_called_once_with('key', '/foo')
def testSetDefaultExists(self):
"""Verify SetDefault works when the entry is in the cache."""
ref = cache.CacheReference(self.cache, 'key')
self.cache._KeyExists.return_value = True
ref.SetDefault('/foo')
self.assertFalse(self.cache._Insert.called)
class CacheTestCase(cros_test_lib.MockTempDirTestCase):
"""Tests for any type of Cache object."""
def setUp(self):
self.gs_mock = self.StartPatcher(gs_unittest.GSContextMock())
def _testAssign(self):
"""Verify we can assign a file to the cache and get it back out."""
key = ('foo', 'bar')
data = r'text!\nthere'
path = os.path.join(self.tempdir, 'test-file')
osutils.WriteFile(path, data)
with self.cache.Lookup(key) as ref:
self.assertFalse(ref.Exists())
ref.Assign(path)
self.assertTrue(ref.Exists())
self.assertEqual(osutils.ReadFile(ref.path), data)
with self.cache.Lookup(key) as ref:
self.assertTrue(ref.Exists())
self.assertEqual(osutils.ReadFile(ref.path), data)
def _testAssignData(self):
"""Verify we can assign data to the cache and get it back out."""
key = ('foo', 'bar')
data = r'text!\nthere'
with self.cache.Lookup(key) as ref:
self.assertFalse(ref.Exists())
ref.AssignText(data)
self.assertTrue(ref.Exists())
self.assertEqual(osutils.ReadFile(ref.path), data)
with self.cache.Lookup(key) as ref:
self.assertTrue(ref.Exists())
self.assertEqual(osutils.ReadFile(ref.path), data)
def _testRemove(self):
"""Verify we can remove entries from the cache."""
key = ('foo', 'bar')
data = r'text!\nthere'
with self.cache.Lookup(key) as ref:
self.assertFalse(ref.Exists())
ref.AssignText(data)
self.assertTrue(ref.Exists())
ref.Remove()
self.assertFalse(ref.Exists())
class DiskCacheTest(CacheTestCase):
"""Tests for DiskCache."""
def setUp(self):
self.cache = cache.DiskCache(self.tempdir)
testAssign = CacheTestCase._testAssign
testAssignData = CacheTestCase._testAssignData
testRemove = CacheTestCase._testRemove
def testListKeys(self):
"""Verifies that ListKeys() returns any items present in the cache."""
osutils.Touch(os.path.join(self.tempdir, 'file1'))
cache.CacheReference(self.cache, ('key1',)).Assign(
os.path.join(self.tempdir, 'file1'))
osutils.Touch(os.path.join(self.tempdir, 'file2'))
cache.CacheReference(self.cache, ('key2',)).Assign(
os.path.join(self.tempdir, 'file2'))
keys = self.cache.ListKeys()
self.assertEqual(len(keys), 2)
self.assertIn(('key1',), keys)
self.assertIn(('key2',), keys)
def testDeleteStale(self):
"""Ensures that DeleteStale removes a sufficiently old item in the cache."""
osutils.Touch(os.path.join(self.tempdir, 'file1'))
cache_ref = cache.CacheReference(self.cache, ('key1',))
cache_ref.Assign(os.path.join(self.tempdir, 'file1'))
now = datetime.datetime.now()
# 'Now' will be 10 days in the future, but max_age is 20 days. So no items
# should be deleted.
ten_days_ahead = now + datetime.timedelta(days=10)
with mock.patch('chromite.lib.cache.datetime') as mock_datetime:
mock_datetime.datetime.now.return_value = ten_days_ahead
mock_datetime.datetime.fromtimestamp.side_effect = (
datetime.datetime.fromtimestamp)
mock_datetime.timedelta = datetime.timedelta
self.cache.DeleteStale(datetime.timedelta(days=20))
self.assertTrue(cache_ref.Exists())
# Running it again 30 days in the future should delete everything.
thirty_days_ahead = now + datetime.timedelta(days=30)
with mock.patch('chromite.lib.cache.datetime') as mock_datetime:
mock_datetime.datetime.now.return_value = thirty_days_ahead
mock_datetime.datetime.fromtimestamp.side_effect = (
datetime.datetime.fromtimestamp)
mock_datetime.timedelta = datetime.timedelta
self.cache.DeleteStale(datetime.timedelta(days=20))
self.assertFalse(cache_ref.Exists())
class RemoteCacheTest(CacheTestCase):
"""Tests for RemoteCache."""
def setUp(self):
self.cache = cache.RemoteCache(self.tempdir)
testAssign = CacheTestCase._testAssign
testAssignData = CacheTestCase._testAssignData
testRemove = CacheTestCase._testRemove
def testFetchFile(self):
"""Verify we handle file:// URLs."""
key = ('file', 'foo')
data = 'daaaaata'
path = os.path.join(self.tempdir, 'test-file')
url = 'file://%s' % path
osutils.WriteFile(path, data)
with self.cache.Lookup(key) as ref:
self.assertFalse(ref.Exists())
ref.Assign(url)
self.assertTrue(ref.Exists())
self.assertEqual(osutils.ReadFile(ref.path), data)
def testFetchNonGs(self):
"""Verify we fetch remote URLs and save the result."""
def _Fetch(*args, **_kwargs):
# Probably shouldn't assume this ordering, but best way for now.
cmd = args[0]
local_path = cmd[-1]
osutils.Touch(local_path)
self.PatchObject(retry_util, 'RunCurl', side_effect=_Fetch)
schemes = ('ftp', 'http', 'https')
for scheme in schemes:
key = (scheme, 'foo')
url = '%s://some.site.localdomain/file_go_boom' % scheme
with self.cache.Lookup(key) as ref:
self.assertFalse(ref.Exists())
ref.Assign(url)
self.assertTrue(ref.Exists())
def testFetchGs(self):
"""Verify we fetch from Google Storage and save the result."""
# pylint: disable=unused-argument
def _Fetch(_ctx, cmd, capture_output):
# Touch file we tried to copy too.
osutils.Touch(cmd[-1])
self.gs_mock.AddCmdResult(
['cp', '-v', '--', partial_mock.Ignore(), partial_mock.Ignore()],
side_effect=_Fetch)
key = ('gs',)
url = 'gs://some.site.localdomain/file_go_boom'
with self.cache.Lookup(key) as ref:
self.assertFalse(ref.Exists())
ref.Assign(url)
self.assertTrue(ref.Exists())
class TarballCacheTest(CacheTestCase):
"""Tests for TarballCache."""
def setUp(self):
    self.cache = cache.TarballCache(self.tempdir)
testAssign = CacheTestCase._testAssign
testAssignData = CacheTestCase._testAssignData
testRemove = CacheTestCase._testRemove
class UntarTest(cros_test_lib.RunCommandTestCase):
"""Tests cache.Untar()."""
@mock.patch('chromite.lib.cros_build_lib.CompressionExtToType')
def testNoneCompression(self, mock_compression_type):
"""Tests Untar with an uncompressed tarball."""
mock_compression_type.return_value = cros_build_lib.COMP_NONE
cache.Untar('/some/tarball.tar.gz', '/')
self.assertCommandContains(['tar', '-xpf', '/some/tarball.tar.gz'])
@mock.patch('chromite.lib.cros_build_lib.CompressionExtToType')
@mock.patch('chromite.lib.cros_build_lib.FindCompressor')
def testCompression(self, mock_find_compressor, mock_compression_type):
"""Tests Untar with a compressed tarball."""
mock_compression_type.return_value = 'some-compression'
mock_find_compressor.return_value = '/bin/custom/xz'
cache.Untar('/some/tarball.tar.xz', '/')
self.assertCommandContains(
['tar', '-I', '/bin/custom/xz', '-xpf', '/some/tarball.tar.xz'])
@mock.patch('chromite.lib.cros_build_lib.CompressionExtToType')
@mock.patch('chromite.lib.cros_build_lib.FindCompressor')
def testPbzip2Compression(self, mock_find_compressor, mock_compression_type):
"""Tests decompressing a tarball using pbzip2."""
mock_compression_type.return_value = 'some-compression'
mock_find_compressor.return_value = '/bin/custom/pbzip2'
cache.Untar('/some/tarball.tbz2', '/')
self.assertCommandContains(
['tar', '-I', '/bin/custom/pbzip2 --ignore-trailing-garbage=1',
'-xpf', '/some/tarball.tbz2'])
| StarcoderdataPython |
4831581 | #!/usr/bin/env python
# coding=utf-8
# ====================================================
# File Name : pg_degw.py
# Creation Date : 05-09-2018
# Created By : <NAME>
# Contact : <EMAIL>
# ====================================================
from __future__ import print_function, absolute_import
import sys
import os
from argparse import ArgumentParser
import numpy as np
import matplotlib.pyplot as plt
def __datafile_name(mainname, fnpre=None, fnsuf=None):
'''
Generate the filename with specific suffix and prefix of .dat file
Parameters
----------
mainname : str
    fnpre : str
    fnsuf : str
Returns
-------
str : final name of the .dat file
Examples
--------
>>>
'''
DataFile = mainname
if fnpre is not None:
DataFile = fnpre + '_' + DataFile
if fnsuf is not None:
DataFile = DataFile + '_' + fnsuf
DataFile = DataFile + '.dat'
return DataFile
# ==============================
def read_eqpeV(eqpeV_file, use_EKS_x, col=-4):
'''
Read eqpeV_file to get quasi-particle correction
Parameters
----------
eqpeV_file : str
filename to extract the degw data
use_EKS_x : bool
flag for using Kohn-Sham energy level as x-axis
col : int
        this param indicates the location of the y data; by default
        degw is in the 4th column from the end
    Returns
    -------
    int : number of valence bands within the GW band window
    ndarray : x data (band index or KS energy level), one row per band
    ndarray : degw data, one row per band
    list : k-points in fractional coordinates
Examples
--------
>>>
'''
degw_data = []
x_data = []
k_points = []
vb = 0
with open(eqpeV_file, 'r') as h_eqp:
eqp_lines = h_eqp.readlines()
bandl, bandh = eqp_lines[4].split()[-2:]
bandl = int(bandl)
bandh = int(bandh)
nk = int(eqp_lines[3].split()[-1])
nspin = int(eqp_lines[2].split()[-1])
bandgw = bandh - bandl + 1
for ik in range(nk):
degw_data.append([])
x_data.append([])
kline = eqp_lines[10 + ik * (bandgw + 2)]
kint = [int(kline[25 + i * 4:29 + i * 4]) for i in range(3)]
kdiv = int(kline[44:48])
k_points.append([float(x)/kdiv for x in kint])
datalines = eqp_lines[12 + ik * (bandgw + 2):12 + ik * (bandgw + 2) + bandgw]
for line in datalines:
degw_data[-1].append(float(line.split()[col]))
EKS = line.split()[2]
iband = int(line.split()[1])
                # automatically find the index of the top valence band;
                # currently only for semiconductors and insulators, since all
                # k-points then share the same top-valence-band index
if EKS == "0.000" and iband > vb:
vb = iband
if use_EKS_x:
x_data[-1].append(float(EKS))
else:
x_data[-1].append(float(iband))
# arrange the DEGWs of each band in corresponding list, i.e. transpose the data
degw_data = np.transpose(np.array(degw_data))
x_data = np.transpose(np.array(x_data))
nvb = vb - bandl + 1
return nvb, x_data, degw_data, k_points
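# Note (added): the file layout assumed by read_eqpeV, inferred from the
# parsing above rather than from any spec: a fixed header (spin count on
# line 3, k-point count on line 4, band window on line 5), then one block of
# bandgw + 2 lines per k-point whose header stores the k-vector at fixed
# character columns; each data line carries the band index (field 2), the KS
# energy (field 3) and degw in the 4th field from the end.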
# ==============================
def __export_data(x_data, degw_data, nvb, data_index, filename, fnpre, fnsuf):
'''
Export the degw data
Parameters
----------
x_data : ndarray
degw_data : ndarray
nvb : int
data_index : int
filename : str
fnpre : str
fnsuf : str
Returns
-------
None
Examples
--------
>>>
'''
# split data into valence and conduction part for coloring
VB_data = [[], []]
CB_data = [[], []]
for i in range(len(x_data)):
if i < nvb:
data_region = VB_data
else:
data_region = CB_data
for x, degw in zip(x_data[i], degw_data[i]):
data_region[0].append(x)
data_region[1].append(degw)
bandtypes = {"VB": VB_data, "CB": CB_data}
for btype in bandtypes.iterkeys():
OutFile = __datafile_name("degw_%s_%02d" % (btype, data_index), fnpre, fnsuf)
with open(OutFile, 'w') as h_out:
h_out.write("#data from %s\n" % os.path.abspath(filename))
h_out.write("#x-axis could be band index or KS energy level\n")
h_out.write("#x degw(eV)\n")
for x, degw in zip(bandtypes[btype][0], bandtypes[btype][1]):
h_out.write("%7.3f %9.3f\n" % (x, degw))
# ==============================
def __plot_degw(axs, use_EKS_x, f_compare, fnpre, fnsuf):
'''
    Plot the degw with matplotlib. At most two sets of data will be plotted.
Parameters
----------
axs : matplotlib axis object
use_EKS_x : bool
f_compare : bool
fnpre : str
fnsuf : str
Returns
-------
None
Examples
--------
>>>
'''
if use_EKS_x:
axs.set_xlabel("$\epsilon_{KS}$ (eV)", fontsize=14)
else:
axs.set_xlabel("Band index", fontsize=14)
axs.set_ylabel("$\Delta\epsilon$ (eV)", fontsize=14)
if f_compare:
len_datafile = 2
else:
len_datafile = 1
bandtypes = {"VB": 'r', "CB": 'b'}
# check the range of x and y in data
xmins = []
xmaxs = []
ymins = []
ymaxs = []
for i in range(len_datafile):
data_index = i + 1
for btype in bandtypes.iterkeys():
x = []
y = []
DataFile = __datafile_name("degw_%s_%02d" % (btype, data_index), fnpre, fnsuf)
with open(DataFile, 'r') as h_in:
datalines = h_in.readlines()[3:]
for line in datalines:
x.append(float(line.split()[0]))
y.append(float(line.split()[1]))
xmins.append(min(x))
xmaxs.append(max(x))
ymins.append(min(y))
ymaxs.append(max(y))
if i:
axs.plot(x, y, 'o'+bandtypes[btype], markersize=10)
else:
axs.plot(x, y, 'o'+bandtypes[btype], markersize=10, mfc="none")
# for legend only
if i:
axs.plot([1000], [100], 'ok', markersize=10, label='data 2')
else:
axs.plot([1000], [100], 'ok', markersize=10, mfc="none", label='data 1')
# plot zero
ngrid = 200
axs.set_xlim([min(xmins) - 2, max(xmaxs) + 2])
axs.set_ylim([min(ymins) - 0.5, max(ymaxs) + 0.5])
    zero_x = np.linspace(min(xmins) - 2, max(xmaxs) + 2, ngrid)
zero_y = np.zeros(ngrid)
axs.plot(zero_x, zero_y, linestyle="dashed", color="black")
axs.legend()
# ==============================
def __Main(ArgList):
    description = '''Extract the self-energy correction ($\Delta\epsilon$, `degw`) \
from a GAP calculation (case.eqpeV_GW/GW0)'''
parser = ArgumentParser(description=description)
parser.add_argument(dest="filenames", nargs='+', \
help="eqpeV file names. No more than 2 for comparison")
parser.add_argument("-e", dest="f_EKS_x", action="store_true", \
help="Flag to use KS energy level as x-axis")
parser.add_argument("-p", dest="f_plot", action="store_true", \
help="Flag to plot the first two data")
parser.add_argument("--suf", dest="fnsuf", default=None, \
help="Suffix to output files")
parser.add_argument("--pre", dest="fnpre", default=None, \
help="Prefix to output files")
# initialize options as 'opts'
opts = parser.parse_args()
if opts.f_plot:
fig, axs = plt.subplots(1, 1, figsize=(6, 6))
for i in range(len(opts.filenames)):
filename = opts.filenames[i]
nvb, x_data, degw_data, k_points = read_eqpeV(filename, opts.f_EKS_x)
__export_data(x_data, degw_data, nvb, i+1, filename, opts.fnpre, opts.fnsuf)
        # print the CB and VB corrections
print(opts.filenames[i])
print("%3s%-18s%12s%12s" % ("ik", " kvec ", "DEGW_CB", "DEGWVB_VB"))
for ik in range(len(k_points)):
print("%3d%6.3f%6.3f%6.3f%12.6f%12.6f" % (ik+1, \
k_points[ik][0], k_points[ik][1], k_points[ik][2], \
degw_data[nvb][ik] , degw_data[nvb-1][ik]))
if len(opts.filenames) > 1:
f_compare = True
else:
f_compare = False
if opts.f_plot:
__plot_degw(axs, opts.f_EKS_x, f_compare, opts.fnpre, opts.fnsuf)
plt.show()
# ==============================
if __name__ == "__main__":
__Main(sys.argv)
| StarcoderdataPython |
1743396 | """Test Dynalite __init__."""
import homeassistant.components.dynalite.const as dynalite
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, CONF_ROOM
from homeassistant.setup import async_setup_component
from tests.async_mock import call, patch
from tests.common import MockConfigEntry
async def test_empty_config(hass):
"""Test with an empty config."""
assert await async_setup_component(hass, dynalite.DOMAIN, {}) is True
assert len(hass.config_entries.flow.async_progress()) == 0
assert len(hass.config_entries.async_entries(dynalite.DOMAIN)) == 0
async def test_async_setup(hass):
"""Test a successful setup with all of the different options."""
with patch(
"homeassistant.components.dynalite.bridge.DynaliteDevices.async_setup",
return_value=True,
):
assert await async_setup_component(
hass,
dynalite.DOMAIN,
{
dynalite.DOMAIN: {
dynalite.CONF_BRIDGES: [
{
CONF_HOST: "1.2.3.4",
CONF_PORT: 1234,
dynalite.CONF_AUTO_DISCOVER: True,
dynalite.CONF_POLL_TIMER: 5.5,
dynalite.CONF_AREA: {
"1": {
CONF_NAME: "Name1",
dynalite.CONF_CHANNEL: {"4": {}},
dynalite.CONF_PRESET: {"7": {}},
dynalite.CONF_NO_DEFAULT: True,
},
"2": {CONF_NAME: "Name2"},
"3": {
CONF_NAME: "Name3",
dynalite.CONF_TEMPLATE: CONF_ROOM,
},
"4": {
CONF_NAME: "Name4",
dynalite.CONF_TEMPLATE: dynalite.CONF_TIME_COVER,
},
},
dynalite.CONF_DEFAULT: {dynalite.CONF_FADE: 2.3},
dynalite.CONF_ACTIVE: dynalite.ACTIVE_INIT,
dynalite.CONF_PRESET: {
"5": {CONF_NAME: "pres5", dynalite.CONF_FADE: 4.5}
},
dynalite.CONF_TEMPLATE: {
CONF_ROOM: {
dynalite.CONF_ROOM_ON: 6,
dynalite.CONF_ROOM_OFF: 7,
},
dynalite.CONF_TIME_COVER: {
dynalite.CONF_OPEN_PRESET: 8,
dynalite.CONF_CLOSE_PRESET: 9,
dynalite.CONF_STOP_PRESET: 10,
dynalite.CONF_CHANNEL_COVER: 3,
dynalite.CONF_DURATION: 2.2,
dynalite.CONF_TILT_TIME: 3.3,
dynalite.CONF_DEVICE_CLASS: "awning",
},
},
}
]
}
},
)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(dynalite.DOMAIN)) == 1
async def test_async_setup_bad_config1(hass):
"""Test a successful with bad config on templates."""
with patch(
"homeassistant.components.dynalite.bridge.DynaliteDevices.async_setup",
return_value=True,
):
assert not await async_setup_component(
hass,
dynalite.DOMAIN,
{
dynalite.DOMAIN: {
dynalite.CONF_BRIDGES: [
{
CONF_HOST: "1.2.3.4",
dynalite.CONF_AREA: {
"1": {
dynalite.CONF_TEMPLATE: dynalite.CONF_TIME_COVER,
CONF_NAME: "Name",
dynalite.CONF_ROOM_ON: 7,
}
},
}
]
}
},
)
await hass.async_block_till_done()
async def test_async_setup_bad_config2(hass):
"""Test a successful with bad config on numbers."""
host = "1.2.3.4"
with patch(
"homeassistant.components.dynalite.bridge.DynaliteDevices.async_setup",
return_value=True,
):
assert not await async_setup_component(
hass,
dynalite.DOMAIN,
{
dynalite.DOMAIN: {
dynalite.CONF_BRIDGES: [
{
CONF_HOST: host,
dynalite.CONF_AREA: {"WRONG": {CONF_NAME: "Name"}},
}
]
}
},
)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(dynalite.DOMAIN)) == 0
async def test_unload_entry(hass):
"""Test being able to unload an entry."""
host = "1.2.3.4"
entry = MockConfigEntry(domain=dynalite.DOMAIN, data={CONF_HOST: host})
entry.add_to_hass(hass)
with patch(
"homeassistant.components.dynalite.bridge.DynaliteDevices.async_setup",
return_value=True,
):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(dynalite.DOMAIN)) == 1
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as mock_unload:
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert mock_unload.call_count == len(dynalite.ENTITY_PLATFORMS)
expected_calls = [
call(entry, platform) for platform in dynalite.ENTITY_PLATFORMS
]
for cur_call in mock_unload.mock_calls:
assert cur_call in expected_calls
| StarcoderdataPython |
3214589 | # coding=utf-8
"""
Reverse the portion of a linked list from the m-th node to the n-th node.
This is an upgraded version of plain linked-list reversal.
It still puzzled me for two hours,
and once again drove home that a linked list does not care about positions,
only about where the pointers point.
General idea:
    Find prev, the node just before the part to be reversed.
    Reverse that part, while recording bf, the node that followed it before
    the reversal, and its original first node (which becomes the last node
    after the reversal).
    Then point prev at the reversed part,
    and point the new tail at bf.
m = 3, n = 5
input: 0->1->2->3->4->5->6->7->null
The part to reverse is 2->3->4
prev = 1
bf = 5
output: 0->1->4->3->2->5->6->7->null
"""
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: ListNode head is the head of the linked list
@param m: An integer
@param n: An integer
@return: The head of the reversed ListNode
"""
def reverseBetween(self, head, m, n):
# write your code here
if m == n:
return head
        btn = ListNode(0)  # dummy head for the new list
        btn.next = head  # the dummy's next points at head
        prev = btn  # will become the node just before the reversed part
        for _ in range(m - 1):
            prev = prev.next  # walk to that node
        cur, bf = None, None  # reversed sublist head, and the node after the reversed part
        start = prev.next  # first node of the part to reverse
        bf_prev = start  # it becomes the tail, later relinked to bf
        for _ in range(n - m + 1):
            temp = start.next
            start.next = cur
            if _ == (n - m):  # on the last iteration, record the node after the reversed part
                bf = temp
            cur, start = start, temp
        bf_prev.next = bf  # relink the new tail
        prev.next = cur  # relink the node before the reversed part
        return btn.next
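class ListNode(object):  # minimal stand-in; normally supplied by the judge
    def __init__(self, val, next=None):
        self.val = val
        self.next = next
if __name__ == "__main__":
    # Demo sketch: build 0->1->2->3->4->5->6->7 and reverse nodes 3..5.
    head = None
    for v in range(7, -1, -1):
        head = ListNode(v, head)
    node = Solution().reverseBetween(head, 3, 5)
    out = []
    while node:
        out.append(str(node.val))
        node = node.next
    print("->".join(out))  # 0->1->4->3->2->5->6->7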
| StarcoderdataPython |
3276868 | # main.py
from flask import Flask
from flask_restful import Api, Resource
# Api routes
from routes.Main import Main
from routes.v1.Index import Index
from routes.v1.Ship import Ship
app = Flask(__name__)
api = Api(app)
api.add_resource(Main, "/")
api.add_resource(Index, "/v1")
api.add_resource(Ship, "/v1/ship")
if __name__ == "__main__":
app.run(port="5003")
| StarcoderdataPython |
4817275 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
from typing import List
import os
from pathlib import Path
import click
import jinja2
@click.command()
@click.option(
"--folder", help="Path to folder of templates",
)
@click.option("--file", help="Path to template file")
@click.option(
"--data",
help="Path to JSON file with template values",
multiple=True,
required=True,
)
@click.option(
"--output", help="Path to output", default=".",
)
def main(folder: str, file: str, data: List[str], output: str):
"""Generate templates"""
variables = {}
for data_file in data:
with open(data_file, "r") as fp:
variables = {**variables, **json.load(fp)}
if folder is not None:
location = Path(folder)
filenames = glob.glob(f"{folder}/**/*.j2", recursive=True)
elif file is not None:
location = Path(file).parent
filenames = [f"{file}.j2"]
else:
raise Exception("Need to specify either folder or file")
output_path = Path(output)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(str(location)),
autoescape=False,
keep_trailing_newline=True,
)
for filename in filenames:
template_name = Path(filename).relative_to(location)
template = env.get_template(str(template_name))
output = template.stream(**variables)
destination = output_path / os.path.splitext(template_name)[0]
destination.parent.mkdir(parents=True, exist_ok=True)
with destination.open("w") as fp:
output.dump(fp)
# Copy file mode over
source_path = Path(template.filename)
mode = source_path.stat().st_mode
destination.chmod(mode)
if __name__ == "__main__":
main()
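# Example invocation (hypothetical paths): render every *.j2 template under
# templates/ with the values in values.json, writing the results (with file
# modes preserved) under build/:
#   python main.py --folder templates --data values.json --output build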
| StarcoderdataPython |
49439 | # desktop/core/ext-py/greenlet-0.3.1/tests/test_weakref.py
import gc
import greenlet
import weakref
import unittest
class WeakRefTests(unittest.TestCase):
def test_dead_weakref(self):
def _dead_greenlet():
g = greenlet.greenlet(lambda:None)
g.switch()
return g
o = weakref.ref(_dead_greenlet())
gc.collect()
self.assertEquals(o(), None)
def test_inactive_weakref(self):
o = weakref.ref(greenlet.greenlet())
gc.collect()
self.assertEquals(o(), None)
| StarcoderdataPython |
3276868 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
class Migration(DataMigration):
languages = (
('English', 'en'),
('Russian', 'ru'),
)
def forwards(self, orm):
for name, iso639_1 in self.languages:
try:
orm['videos.Language'].objects.get(name=name)
except ObjectDoesNotExist:
orm['videos.Language'].objects.create(name=name, iso639_1=iso639_1)
def backwards(self, orm):
names = map(lambda lang: lang[0], self.languages)
orm['videos.Language'].objects.filter(name__in=names).delete()
models = {
'videos.category': {
'Meta': {'object_name': 'Category', 'ordering': "['title']"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True', 'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'blank': 'True', 'max_length': '200', 'default': "''"}),
'whiteboard': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '255', 'default': "''"})
},
'videos.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso639_1': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'videos.relatedurl': {
'Meta': {'object_name': 'RelatedUrl'},
'description': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '255', 'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']", 'related_name': "'related_urls'"})
},
'videos.speaker': {
'Meta': {'object_name': 'Speaker', 'ordering': "['name']"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'videos.tag': {
'Meta': {'object_name': 'Tag', 'ordering': "['tag']"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'videos.video': {
'Meta': {'object_name': 'Video', 'ordering': "['-recorded', 'title']"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Category']", 'related_name': "'videos'"}),
'copyright_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True', 'default': "''"}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'embed': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Language']", 'null': 'True'}),
'quality_notes': ('django.db.models.fields.TextField', [], {'blank': 'True', 'default': "''"}),
'recorded': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'source_url': ('django.db.models.fields.URLField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Speaker']", 'blank': 'True', 'symmetrical': 'False', 'related_name': "'videos'"}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True', 'default': "''"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Tag']", 'blank': 'True', 'symmetrical': 'False', 'related_name': "'videos'"}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
'video_flv_download_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'video_flv_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'video_flv_url': ('django.db.models.fields.URLField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'video_mp4_download_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'video_mp4_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'video_mp4_url': ('django.db.models.fields.URLField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'video_ogv_download_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'video_ogv_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'video_ogv_url': ('django.db.models.fields.URLField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'video_webm_download_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'video_webm_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'video_webm_url': ('django.db.models.fields.URLField', [], {'null': 'True', 'blank': 'True', 'max_length': '255'}),
'whiteboard': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '255', 'default': "''"})
},
'videos.videourlstatus': {
'Meta': {'object_name': 'VideoUrlStatus'},
'check_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status_code': ('django.db.models.fields.IntegerField', [], {}),
'status_message': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
}
}
complete_apps = ['videos', 'sergey']
symmetrical = True
depends_on = (
('videos', '0004_auto__add_language__add_field_video_language'),
)
| StarcoderdataPython |
3244061 | from tableauscraper import TableauScraper as TS
url = "https://public.tableau.com/views/Covid-19ImpactDashboard/Covid-19Impact?:embed=y&:showVizHome=no&:host_url=https%3A%2F%2Fpublic.tableau.com%2F&:embed_code_version=3&:tabs=no&:toolbar=yes&:animate_transition=yes&:display_static_image=no&:display_spinner=no&:display_overlay=yes&:display_count=yes&:language=en&publish=yes&:loadOrderID=0"
ts = TS()
ts.loads(url)
workbook = ts.getWorkbook()
# I saved all of the raw data directly from the Tableau dashboard.
for t in workbook.worksheets:
name = t.name
name = name.lower().replace(" ", "_")
data = t.data
data.to_csv(f"~/Desktop/data-512-a4-data/raw_data/shelter_animals_raw/{name}.csv")
| StarcoderdataPython |
3359223 | <filename>Tutorial_Kivy_HashLDash/22kivy.py
# 19 - Python Kivy - Criando um Popup
# https://www.youtube.com/watch?v=w0BwoGl18Fk&list=PLsMpSZTgkF5AV1FmALMgW8W-TvrfR3nrs&index=19
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.behaviors.button import ButtonBehavior
from kivy.graphics import Color, Ellipse, Rectangle
from kivy.utils import get_color_from_hex
from kivy.properties import ListProperty
from kivy.uix.popup import Popup
from kivy.uix.image import Image
from kivy.lang import Builder
Builder.load_file('22kivy.kv')
class Gerenciador_de_telas(ScreenManager):
pass
class Menu(Screen):
    # This function builds the popup asking whether we really want to quit the app
    def confirmar_saida(self, *args):
        # BoxLayout for the popup
        box_popup_sair = BoxLayout(
            orientation='vertical', padding=10, spacing=10)
        # BoxLayout for the buttons; note we do not set orientation because the
        # default is horizontal
        box_botoes_popup_sair = BoxLayout(padding=10, spacing=10)
        # Declare the Popup; its content is the BoxLayout it wraps
        popup_sair = Popup(title='Deseja mesmo sair?',
                           content=box_popup_sair,
                           size_hint=(None, None),
                           size=(300, 180))
        # 'Yes' button: note we use the dynamic button and
        # App.get_running_app().stop to quit the app
        botao_sim_popup_sair = Botao_dinamico(
            text='Sim', on_release=App.get_running_app().stop)
        # 'No' button: note we use the dynamic button and
        # popup_sair.dismiss to close the popup
        botao_nao_popup_sair = Botao_dinamico(
            text='Não', on_release=popup_sair.dismiss)
        # Add the buttons to their BoxLayout
        box_botoes_popup_sair.add_widget(botao_sim_popup_sair)
        box_botoes_popup_sair.add_widget(botao_nao_popup_sair)
        # Create the icon with Image
        atenção_icon = Image(source='Tutoriais_Kivy_KivyMD/Icones/atencao.png')
        # Add the icon to the main layout
        box_popup_sair.add_widget(atenção_icon)
        # Add the buttons layout to the main layout
        box_popup_sair.add_widget(box_botoes_popup_sair)
        # Open the popup
        popup_sair.open()
class Botao_dinamico(ButtonBehavior, Label):
cor_dinamica_release = ListProperty(get_color_from_hex('#23a3bc'))
cor_dinamica_press = ListProperty(get_color_from_hex('#7FFF00'))
def __init__(self, **kwargs):
super(Botao_dinamico, self).__init__(**kwargs)
self.atualizar_canvas()
def on_pos(self, *args):
self.atualizar_canvas()
def on_size(self, *args):
self.atualizar_canvas()
    def on_press(self, *args):
        # Swap the press/release colors with a tuple assignment so neither value is lost
        self.cor_dinamica_release, self.cor_dinamica_press = \
            self.cor_dinamica_press, self.cor_dinamica_release
        self.atualizar_canvas()
    def on_release(self, *args):
        # Swap back when the press ends
        self.cor_dinamica_release, self.cor_dinamica_press = \
            self.cor_dinamica_press, self.cor_dinamica_release
        self.atualizar_canvas()
def atualizar_canvas(self, *args):
self.canvas.before.clear()
with self.canvas.before:
Color(rgba=self.cor_dinamica_release)
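            # Two ellipses at the ends plus a rectangle in between draw a pill-shaped button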
Ellipse(size=(self.height, self.height), pos=(self.pos))
Ellipse(size=(self.height, self.height), pos=(
self.x + self.width - self.height, self.y))
Rectangle(size=(self.width - self.height, self.height),
pos=(self.x + self.height / 2.0, self.y))
class Widget_geral(Screen):
    def __init__(self, tarefas=None, **kwargs):
        super().__init__(**kwargs)
        # Avoid a mutable default argument; fall back to an empty list
        for tarefa in (tarefas or []):
self.ids.scroll_para_colocar_as_tarefas.add_widget(
Tarefa_mais_botao_remover(text=tarefa))
def on_pre_enter(self):
Window.bind(on_keyboard=self.voltar)
def voltar(self, window, key, *args):
if key == 27:
App.get_running_app().root.current = 'menu'
return True
def on_pre_leave(self):
Window.unbind(on_keyboard=self.voltar)
def adicionar_nova_tarefa(self):
nova_tarefa = self.ids.texto_da_tarefa.text
self.ids.scroll_para_colocar_as_tarefas.add_widget(
Tarefa_mais_botao_remover(text=nova_tarefa))
self.ids.texto_da_tarefa.text = ''
class Tarefa_mais_botao_remover(BoxLayout):
def __init__(self, text='', **kwargs):
super().__init__(**kwargs)
self.ids.descricao_tarefa.text = text
class HashLDash_Tutorial_App(App):
def build(self):
return Gerenciador_de_telas()
if __name__ == '__main__':
HashLDash_Tutorial_App().run()
| StarcoderdataPython |
3282141 | <filename>Tools/python37/Lib/site-packages/Crypto/Cipher/ChaCha20_Poly1305.py
# ===================================================================
#
# Copyright (c) 2018, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
from binascii import unhexlify
from Crypto.Cipher import ChaCha20
from Crypto.Hash import Poly1305, BLAKE2s
from Crypto.Random import get_random_bytes
from Crypto.Util.number import long_to_bytes
from Crypto.Util.py3compat import _copy_bytes, bord
from Crypto.Util._raw_api import is_buffer
def _enum(**enums):
return type('Enum', (), enums)
_CipherStatus = _enum(PROCESSING_AUTH_DATA=1,
PROCESSING_CIPHERTEXT=2,
PROCESSING_DONE=3)
class ChaCha20Poly1305Cipher(object):
"""ChaCha20-Poly1305 cipher object.
Do not create it directly. Use :py:func:`new` instead.
:var nonce: The nonce with length 8 or 12 bytes
:vartype nonce: byte string
"""
def __init__(self, key, nonce):
"""Initialize a ChaCha20-Poly1305 AEAD cipher object
See also `new()` at the module level."""
self.nonce = _copy_bytes(None, None, nonce)
self._next = (self.update, self.encrypt, self.decrypt, self.digest,
self.verify)
self._authenticator = Poly1305.new(key=key, nonce=nonce, cipher=ChaCha20)
self._cipher = ChaCha20.new(key=key, nonce=nonce)
self._cipher.seek(64) # Block counter starts at 1
self._len_aad = 0
self._len_ct = 0
self._mac_tag = None
self._status = _CipherStatus.PROCESSING_AUTH_DATA
def update(self, data):
"""Protect the associated data.
Associated data (also known as *additional authenticated data* - AAD)
is the piece of the message that must stay in the clear, while
still allowing the receiver to verify its integrity.
An example is packet headers.
The associated data (possibly split into multiple segments) is
fed into :meth:`update` before any call to :meth:`decrypt` or :meth:`encrypt`.
If there is no associated data, :meth:`update` is not called.
:param bytes/bytearray/memoryview assoc_data:
A piece of associated data. There are no restrictions on its size.
"""
if self.update not in self._next:
raise TypeError("update() method cannot be called")
self._len_aad += len(data)
self._authenticator.update(data)
def _pad_aad(self):
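        # RFC 7539 construction: pad the associated data with zero bytes up to a
        # 16-byte boundary before switching to ciphertext processing.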
assert(self._status == _CipherStatus.PROCESSING_AUTH_DATA)
if self._len_aad & 0x0F:
self._authenticator.update(b'\x00' * (16 - (self._len_aad & 0x0F)))
self._status = _CipherStatus.PROCESSING_CIPHERTEXT
def encrypt(self, plaintext, output=None):
"""Encrypt a piece of data.
Args:
plaintext(bytes/bytearray/memoryview): The data to encrypt, of any size.
Keyword Args:
output(bytes/bytearray/memoryview): The location where the ciphertext
is written to. If ``None``, the ciphertext is returned.
Returns:
If ``output`` is ``None``, the ciphertext is returned as ``bytes``.
Otherwise, ``None``.
"""
if self.encrypt not in self._next:
raise TypeError("encrypt() method cannot be called")
if self._status == _CipherStatus.PROCESSING_AUTH_DATA:
self._pad_aad()
self._next = (self.encrypt, self.digest)
result = self._cipher.encrypt(plaintext, output=output)
self._len_ct += len(plaintext)
if output is None:
self._authenticator.update(result)
else:
self._authenticator.update(output)
return result
def decrypt(self, ciphertext, output=None):
"""Decrypt a piece of data.
Args:
ciphertext(bytes/bytearray/memoryview): The data to decrypt, of any size.
Keyword Args:
output(bytes/bytearray/memoryview): The location where the plaintext
is written to. If ``None``, the plaintext is returned.
Returns:
If ``output`` is ``None``, the plaintext is returned as ``bytes``.
Otherwise, ``None``.
"""
if self.decrypt not in self._next:
raise TypeError("decrypt() method cannot be called")
if self._status == _CipherStatus.PROCESSING_AUTH_DATA:
self._pad_aad()
self._next = (self.decrypt, self.verify)
self._len_ct += len(ciphertext)
self._authenticator.update(ciphertext)
return self._cipher.decrypt(ciphertext, output=output)
def _compute_mac(self):
"""Finalize the cipher (if not done already) and return the MAC."""
if self._mac_tag:
assert(self._status == _CipherStatus.PROCESSING_DONE)
return self._mac_tag
assert(self._status != _CipherStatus.PROCESSING_DONE)
if self._status == _CipherStatus.PROCESSING_AUTH_DATA:
self._pad_aad()
if self._len_ct & 0x0F:
self._authenticator.update(b'\x00' * (16 - (self._len_ct & 0x0F)))
self._status = _CipherStatus.PROCESSING_DONE
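        # RFC 7539: append the AAD and ciphertext lengths as 64-bit little-endian
        # integers (hence the byte reversal) before producing the Poly1305 tag.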
self._authenticator.update(long_to_bytes(self._len_aad, 8)[::-1])
self._authenticator.update(long_to_bytes(self._len_ct, 8)[::-1])
self._mac_tag = self._authenticator.digest()
return self._mac_tag
def digest(self):
"""Compute the *binary* authentication tag (MAC).
:Return: the MAC tag, as 16 ``bytes``.
"""
if self.digest not in self._next:
raise TypeError("digest() method cannot be called")
self._next = (self.digest,)
return self._compute_mac()
def hexdigest(self):
"""Compute the *printable* authentication tag (MAC).
This method is like :meth:`digest`.
:Return: the MAC tag, as a hexadecimal string.
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
def verify(self, received_mac_tag):
"""Validate the *binary* authentication tag (MAC).
The receiver invokes this method at the very end, to
check if the associated data (if any) and the decrypted
messages are valid.
:param bytes/bytearray/memoryview received_mac_tag:
This is the 16-byte *binary* MAC, as received from the sender.
:Raises ValueError:
if the MAC does not match. The message has been tampered with
or the key is incorrect.
"""
if self.verify not in self._next:
raise TypeError("verify() cannot be called"
" when encrypting a message")
self._next = (self.verify,)
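        # Compare the tags through keyed BLAKE2s digests with a random key so the
        # equality check does not leak timing information about the expected MAC.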
secret = get_random_bytes(16)
self._compute_mac()
mac1 = BLAKE2s.new(digest_bits=160, key=secret,
data=self._mac_tag)
mac2 = BLAKE2s.new(digest_bits=160, key=secret,
data=received_mac_tag)
if mac1.digest() != mac2.digest():
raise ValueError("MAC check failed")
def hexverify(self, hex_mac_tag):
"""Validate the *printable* authentication tag (MAC).
This method is like :meth:`verify`.
:param string hex_mac_tag:
This is the *printable* MAC.
:Raises ValueError:
if the MAC does not match. The message has been tampered with
or the key is incorrect.
"""
self.verify(unhexlify(hex_mac_tag))
def encrypt_and_digest(self, plaintext):
"""Perform :meth:`encrypt` and :meth:`digest` in one step.
:param plaintext: The data to encrypt, of any size.
:type plaintext: bytes/bytearray/memoryview
:return: a tuple with two ``bytes`` objects:
- the ciphertext, of equal length as the plaintext
- the 16-byte MAC tag
"""
return self.encrypt(plaintext), self.digest()
def decrypt_and_verify(self, ciphertext, received_mac_tag):
"""Perform :meth:`decrypt` and :meth:`verify` in one step.
:param ciphertext: The piece of data to decrypt.
:type ciphertext: bytes/bytearray/memoryview
:param bytes received_mac_tag:
This is the 16-byte *binary* MAC, as received from the sender.
:return: the decrypted data (as ``bytes``)
:raises ValueError:
if the MAC does not match. The message has been tampered with
or the key is incorrect.
"""
plaintext = self.decrypt(ciphertext)
self.verify(received_mac_tag)
return plaintext
def new(**kwargs):
"""Create a new ChaCha20-Poly1305 AEAD cipher.
:keyword key: The secret key to use. It must be 32 bytes long.
:type key: byte string
:keyword nonce:
A value that must never be reused for any other encryption
done with this key. It must be 8 or 12 bytes long.
If not provided, 12 ``bytes`` will be generated randomly
(you can find them back in the ``nonce`` attribute).
:type nonce: bytes, bytearray, memoryview
    :Return: a :class:`Crypto.Cipher.ChaCha20_Poly1305.ChaCha20Poly1305Cipher` object
"""
try:
key = kwargs.pop("key")
except KeyError as e:
raise TypeError("Missing parameter %s" % e)
if len(key) != 32:
raise ValueError("Key must be 32 bytes long")
nonce = kwargs.pop("nonce", None)
if nonce is None:
nonce = get_random_bytes(12)
if len(nonce) not in (8, 12):
raise ValueError("Nonce must be 8 or 12 bytes long")
if not is_buffer(nonce):
raise TypeError("nonce must be bytes, bytearray or memoryview")
if kwargs:
raise TypeError("Unknown parameters: " + str(kwargs))
return ChaCha20Poly1305Cipher(key, nonce)
# Size of a key (in bytes)
key_size = 32
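# Minimal round-trip sketch (illustrative, not part of the original module):
# encrypt with associated data, then decrypt and verify the authentication tag.
if __name__ == "__main__":
    demo_key = get_random_bytes(32)
    enc = new(key=demo_key)
    enc.update(b"header")  # associated data stays in the clear
    ct, tag = enc.encrypt_and_digest(b"attack at dawn")
    dec = new(key=demo_key, nonce=enc.nonce)  # receiver must reuse the sender's nonce
    dec.update(b"header")
    assert dec.decrypt_and_verify(ct, tag) == b"attack at dawn"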
| StarcoderdataPython |
1712002 | <reponame>Ally-s-Lab/miRmedon
import os
def alignment_to_emiRbase(fastq_file, path_to_star, threads, star_ref_dir, path_to_samtools):
params = '''--runThreadN {}
--alignIntronMin 1
--outFilterMultimapNmax 200
--outFilterMatchNmin 12
--outFilterMatchNminOverLread 0.66
--outFilterMismatchNoverLmax 0.08
--seedSearchStartLmax 6
--winAnchorMultimapNmax 2000
--outFilterMultimapScoreRange 0
--outSAMtype BAM Unsorted
--outReadsUnmapped Fastx
--outFilterMismatchNmax 1
--outSAMprimaryFlag AllBestScore
--outWigType None
--outSAMattributes NH AS NM MD'''.format(threads)
os.system('{} --genomeDir {} --readFilesIn {} --outFileNamePrefix {} {}'.format(path_to_star, star_ref_dir, fastq_file, '_', params.replace('\n', ' ')))
os.system('mv _Aligned.out.bam _Aligned.out.FCRC.bam')
os.system('{} view -b -h -F 16 _Aligned.out.FCRC.bam > _Aligned.out.bam'.format(path_to_samtools))
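# Illustrative invocation (all paths below are placeholders, not from the original source):
# alignment_to_emiRbase('sample.fastq', '/usr/local/bin/STAR', 8,
#                       '/refs/emiRbase_star_index', '/usr/local/bin/samtools')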
| StarcoderdataPython |
1654586 | <reponame>teddy-owen/Tools<gh_stars>0
from django.contrib import messages
################################################################################
# Exposed
################################################################################
def flash_alert(request, type, text):
    '''
    (obj, str, str) -> void
    Creates a Bootstrap flash message alert of the given type for the request.
    '''
    if type == "debug":
        messages.debug(request, text, extra_tags='alert alert-primary')
    elif type == "info":
        messages.info(request, text, extra_tags='alert alert-info')
    elif type == "success":
        messages.success(request, text, extra_tags='alert alert-success')
    elif type == "warning":
        messages.warning(request, text, extra_tags='alert alert-warning')
    elif type == "error":
        messages.error(request, text, extra_tags='alert alert-danger')
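# Example (illustrative) use from a hypothetical Django view:
#
#   def save_profile(request):
#       ...
#       flash_alert(request, "success", "Profile saved.")
#       return redirect("profile")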
################################################################################
################################################################################
# Hidden
################################################################################ | StarcoderdataPython |
1648019 | ## TODO: test cases should cover n_class from 3 to 256, ignore_index handling, and speed/memory usage
import random
import numpy as np
import torch
import torch.nn as nn
import torchvision
from label_smooth import LabelSmoothSoftmaxCEV3
torch.manual_seed(15)
random.seed(15)
np.random.seed(15)
torch.backends.cudnn.deterministic = True
class Model(nn.Module):
def __init__(self, n_classes):
super(Model, self).__init__()
net = torchvision.models.resnet18(pretrained=False)
self.conv1 = net.conv1
self.bn1 = net.bn1
self.maxpool = net.maxpool
self.relu = net.relu
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
self.fc = nn.Conv2d(512, n_classes, 3, 1, 1)
def forward(self, x):
feat = self.conv1(x)
feat = self.bn1(feat)
feat = self.relu(feat)
feat = self.maxpool(feat)
feat = self.layer1(feat)
feat = self.layer2(feat)
feat = self.layer3(feat)
feat = self.layer4(feat)
feat = self.fc(feat)
# out = F.interpolate(feat, x.size()[2:], mode='bilinear', align_corners=True)
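        # Global average pooling over H and W yields (batch, n_classes) logits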
out = torch.mean(feat, dim=(2, 3))
return out
c = 2
net1 = Model(c)
# net2 = Model()
# net2.load_state_dict(net1.state_dict())
red = 'mean'
# criteria1 = LovaszSoftmaxV1(reduction='sum', ignore_index=255)
# criteria1 = LovaszSoftmaxV3(reduction='sum', ignore_index=255)
criteria1 = LabelSmoothSoftmaxCEV3(reduction='sum', ignore_index=255)
print(criteria1)
net1.cuda()
# net2.cuda()
net1.train()
# net2.train()
criteria1.cuda()
# criteria2.cuda()
# net1 = net1.half()
optim1 = torch.optim.SGD(net1.parameters(), lr=1e-2)
# optim2 = torch.optim.SGD(net2.parameters(), lr=1e-2)
bs, h, w = 2, 1000, 1000
for it in range(1000):
inten = torch.randn(bs, 3, h, w).cuda()#.half()
# lbs = torch.randint(0, c, (bs, h, w)).cuda()
lbs = torch.randint(0, c, (bs, )).cuda()
# lbs[1, 1, 1] = 255
# lbs[0, 3:100, 2:100] = 255
# lbs[1, 4:70, 28:200] = 255
logits1 = net1(inten)
logits1.retain_grad()
loss1 = criteria1(logits1, lbs)
optim1.zero_grad()
loss1.backward()
optim1.step()
with torch.no_grad():
if (it+1) % 50 == 0:
print('iter: {}, ================='.format(it+1))
| StarcoderdataPython |
3335973 | import os
from unittest.mock import patch, call
import pytest
from dev.tasks.pypi import Pypi
@patch('dev.tasks.pypi.os.environ')
@patch('dev.tasks.pypi.run_command')
def test_up(run_command_mock, environ_mock):
environ_mock.get.return_value = 'abc'
Pypi('upload', extra_args=[])
run_command_mock.assert_has_calls([
call('rm -rf dist'),
call('pip install --upgrade twine build'),
call('python -m build'),
call('python -m twine check dist/*'),
call('python -m twine upload dist/*'),
])
@patch('dev.task.error_console.print')
@patch('dev.tasks.pypi.os.environ')
@patch('dev.tasks.pypi.run_command')
def test_up_missing_command(run_command_mock, environ_mock, console_print_mock):
environ_mock.get.return_value = 'abc'
with pytest.raises(SystemExit):
Pypi('abc', extra_args=[])
run_command_mock.assert_not_called()
console_print_mock.assert_called_once_with('Failed to run [b]Pypi[/] task: Unknown argument [b]abc[/]', style='red')
@patch.dict(os.environ, {'TWINE_USERNAME': 'abc'})
@patch('dev.task.error_console.print')
@patch('dev.tasks.pypi.run_command')
def test_up_missing_password(run_command_mock, console_print_mock):
with pytest.raises(SystemExit):
Pypi('upload', extra_args=[])
run_command_mock.assert_not_called()
console_print_mock.assert_called_once_with(
'Failed to run [b]Pypi[/] task: You need to set TWINE_PASSWORD', style='red'
)
@patch.dict(os.environ, {'TWINE_PASSWORD': '<PASSWORD>'})
@patch('dev.task.error_console.print')
@patch('dev.tasks.pypi.run_command')
def test_up_missing_username(run_command_mock, console_print_mock):
with pytest.raises(SystemExit):
Pypi('upload', extra_args=[])
run_command_mock.assert_not_called()
console_print_mock.assert_called_once_with(
'Failed to run [b]Pypi[/] task: You need to set TWINE_USERNAME', style='red'
)
| StarcoderdataPython |
3203248 | # begin20210418181255
import numpy as np
import pyfftwpp
if __name__ == "__main__":
M, N, dim = 7, 8, 2
x = np.array([0.8, -0.9])[None, :]
y = np.array([-1.1, 1.2])[None, :]
# end20210418181255
#begin20210418181632
m = np.arange(0, M)[:, None]
n = np.arange(0, N)[:, None]
φ_pow_m = np.exp(-2 * 1j * np.pi / M * m)
ψ_pow_n = np.exp(-2 * 1j * np.pi / N * n)
x_pow_m = x ** m
y_pow_n = y ** n
x_hat = (1 - x ** M) / (1 - x * φ_pow_m)
y_hat = (1 - y ** N) / (1 - y * ψ_pow_n)
exp = x_hat[:, None, :] * y_hat[None, :, :]
# end20210418181632
# begin20210418181818
in_ = np.empty_like(exp)
act = np.empty_like(exp)
#end20210418181818
# begin20210418182444
factory = pyfftwpp.PlanFactory().set_estimate()
# end20210418182444
# begin20210418182614
plan = factory.create_plan(2, in_, act, -1)
# end20210418182614
print(f"The following plan was created:\n{plan}")
# begin20210418183338
in_[...] = x_pow_m[:, None, :] * y_pow_n[None, :, :]
# end20210418183338
#begin20210418183425
plan.execute()
#end20210418183425
#begin20210418202605
for exp_, act_ in np.nditer([exp, act]):
print(f"expected = {exp_}, actual = {act_}")
#end20210418202605
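    # Sanity check (added for illustration): the forward FFT of x^m * y^n should
    # match the closed-form geometric sums computed above.
    np.testing.assert_allclose(act, exp)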
| StarcoderdataPython |
3288077 | <filename>xpresso/routing/router.py
import sys
import typing
if sys.version_info < (3, 8):
from typing_extensions import Protocol
else:
from typing import Protocol
import starlette.middleware
from starlette.routing import BaseRoute
from starlette.routing import Router as StarletteRouter
from starlette.types import Receive, Scope, Send
from xpresso.dependencies.models import Dependant
from xpresso.responses import Responses
class _ASGIApp(Protocol):
def __call__(
self,
scope: Scope,
receive: Receive,
send: Send,
) -> typing.Awaitable[None]:
...
_MiddlewareIterator = typing.Iterable[
typing.Tuple[typing.Callable[..., _ASGIApp], typing.Mapping[str, typing.Any]]
]
class Router:
routes: typing.Sequence[BaseRoute]
lifespan: typing.Optional[
typing.Callable[..., typing.AsyncContextManager[None]]
] = None
dependencies: typing.Sequence[Dependant]
tags: typing.Sequence[str]
responses: Responses
include_in_schema: bool
_app: _ASGIApp
def __init__(
self,
routes: typing.Sequence[BaseRoute],
*,
middleware: typing.Optional[
typing.Sequence[starlette.middleware.Middleware]
] = None,
lifespan: typing.Optional[
typing.Callable[..., typing.AsyncContextManager[None]]
] = None,
redirect_slashes: bool = True,
default: typing.Optional[_ASGIApp] = None,
dependencies: typing.Optional[typing.Sequence[Dependant]] = None,
tags: typing.Optional[typing.List[str]] = None,
responses: typing.Optional[Responses] = None,
include_in_schema: bool = True,
) -> None:
self.routes = list(routes)
self.lifespan = lifespan
self._router = StarletteRouter(
routes=self.routes,
redirect_slashes=redirect_slashes,
default=default, # type: ignore[arg-type]
lifespan=lifespan, # type: ignore[arg-type]
)
self.dependencies = list(dependencies or [])
self.tags = list(tags or [])
self.responses = dict(responses or {})
self.include_in_schema = include_in_schema
self._app = self._router.__call__
if middleware is not None:
for cls, options in typing.cast(_MiddlewareIterator, reversed(middleware)):
self._app = cls(app=self._app, **options)
async def __call__(
self,
scope: Scope,
receive: Receive,
send: Send,
) -> None:
await self._app(scope, receive, send) # type: ignore[arg-type,call-arg,misc]
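# Illustrative construction (endpoint below is made up; plain Starlette routes
# satisfy the BaseRoute sequence this Router expects):
#
#   from starlette.responses import PlainTextResponse
#   from starlette.routing import Route
#
#   async def health(request):
#       return PlainTextResponse("ok")
#
#   router = Router(routes=[Route("/health", health)])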
| StarcoderdataPython |
1729842 | <reponame>cleiver/codeandtalk.com
#!/usr/bin/env python3
import os, sys, json, datetime
# read all the events
# list the ones whose youtube value is not "-" but that do NOT have the video directory.
# list the ones that have no youtube entry, or whose youtube entry is empty.
# Only show events that have already finished.
with open(os.path.join('html', 'cat.json')) as fh:
cat = json.load(fh)
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d')
no_videos = ''
no_youtube = ''
on_vimeo = ''
for e in sorted(cat['events'].values(), key=lambda e: e['event_start'], reverse=True):
#exit(e)
if e.get('videos_url'):
continue
youtube = e.get('youtube')
vimeo = e.get('vimeo')
if e['event_end'] > now_str:
if youtube:
exit("ERROR. There is a youtube entry in a future event {}".format(e['nickname']))
continue
if youtube:
if youtube != '-':
if not os.path.exists('data/videos/' + e['nickname']):
no_videos += "--list {:30} -d {} -e {}\n".format( youtube, e['event_start'], e['nickname'])
elif vimeo:
on_vimeo += "vimeo {} {}\n".format( e['event_start'], e['nickname'] )
else:
no_youtube += "{} {}\n".format( e['event_start'], e['nickname'] )
if no_videos:
print("Has youtube ID but videos were not included")
print(no_videos)
if on_vimeo:
print("On vimeo")
print(on_vimeo)
if no_youtube:
print("Has no youtube ID")
print(no_youtube)
# vim: expandtab
| StarcoderdataPython |
4829008 | <reponame>urchinpro/L2-forms
import sys
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
import simplejson
from django.core.management.base import OutputWrapper
from django.db import models
import slog.models as slog
TESTING = 'test' in sys.argv[1:] or 'jenkins' in sys.argv[1:]
class AgeCache(models.Model):
n = models.IntegerField(db_index=True)
t = models.IntegerField(db_index=True)
s = models.CharField(max_length=30)
def __str__(self):
return self.s
class Individual(models.Model):
family = models.CharField(max_length=120, blank=True, help_text="Фамилия", db_index=True)
name = models.CharField(max_length=120, blank=True, help_text="Имя", db_index=True)
patronymic = models.CharField(max_length=120, blank=True, help_text="Отчество", db_index=True)
birthday = models.DateField(help_text="Дата рождения", db_index=True)
sex = models.CharField(max_length=2, default="м", help_text="Пол", db_index=True)
def join_individual(self, b: 'Individual', out: OutputWrapper = None):
if out:
out.write("Карт для переноса: %s" % Card.objects.filter(individual=b).count())
slog.Log(key=str(self.pk), type=2002,
body=simplejson.dumps({"Сохраняемая запись": str(self), "Объединяемая запись": str(b)}),
user=None).save()
for c in Card.objects.filter(individual=b):
c.individual = self
c.save()
b.delete()
def sync_with_rmis(self, out: OutputWrapper = None, c=None):
if out:
out.write("Обновление данных для: %s" % self.fio(full=True))
if c is None:
from rmis_integration.client import Client
c = Client()
ok = False
has_rmis = False
rmis_uid = ""
if Card.objects.filter(individual=self, base__is_rmis=True).exists():
rmis_uid = Card.objects.filter(individual=self, base__is_rmis=True)[0].number
ok = has_rmis = True
if out:
out.write("Есть РМИС запись: %s" % rmis_uid)
if not ok:
docs = Document.objects.filter(individual=self).exclude(document_type__check_priority=0).order_by(
"-document_type__check_priority")
for document in docs:
s = c.patients.search_by_document(document)
if len(s) > 0:
rmis_uid = s[0]
ok = True
if out:
out.write("Физ.лицо найдено по документу: %s -> %s" % (document, rmis_uid))
break
if ok:
data = c.patients.get_data(rmis_uid)
upd = self.family != data["family"] or self.name != data["name"] or self.patronymic != data[
"patronymic"] or (self.birthday != data["birthday"] and data["birthday"] is not None)
if upd:
prev = str(self)
self.family = data["family"]
self.name = data["name"]
self.patronymic = data["patronymic"]
if data["birthday"] is not None:
self.birthday = data["birthday"]
self.sex = data["sex"]
self.save()
if out:
out.write("Обновление данных: %s" % self.fio(full=True))
slog.Log(key=str(self.pk), type=2003,
body=simplejson.dumps({"Новые данные": str(self), "Не актуальные данные": prev}),
user=None).save()
if not ok:
query = {"surname": self.family, "name": self.name, "patrName": self.patronymic,
"birthDate": self.birthday.strftime("%Y-%m-%d")}
rows = c.patients.client.searchIndividual(**query)
if len(rows) == 1:
rmis_uid = rows[0]
ok = True
if out:
out.write("Физ.лицо найдено по ФИО и д.р.: %s" % rmis_uid)
if not has_rmis and rmis_uid and rmis_uid != '':
ex = Card.objects.filter(number=rmis_uid, is_archive=False, base__is_rmis=True)
if ex.exists():
for e in ex:
self.join_individual(e.individual, out)
s = str(c.patients.create_rmis_card(self, rmis_uid))
if out:
out.write("Добавление РМИС карты -> %s" % s)
save_docs = []
if ok and rmis_uid != "" and Card.objects.filter(individual=self, base__is_rmis=True, is_archive=False).exists():
pat_data = c.patients.extended_data(rmis_uid)
cards = Card.objects.filter(individual=self, base__is_rmis=True, is_archive=False)
for card_i in cards:
c.patients.sync_card_data(card_i, out)
def get_key(d: dict, val):
r = [key for key, v in d.items() if v == val]
if len(r) > 0:
return r[0]
return None
if out:
out.write("Типы документов: %s" % simplejson.dumps(c.patients.local_types))
for document_object in pat_data["identifiers"] or []:
k = get_key(c.patients.local_types, document_object["type"])
if k and document_object["active"]:
if out:
out.write("Тип: %s -> %s (%s)" % (document_object["type"], k, document_object["active"]))
data = dict(document_type=DocumentType.objects.get(pk=k),
serial=document_object["series"] or "",
number=document_object["number"] or "",
date_start=document_object["issueDate"],
date_end=document_object["expiryDate"],
who_give=(document_object["issueOrganization"] or {"name": ""})["name"] or "",
individual=self,
is_active=True)
rowss = Document.objects.filter(document_type=data['document_type'], individual=self, from_rmis=True)
if rowss.exclude(serial=data["serial"]).exclude(number=data["number"]).filter(
card__isnull=True).exists():
Document.objects.filter(document_type=data['document_type'], individual=self, from_rmis=True).delete()
docs = Document.objects.filter(document_type=data['document_type'],
serial=data['serial'],
number=data['number'], from_rmis=True)
if not docs.exists():
doc = Document(**data)
doc.save()
if out:
out.write("Добавление докумена: %s" % doc)
kk = "%s_%s_%s" % (doc.document_type.pk, doc.serial, doc.number)
save_docs.append(kk)
continue
else:
to_delete = []
has = []
ndocs = {}
for d in docs:
kk = "%s_%s_%s" % (d.document_type.pk, d.serial, d.number)
if out:
out.write("Checking: %s" % kk)
if kk in has:
if out:
out.write("to delete: %s" % d.pk)
to_delete.append(d.pk)
if Card.objects.filter(polis=d).exists():
for c in Card.objects.filter(polis=d):
c.polis = ndocs[kk]
c.save()
else:
if out:
out.write("To has: %s" % d.pk)
has.append(kk)
save_docs.append(kk)
ndocs[kk] = d
Document.objects.filter(pk__in=to_delete).delete()
docs = Document.objects.filter(document_type=data['document_type'],
serial=data['serial'],
number=data['number'],
individual=self)
for d in docs:
if d.date_start != data["date_start"]:
d.date_start = data["date_start"]
d.save()
if out:
out.write("Update date_start: %s" % d.date_start)
if d.date_end != data["date_end"]:
d.date_end = data["date_end"]
d.save()
if out:
out.write("Update date_end: %s" % d.date_end)
if d.who_give != data["who_give"]:
d.who_give = data["who_give"]
d.save()
if out:
out.write("Update who_give: %s" % d.who_give)
if out:
out.write("Данные для документов верны: %s" % [str(x) for x in docs])
docs = Document.objects.filter(document_type=data['document_type'],
document_type__title__in=['СНИЛС', 'Паспорт гражданина РФ',
'Полис ОМС'],
serial=data['serial'],
number=data['number']).exclude(individual=self).exclude(number="")
if docs.exists():
if out:
out.write("Объединение записей физ.лиц")
for doc in docs:
self.join_individual(doc.individual, out)
to_delete_pks = []
for d in Document.objects.filter(individual=self, from_rmis=True):
kk = "%s_%s_%s" % (d.document_type.pk, d.serial, d.number)
if out:
out.write("TD %s %s %s" % (kk, kk not in save_docs, save_docs,))
if kk not in save_docs:
to_delete_pks.append(d.pk)
Document.objects.filter(pk__in=to_delete_pks).delete()
else:
if out:
out.write("Физ.лицо не найдено в РМИС")
return ok
def bd(self):
return "{:%d.%m.%Y}".format(self.birthday)
def age(self, iss=None, days_monthes_years=False):
"""
        Computes the age: full years by default, or a (days, months, years) tuple
        when days_monthes_years=True; the reference date is today, or the sample
        receive/confirmation date when iss is given.
"""
if iss is None or (not iss.tubes.exists() and not iss.time_confirmation) or \
((not iss.tubes.exists() or not iss.tubes.filter(
time_recive__isnull=False).exists()) and not iss.research.is_paraclinic):
today = date.today()
elif iss.time_confirmation and iss.research.is_paraclinic or not iss.tubes.exists():
today = iss.time_confirmation.date()
else:
today = iss.tubes.filter(time_recive__isnull=False).order_by("-time_recive")[0].time_recive.date()
born = self.birthday
try:
birthday = born.replace(year=today.year)
except ValueError:
birthday = born.replace(year=today.year, month=born.month + 1, day=1)
if birthday > today:
if days_monthes_years:
rd = relativedelta(today, born)
return rd.days, rd.months, rd.years
return today.year - born.year
else:
if days_monthes_years:
rd = relativedelta(today, born)
return rd.days, rd.months, rd.years
return today.year - born.year
def age_s(self, iss=None, direction=None) -> str:
"""
        Builds the age display string, e.g. "10 лет", "101 год".
        :return: the formatted age string
"""
if direction is not None:
from directions.models import Issledovaniya
iss = None
i = Issledovaniya.objects.filter(tubes__time_recive__isnull=False, napravleniye=direction) \
.order_by("-tubes__time_recive")
if i.exists():
iss = i[0]
elif Issledovaniya.objects.filter(research__is_paraclinic=True, napravleniye=direction,
time_confirmation__isnull=False):
iss = Issledovaniya.objects.filter(research__is_paraclinic=True, napravleniye=direction,
time_confirmation__isnull=False) \
.order_by("-time_confirmation")[0]
days, monthes, years = self.age(iss=iss, days_monthes_years=True)
if years > 0:
age = years
ages = AgeCache.objects.filter(n=age, t=0)
if ages.exists():
r = ages[0].s
else:
import pymorphy2
morph = pymorphy2.MorphAnalyzer()
if age == 0:
_let = morph.parse("лет ")[0]
elif age < 5:
_let = morph.parse("год")[0]
elif age <= 20:
_let = morph.parse("лет ")[0]
elif 5 > age % 10 > 0:
_let = morph.parse("год")[0]
else:
_let = morph.parse("лет ")[0]
r = "{0} {1}".format(age, _let.make_agree_with_number(age).word).strip()
AgeCache(n=age, t=0, s=r).save()
elif monthes > 0:
age = monthes
ages = AgeCache.objects.filter(n=age, t=1)
if ages.exists():
r = ages[0].s
else:
import pymorphy2
morph = pymorphy2.MorphAnalyzer()
if age == 0:
_let = morph.parse("месяцев ")[0]
elif age == 1:
_let = morph.parse("месяц ")[0]
elif age < 5:
_let = morph.parse("месяца ")[0]
else:
_let = morph.parse("месяцев ")[0]
r = "{0} {1}".format(age, _let.make_agree_with_number(age).word).strip()
AgeCache(n=age, t=1, s=r).save()
else:
age = days
ages = AgeCache.objects.filter(n=age, t=2)
if ages.exists():
r = ages[0].s
else:
import pymorphy2
morph = pymorphy2.MorphAnalyzer()
if age == 0:
_let = morph.parse("дней ")[0]
elif age == 1:
_let = morph.parse("день ")[0]
elif age < 5:
_let = morph.parse("дня ")[0]
elif age <= 20:
_let = morph.parse("дней ")[0]
elif 5 > age % 10 > 0:
_let = morph.parse("день")[0]
else:
_let = morph.parse("дней ")[0]
r = "{0} {1}".format(age, _let.make_agree_with_number(age).word).strip()
AgeCache(n=age, t=2, s=r).save()
return r
def fio(self, short=False, dots=False, full=False, direction=None, npf=False):
if not short:
if full:
r = "{0} {1} {2}, {5}, {3:%d.%m.%Y} ({4})".format(self.family, self.name, self.patronymic,
self.birthday, self.age_s(direction=direction),
self.sex)
elif not npf:
r = "{} {} {}".format(self.family, self.name, self.patronymic).strip()
else:
r = "{} {} {}".format(self.name, self.patronymic, self.family).strip()
else:
def first_letter_not_blank(s):
if len(s) > 0:
return " " + s[0] + ("." if dots else "")
return ""
r = "{0}{1}".format(self.family,
first_letter_not_blank(self.name) + first_letter_not_blank(self.patronymic).replace(" ",
"" if not dots else " "))
return r.strip()
def __str__(self):
return self.fio(full=True)
def check_rmis(self, update=True, client=None):
from rmis_integration.client import Client
if client is None:
client = Client()
rmis_id = client.patients.get_rmis_id_for_individual(individual=self)
if rmis_id and rmis_id != 'NONERMIS':
from directions.models import Napravleniya
Napravleniya.objects.filter(client__individual=self, rmis_number='NONERMIS').update(rmis_number=None)
return rmis_id
def get_rmis_uid(self):
if not Card.objects.filter(base__is_rmis=True, is_archive=False, individual=self).exists():
return self.check_rmis()
return self.check_rmis(False)
def get_rmis_uid_fast(self):
if Card.objects.filter(base__is_rmis=True, is_archive=False, individual=self).exists():
return Card.objects.filter(base__is_rmis=True, is_archive=False, individual=self)[0].number
return ""
class Meta:
verbose_name = 'Физическое лицо'
verbose_name_plural = 'Физические лица'
class DocumentType(models.Model):
title = models.CharField(max_length=60, help_text="Название типа документа")
check_priority = models.IntegerField(default=0,
help_text="Приоритет проверки документа (чем больше число - тем больше (сильнее) приоритет)")
def __str__(self):
return "{} | {} | ^{}".format(self.pk, self.title, self.check_priority)
class Meta:
verbose_name = 'Вид документа'
verbose_name_plural = 'Виды документов'
class Document(models.Model):
document_type = models.ForeignKey(DocumentType, help_text="Тип документа", db_index=True, on_delete=models.CASCADE)
serial = models.CharField(max_length=30, blank=True, help_text="Серия")
number = models.CharField(max_length=30, blank=True, help_text="Номер")
individual = models.ForeignKey(Individual, help_text="Пациент", db_index=True, on_delete=models.CASCADE)
is_active = models.BooleanField(default=True, blank=True)
date_start = models.DateField(help_text="Дата начала действия докумена", blank=True, null=True)
date_end = models.DateField(help_text="Дата окончания действия докумена", blank=True, null=True)
who_give = models.TextField(default="", blank=True)
from_rmis = models.BooleanField(default=True, blank=True)
def __str__(self):
return "{0} {1} {2}, Активен - {3}, {4}".format(self.document_type, self.serial, self.number,
self.is_active, self.individual)
class Meta:
verbose_name = 'Документ'
verbose_name_plural = 'Документы'
class CardBase(models.Model):
title = models.CharField(max_length=50, help_text="Полное название базы")
short_title = models.CharField(max_length=4, help_text="Краткий код базы", db_index=True)
is_rmis = models.BooleanField(help_text="Это РМИС?", default=False)
hide = models.BooleanField(help_text="Скрыть базу", default=False)
history_number = models.BooleanField(help_text="Ввод номера истории", default=False)
internal_type = models.BooleanField(help_text="Внутренний тип карт", default=False)
assign_in_search = models.ForeignKey("clients.CardBase", related_name="assign_in_search_base",
help_text="Показывать результаты в поиске вместе с этой базой", null=True,
blank=True, default=None,
on_delete=models.SET_NULL)
order_weight = models.SmallIntegerField(default=0)
def __str__(self):
return "{0} - {1}".format(self.title, self.short_title)
class Meta:
verbose_name = 'База карт'
verbose_name_plural = 'Базы карт'
class Card(models.Model):
number = models.CharField(max_length=20, blank=True, help_text="Идетификатор карты", db_index=True)
base = models.ForeignKey(CardBase, help_text="База карты", db_index=True, on_delete=models.PROTECT)
individual = models.ForeignKey(Individual, help_text="Пациент", db_index=True, on_delete=models.CASCADE)
is_archive = models.BooleanField(default=False, blank=True, db_index=True)
polis = models.ForeignKey(Document, help_text="Документ для карты", blank=True, null=True, default=None,
on_delete=models.SET_NULL)
main_diagnosis = models.CharField(max_length=36, blank=True, default='', help_text="Основной диагноз",
db_index=True)
main_address = models.CharField(max_length=128, blank=True, default='', help_text="Адрес регистрации")
fact_address = models.CharField(max_length=128, blank=True, default='', help_text="Адрес факт. проживания")
    mother = models.ForeignKey('self', related_name='mother_p', help_text="Мать", blank=True, null=True, default=None,
                               on_delete=models.SET_NULL)
    father = models.ForeignKey('self', related_name='father_p', help_text="Отец", blank=True, null=True, default=None,
                               on_delete=models.SET_NULL)
    curator = models.ForeignKey('self', related_name='curator_p', help_text="Опекун", blank=True, null=True, default=None,
                                on_delete=models.SET_NULL)
    curator_doc_auth = models.CharField(max_length=255, blank=True, default='', help_text="Документ-основание опекуна")
    agent = models.ForeignKey('self', related_name='agent_p', help_text="Представитель (из учреждения, родственник)", blank=True, null=True, default=None,
                              on_delete=models.SET_NULL)
    agent_doc_auth = models.CharField(max_length=255, blank=True, default='', help_text="Документ-основание опекуна")
payer = models.ForeignKey('self', related_name='payer_p', help_text="Плательщик", blank=True, null=True, default=None,
on_delete=models.SET_NULL)
def __str__(self):
return "{0} - {1}, {2}, Архив - {3}".format(self.number, self.base, self.individual, self.is_archive)
def number_with_type(self):
return "{}{}".format(self.number, (" " + self.base.short_title) if not self.base.is_rmis else "")
    def get_phones(self):
        numbers = [p.normalize_number() for p in
                   Phones.objects.filter(card__individual=self.individual, card__is_archive=False)]
        return list(set(n for n in numbers if n != ""))
# def full_type_card(self):
# return "{}".format(self.base.title)
def short_type_card(self):
return "{}".format(self.base.short_title)
class Meta:
verbose_name = 'Карта'
verbose_name_plural = 'Карты'
def clear_phones(self, ts):
to_delete = [x.pk for x in Phones.objects.filter(card=self) if x.number not in ts]
Phones.objects.filter(pk__in=to_delete).delete()
def add_phone(self, t: str):
if not t:
return
p, created = Phones.objects.get_or_create(card=self, number=t)
p.normalize_number()
@staticmethod
def next_l2_n():
last_l2 = Card.objects.filter(base__internal_type=True).extra(
select={'numberInt': 'CAST(number AS INTEGER)'}
).order_by("-numberInt").first()
n = 0
if last_l2:
n = last_l2.numberInt
return n + 1
@staticmethod
def add_l2_card(individual: [Individual, None]=None, card_orig: ['Card', None]=None, distinct=True):
if distinct and card_orig \
and Card.objects.filter(individual=card_orig.individual, base__internal_type=True).exists():
return
if not card_orig and not individual:
return
c = Card(number=Card.next_l2_n(), base=CardBase.objects.filter(internal_type=True).first(),
individual=individual if individual else card_orig.individual, polis=None if not card_orig else card_orig.polis,
main_diagnosis='' if not card_orig else card_orig.main_diagnosis,
main_address='' if not card_orig else card_orig.main_address,
fact_address='' if not card_orig else card_orig.fact_address)
c.save()
print('add_l2_card', c)
return c
class Phones(models.Model):
card = models.ForeignKey(Card, help_text="Карта", db_index=True, on_delete=models.CASCADE)
number = models.CharField(max_length=20, help_text='Номер телефона')
normalized_number = models.CharField(max_length=20, blank=True, default='', help_text='(NORMALIZED) Номер телефона')
def normalize_number(self):
n = self.nn(self.number)
if self.normalized_number != n:
self.normalized_number = n
self.save()
return n
@staticmethod
def nn(n):
from string import digits
n = n.replace("+7", "8")
n = ''.join(c for c in n if c in digits)
if len(n) == 10 and n[0] == "9":
n = "8" + n
if len(n) == 11 and n[0] == "7":
n = "8" + n[1:]
return n
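    # e.g. nn("+7 (914) 123-45-67") -> "89141234567"; nn("9141234567") -> "89141234567"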
def __str__(self):
return "{0}: {1}".format(self.card, self.number)
class Meta:
verbose_name = 'Телефон'
verbose_name_plural = 'Телефоны'
| StarcoderdataPython |
3274693 | from settings import WATCHED_SOURCES
from parsers import default
from importlib import import_module
PARSERS = {}
for source, parsers in WATCHED_SOURCES.items():
parsers = [parsers] if type(parsers) == str else parsers
parser_modules = []
for module_name in parsers:
if module_name:
try:
                # importlib performs the same dynamic import without exec/eval
                parser_modules.append(import_module('parsers.%s' % module_name))
except ImportError:
print('[X] Parser module not found! %s' % module_name)
else:
parser_modules.append(default)
PARSERS[source] = tuple(parser_modules)
def get_sources_info():
"""/var/log/system.log : for sudo,auth events
port 5900 : for vnc events
port 22 : for ssh events
"""
return '\n'.join(
'{0}: {1}'.format(
('port %s' % source if type(source) == int else source).ljust(25),
'for %s events' % ','.join(
f.__name__.split('.', 1)[-1] # parsers.ssh -> ssh
for f in parsers
)
)
for source, parsers in PARSERS.items()
)
def parse_line(line, source=None):
for parser in PARSERS[source]:
alert_type, title, content = parser.parse(line, source)
if alert_type:
return (alert_type, title, content)
return (None, '', '')
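# Illustrative: parse_line(line, source=22) runs the parsers registered for
# port 22 (ssh) and returns the first (alert_type, title, content) match,
# or (None, '', '') when no parser recognizes the line.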
| StarcoderdataPython |