id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5101460 | <gh_stars>0
from skimage import data, filters, feature
from skimage.morphology import disk
import matplotlib.pyplot as plt
def ex_1():
    """Compare five edge detectors (Sobel, Roberts, Scharr, Prewitt, Canny) on the camera image."""
    img = data.camera()
    # (subplot position is derived from the panel index: 231..235)
    panels = [
        ('sobel', filters.sobel(img)),
        ('robert', filters.roberts(img)),
        ('scharr', filters.scharr(img)),
        ('prewitt', filters.prewitt(img)),
        ('canny', feature.canny(img)),
    ]
    for position, (title, edge_map) in enumerate(panels, start=231):
        plt.subplot(position)
        plt.imshow(edge_map, plt.cm.gray)
        plt.title(title)
    plt.show()
def ex_2():  # gabor filter
    """Apply a Gabor filter (frequency 0.6) and show the real/imaginary responses."""
    img = data.camera()
    # FIX: `filters.gabor_filter` was deprecated and later removed from
    # scikit-image; the current API name is `filters.gabor` (same signature
    # and same (real, imag) return pair).
    filt_real, filt_imag = filters.gabor(img, frequency=0.6)
    plt.figure('gabor', figsize=(8, 8))
    plt.subplot(121)
    plt.title('filt_real')
    plt.imshow(filt_real, plt.cm.gray)
    plt.subplot(122)
    plt.title('filt_imag')
    plt.imshow(filt_imag, plt.cm.gray)
    plt.show()
def ex_3():  # gaussian filter
    """Blur the astronaut image with two Gaussian sigmas and show both results."""
    img = data.astronaut()
    # FIX: `filters.gaussian_filter` was deprecated and later removed from
    # scikit-image; the current API name is `filters.gaussian`.
    edges1 = filters.gaussian(img, sigma=0.4)
    edges2 = filters.gaussian(img, sigma=5)
    plt.figure('gaussian', figsize=(8, 8))
    plt.subplot(121)
    plt.imshow(edges1, plt.cm.gray)
    plt.title('sigma = 0.4')
    plt.subplot(122)
    plt.imshow(edges2, plt.cm.gray)
    plt.title('sigma = 5')
    plt.show()
def ex_4():
    """Median-filter the camera image with two disk radii and show both results."""
    img = data.camera()
    plt.figure('median')
    for position, radius in ((121, 5), (122, 9)):
        plt.subplot(position)
        plt.imshow(filters.median(img, disk(radius)), plt.cm.gray)
    plt.show()
def ex_5():
    """Show horizontal, vertical and combined Sobel edge responses."""
    img = data.camera()
    panels = (
        (131, 'sobel_h', filters.sobel_h(img)),
        (132, 'sobel_v', filters.sobel_v(img)),
        (133, 'sobel', filters.sobel(img)),
    )
    plt.figure('sobel_v_h')
    for position, title, edge_map in panels:
        plt.subplot(position)
        plt.title(title)
        plt.imshow(edge_map, plt.cm.gray)
    plt.show()
def ex_6():
    """Roberts cross edge detection with the negative-diagonal kernel.

    Kernel:
         0  1
        -1  0
    """
    img = data.camera()
    filtered = filters.roberts_neg_diag(img)
    plt.figure('filters')
    for position, title, image in ((121, 'origin image', img),
                                   (122, 'filtered image', filtered)):
        plt.subplot(position)
        plt.title(title)
        plt.imshow(image, plt.cm.gray)
    plt.show()
def ex_7():
    """Roberts cross edge detection with the positive-diagonal kernel.

    Kernel:
        1  0
        0 -1
    """
    img = data.camera()
    filtered = filters.roberts_pos_diag(img)
    plt.figure('filters')
    for position, title, image in ((121, 'origin image', img),
                                   (122, 'filtered image', filtered)):
        plt.subplot(position)
        plt.title(title)
        plt.imshow(image, plt.cm.gray)
    plt.show()
if __name__ == '__main__':
    ex_7()
| StarcoderdataPython |
5029650 | <reponame>yessGlory17/2ImageDifferent
import cv2
import numpy
class Resim:
    """Thin wrapper around an OpenCV image loaded from disk ("Resim" = "Image")."""

    def __init__(self, image):
        """Load the image at path *image* and record its dimensions."""
        self.src = image
        img = cv2.imread(image)
        self.image = img
        # cv2/numpy shapes are (rows, cols, channels) == (height, width, channels)
        h, w, c = img.shape
        self.height = h
        self.width = w
        # self.channels = c

    def resimBoyutunuGetir(self):
        """Return the total pixel count (width * height)."""
        return self.width * self.height

    def siyahBeyaz(self):
        # FIX: the method was declared without `self`, so calling it on an
        # instance raised TypeError. Behavior is otherwise unchanged (stub).
        return ""

    def goster(self):
        """Display the image in a window until a key is pressed."""
        cv2.imshow("image", self.image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def yenidenBoyutlandir(self, genislik, yukseklik):
        """Resize to (genislik x yukseklik) pixels and refresh the stored dimensions."""
        self.image = cv2.resize(
            self.image, (genislik, yukseklik), interpolation=cv2.INTER_AREA)
        # FIX: shape[0] is the row count (height) and shape[1] the column
        # count (width); the original assigned them the other way around,
        # disagreeing with __init__ and leaving width/height swapped.
        h, w, c = self.image.shape
        self.height = h
        self.width = w
| StarcoderdataPython |
1775246 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from imdb import IMDb
from cinemanio.core.models import Movie, Person, Genre, Language, Country
from cinemanio.sites.exceptions import PossibleDuplicate, NothingFound, WrongValue
from cinemanio.sites.models import SitesBaseModel
class UrlMixin:
    """Adds a ``url`` property built from ``self.link`` and a zero-padded ``self.id``."""

    @property
    def url(self):
        # IMDb identifiers are zero-padded to seven digits.
        padded = str(self.id).zfill(7)
        return self.link.format(id=padded)
class ImdbBaseManager(models.Manager):
    """Base manager linking a local instance (movie/person) to its IMDb record.

    Subclasses implement the search strategies; ``create_for`` drives them:
    first match through already-linked related roles, then a plain
    title/name search.
    """
    def create_for(self, instance):
        """Find the IMDb ID for *instance* and create the link row.

        Raises NothingFound when no strategy produces a match.
        """
        self.validate_for_search(instance)
        imdb = IMDb()
        # Strategy 1: match via cast/career members that already have IMDb links.
        try:
            return self.search_using_roles(imdb, instance)
        except NothingFound:
            pass
        # Strategy 2: plain IMDb search by title/name.
        try:
            return self.search(imdb, instance)
        except NothingFound:
            pass
        raise NothingFound(f"No IMDb results found for {instance._meta.model_name} ID={instance.id}")
    def validate_for_search(self, instance):
        """Raise ValueError if *instance* lacks the fields needed for a search."""
        raise NotImplementedError()
    def search_using_roles(self, imdb, instance):
        """Search via related, already-linked IMDb records; raise NothingFound on miss."""
        raise NotImplementedError()
    def search(self, imdb, instance):
        """Search IMDb directly by title/name; raise NothingFound on miss."""
        raise NotImplementedError()
    def safe_create(self, imdb_id, instance):
        """Create the (imdb_id, instance) link, guarding against conflicts.

        Raises:
            PossibleDuplicate: *imdb_id* is already linked to another instance.
            WrongValue: *instance* is already linked to a different IMDb ID.
        """
        instance_type = instance._meta.model_name
        try:
            # Is this IMDb ID already linked to some instance?
            instance_exist = self.get(id=imdb_id)
            instance_exist_id = getattr(instance_exist, f'{instance_type}_id')
            if instance.id == instance_exist_id:
                return instance_exist
            raise PossibleDuplicate(
                f"Can not assign IMDb ID={imdb_id} to {instance_type} ID={instance.id}, "
                f"because it's already assigned to {instance_type} ID={instance_exist_id}")
        except self.model.DoesNotExist:
            try:
                # Is this instance already linked to some other IMDb ID?
                instance_exist = self.get(**{instance_type: instance})
                if imdb_id != instance_exist.id:
                    raise WrongValue(
                        f"Can not assign IMDb ID={imdb_id} to {instance_type} ID={instance.id}, "
                        f"because another IMDb ID={instance_exist.id} already assigned there")
            except self.model.DoesNotExist:
                return self.create(id=imdb_id, **{instance_type: instance})
class ImdbMovieManager(ImdbBaseManager):
    """Manager that searches IMDb for a local Movie and links it."""
    def validate_for_search(self, movie):
        """Require both title and year before attempting any IMDb search."""
        if not movie.title or not movie.year:
            raise ValueError("To be able search IMDb movie it should has at least title and year")
    def search_using_roles(self, imdb, movie):
        """Search by movie's imdb person"""
        if movie.cast.exclude(person__imdb=None).exists():
            person_imdb_id = movie.cast.exclude(person__imdb=None)[0].person.imdb.id
            person_imdb = imdb.get_person(person_imdb_id)
            # Walk the linked person's filmography for an exact title+year match.
            for category in person_imdb['filmography']:
                for __, results in category.items():
                    for result in results:
                        if result.data['title'] == movie.title and result.data['year'] == movie.year:
                            return self.safe_create(result.movieID, movie)
        raise NothingFound
    def search(self, imdb, movie):
        """Search by movie's title"""
        for result in imdb.search_movie(movie.title):
            year = result.data.get('year')
            # Accept the first hit whose release year matches exactly.
            if year and year == movie.year:
                return self.safe_create(result.movieID, movie)
        raise NothingFound
class ImdbPersonManager(ImdbBaseManager):
    """Manager that searches IMDb for a local Person and links it."""
    # Filmography categories scanned when matching through a linked movie;
    # the commented-out entries are deliberately excluded.
    movie_persons_categories = [
        'cast',
        'art department',
        # 'assistant directors',
        # 'camera department',
        # 'casting department',
        'cinematographers',
        'director',
        'directors',
        'editors',
        # 'make up department',
        'music department',
        'producers',
        # 'production managers',
        # 'sound department',
        # 'thanks',
        # 'visual effects',
        'writer',
        'writers',
    ]
    def validate_for_search(self, person):
        """Require both name parts before attempting any IMDb search."""
        if not person.first_name or not person.last_name:
            raise ValueError("To be able search IMDb person it should has first name and last name")
    def search_using_roles(self, imdb, person):
        """Search by person's imdb movie"""
        if person.career.exclude(movie__imdb=None).exists():
            movie_imdb_id = person.career.exclude(movie__imdb=None)[0].movie.imdb.id
            movie_imdb = imdb.get_movie(movie_imdb_id)
            for category in self.movie_persons_categories:
                for result in movie_imdb.get(category, []):
                    # Compared against the "Lastname, Firstname" form.
                    if result.data['name'] == f'{person.last_name}, {person.first_name}':
                        return self.safe_create(result.personID, person)
        raise NothingFound
    def search(self, imdb, person):
        """Search by person's name"""
        # NOTE(review): blindly takes the first search hit — see TODO below.
        for result in imdb.search_person(person.name):
            # TODO: make more complicated check if it's right person
            return self.safe_create(result.personID, person)
        raise NothingFound
class ImdbMovie(SitesBaseModel, UrlMixin):
    """
    Imdb movie model
    """
    # IMDb's numeric title identifier (the NNNNNNN part of ttNNNNNNN).
    id = models.PositiveIntegerField(_('IMDb ID'), primary_key=True)
    rating = models.FloatField(_('IMDb rating'), null=True, db_index=True, blank=True)
    votes = models.PositiveIntegerField(_('IMDb votes number'), null=True, blank=True)
    movie = models.OneToOneField(Movie, related_name='imdb', on_delete=models.CASCADE)
    # URL template consumed by UrlMixin.url.
    link = 'http://www.imdb.com/title/tt{id}/'
    objects = ImdbMovieManager()
    def sync(self, **kwargs):
        """Import this movie's data from IMDb, then record the sync via the base class."""
        # Local import — presumably avoids a circular import with the importer; confirm.
        from cinemanio.sites.imdb.importer import ImdbMovieImporter
        ImdbMovieImporter(self.movie, self.id).get_applied_data(**kwargs)
        super().sync()
class ImdbPerson(SitesBaseModel, UrlMixin):
    """
    Imdb person model
    """
    # IMDb's numeric name identifier (the NNNNNNN part of nmNNNNNNN).
    id = models.PositiveIntegerField(_('IMDb ID'), primary_key=True)
    person = models.OneToOneField(Person, related_name='imdb', on_delete=models.CASCADE)
    # URL template consumed by UrlMixin.url.
    link = 'http://www.imdb.com/name/nm{id}/'
    objects = ImdbPersonManager()
    def sync(self, **kwargs):
        """Import this person's data from IMDb, then record the sync via the base class."""
        # Local import — presumably avoids a circular import with the importer; confirm.
        from cinemanio.sites.imdb.importer import ImdbPersonImporter
        ImdbPersonImporter(self.person, self.id).get_applied_data(**kwargs)
        super().sync()
class ImdbPropBase(models.Model):
    """Abstract base mapping a local lookup value to its IMDb display name."""
    name = models.CharField(_('IMDb name'), max_length=50, null=True, unique=True)
    class Meta:
        abstract = True
class ImdbGenre(ImdbPropBase):
    """IMDb name for a local Genre."""
    genre = models.OneToOneField(Genre, related_name='imdb', on_delete=models.CASCADE)
class ImdbLanguage(ImdbPropBase):
    """IMDb name and two-letter code for a local Language."""
    language = models.OneToOneField(Language, related_name='imdb', on_delete=models.CASCADE)
    code = models.CharField(_('IMDb code'), max_length=2, null=True, unique=True)
class ImdbCountry(ImdbPropBase):
    """IMDb name and two-letter code for a local Country."""
    country = models.OneToOneField(Country, related_name='imdb', on_delete=models.CASCADE)
    code = models.CharField(_('IMDb code'), max_length=2, null=True, unique=True)
| StarcoderdataPython |
6403942 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jieba
from .vocab import Vocab
def get_idx_from_word(word, word_to_idx, unk_word):
    """Look up *word* in the vocab mapping, falling back to the unknown token's index."""
    try:
        return word_to_idx[word]
    except KeyError:
        return word_to_idx[unk_word]
class BaseTokenizer(object):
    """Abstract tokenizer interface bound to a vocabulary."""

    def __init__(self, vocab):
        # Vocabulary used by subclasses to seed / restrict tokenization.
        self.vocab = vocab

    def get_tokenizer(self):
        """Return the underlying tokenizer object (created by subclasses)."""
        return self.tokenizer

    def cut(self, sentence):
        """Split *sentence* into tokens; no-op in the base class."""
        pass

    def encode(self, sentence):
        """Convert *sentence* to vocabulary indices; no-op in the base class."""
        pass
class JiebaTokenizer(BaseTokenizer):
    """Tokenizer backed by jieba, restricted to words present in the vocabulary."""

    def __init__(self, vocab):
        super(JiebaTokenizer, self).__init__(vocab)
        self.tokenizer = jieba.Tokenizer()
        # Seed jieba's dictionary with the vocabulary tokens (frequency 1 each)
        # and flag it initialized so it skips loading its default dictionary.
        self.tokenizer.FREQ = dict.fromkeys(self.vocab.token_to_idx, 1)
        self.tokenizer.total = len(self.tokenizer.FREQ)
        self.tokenizer.initialized = True

    def cut(self, sentence, cut_all=False, use_hmm=True):
        """Segment *sentence* into a list of word strings."""
        return self.tokenizer.lcut(sentence, cut_all, use_hmm)

    def encode(self, sentence, cut_all=False, use_hmm=True):
        """Segment *sentence* and map each word to its vocabulary index."""
        vocab_map = self.vocab.token_to_idx
        unk = self.vocab.unk_token
        return [get_idx_from_word(word, vocab_map, unk)
                for word in self.cut(sentence, cut_all, use_hmm)]
| StarcoderdataPython |
8078339 | import rospy
from std_srvs.srv import Empty
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import GetModelState, SetModelState
class GazeboServiceCaller:
    """Convenience wrapper around the Gazebo ROS services for a single model."""

    def __init__(self, model_name="PenguinPi"):
        """
        Args:
            model_name (str, optional): Name of model you want to call services for. Defaults to "PenguinPi".
        """
        self.model_name = model_name

    def reset_world(self):
        """Reset Gazebo world
        WARNING: This resets the map and models too
        """
        # FIX: the original only constructed the ServiceProxy and never
        # invoked it, so the world was never actually reset.
        reset = rospy.ServiceProxy("/gazebo/reset_world", Empty)
        reset()

    def print_model_state(self, entity=None):
        """Call get_model_state service and print the model's position.

        Args:
            entity (string, optional): Name of entity i.e. link/joint.
                Defaults to None.
        """
        gms = rospy.ServiceProxy("/gazebo/get_model_state", GetModelState)
        result = gms(self.model_name, entity)
        position = result.pose.position
        print(f"{position}")

    def reset_model(self, entity=None):
        """Reset model to original pose (the origin, identity orientation).

        Args:
            entity (string, optional): Name of entity i.e. link/joint.
                Defaults to None.
                NOTE(review): this parameter is currently unused — the whole
                model is reset regardless; confirm intended semantics.
        """
        state_msg = ModelState()
        state_msg.model_name = self.model_name
        state_msg.pose.position.x = 0
        state_msg.pose.position.y = 0
        state_msg.pose.position.z = 0
        state_msg.pose.orientation.x = 0
        state_msg.pose.orientation.y = 0
        state_msg.pose.orientation.z = 0
        # FIX: the identity quaternion is (0, 0, 0, 1); an all-zero
        # quaternion is degenerate (not a valid rotation).
        state_msg.pose.orientation.w = 1
        try:
            set_state = rospy.ServiceProxy("/gazebo/set_model_state", SetModelState)
            set_state(state_msg)
        except rospy.ServiceException:
            print("Service call failed")
if __name__ == "__main__":
    # Demo: reset the PenguinPi model to its original pose.
    a = GazeboServiceCaller()
    a.reset_model("PenguinPi")
| StarcoderdataPython |
5197709 | import database
import libpyprjoxide
def main():
    # Copy the "EBR_10" tile data into the "TRUNK_L_EBR_10" tile of the LIFCL
    # family database; see prjoxide's copy_db for the argument semantics.
    db = libpyprjoxide.Database(database.get_db_root())
    libpyprjoxide.copy_db(db, "LIFCL", "EBR_10", ["TRUNK_L_EBR_10", ], "PEWC", "")
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3441425 | <filename>graphs/stepping_numbers.py
#https://practice.geeksforgeeks.org/problems/stepping-numberswrong-output1813/1/?page=1&company[]=Amazon&category[]=DFS&sortBy=submissions#
from collections import deque
class Solution:
    """Count "stepping numbers" (every pair of adjacent digits differs by 1) in a range."""

    def bfs(self, num, n, m):
        """Breadth-first expand stepping numbers whose leading digit is *num*.

        Returns how many of the generated numbers fall inside [n, m];
        expansion stops once a number exceeds *m*.
        """
        count = 0
        queue = deque([num])
        while queue:
            current = queue.popleft()
            if n <= current <= m:
                count += 1
            # Out of range: 0 never expands (10, 12, ... come from bfs(1..9)),
            # and anything above m only grows larger.
            if current <= 0 or current > m:
                continue
            last_digit = current % 10
            if last_digit > 0:
                queue.append(current * 10 + (last_digit - 1))
            if last_digit < 9:
                queue.append(current * 10 + (last_digit + 1))
        return count

    def steppingNumbers(self, n, m):
        """Count all stepping numbers within [n, m]."""
        return sum(self.bfs(digit, n, m) for digit in range(10))
if __name__ == '__main__':
    # Driver: first line is the test-case count, then one "N M" range per line.
    ob = Solution()
    t = int (input ())
    for _ in range (t):
        N, M = map(int, input().split())
        ans = ob.steppingNumbers(N, M);
        print(ans)
| StarcoderdataPython |
1782505 | # Tile Dim. - 1x4 : To Fill nx4
from sys import stdin
def dp(n: int):
    """Count tilings of an n x 4 board with 1 x 4 tiles.

    Recurrence: f(n) = f(n-1) + f(n-4), with f(0) = f(1) = f(2) = f(3) = 1.
    """
    if n < 4:
        # Fewer than four columns: only one tiling exists.
        return 1
    # `counts[i]` holds f(i); grow the table up to n.
    counts = [1, 1, 1, 1]
    for _ in range(4, n + 1):
        counts.append(counts[-1] + counts[-4])
    return counts[-1]
def main():
    # Read the test-case count, then one board length n per line; print the
    # number of 1x4 tilings of each n x 4 board.
    t = int(input())
    for i in range(t):
        n = int(input())
        ans = dp(n)
        print(ans)
if __name__ == '__main__':
main() | StarcoderdataPython |
3395988 | import torch
from .classification import ASSANetCls
from .segmentation import ASSANetSeg
from .losses import LabelSmoothingCrossEntropyLoss, MultiShapeCrossEntropy, MaskedCrossEntropy
from torch.optim.lr_scheduler import _LRScheduler, MultiStepLR, CosineAnnealingLR
def build_classification(config):
    """Build the classification model named in *config* and its criterion.

    Returns:
        (model, criterion) — ASSANetCls paired with label-smoothing CE.
    """
    if config.model.name == 'assanet':
        model = ASSANetCls(config)
    else:
        raise NotImplementedError(f'{config.model.name} is not support yet')
    criterion = LabelSmoothingCrossEntropyLoss()
    return model, criterion
def build_multi_part_segmentation(config):
    """Build the multi-shape part-segmentation model and its criterion.

    NOTE(review): ``ASSANetPartSeg`` is never imported in this module (only
    ``ASSANetCls`` and ``ASSANetSeg`` are), so this branch raises NameError
    at runtime — confirm the intended import.
    """
    if config.model.name == 'assanet':
        model = ASSANetPartSeg(config)
    else:
        raise NotImplementedError(f'{config.model.name} is not support yet')
    criterion = MultiShapeCrossEntropy(config.data.num_classes)
    return model, criterion
def build_scene_segmentation(config):
    """Build the scene-segmentation model named in *config* and its criterion.

    Returns:
        (model, criterion) — ASSANetSeg paired with masked cross-entropy.
    """
    if config.model.name == 'assanet':
        model = ASSANetSeg(config)
    else:
        raise NotImplementedError(f'{config.model.name} is not support yet')
    criterion = MaskedCrossEntropy()
    return model, criterion
def build_optimizer(model, config):
    """Create the optimizer named in ``config.optimizer`` for *model*'s parameters.

    Supported names: 'sgd', 'adam', 'adamW'; anything else raises
    NotImplementedError.
    """
    optim_config = config.optimizer
    lr = optim_config.lr  # linear rule does not apply here
    name = optim_config.name
    params = model.parameters()
    if name == 'sgd':
        # lr = optim_config.batch_size * dist.get_world_size() / 8 * optim_config.lr
        return torch.optim.SGD(params,
                               lr=lr,
                               momentum=optim_config.momentum,
                               weight_decay=optim_config.weight_decay)
    if name == 'adam':
        return torch.optim.Adam(params,
                                lr=lr,
                                weight_decay=optim_config.weight_decay)
    if name == 'adamW':
        return torch.optim.AdamW(params,
                                 lr=lr,
                                 weight_decay=optim_config.weight_decay)
    raise NotImplementedError(f"Optimizer {optim_config.name} not supported")
# noinspection PyAttributeOutsideInit
class GradualWarmupScheduler(_LRScheduler):
    """ Gradually warm-up(increasing) learning rate in optimizer.
    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: init learning rate = base lr / multiplier
        warmup_epoch: target learning rate is reached at warmup_epoch, gradually
        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
    """
    def __init__(self, optimizer, multiplier, warmup_epoch, after_scheduler, last_epoch=-1):
        self.multiplier = multiplier
        if self.multiplier <= 1.:
            raise ValueError('multiplier should be greater than 1.')
        self.warmup_epoch = warmup_epoch
        self.after_scheduler = after_scheduler
        self.finished = False
        super().__init__(optimizer, last_epoch=last_epoch)
    def get_lr(self):
        # During warm-up, interpolate linearly from base_lr/multiplier up to
        # base_lr; afterwards delegate to the wrapped scheduler.
        if self.last_epoch > self.warmup_epoch:
            return self.after_scheduler.get_lr()
        else:
            return [base_lr / self.multiplier * ((self.multiplier - 1.) * self.last_epoch / self.warmup_epoch + 1.)
                    for base_lr in self.base_lrs]
    def step(self, epoch=None):
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch
        if epoch > self.warmup_epoch:
            # Shift the epoch so the wrapped scheduler counts from zero.
            self.after_scheduler.step(epoch - self.warmup_epoch)
        else:
            super(GradualWarmupScheduler, self).step(epoch)
    def state_dict(self):
        """Returns the state of the scheduler as a :class:`dict`.
        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        """
        # The wrapped scheduler is serialized via its own state_dict rather
        # than being pickled wholesale.
        state = {key: value for key, value in self.__dict__.items() if key != 'optimizer' and key != 'after_scheduler'}
        state['after_scheduler'] = self.after_scheduler.state_dict()
        return state
    def load_state_dict(self, state_dict):
        """Loads the schedulers state.
        Arguments:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        after_scheduler_state = state_dict.pop('after_scheduler')
        self.__dict__.update(state_dict)
        self.after_scheduler.load_state_dict(after_scheduler_state)
def build_scheduler(optimizer, config, n_iter_per_epoch=1):
    """ build the lr scheduler
    Args:
        optimizer:
        config:
        n_iter_per_epoch: set to 1 if perform lr scheduler per epoch
    Returns:
    """
    # Per-epoch stepping (on_epoch=True) requires n_iter_per_epoch == 1 and vice versa.
    assert config.lr_scheduler.on_epoch == (n_iter_per_epoch == 1)
    if "cosine" in config.lr_scheduler.name:
        scheduler = CosineAnnealingLR(
            optimizer=optimizer,
            eta_min=0.000001,
            T_max=(config.epochs - config.warmup_epoch)*n_iter_per_epoch)
    elif "multistep" in config.lr_scheduler.name:
        # NOTE: must be checked before the generic "step" branch below, since
        # "step" is a substring of "multistep".
        scheduler = MultiStepLR(
            optimizer=optimizer,
            gamma=config.lr_scheduler.decay_rate,
            milestones=[int(x) for x in config.lr_scheduler.decay_steps.split(',')])
    elif "step" in config.lr_scheduler.name:
        # Evenly spaced decay milestones derived from a single step size.
        lr_decay_epochs = [config.lr_scheduler.decay_steps * i
                           for i in range(1, config.epochs // config.lr_scheduler.decay_steps)]
        scheduler = MultiStepLR(
            optimizer=optimizer,
            gamma=config.lr_scheduler.decay_rate,
            milestones=[(m - config.warmup_epoch)*n_iter_per_epoch for m in lr_decay_epochs])
    else:
        raise NotImplementedError(f"scheduler {config.lr_scheduler.name} not supported")
    if config.warmup_epoch > 0:
        # Wrap with a linear warm-up over the first warmup_epoch epochs.
        scheduler = GradualWarmupScheduler(
            optimizer,
            multiplier=config.warmup_multiplier,
            after_scheduler=scheduler,
            warmup_epoch=config.warmup_epoch*n_iter_per_epoch)
    return scheduler
| StarcoderdataPython |
def fibonacci(n, sequence=(0, 1)):
    """Return the first *n* Fibonacci numbers as a tuple.

    The sequence is grown recursively: each step appends the sum of the last
    two elements until at least *n* values exist.

    FIX: the original tested ``len(sequence) == n``, so any n < 2 (the seed
    tuple already holds two values) recursed forever. Using ``>=`` plus a
    slice makes n = 0, 1 and 2 all return correctly.
    """
    if len(sequence) >= n:
        return sequence[:n]
    # Append the sum of the last two values and recurse.
    return fibonacci(n, sequence + (sum(sequence[-2:]),))
if __name__ == '__main__':
    n = int(input())
    # Ask for one extra element so index n-1 below is the input-th value (0-indexed).
    n += 1
    # Since the return of the fibonacci function is a tuple, one must take the required
    # position of the 'n'
    print(fibonacci(n)[n-1])
| StarcoderdataPython |
6612766 | <filename>torchreid/models/resnet.py
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import copy
import torch
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from torch.nn import init
from torch import nn
import math
from scipy.stats import norm
# Factory functions exported by this module.
__all__ = ['resnet18','resnet34','resnet50','resnet101','resnet152']
# Official torchvision pretrained ImageNet weight URLs, keyed by architecture name.
model_urls = {'resnet18':'https://download.pytorch.org/models/resnet18-5c106cde.pth',
              'resnet34':'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
              'resnet50':'https://download.pytorch.org/models/resnet50-19c8e357.pth',
              'resnet101':'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
              'resnet152':'https://download.pytorch.org/models/resnet152-b121ed2d.pth'
              }
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding (bias-free, as BN follows)."""
    conv_kwargs = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,  # padding == dilation keeps spatial size at stride 1
        groups=groups,
        dilation=dilation,
        bias=False,
    )
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (no padding, bias-free)."""
    projection = nn.Conv2d(in_planes, out_planes,
                           kernel_size=1,
                           stride=stride,
                           bias=False)
    return projection
class BasicBlock(nn.Module):
    """Two-conv (3x3 -> 3x3) residual block.

    NOTE: unlike torchvision's block, ``forward`` applies ReLU to the *input*
    first and returns the residual sum *without* a trailing ReLU (the next
    block's input ReLU supplies it) — see the commented-out line at the end.
    """
    expansion = 1  # output channels = planes * expansion
    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None
    ):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64'
            )
        if dilation > 1:
            raise NotImplementedError(
                "Dilation > 1 not supported in BasicBlock"
            )
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Activate the incoming features before both the residual and identity paths.
        x = F.relu(x)
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        # out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Three-conv (1x1 -> 3x3 -> 1x1) residual bottleneck block.

    NOTE: unlike torchvision's block, ``forward`` applies ReLU to the *input*
    first and returns the residual sum *without* a trailing ReLU (the next
    block's input ReLU supplies it) — see the commented-out line at the end.
    """
    expansion = 4  # output channels = planes * expansion
    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None
    ):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the inner 3x3 conv (supports the ResNeXt-style widening).
        width = int(planes * (base_width/64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Activate the incoming features before both the residual and identity paths.
        x = F.relu(x)
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        # out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Residual network.
    Reference:
        - He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
        - Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.
    Public keys:
        - ``resnet18``: ResNet18.
        - ``resnet34``: ResNet34.
        - ``resnet50``: ResNet50.
        - ``resnet101``: ResNet101.
        - ``resnet152``: ResNet152.
        - ``resnext50_32x4d``: ResNeXt50.
        - ``resnext101_32x8d``: ResNeXt101.
        - ``resnet50_fc512``: ResNet50 + FC.
    """
    def __init__(
        self,
        num_classes,
        loss,
        block,
        layers,
        zero_init_residual=False,
        groups=1,
        fc_dim=2048,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm_layer=None,
        last_stride=2, # was 2 initially
        dropout_p=None,
        teacher_arch=None,
        **kwargs
    ):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.loss = loss
        self.teacher_arch = teacher_arch
        # Filled in later by get_margin_from_bn() (used by the KD losses).
        self.margins = None
        self.out_dim = 512 * block.expansion
        self.feature_dim = self.out_dim
        self.fc_dim = fc_dim
        self.inplanes = 64
        self.dilation = 1
        self.expansion = block.expansion
        # Toggled externally by convert_2_multi_head().
        self.multi_head = False
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                "or a 3-element tuple, got {}".
                format(replace_stride_with_dilation)
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(
            3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(
            block,
            128,
            layers[1],
            stride=2,
            dilate=replace_stride_with_dilation[0]
        )
        self.layer3 = self._make_layer(
            block,
            256,
            layers[2],
            stride=2,
            dilate=replace_stride_with_dilation[1]
        )
        self.layer4 = self._make_layer(
            block,
            512,
            layers[3],
            stride=last_stride,
            dilate=replace_stride_with_dilation[2]
        )
        self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
        if fc_dim > 0:
            # Embedding head: linear projection + 1d batch norm.
            # NOTE(review): `feat` is built with out_features == self.out_dim
            # (feature_dim still equals out_dim here) while `feature_dim` is
            # then set to fc_dim for the classifier; when fc_dim != out_dim
            # these sizes disagree — confirm intended behavior.
            self.feat = nn.Linear(self.out_dim, self.feature_dim)
            self.feat_bn = nn.BatchNorm1d(self.feature_dim)
            init.kaiming_normal_(self.feat.weight, mode='fan_out')
            init.constant_(self.feat.bias, 0)
            self.feature_dim = fc_dim
        self.classifier = nn.Linear(self.feature_dim, num_classes)
        self._init_params()
        if self.teacher_arch != None:
            # Per-stage channel widths of the teacher network (Bottleneck
            # teachers use 4x expansion; BasicBlock teachers do not).
            if self.teacher_arch == "resnet50" or self.teacher_arch == "resnet101" or self.teacher_arch == "resnet152":
                teacher_feat_dims = [256, 512, 1024, 2048]
            else:
                teacher_feat_dims = [64, 128, 256, 512]
            student_feat_dims = [64 * self.expansion, 128 * self.expansion, 256 * self.expansion,
                                 512 * self.expansion]
            # 1x1 conv to match smaller resnet feature dimension with larger models
            if self.loss == 'kd_reid':
                self.feat_matcher_list = nn.ModuleList([self._construct_feat_matchers(s, t) for s, t in zip(student_feat_dims, teacher_feat_dims)])
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack *blocks* residual blocks of width *planes*; the first may downsample."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stride for dilation to preserve spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut so the identity matches the branch's shape.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups,
                self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer
                )
            )
        return nn.Sequential(*layers)
    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer
        Args:
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability, if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None
        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )
        # Each stage: Linear -> BN -> ReLU (-> Dropout).
        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim
        self.feature_dim = fc_dims[-1]
        return nn.Sequential(*layers)
    def _construct_feat_matchers(self, dim_in, dim_out):
        """1x1 conv + BN projecting student stage features (dim_in) to the teacher width (dim_out)."""
        C = [nn.Conv2d(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias=False),
             nn.BatchNorm2d(dim_out)]
        for m in C:
            if isinstance(m, nn.Conv2d):
                # Fan-out He initialization for the conv weights.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return nn.Sequential(*C)
    def _init_params(self):
        """Kaiming/constant initialization for all conv, BN and linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def get_margin_from_bn(self):
        """Derive per-channel margins from the last BN of each stage.

        For each BN channel with (std, mean) taken from the BN's
        weight/bias, the margin is the mean of the negative part of a
        N(mean, std^2) variable (with a -3*std floor when the negative
        mass is negligible). Registers broadcastable buffers
        ``margin1``..``margin4`` (shape (1, C, 1, 1)) consumed by the
        'kd_reid' branch of ``forward``; returns the stage-4 margin tensor.
        """
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
            bn4 = self.layer4[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
            bn4 = self.layer4[-1].bn2
        else:
            raise KeyError('ResNet unknown block error !!!')
        bns = [bn1, bn2, bn3, bn4]
        for i, bn in enumerate(bns):
            margin = []
            std = bn.weight.data
            mean = bn.bias.data
            for (s, m) in zip(std, mean):
                s = abs(s.item())
                m = m.item()
                if norm.cdf(-m / s) > 0.001:
                    # E[x | x < 0] for x ~ N(m, s^2): m - s*pdf(m/s)/cdf(-m/s).
                    margin.append(
                        - s * math.exp(- (m / s) ** 2 / 2) / math.sqrt(2 * math.pi) / norm.cdf(-m / s) + m)
                else:
                    margin.append(-3 * s)
            margin = torch.FloatTensor(margin).to(std.device)
            # Reshape (C,) -> (1, C, 1, 1) so it broadcasts over feature maps.
            self.register_buffer('margin%d' % (i+1), margin.unsqueeze(1).unsqueeze(2).unsqueeze(0).detach())
        return margin
    def get_channel_num(self):
        """Per-stage output channel counts of layer1..layer4."""
        return [64 * self.expansion, 128 * self.expansion, 256 * self.expansion, 512 * self.expansion]
    def forward(self, input, target=None):
        """Run the backbone; return type depends on mode and ``self.loss``.

        Args:
            input: image batch.
            target: head index, used only when ``multi_head`` is enabled.
        Returns:
            eval mode: L2-normalized embedding ``v``.
            train mode: logits ``y`` ('softmax'); ``(y, v)`` ('kd_mmd'/'mmd'/
            'triplet'); ``([f1..f4], v, y)`` for the feature-distillation losses.
        """
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Stage outputs are pre-ReLU: each residual block activates its own input.
        f1 = self.layer1(x)
        f2 = self.layer2(f1)
        f3 = self.layer3(f2)
        f4 = self.layer4(f3)
        f = F.relu(f4)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)
        if self.fc_dim > 0:
            if self.multi_head:
                # Per-head embedding projection (see convert_2_multi_head).
                v = self.feat_fc_multi[target](v)
            else:
                v = self.feat_bn(self.feat(v))
        if not self.training:
            # Evaluation: return the L2-normalized embedding only.
            v = F.normalize(v)
            return v
        y = self.classifier(v)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'kd_mmd' or self.loss == 'mmd' or self.loss == 'triplet':
            return y, v
        elif self.loss == 'kd_reid':
            # Margin ReLU if teacher, 1x1 Conv for student
            if self.teacher_arch == None:
                # Teacher: clamp each stage from below by its BN-derived margin
                # (buffers registered by get_margin_from_bn), then flatten.
                f1 = torch.max(f1, getattr(self, 'margin%d' % (1)))
                f1 = f1.view(f1.size(0), -1)
                f2 = torch.max(f2, getattr(self, 'margin%d' % (2)))
                f2 = f2.view(f2.size(0), -1)
                f3 = torch.max(f3, getattr(self, 'margin%d' % (3)))
                f3 = f3.view(f3.size(0), -1)
                f4 = torch.max(f4, getattr(self, 'margin%d' % (4)))
                f4 = f4.view(f4.size(0), -1)
            else:
                # Student: project each stage to the teacher's width, then flatten.
                f1 = self.feat_matcher_list[0](f1)
                f1 = f1.view(f1.size(0), -1)
                f2 = self.feat_matcher_list[1](f2)
                f2 = f2.view(f2.size(0), -1)
                f3 = self.feat_matcher_list[2](f3)
                f3 = f3.view(f3.size(0), -1)
                f4 = self.feat_matcher_list[3](f4)
                f4 = f4.view(f4.size(0), -1)
            return [f1, f2, f3, f4], v, y
        elif self.loss == 'feat_kd':
            # ReLU'd and flattened per-stage features for feature distillation.
            f1 = F.relu(f1)
            f1 = f1.view(f1.size(0), -1)
            f2 = F.relu(f2)
            f2 = f2.view(f2.size(0), -1)
            f3 = F.relu(f3)
            f3 = f3.view(f3.size(0), -1)
            f4 = F.relu(f4)
            f4 = f4.view(f4.size(0), -1)
            return [f1, f2, f3, f4], v, y
        elif self.loss == 'adv_feat_kd':
            # Same as 'feat_kd' but the spatial layout is kept (no flatten).
            f1 = F.relu(f1)
            f2 = F.relu(f2)
            f3 = F.relu(f3)
            f4 = F.relu(f4)
            return [f1, f2, f3, f4], v, y
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
def convert_2_multi_head(model, multi_head):
    """Equip ``model`` with ``multi_head`` independent (fc, bn) embedding heads.

    Each head is a deep copy of the model's existing ``feat``/``feat_bn``
    pair, stored in ``model.feat_fc_multi``; ``model.multi_head`` is set.
    """
    model.multi_head = True
    model.feat_fc_multi = nn.ModuleList()
    for _ in range(multi_head):
        head = nn.Sequential(copy.deepcopy(model.feat),
                             copy.deepcopy(model.feat_bn))
        model.feat_fc_multi.append(head)
def init_pretrained_weights(model, model_url):
    """Initialize ``model`` with pretrained weights downloaded from ``model_url``.

    Layers whose name or tensor shape does not match the pretrained
    checkpoint are left unchanged.
    """
    pretrained = model_zoo.load_url(model_url)
    current = model.state_dict()
    # Keep only entries that exist in the model with an identical shape.
    compatible = {
        name: tensor
        for name, tensor in pretrained.items()
        if name in current and current[name].size() == tensor.size()
    }
    current.update(compatible)
    model.load_state_dict(current)
def resnet18(num_classes, loss='softmax', pretrained=True, teacher_arch=None, fc_dim=2048, **kwargs):
    """Build a ResNet-18 backbone (BasicBlock, layers 2-2-2-2).

    Optionally loads ImageNet weights, then precomputes the per-stage
    margin buffers used by the distillation losses.
    """
    config = dict(
        num_classes=num_classes,
        loss=loss,
        block=BasicBlock,
        layers=[2, 2, 2, 2],
        last_stride=2,
        fc_dim=fc_dim,
        dropout_p=None,
        teacher_arch=teacher_arch,
    )
    model = ResNet(**config, **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet18'])
    model.margins = model.get_margin_from_bn()
    return model
def resnet34(num_classes, loss='softmax', pretrained=True, teacher_arch=None, fc_dim=2048, **kwargs):
    """Build a ResNet-34 backbone (BasicBlock, layers 3-4-6-3).

    Optionally loads ImageNet weights, then precomputes the per-stage
    margin buffers used by the distillation losses.
    """
    config = dict(
        num_classes=num_classes,
        loss=loss,
        block=BasicBlock,
        layers=[3, 4, 6, 3],
        last_stride=2,
        fc_dim=fc_dim,
        dropout_p=None,
        teacher_arch=teacher_arch,
    )
    model = ResNet(**config, **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet34'])
    model.margins = model.get_margin_from_bn()
    return model
def resnet50(num_classes, loss='softmax', pretrained=True, teacher_arch=None, fc_dim=2048, **kwargs):
    """Build a ResNet-50 backbone (Bottleneck, layers 3-4-6-3).

    Optionally loads ImageNet weights, then precomputes the per-stage
    margin buffers used by the distillation losses.
    """
    config = dict(
        num_classes=num_classes,
        loss=loss,
        block=Bottleneck,
        layers=[3, 4, 6, 3],
        last_stride=2,
        fc_dim=fc_dim,
        dropout_p=None,
        teacher_arch=teacher_arch,
    )
    model = ResNet(**config, **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet50'])
    model.margins = model.get_margin_from_bn()
    return model
def resnet101(num_classes, loss='softmax', pretrained=True, teacher_arch=None, fc_dim=2048, **kwargs):
    """Build a ResNet-101 backbone (Bottleneck, layers 3-4-23-3).

    Optionally loads ImageNet weights, then precomputes the per-stage
    margin buffers used by the distillation losses.
    """
    config = dict(
        num_classes=num_classes,
        loss=loss,
        block=Bottleneck,
        layers=[3, 4, 23, 3],
        last_stride=2,
        fc_dim=fc_dim,
        dropout_p=None,
        teacher_arch=teacher_arch,
    )
    model = ResNet(**config, **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet101'])
    model.margins = model.get_margin_from_bn()
    return model
def resnet152(num_classes, loss='softmax', pretrained=True, teacher_arch=None, fc_dim=2048, **kwargs):
    """Build a ResNet-152 backbone (Bottleneck, layers 3-8-36-3).

    Optionally loads ImageNet weights, then precomputes the per-stage
    margin buffers used by the distillation losses.
    """
    config = dict(
        num_classes=num_classes,
        loss=loss,
        block=Bottleneck,
        layers=[3, 8, 36, 3],
        last_stride=2,
        fc_dim=fc_dim,
        dropout_p=None,
        teacher_arch=teacher_arch,
    )
    model = ResNet(**config, **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet152'])
    model.margins = model.get_margin_from_bn()
    return model
| StarcoderdataPython |
3441686 | import os
import lmdb # install lmdb by "pip install lmdb"
import cv2
import numpy as np
def checkImageIsValid(imageBin):
    """Return True if ``imageBin`` decodes to a non-empty grayscale image.

    Fixes:
      - ``np.fromstring`` is deprecated for binary data; use ``np.frombuffer``.
      - ``cv2.imdecode`` returns ``None`` for undecodable bytes, which made
        the original crash on ``img.shape``; treat that as invalid instead.
    """
    if imageBin is None:
        return False
    imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
    img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # Bytes that are not a decodable image.
        return False
    imgH, imgW = img.shape[0], img.shape[1]
    return imgH * imgW != 0
def writeCache(env, cache):
    """Write every (key, value) pair of ``cache`` to LMDB in one transaction.

    Fix: the original applied ``str(v).encode()`` to every value, which
    corrupts binary image blobs (``bytes`` became the literal ``"b'...'"``).
    Keys and values that are already ``bytes`` are stored unchanged; anything
    else is stringified and UTF-8 encoded, as LMDB requires bytes.
    """
    with env.begin(write=True) as txn:
        for k, v in cache.items():
            key = k if isinstance(k, bytes) else str(k).encode()
            value = v if isinstance(v, bytes) else str(v).encode()
            txn.put(key, value)
def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
    """
    Create LMDB dataset for CRNN training.

    ARGS:
        outputPath    : LMDB output path
        imagePathList : list of image path
        labelList     : list of corresponding groundtruth texts
        lexiconList   : (optional) list of lexicon lists
        checkValid    : if true, check the validity of every image

    Missing or invalid images are skipped (with a message); the final
    'num-samples' entry records how many samples were actually written.
    """
    assert(len(imagePathList) == len(labelList))
    nSamples = len(imagePathList)
    # map_size is the maximum database size (1 TiB of virtual address space).
    env = lmdb.open(outputPath, map_size=1099511627776)
    cache = {}
    cnt = 1
    for i in range(nSamples):
        imagePath = imagePathList[i]
        label = labelList[i]
        if not os.path.exists(imagePath):
            print('%s does not exist' % imagePath)
            continue
        with open(imagePath, 'rb') as f:
            imageBin = f.read()
        if checkValid:
            if not checkImageIsValid(imageBin):
                print('%s is not a valid image' % imagePath)
                continue
        # Keys are 1-based and zero-padded so lexicographic order matches
        # insertion order.
        imageKey = 'image-%09d' % cnt
        labelKey = 'label-%09d' % cnt
        cache[imageKey] = imageBin
        cache[labelKey] = label
        if lexiconList:
            lexiconKey = 'lexicon-%09d' % cnt
            cache[lexiconKey] = ' '.join(lexiconList[i])
        # Flush in batches of 1000 to bound memory usage.
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    nSamples = cnt-1
    cache['num-samples'] = str(nSamples)
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)
if __name__ == '__main__':
    # Build an LMDB dataset from the NIPS synthetic-text annotation file.
    out_path = "/dltraining/datasets/nips/lmdb"
    image_files = []
    labels = []
    lexicon = []
    nips_datasets_path = "/dltraining/datasets/nips"
    if os.path.isfile(os.path.join(nips_datasets_path, "annotation_train.txt")):
        with open(os.path.join(nips_datasets_path, "annotation_train.txt")) as file:
            for line in file:
                # Each annotation line is "<relative path> <extra fields...>".
                lines = line.strip("\n").split(" ")
                image_path = os.path.join(nips_datasets_path, lines[0])
                image_file = os.path.basename(image_path)
                # The ground-truth text is embedded in the file name as
                # <index>_<label>_<...>; take the middle component.
                label = image_file.split("_")[1]
                image_files.append(image_path)
                labels.append(label)
    # if os.path.isfile(os.path.join(nips_datasets_path, "lexicon.txt")):
    #     with open(os.path.join(nips_datasets_path, "lexicon.txt")) as file:
    #         for line in file:
    #             line = line.strip("\n")
    #             lexicon.append(line)
    createDataset(out_path, image_files, labels)
| StarcoderdataPython |
3432018 | <filename>pj_login/login/views.py<gh_stars>0
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from login.models import Bbs
from login.serializers import BbsSerializer
# For requests to the URL ``bbs/``, urls.py dispatches to views.bbs_list.
@api_view(['GET', 'POST'])
def bbs_list(request, format=None):
    """List all Bbs rows (GET) or create a new one (POST)."""
    if request.method == 'GET':
        bbs = Bbs.objects.all()
        serializer = BbsSerializer(bbs, many=True)  # many=True serializes a whole queryset
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = BbsSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # Validation failed: echo the field errors back to the client.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# For requests to the URL ``bbs/<pk>``, urls.py dispatches to views.bbs_detail.
@api_view(['GET', 'PUT', 'DELETE'])
def bbs_detail(request, pk, format=None):
    """Retrieve (GET), update (PUT) or delete (DELETE) a single Bbs row."""
    try:
        bbs = Bbs.objects.get(pk=pk)
    except Bbs.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = BbsSerializer(bbs)
        return Response(serializer.data)
    elif request.method == 'PUT':
        # Full update: the serializer replaces the existing instance's fields.
        serializer = BbsSerializer(bbs, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        bbs.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
350959 | <reponame>mrdakj/service-fabric-cli
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .execution_policy_py3 import ExecutionPolicy
class RunToCompletionExecutionPolicy(ExecutionPolicy):
    """The run to completion execution policy, the service will perform its
    desired operation and complete successfully. If the service encounters
    failure, it will restarted based on restart policy specified. If the
    service completes its operation successfully, it will not be restarted
    again.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Constant filled by server.
    :type type: str
    :param restart: Required. Enumerates the restart policy for
     RunToCompletionExecutionPolicy. Possible values include: 'OnFailure',
     'Never'
    :type restart: str or ~azure.servicefabric.models.RestartPolicy
    """

    # msrest.Model metadata: required-field validation and wire-format keys.
    # NOTE: this file is auto-generated (AutoRest); manual edits may be lost.
    _validation = {
        'type': {'required': True},
        'restart': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'restart': {'key': 'restart', 'type': 'str'},
    }

    def __init__(self, *, restart, **kwargs) -> None:
        super(RunToCompletionExecutionPolicy, self).__init__(**kwargs)
        self.restart = restart
        # Polymorphic discriminator identifying this subtype on the wire.
        self.type = 'RunToCompletion'
| StarcoderdataPython |
8040159 | <reponame>PRIS-CV/BSNet
import numpy as np
import torch
from torch.autograd import Variable
import os
import glob
import h5py
import configs
import backbone
from data.datamgr import SimpleDataManager
from io_utils import parse_args, get_best_file
def save_features(model, data_loader, outfile):
    """Run ``model`` over ``data_loader`` and dump features to an HDF5 file.

    The file contains:
      - ``all_feats``  : (max_count, *feat_shape) float array
      - ``all_labels`` : (max_count,) int array
      - ``count``      : number of rows actually written (the arrays are
        pre-allocated, so trailing rows beyond ``count`` are unused)

    Fix: feature extraction is inference-only, so autograd is disabled with
    ``torch.no_grad()``; the original wrapped inputs in the deprecated
    ``Variable`` and kept the autograd graph alive, wasting memory.
    """
    f = h5py.File(outfile, 'w')
    # Upper bound on rows; the true number is stored under 'count'.
    max_count = len(data_loader) * data_loader.batch_size
    all_labels = f.create_dataset('all_labels', (max_count,), dtype='i')
    all_feats = None
    count = 0
    with torch.no_grad():
        for i, (x, y) in enumerate(data_loader):
            if i % 10 == 0:
                print('{:d}/{:d}'.format(i, len(data_loader)))
            x = x.cuda()
            feats = model(x)
            if all_feats is None:
                # Create the dataset lazily, once the feature shape is known.
                all_feats = f.create_dataset('all_feats', [max_count] + list(feats.size()[1:]), dtype='f')
            all_feats[count:count + feats.size(0)] = feats.data.cpu().numpy()
            all_labels[count:count + feats.size(0)] = y.cpu().numpy()
            count = count + feats.size(0)
    count_var = f.create_dataset('count', (1,), dtype='i')
    count_var[0] = count
    f.close()
if __name__ == '__main__':
    # Extract features for one dataset split with the best saved checkpoint.
    params = parse_args('save_features')
    os.environ["CUDA_VISIBLE_DEVICES"] = str(params.gpu)
    image_size = 84
    split = params.split
    loadfile = configs.data_dir[params.dataset] + split + '.json'
    # Checkpoint directory encodes dataset/model/method and training options.
    checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        checkpoint_dir += '_aug'
    checkpoint_dir += '_%dway_%dshot' %( params.train_n_way, params.n_shot)
    modelfile = get_best_file(checkpoint_dir)
    # Features mirror the checkpoint path under "features/".
    if params.save_iter != -1:
        outfile = os.path.join( checkpoint_dir.replace("checkpoints", "features"), split + "_" + str(params.save_iter)+ ".hdf5")
    else:
        outfile = os.path.join( checkpoint_dir.replace("checkpoints", "features"), split + ".hdf5")
    datamgr = SimpleDataManager(image_size, batch_size = 64)
    data_loader = datamgr.get_data_loader(loadfile, aug = False)
    model = backbone.Conv4NP()
    model = model.cuda()
    tmp = torch.load(modelfile)
    state = tmp['state']
    state_keys = list(state.keys())
    # Keep only backbone weights, stripping the 'feature.' prefix so the
    # checkpoint of the full architecture loads into the bare backbone.
    for i, key in enumerate(state_keys):
        if "feature." in key:
            newkey = key.replace("feature.","") # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
            state[newkey] = state.pop(key)
        else:
            state.pop(key)
    model.load_state_dict(state)
    model.eval()
    dirname = os.path.dirname(outfile)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    save_features(model, data_loader, outfile)
| StarcoderdataPython |
54199 |
import random
square = [[1,2,3],[4,5,6],[7,8,9]]
def get_square():
    """Return a random 3x3 arrangement of the digits 1..9."""
    shuffled = random.sample(range(1, 10), 9)
    return [shuffled[row:row + 3] for row in range(0, 9, 3)]
def valid_line(line):
    """Return True iff ``line`` is a permutation of the digits 1..9."""
    return sorted(line) == list(range(1, 10))
def get_3_lines():
    """Return three rows of length 9, each concatenating the matching rows
    of three freshly generated random 3x3 squares."""
    squares = [get_square() for _ in range(3)]
    return [squares[0][r] + squares[1][r] + squares[2][r] for r in range(3)]
def get_board():
    """Return a 9x9 board built from three stacked bands of rows."""
    return get_3_lines() + get_3_lines() + get_3_lines()
def all_valid_lines(board):
    """Return True iff every row of ``board`` is a permutation of 1..9."""
    return all(valid_line(row) for row in board)
def all_valid_columms(board):
    """Return True iff every column of ``board`` is a permutation of 1..9."""
    transposed = [list(column) for column in zip(*board)]
    return all_valid_lines(transposed)
# Brute-force search: keep drawing random boards until one has all rows AND
# all columns valid, then print the board and the number of attempts.
# NOTE(review): success is rare, so this loop can run for a very long time.
x = 0
counter=0
while x == 0:
    counter+=1
    b = get_board()
    # "== True" is redundant but harmless; 'and' binds looser than '=='.
    if all_valid_lines(b) and all_valid_columms(b) == True:
        print(b)
        print(counter)
        x=1
print("ok")
5080528 | <filename>tf_utils/distributions_test.py<gh_stars>100-1000
import numpy as np
import tensorflow as tf
from distributions import logsumexp, compute_lowerbound, repeat
class DistributionsTestCase(tf.test.test_util.TensorFlowTestCase):
    # Tests for the helpers in distributions.py against NumPy references.
    # NOTE(review): this targets the TF1 session-based test API;
    # ``tf.test.test_util`` is not a public attribute in all TF releases —
    # confirm against the pinned TensorFlow version.
    def test_logsumexp(self):
        # logsumexp over a single-row batch must match the dense computation.
        a = np.arange(10)
        res = np.log(np.sum(np.exp(a)))
        with self.test_session():
            res_tf = logsumexp(a.astype(np.float32).reshape([1, -1])).eval()
            self.assertEqual(res, res_tf)
    def test_lowerbound(self):
        # k=4 particles: bound is -(-log 4 + log sum exp(a - b)).
        a = np.log(np.array([0.3, 0.3, 0.3, 0.3], np.float32).reshape([1, -1]))
        b = np.log(np.array([0.1, 0.5, 0.9, 0.6], np.float32).reshape([1, -1]))
        res = - (- np.log(4) + np.log(np.sum(np.exp(a - b))))
        with self.test_session():
            res_tf = tf.reduce_sum(compute_lowerbound(a, b, 4)).eval()
            self.assertAlmostEqual(res, res_tf, places=4)
    def test_lowerbound2(self):
        # k=1 degenerates to the plain ELBO term sum(b - a).
        a = np.log(np.array([0.3, 0.3, 0.3, 0.3], np.float32).reshape([-1, 1]))
        b = np.log(np.array([0.1, 0.5, 0.9, 0.6], np.float32).reshape([-1, 1]))
        res = (b - a).sum()
        with self.test_session():
            res_tf = tf.reduce_sum(compute_lowerbound(a, b, 1)).eval()
            self.assertAlmostEqual(res, res_tf, places=4)
    def test_repeat(self):
        # repeat() must match np.repeat along axis 0.
        a = np.random.randn(10, 5, 2)
        repeated_a = np.repeat(a, 2, axis=0)
        with self.test_session():
            repeated_a_tf = repeat(a, 2).eval()
            self.assertAllClose(repeated_a, repeated_a_tf)
| StarcoderdataPython |
1786437 |
def _rules_of_length(dic, length):
    """Return {lhs: productions} keeping only productions of ``length`` symbols.

    Left-hand sides with no matching production are dropped entirely.
    """
    out = {}
    for key, productions in dic.items():
        kept = [p for p in productions if len(p) == length]
        if kept:
            out[key] = kept
    return out


def parse(grammar):
    """Parse a grammar description into its components.

    ``grammar`` is an iterable of lines.  The first line lists the start
    symbols (whitespace separated); every following non-empty line is a
    production of the form ``A -> X Y | Z``.

    Returns a 5-tuple:
    (start symbols, variables, terminals,
     productions with a 1-symbol RHS, productions with a 2-symbol RHS).

    Fixes over the original: works on Python 3 (the original concatenated a
    ``filter`` iterator to a list and returned a dict view), and malformed
    lines without exactly one ``->`` are skipped instead of crashing.
    """
    lines = [line.strip() for line in grammar]

    grammar_dict = {}
    # The first line holds all starting symbols.
    starting_symbols = [s for s in lines[0].split(" ") if s]

    for line in lines[1:]:
        if line.strip() == "":
            continue
        rule = [x.strip() for x in line.split("->")]
        if len(rule) != 2:
            # Malformed production: skip it rather than crash later.
            continue
        lhs, rhs = rule
        # Each "|"-separated alternative becomes a list of symbols.
        alternatives = [alt.strip().split(' ') for alt in rhs.split(" | ") if alt]
        if lhs not in grammar_dict:
            grammar_dict[lhs] = []
        grammar_dict[lhs].extend(alternatives)

    # NOTE(review): the original labels look swapped relative to CNF
    # (``A -> B C`` has a 2-symbol RHS); the positional behaviour of the
    # original is preserved here: slot 4 holds 1-symbol RHS productions,
    # slot 5 holds 2-symbol RHS productions.
    grammar_rules_with_variables = _rules_of_length(grammar_dict, 1)
    grammar_rules_with_terminals = _rules_of_length(grammar_dict, 2)

    # Terminals: symbols appearing on some right-hand side but never on a
    # left-hand side.
    terminals = []
    for productions in grammar_dict.values():
        for production in productions:
            for symbol in production:
                if symbol not in grammar_dict and symbol not in terminals:
                    terminals.append(symbol)

    return (starting_symbols, list(grammar_dict), terminals,
            grammar_rules_with_variables, grammar_rules_with_terminals)
# Search into a dictionary:
# keeps only the right-hand sides of the given length,
# i.e. rules in the form of A -> B C (length 2) or A -> a (length 1).
def dict_traversal(dic, length):
    """Filter a grammar dict, keeping only RHS lists of ``length`` symbols.

    Keys with no matching production are dropped entirely.

    Fix: the original used ``dict.iteritems()``, which only exists on
    Python 2; ``items()`` behaves identically here on both versions.
    """
    rules = {}
    for key, values in dic.items():
        kept = [value for value in values if len(value) == length]
        if kept:
            rules[key] = kept
    return rules
| StarcoderdataPython |
6424187 | <reponame>Seniatical/Hashables
from .key import DictKeySet
from .value import DictValueSet
from .item import DictItemSet | StarcoderdataPython |
3358825 | r"""
Labelled permutations
A labelled (generalized) permutation is better suited to study the
dynamic of a translation surface than a reduced one (see the module
:mod:`sage.dynamics.interval_exchanges.reduced`). The latter is more
adapted to the study of strata. This kind of permutation was
introduced by Yoccoz [Yoc05]_ (see also [MMY03]_).
In fact, there is a geometric counterpart of labelled
permutations. They correspond to translation surfaces with marked
outgoing separatrices (i.e. we fix a label for each of them).
Remark that the Rauzy diagram of a reduced object is significantly
smaller than the one of the corresponding labelled object (for the
permutation a b d b e / e d c a c the labelled Rauzy diagram contains
8760 permutations, while the reduced one contains only 73). But, as one
sees geometrically, the labelled Rauzy diagram is a covering of the
reduced Rauzy diagram.
AUTHORS:
- <NAME> (2009-09-29) : initial version
TESTS::
sage: from sage.dynamics.interval_exchanges.labelled import LabelledPermutationIET
sage: LabelledPermutationIET([['a', 'b', 'c'], ['c', 'b', 'a']])
a b c
c b a
sage: LabelledPermutationIET([[1,2,3,4],[4,1,2,3]])
1 2 3 4
4 1 2 3
sage: from sage.dynamics.interval_exchanges.labelled import LabelledPermutationLI
sage: LabelledPermutationLI([[1,1],[2,2,3,3,4,4]])
1 1
2 2 3 3 4 4
sage: LabelledPermutationLI([['a','a','b','b','c','c'],['d','d']])
a a b b c c
d d
sage: from sage.dynamics.interval_exchanges.labelled import FlippedLabelledPermutationIET
sage: FlippedLabelledPermutationIET([[1,2,3],[3,2,1]],flips=[1,2])
-1 -2 3
3 -2 -1
sage: FlippedLabelledPermutationIET([['a','b','c'],['b','c','a']],flips='b')
a -b c
-b c a
sage: from sage.dynamics.interval_exchanges.labelled import FlippedLabelledPermutationLI
sage: FlippedLabelledPermutationLI([[1,1],[2,2,3,3,4,4]], flips=[1,4])
-1 -1
2 2 3 3 -4 -4
sage: FlippedLabelledPermutationLI([['a','a','b','b'],['c','c']],flips='ac')
-a -a b b
-c -c
sage: from sage.dynamics.interval_exchanges.labelled import LabelledRauzyDiagram
sage: p = LabelledPermutationIET([[1,2,3],[3,2,1]])
sage: d1 = LabelledRauzyDiagram(p)
sage: p = LabelledPermutationIET([['a','b'],['b','a']])
sage: d = p.rauzy_diagram()
sage: g1 = d.path(p, 'top', 'bottom')
sage: g1.matrix()
[1 1]
[1 2]
sage: g2 = d.path(p, 'bottom', 'top')
sage: g2.matrix()
[2 1]
[1 1]
sage: p = LabelledPermutationIET([['a','b','c','d'],['d','c','b','a']])
sage: d = p.rauzy_diagram()
sage: g = d.path(p, 't', 't', 'b', 't', 'b', 'b', 't', 'b')
sage: g
Path of length 8 in a Rauzy diagram
sage: g.is_loop()
True
sage: g.is_full()
True
sage: s1 = g.orbit_substitution()
sage: s1
WordMorphism: a->adbd, b->adbdbd, c->adccd, d->adcd
sage: s2 = g.interval_substitution()
sage: s2
WordMorphism: a->abcd, b->bab, c->cdc, d->dcbababcd
sage: s1.incidence_matrix() == s2.incidence_matrix().transpose()
True
REFERENCES:
.. [Yoc05] <NAME> "Echange d'Intervalles", Cours au college de
France
.. [MMY03] <NAME>, <NAME> and <NAME> "On the
cohomological equation for interval exchange maps", :arxiv:`math/0304469v1`
"""
#*****************************************************************************
# Copyright (C) 2008 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from __future__ import absolute_import
from sage.structure.sage_object import SageObject
from sage.misc.lazy_attribute import lazy_attribute
from copy import copy
from sage.combinat.words.alphabet import Alphabet
from sage.combinat.words.morphism import WordMorphism
from sage.matrix.constructor import identity_matrix
from sage.rings.integer import Integer
from .template import PermutationIET, PermutationLI
from .template import FlippedPermutationIET, FlippedPermutationLI
from .template import twin_list_iet, twin_list_li
from .template import RauzyDiagram, FlippedRauzyDiagram
from .template import interval_conversion, side_conversion
class LabelledPermutation(SageObject):
r"""
General template for labelled objects.
.. warning::
Internal class! Do not use directly!
"""
def __init__(self, intervals=None, alphabet=None):
r"""
TESTS::
sage: from sage.dynamics.interval_exchanges.labelled import LabelledPermutationIET
sage: p1 = LabelledPermutationIET([[1,2,3],[3,2,1]])
sage: p1 == loads(dumps(p1))
True
sage: p2 = LabelledPermutationIET([['a', 'b', 'c'], ['c', 'b', 'a']])
sage: p2 == loads(dumps(p2))
True
sage: p3 = LabelledPermutationIET([['1','2','3'],['3','2','1']])
sage: p3 == loads(dumps(p3))
True
sage: from sage.dynamics.interval_exchanges.labelled import LabelledPermutationLI
sage: p1 = LabelledPermutationLI([[1,2,2],[3,3,1]])
sage: p1 == loads(dumps(p1))
True
sage: p2 = LabelledPermutationLI([['a','b','b'],['c','c','a']])
sage: p2 == loads(dumps(p2))
True
sage: p3 = LabelledPermutationLI([['1','2','2'],['3','3','1']])
sage: p3 == loads(dumps(p3))
True
"""
self._hash = None
if intervals is None:
self._intervals = [[],[]]
self._alphabet = None
else:
if alphabet is not None:
alphabet = Alphabet(alphabet)
if alphabet.cardinality() < len(intervals[0]) :
raise ValueError("the alphabet is too short")
self._alphabet = alphabet
else:
self._init_alphabet(intervals)
self._intervals = [
[self._alphabet.rank(_) for _ in intervals[0]],
[self._alphabet.rank(_) for _ in intervals[1]]]
def __copy__(self):
r"""
Returns a copy of self.
TESTS::
sage: p = iet.Permutation('a b c','c b a')
sage: q = copy(p)
sage: p == q
True
sage: p is q
False
sage: p._inversed()
sage: p == q
False
sage: p._inversed()
sage: p == q
True
sage: p._reversed()
sage: p == q
False
sage: q._reversed()
sage: p == q
True
"""
result = self.__class__()
result._intervals = [
copy(self._intervals[0]),
copy(self._intervals[1])]
result._alphabet = self._alphabet
result._repr_type = self._repr_type
result._repr_options = self._repr_options
return result
def __len__(self):
r"""
TESTS::
sage: len(iet.Permutation('',''))
0
sage: len(iet.Permutation('a','a'))
1
sage: len(iet.Permutation('1 2 3 4 5 6','1 2 3 4 5 6'))
6
"""
return (len(self._intervals[0]) + len(self._intervals[1])) / 2
def length_top(self):
r"""
Returns the number of intervals in the top segment.
OUTPUT:
integer -- number of intervals
EXAMPLES::
sage: iet.Permutation('a b c','c b a').length_top()
3
sage: iet.GeneralizedPermutation('a a','b b c c').length_top()
2
sage: iet.GeneralizedPermutation('a a b b','c c').length_top()
4
"""
return len(self._intervals[0])
def length_bottom(self):
r"""
Returns the number of intervals in the bottom segment.
OUTPUT:
integer -- number of intervals
EXAMPLES::
sage: iet.Permutation('a b','b a').length_bottom()
2
sage: iet.GeneralizedPermutation('a a','b b c c').length_bottom()
4
sage: iet.GeneralizedPermutation('a a b b','c c').length_bottom()
2
"""
return len(self._intervals[1])
def length(self, interval=None):
r"""
Returns a 2-uple of lengths.
p.length() is identical to (p.length_top(), p.length_bottom())
If an interval is specified, it returns the length of the specified
interval.
INPUT:
- ``interval`` - ``None``, 'top' or 'bottom'
OUTPUT:
tuple -- a 2-uple of integers
EXAMPLES::
sage: iet.Permutation('a b c','c b a').length()
(3, 3)
sage: iet.GeneralizedPermutation('a a','b b c c').length()
(2, 4)
sage: iet.GeneralizedPermutation('a a b b','c c').length()
(4, 2)
"""
if interval is None:
return len(self._intervals[0]),len(self._intervals[1])
else:
interval = interval_conversion(interval)
return len(self._intervals[interval])
def __getitem__(self,i):
r"""
TESTS::
sage: p = iet.Permutation([0,1,2,3],[3,2,1,0])
sage: p[0][0]
0
sage: p[1][2]
1
sage: p = iet.Permutation('a b c','c b a')
sage: p[0][1]
'b'
sage: p[1][2]
'a'
"""
return [self._alphabet.unrank(_) for _ in self._intervals[i]]
def __hash__(self):
r"""
ALGORITHM:
Uses the hash of string
TESTS::
sage: from sage.dynamics.interval_exchanges.labelled import *
sage: p1 = LabelledPermutationIET([[1,2],[1,2]])
sage: p2 = LabelledPermutationIET([[1,2],[2,1]])
sage: p3 = LabelledPermutationLI([[1,1],[2,2]])
sage: hash(p1) == hash(p2)
False
sage: hash(p1) == hash(p3)
False
sage: hash(p2) == hash(p3)
False
sage: p1 = LabelledPermutationLI([[1,1], [2,2,3,3]])
sage: p2 = LabelledPermutationLI([[1,1,2], [2,3,3]])
sage: p3 = LabelledPermutationLI([[1,1,2,2], [3,3]])
sage: hash(p1) == hash(p2)
False
sage: hash(p1) == hash(p3)
False
sage: hash(p2) == hash(p3)
False
"""
if self._hash is None:
t = []
t.extend([str(i) for i in self._intervals[0]])
t.extend([str(-(i+1)) for i in self._intervals[1]])
self._hash = hash(''.join(t))
return self._hash
def _reversed(self):
r"""
.. TODO::
resolve properly the mutablility problem with the
:meth:`_twin` attribute.
TESTS::
sage: p = iet.Permutation([1,2,3],[3,1,2])
sage: p
1 2 3
3 1 2
sage: p._reversed()
sage: p
3 2 1
2 1 3
"""
if '_twin' in self.__dict__:
del self.__dict__['_twin']
if self._hash is not None:
self._hash = None
self._intervals[0].reverse()
self._intervals[1].reverse()
def _inversed(self):
r"""
.. TODO::
properly resolve the mutability problem of the twin
TESTS::
sage: p = iet.Permutation([1,2,3],[3,1,2])
sage: p
1 2 3
3 1 2
sage: p._inversed()
sage: p
3 1 2
1 2 3
"""
if '_twin' in self.__dict__:
del self.__dict__['_twin']
if self._hash is not None:
self._hash = None
self._intervals = (self._intervals[1],self._intervals[0])
def list(self):
r"""
Returns a list of two lists corresponding to the intervals.
OUTPUT:
list -- two lists of labels
EXAMPLES:
The list of an permutation from iet::
sage: p1 = iet.Permutation('1 2 3', '3 1 2')
sage: p1.list()
[['1', '2', '3'], ['3', '1', '2']]
sage: p1.alphabet("abc")
sage: p1.list()
[['a', 'b', 'c'], ['c', 'a', 'b']]
Recovering the permutation from this list (and the alphabet)::
sage: q1 = iet.Permutation(p1.list(),alphabet=p1.alphabet())
sage: p1 == q1
True
The list of a quadratic permutation::
sage: p2 = iet.GeneralizedPermutation('g o o', 'd d g')
sage: p2.list()
[['g', 'o', 'o'], ['d', 'd', 'g']]
Recovering the permutation::
sage: q2 = iet.GeneralizedPermutation(p2.list(),alphabet=p2.alphabet())
sage: p2 == q2
True
"""
a0 = [self._alphabet.unrank(_) for _ in self._intervals[0]]
a1 = [self._alphabet.unrank(_) for _ in self._intervals[1]]
return [a0, a1]
def erase_letter(self, letter):
r"""
Return the permutation with the specified letter removed.
OUTPUT:
permutation -- the resulting permutation
EXAMPLES:
::
sage: p = iet.Permutation('a b c d','c d b a')
sage: p.erase_letter('a')
b c d
c d b
sage: p.erase_letter('b')
a c d
c d a
sage: p.erase_letter('c')
a b d
d b a
sage: p.erase_letter('d')
a b c
c b a
::
sage: p = iet.GeneralizedPermutation('a b b','c c a')
sage: p.erase_letter('a')
b b
c c
Beware, there is no validity check for permutation from linear
involutions::
sage: p = iet.GeneralizedPermutation('a b b','c c a')
sage: p.erase_letter('b')
a
c c a
"""
l = [[], []]
letters = self.letters()
a = letters.index(letter)
for i in (0, 1):
for b in self._intervals[i]:
if b < a:
l[i].append(b)
elif b > a:
l[i].append(b-1)
res = copy(self)
res._intervals = l
res.alphabet(letters[0:a] + letters[a+1:])
return res
def rauzy_move_matrix(self, winner=None, side='right'):
r"""
Returns the Rauzy move matrix.
This matrix corresponds to the action of a Rauzy move on the
vector of lengths. By convention (to get a positive matrix),
the matrix is defined as the inverse transformation on the
length vector.
OUTPUT:
matrix -- a square matrix of positive integers
EXAMPLES:
::
sage: p = iet.Permutation('a b','b a')
sage: p.rauzy_move_matrix('t')
[1 0]
[1 1]
sage: p.rauzy_move_matrix('b')
[1 1]
[0 1]
::
sage: p = iet.Permutation('a b c d','b d a c')
sage: q = p.left_right_inverse()
sage: m0 = p.rauzy_move_matrix(winner='top',side='right')
sage: n0 = q.rauzy_move_matrix(winner='top',side='left')
sage: m0 == n0
True
sage: m1 = p.rauzy_move_matrix(winner='bottom',side='right')
sage: n1 = q.rauzy_move_matrix(winner='bottom',side='left')
sage: m1 == n1
True
"""
if winner is None and side is None:
return identity_matrix(len(self))
winner = interval_conversion(winner)
side = side_conversion(side)
winner_letter = self._intervals[winner][side]
loser_letter = self._intervals[1-winner][side]
m = copy(identity_matrix(len(self)))
m[winner_letter, loser_letter] = 1
return m
def rauzy_move_winner(self, winner=None, side=None):
r"""
Returns the winner of a Rauzy move.
INPUT:
- ``winner`` - either 'top' or 'bottom' ('t' or 'b' for short)
- ``side`` - either 'left' or 'right' ('l' or 'r' for short)
OUTPUT:
-- a label
EXAMPLES:
::
sage: p = iet.Permutation('a b c d','b d a c')
sage: p.rauzy_move_winner('top','right')
'd'
sage: p.rauzy_move_winner('bottom','right')
'c'
sage: p.rauzy_move_winner('top','left')
'a'
sage: p.rauzy_move_winner('bottom','left')
'b'
::
sage: p = iet.GeneralizedPermutation('a b b c','d c a e d e')
sage: p.rauzy_move_winner('top','right')
'c'
sage: p.rauzy_move_winner('bottom','right')
'e'
sage: p.rauzy_move_winner('top','left')
'a'
sage: p.rauzy_move_winner('bottom','left')
'd'
"""
if winner is None and side is None:
return None
winner = interval_conversion(winner)
side = side_conversion(side)
return self[winner][side]
def rauzy_move_loser(self,winner=None,side=None):
r"""
Returns the loser of a Rauzy move
INPUT:
- ``winner`` - either 'top' or 'bottom' ('t' or 'b' for short)
- ``side`` - either 'left' or 'right' ('l' or 'r' for short)
OUTPUT:
-- a label
EXAMPLES::
sage: p = iet.Permutation('a b c d','b d a c')
sage: p.rauzy_move_loser('top','right')
'c'
sage: p.rauzy_move_loser('bottom','right')
'd'
sage: p.rauzy_move_loser('top','left')
'b'
sage: p.rauzy_move_loser('bottom','left')
'a'
"""
if winner is None and side is None:
return None
winner = interval_conversion(winner)
side = side_conversion(side)
return self[1-winner][side]
def LabelledPermutationsIET_iterator(nintervals=None,
                                     irreducible=True,
                                     alphabet=None):
    r"""
    Return an iterator over labelled permutations on ``nintervals`` letters.

    INPUT:

    - ``nintervals`` - integer or ``None``

    - ``irreducible`` - boolean (default: ``True``) - restrict the
      iteration to irreducible permutations

    - ``alphabet`` - anything convertible to an alphabet with at least
      ``nintervals`` letters

    OUTPUT:

    iterator -- an iterator over permutations
    """
    from builtins import map
    from itertools import product
    from six.moves import filter
    from sage.combinat.permutation import Permutations

    if irreducible:
        # Filter the unrestricted iteration.
        return filter(
            lambda p: p.is_irreducible(),
            LabelledPermutationsIET_iterator(nintervals, False, alphabet))

    if nintervals is None:
        raise ValueError("choose a number of intervals")
    nintervals = Integer(nintervals)
    if not (nintervals > 0):
        raise ValueError("nintervals must be positive")

    alphabet = Alphabet(alphabet)
    # Every permutation of 1..n written with the alphabet's letters.
    rows = [[alphabet.unrank(k - 1) for k in sigma]
            for sigma in Permutations(nintervals)]
    build = lambda pair: LabelledPermutationIET(
        [list(pair[0]), list(pair[1])], alphabet=alphabet)
    return map(build, product(rows, rows))
class LabelledPermutationIET(LabelledPermutation, PermutationIET):
"""
Labelled permutation for iet
EXAMPLES:
Reducibility testing::
sage: p = iet.Permutation('a b c', 'c b a')
sage: p.is_irreducible()
True
sage: q = iet.Permutation('a b c d', 'b a d c')
sage: q.is_irreducible()
False
Rauzy movability and Rauzy move::
sage: p = iet.Permutation('a b c', 'c b a')
sage: p.has_rauzy_move('top')
True
sage: p.rauzy_move('bottom')
a c b
c b a
sage: p.has_rauzy_move('top')
True
sage: p.rauzy_move('top')
a b c
c a b
Rauzy diagram::
sage: p = iet.Permutation('a b c', 'c b a')
sage: d = p.rauzy_diagram()
sage: p in d
True
"""
    def __cmp__(self, other):
        r"""
        Three-way comparison of two labelled permutations.

        ALGORITHM:

        The order is lexicographic on intervals[0] + intervals[1]

        TESTS::

            sage: list_of_p2 = []
            sage: p0 = iet.Permutation('1 2', '1 2')
            sage: p1 = iet.Permutation('1 2', '2 1')
            sage: p0 != p0
            False
            sage: (p0 == p0) and (p0 < p1)
            True
            sage: (p1 > p0) and (p1 == p1)
            True
        """
        # NOTE(review): __cmp__ is only honoured by Python 2; porting to
        # Python 3 would require rich comparison methods instead.
        if type(self) is not type(other):
            return -1
        n = len(self)
        # Shorter permutations compare smaller.
        if n != len(other):
            return n - len(other)
        # Scan the top row, then the bottom row, for the first difference;
        # the while loop returns 0 from inside once both rows are exhausted.
        i, j = 0, 0
        while (self._intervals[i][j] == other._intervals[i][j]):
            j += 1
            if j == n:
                if i == 1: return 0
                i = 1
                j = 0
        return self._intervals[i][j] - other._intervals[i][j]
@lazy_attribute
def _twin(self):
r"""
The twin relations of the permutation.
TESTS::
sage: p = iet.Permutation('a b','a b')
sage: p._twin
[[0, 1], [0, 1]]
sage: p = iet.Permutation('a b','b a')
sage: p._twin
[[1, 0], [1, 0]]
"""
return twin_list_iet(self._intervals)
def reduced(self):
r"""
Returns the associated reduced abelian permutation.
OUTPUT:
a reduced permutation -- the underlying reduced permutation
EXAMPLES::
sage: p = iet.Permutation("a b c d","d c a b")
sage: q = iet.Permutation("a b c d","d c a b",reduced=True)
sage: p.reduced() == q
True
"""
from .reduced import ReducedPermutationIET
return ReducedPermutationIET(self.list(), alphabet=self._alphabet)
def is_identity(self):
r"""
Returns True if self is the identity.
OUTPUT:
bool -- True if self corresponds to the identity
EXAMPLES::
sage: iet.Permutation("a b","a b").is_identity()
True
sage: iet.Permutation("a b","b a").is_identity()
False
"""
for i in range(len(self)):
if self._intervals[0][i] != self._intervals[1][i]:
return False
return True
def has_rauzy_move(self, winner=None, side=None):
r"""
Returns ``True`` if you can perform a Rauzy move.
INPUT:
- ``winner`` - the winner interval ('top' or 'bottom')
- ``side`` - (default: 'right') the side ('left' or 'right')
OUTPUT:
bool -- ``True`` if self has a Rauzy move
EXAMPLES:
::
sage: p = iet.Permutation('a b','b a')
sage: p.has_rauzy_move()
True
::
sage: p = iet.Permutation('a b c','b a c')
sage: p.has_rauzy_move()
False
"""
if side is None:
side = -1
else:
side = side_conversion(side)
if not winner is None:
winner = interval_conversion(winner)
return self._intervals[0][side] != self._intervals[1][side]
def rauzy_move(self, winner=None, side=None, iteration=1):
r"""
Returns the Rauzy move.
INPUT:
- ``winner`` - the winner interval ('top' or 'bottom')
- ``side`` - (default: 'right') the side ('left' or 'right')
OUTPUT:
permutation -- the Rauzy move of the permutation
EXAMPLES:
::
sage: p = iet.Permutation('a b','b a')
sage: p.rauzy_move('t','right')
a b
b a
sage: p.rauzy_move('b','right')
a b
b a
::
sage: p = iet.Permutation('a b c','c b a')
sage: p.rauzy_move('t','right')
a b c
c a b
sage: p.rauzy_move('b','right')
a c b
c b a
::
sage: p = iet.Permutation('a b','b a')
sage: p.rauzy_move('t','left')
a b
b a
sage: p.rauzy_move('b','left')
a b
b a
::
sage: p = iet.Permutation('a b c','c b a')
sage: p.rauzy_move('t','left')
a b c
b c a
sage: p.rauzy_move('b','left')
b a c
c b a
"""
side = side_conversion(side)
winner = interval_conversion(winner)
result = copy(self)
for i in range(iteration):
winner_letter = result._intervals[winner][side]
loser_letter = result._intervals[1-winner].pop(side)
loser_to = result._intervals[1-winner].index(winner_letter) - side
result._intervals[1-winner].insert(loser_to, loser_letter)
return result
def rauzy_move_interval_substitution(self,winner=None,side=None):
r"""
Returns the interval substitution associated.
INPUT:
- ``winner`` - the winner interval ('top' or 'bottom')
- ``side`` - (default: 'right') the side ('left' or 'right')
OUTPUT:
WordMorphism -- a substitution on the alphabet of the permutation
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: p.rauzy_move_interval_substitution('top','right')
WordMorphism: a->a, b->ba
sage: p.rauzy_move_interval_substitution('bottom','right')
WordMorphism: a->ab, b->b
sage: p.rauzy_move_interval_substitution('top','left')
WordMorphism: a->ba, b->b
sage: p.rauzy_move_interval_substitution('bottom','left')
WordMorphism: a->a, b->ab
"""
d = dict([(letter,letter) for letter in self.letters()])
if winner is None and side is None:
return WordMorphism(d)
winner = interval_conversion(winner)
side = side_conversion(side)
winner_letter = self.rauzy_move_winner(winner,side)
loser_letter = self.rauzy_move_loser(winner,side)
if side == 0:
d[winner_letter] = [loser_letter,winner_letter]
else:
d[winner_letter] = [winner_letter,loser_letter]
return WordMorphism(d)
def rauzy_move_orbit_substitution(self,winner=None,side=None):
r"""
Return the action of the rauzy_move on the orbit.
INPUT:
- ``i`` - integer
- ``winner`` - the winner interval ('top' or 'bottom')
- ``side`` - (default: 'right') the side ('right' or 'left')
OUTPUT:
WordMorphism -- a substitution on the alphabet of self
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: p.rauzy_move_orbit_substitution('top','right')
WordMorphism: a->ab, b->b
sage: p.rauzy_move_orbit_substitution('bottom','right')
WordMorphism: a->a, b->ab
sage: p.rauzy_move_orbit_substitution('top','left')
WordMorphism: a->a, b->ba
sage: p.rauzy_move_orbit_substitution('bottom','left')
WordMorphism: a->ba, b->b
"""
d = dict([(letter,letter) for letter in self.letters()])
if winner is None and side is None:
return WordMorphism(d)
winner = interval_conversion(winner)
side = side_conversion(side)
loser_letter = self.rauzy_move_loser(winner,side)
top_letter = self.alphabet().unrank(self._intervals[0][side])
bottom_letter = self.alphabet().unrank(self._intervals[1][side])
d[loser_letter] = [bottom_letter,top_letter]
return WordMorphism(d)
def rauzy_diagram(self, **args):
"""
Returns the associated Rauzy diagram.
For more information try help(iet.RauzyDiagram).
OUTPUT:
Rauzy diagram -- the Rauzy diagram of the permutation
EXAMPLES::
sage: p = iet.Permutation('a b c', 'c b a')
sage: d = p.rauzy_diagram()
"""
return LabelledRauzyDiagram(self, **args)
class LabelledPermutationLI(LabelledPermutation, PermutationLI):
    r"""
    Labelled quadratic (or generalized) permutation

    EXAMPLES:

    Reducibility testing::

        sage: p = iet.GeneralizedPermutation('a b b', 'c c a')
        sage: p.is_irreducible()
        True

    Reducibility testing with associated decomposition::

        sage: p = iet.GeneralizedPermutation('a b c a', 'b d d c')
        sage: p.is_irreducible()
        False
        sage: test, decomposition = p.is_irreducible(return_decomposition = True)
        sage: test
        False
        sage: decomposition
        (['a'], ['c', 'a'], [], ['c'])

    Rauzy movability and Rauzy move::

        sage: p = iet.GeneralizedPermutation('a a b b c c', 'd d')
        sage: p.has_rauzy_move(0)
        False
        sage: p.has_rauzy_move(1)
        True
        sage: q = p.rauzy_move(1)
        sage: q
        a a b b c
        c d d
        sage: q.has_rauzy_move(0)
        True
        sage: q.has_rauzy_move(1)
        True

    Rauzy diagrams::

        sage: p = iet.GeneralizedPermutation('0 0 1 1','2 2')
        sage: r = p.rauzy_diagram()
        sage: p in r
        True
    """
    def __cmp__(self, other):
        r"""
        Comparison of two labelled generalized permutations (Python 2 style).

        ALGORITHM:

        Order is lexicographic on length of intervals and on intervals.

        TESTS::

            sage: p0 = iet.GeneralizedPermutation('0 0','1 1 2 2')
            sage: p1 = iet.GeneralizedPermutation('0 0','1 2 1 2')
            sage: p2 = iet.GeneralizedPermutation('0 0','1 2 2 1')
            sage: p3 = iet.GeneralizedPermutation('0 0 1','1 2 2')
            sage: p4 = iet.GeneralizedPermutation('0 0 1 1','2 2')
            sage: p5 = iet.GeneralizedPermutation('0 1 0 1','2 2')
            sage: p6 = iet.GeneralizedPermutation('0 1 1 0','2 2')
            sage: p0 == p0 and p0 < p1 and p0 < p2 and p0 < p3 and p0 < p4
            True
            sage: p0 < p5 and p0 < p6 and p1 < p2 and p1 < p3 and p1 < p4
            True
            sage: p1 < p5 and p1 < p6 and p2 < p3 and p2 < p4 and p2 < p5
            True
            sage: p2 < p6 and p3 < p4 and p3 < p5 and p3 < p6 and p4 < p5
            True
            sage: p4 < p6 and p5 < p6 and p0 == p0 and p1 == p1 and p2 == p2
            True
            sage: p3 == p3 and p4 == p4 and p5 == p5 and p6 == p6
            True
        """
        # Different concrete types compare as smaller (arbitrary choice).
        if type(self) is not type(other):
            return -1
        # First compare total number of intervals...
        n = len(self)
        if n != len(other): return n - len(other)
        # ...then length of the top row...
        l0 = self._intervals[0]
        l1 = other._intervals[0]
        n = len(self._intervals[0])
        if n != len(other._intervals[0]): return n - len(other._intervals[0])
        # ...then the top row lexicographically...
        i = 0
        while (i < n) and (l0[i] == l1[i]):
            i += 1
        if i != n:
            return l0[i] - l1[i]
        # ...and finally the bottom row lexicographically.
        l0 = self._intervals[1]
        l1 = other._intervals[1]
        n = len(self._intervals[1])
        i = 0
        while (i < n) and (l0[i] == l1[i]):
            i += 1
        if i != n:
            return l0[i] - l1[i]
        return 0
    def has_right_rauzy_move(self, winner):
        r"""
        Test of Rauzy movability with a specified winner

        A quadratic (or generalized) permutation is rauzy_movable type
        depending on the possible length of the last interval. It is
        dependent of the length equation.

        INPUT:

        - ``winner`` - 'top' (or 't' or 0) or 'bottom' (or 'b' or 1)

        OUTPUT:

        bool -- ``True`` if self has a Rauzy move

        EXAMPLES:

        ::

            sage: p = iet.GeneralizedPermutation('a a','b b')
            sage: p.has_right_rauzy_move('top')
            False
            sage: p.has_right_rauzy_move('bottom')
            False

        ::

            sage: p = iet.GeneralizedPermutation('a a b','b c c')
            sage: p.has_right_rauzy_move('top')
            True
            sage: p.has_right_rauzy_move('bottom')
            True

        ::

            sage: p = iet.GeneralizedPermutation('a a','b b c c')
            sage: p.has_right_rauzy_move('top')
            True
            sage: p.has_right_rauzy_move('bottom')
            False

        ::

            sage: p = iet.GeneralizedPermutation('a a b b','c c')
            sage: p.has_right_rauzy_move('top')
            False
            sage: p.has_right_rauzy_move('bottom')
            True
        """
        winner = interval_conversion(winner)
        loser = self._intervals[1-winner][-1]
        # the same letter at the right-end (False)
        if self._intervals[0][-1] == self._intervals[1][-1] :
            return False
        # the winner (or loser) letter is repeated on the other interval (True)
        if self._intervals[0][-1] in self._intervals[1]: return True
        if self._intervals[1][-1] in self._intervals[0]: return True
        # the loser letters is the only letter repeated in the loser
        # interval (False)
        for i,c in enumerate((self._intervals[1-winner])):
            if c != loser and c in self._intervals[1-winner][i+1:]:
                return True
        return False
    def right_rauzy_move(self, winner):
        r"""
        Perform a Rauzy move on the right (the standard one).

        INPUT:

        - ``winner`` - 'top' (or 't' or 0) or 'bottom' (or 'b' or 1)

        OUTPUT:

        boolean -- ``True`` if self has a Rauzy move

        EXAMPLES:

        ::

            sage: p = iet.GeneralizedPermutation('a a b','b c c')
            sage: p.right_rauzy_move(0)
            a a b
            b c c
            sage: p.right_rauzy_move(1)
            a a
            b b c c

        ::

            sage: p = iet.GeneralizedPermutation('a b b','c c a')
            sage: p.right_rauzy_move(0)
            a a b b
            c c
            sage: p.right_rauzy_move(1)
            a b b
            c c a

        TESTS::

            sage: p = iet.GeneralizedPermutation('a a b','b c c')
            sage: q = p.top_bottom_inverse()
            sage: q = q.right_rauzy_move(0)
            sage: q = q.top_bottom_inverse()
            sage: q == p.right_rauzy_move(1)
            True
            sage: q = p.top_bottom_inverse()
            sage: q = q.right_rauzy_move(1)
            sage: q = q.top_bottom_inverse()
            sage: q == p.right_rauzy_move(0)
            True
            sage: p = p.left_right_inverse()
            sage: q = q.left_rauzy_move(0)
            sage: q = q.left_right_inverse()
            sage: q == p.right_rauzy_move(0)
            True
            sage: q = p.left_right_inverse()
            sage: q = q.left_rauzy_move(1)
            sage: q = q.left_right_inverse()
            sage: q == p.right_rauzy_move(1)
            True
        """
        result = copy(self)
        winner_letter = result._intervals[winner][-1]
        loser_letter = result._intervals[1-winner].pop(-1)
        # The winner letter's other occurrence may be on either row:
        # insert the loser next to that occurrence.
        if winner_letter in result._intervals[winner][:-1]:
            loser_to = result._intervals[winner].index(winner_letter)
            result._intervals[winner].insert(loser_to, loser_letter)
        else:
            loser_to = result._intervals[1-winner].index(winner_letter) + 1
            result._intervals[1-winner].insert(loser_to, loser_letter)
        return result
    def left_rauzy_move(self, winner):
        r"""
        Perform a Rauzy move on the left.

        INPUT:

        - ``winner`` - 'top' or 'bottom'

        OUTPUT:

        permutation -- the Rauzy move of self

        EXAMPLES:

        ::

            sage: p = iet.GeneralizedPermutation('a a b','b c c')
            sage: p.left_rauzy_move(0)
            a a b b
            c c
            sage: p.left_rauzy_move(1)
            a a b
            b c c

        ::

            sage: p = iet.GeneralizedPermutation('a b b','c c a')
            sage: p.left_rauzy_move(0)
            a b b
            c c a
            sage: p.left_rauzy_move(1)
            b b
            c c a a

        TESTS::

            sage: p = iet.GeneralizedPermutation('a a b','b c c')
            sage: q = p.top_bottom_inverse()
            sage: q = q.left_rauzy_move(0)
            sage: q = q.top_bottom_inverse()
            sage: q == p.left_rauzy_move(1)
            True
            sage: q = p.top_bottom_inverse()
            sage: q = q.left_rauzy_move(1)
            sage: q = q.top_bottom_inverse()
            sage: q == p.left_rauzy_move(0)
            True
            sage: q = p.left_right_inverse()
            sage: q = q.right_rauzy_move(0)
            sage: q = q.left_right_inverse()
            sage: q == p.left_rauzy_move(0)
            True
            sage: q = p.left_right_inverse()
            sage: q = q.right_rauzy_move(1)
            sage: q = q.left_right_inverse()
            sage: q == p.left_rauzy_move(1)
            True
        """
        result = copy(self)
        winner_letter = result._intervals[winner][0]
        loser_letter = result._intervals[1-winner].pop(0)
        # Mirror image of right_rauzy_move: insert the loser just after
        # the winner letter's other occurrence.
        if winner_letter in result._intervals[winner][1:]:
            loser_to = result._intervals[winner][1:].index(winner_letter)+2
            result._intervals[winner].insert(loser_to, loser_letter)
        else:
            loser_to = result._intervals[1-winner].index(winner_letter)
            result._intervals[1-winner].insert(loser_to, loser_letter)
        return result
    def reduced(self):
        r"""
        Returns the associated reduced quadratic permutations.

        OUTPUT:

        permutation -- the underlying reduced permutation

        EXAMPLES::

            sage: p = iet.GeneralizedPermutation('a a','b b c c')
            sage: q = p.reduced()
            sage: q
            a a
            b b c c
            sage: p.rauzy_move(0).reduced() == q.rauzy_move(0)
            True
        """
        from .reduced import ReducedPermutationLI
        return ReducedPermutationLI(self.list(),alphabet=self._alphabet)
    def rauzy_diagram(self, **kargs):
        r"""
        Returns the associated RauzyDiagram.

        OUTPUT:

        Rauzy diagram -- the Rauzy diagram of the permutation

        EXAMPLES::

            sage: p = iet.GeneralizedPermutation('a b c b', 'c d d a')
            sage: d = p.rauzy_diagram()
            sage: p in d
            True

        For more information, try help(iet.RauzyDiagram)
        """
        return LabelledRauzyDiagram(self, **kargs)
    @lazy_attribute
    def _twin(self):
        r"""
        The twin list of the permutation

        TEST::

            sage: p = iet.GeneralizedPermutation('a a','b b')
            sage: p._twin
            [[(0, 1), (0, 0)], [(1, 1), (1, 0)]]
        """
        # Lazily computed; each entry is the (row, position) of the
        # other occurrence of the letter.
        return twin_list_li(self._intervals)
class FlippedLabelledPermutation(LabelledPermutation):
    r"""
    General template for flipped labelled objects.

    Holds, in addition to the interval labels, a parallel ``_flips``
    structure with a ``1`` (no flip) or ``-1`` (flip) per sub-interval.

    .. warning::

        Internal class! Do not use directly!
    """
    def __init__(self, intervals=None, alphabet=None, flips=None):
        r"""
        INPUT:

        - ``intervals`` - the intervals as a list of two lists

        - ``alphabet`` - something that should be converted to an alphabet

        - ``flips`` - a list of letters of the alphabet

        TESTS:

        ::

            sage: from sage.dynamics.interval_exchanges.labelled import FlippedLabelledPermutationIET
            sage: p = FlippedLabelledPermutationIET([['a','b'],['a','b']],flips='a')
            sage: p == loads(dumps(p))
            True
            sage: p = FlippedLabelledPermutationIET([['a','b'],['b','a']],flips='ab')
            sage: p == loads(dumps(p))
            True

        ::

            sage: from sage.dynamics.interval_exchanges.labelled import FlippedLabelledPermutationLI
            sage: p = FlippedLabelledPermutationLI([['a','a','b'],['b','c','c']],flips='a')
            sage: p == loads(dumps(p))
            True
            sage: p = FlippedLabelledPermutationLI([['a','a'],['b','b','c','c']],flips='ac')
            sage: p == loads(dumps(p))
            True
        """
        if intervals is None:
            intervals = [[], []]
        if flips is None: flips = []
        super(FlippedLabelledPermutation, self).__init__(intervals, alphabet)
        # _init_flips (from the flipped-permutation base) builds the
        # per-position sign lists from the set of flipped letters.
        self._init_flips(intervals, flips)
    def __copy__(self):
        r"""
        Returns a copy of ``self``

        TESTS::

            sage: p = iet.Permutation('a b c','c b a',flips='a')
            sage: h = hash(p)
            sage: t = p._twin
            sage: q = copy(p)
            sage: q == p
            True
            sage: q is p
            False
            sage: q._twin is p._twin
            False
        """
        # Deep-ish copy: the two-row lists are duplicated so that Rauzy
        # moves on the copy never mutate the original.
        result = self.__class__()
        result._intervals = [self._intervals[0][:],
                             self._intervals[1][:]]
        result._flips = [self._flips[0][:],
                         self._flips[1][:]]
        result._alphabet = self._alphabet
        result._repr_type = self._repr_type
        result._repr_options = self._repr_options
        return result
    def list(self, flips=False):
        r"""
        Returns a list associated to the permutation.

        INPUT:

        - ``flips`` - boolean (default: ``False``) - if ``True`` each entry
          is a ``(label, flip)`` pair with flip in ``{1, -1}``

        OUTPUT:

        list -- two lists of labels

        EXAMPLES::

            sage: p = iet.GeneralizedPermutation('0 0 1 2 2 1', '3 3', flips='1')
            sage: p.list(flips=True)
            [[('0', 1), ('0', 1), ('1', -1), ('2', 1), ('2', 1), ('1', -1)], [('3', 1), ('3', 1)]]
            sage: p.list(flips=False)
            [['0', '0', '1', '2', '2', '1'], ['3', '3']]

        The list can be used to reconstruct the permutation

        ::

            sage: p = iet.Permutation('a b c','c b a',flips='ab')
            sage: p == iet.Permutation(p.list(), flips=p.flips())
            True

        ::

            sage: p = iet.GeneralizedPermutation('a b b c','c d d a',flips='ad')
            sage: p == iet.GeneralizedPermutation(p.list(),flips=p.flips())
            True
        """
        if flips:
            # list(zip(...)) so Python 3 returns actual lists as documented
            # (on Python 2 zip already returned a list; behavior unchanged).
            a0 = list(zip([self._alphabet.unrank(_) for _ in self._intervals[0]], self._flips[0]))
            a1 = list(zip([self._alphabet.unrank(_) for _ in self._intervals[1]], self._flips[1]))
        else:
            a0 = [self._alphabet.unrank(_) for _ in self._intervals[0]]
            a1 = [self._alphabet.unrank(_) for _ in self._intervals[1]]
        return [a0,a1]
    def __getitem__(self,i):
        r"""
        Get labels and flips of specified interval.

        The result is a 2-uple (letter, flip) where letter is the name of the
        sub-interval and flip is a number corresponding to the presence of flip
        as following: 1 (no flip) and -1 (a flip).

        INPUT:

        - ``i`` - integer - the row, 0 (top) or 1 (bottom)

        EXAMPLES::

            sage: p = iet.Permutation('a b', 'b a', flips='a')
            sage: p[0]
            [('a', -1), ('b', 1)]
            sage: p = iet.GeneralizedPermutation('c p p', 't t c', flips='ct')
            sage: p[1]
            [('t', -1), ('t', -1), ('c', -1)]
        """
        if not isinstance(i, (Integer, int)):
            raise TypeError("Must be an integer")
        if i != 0 and i != 1:
            raise IndexError("The integer must be 0 or 1")
        letters = [self._alphabet.unrank(_) for _ in self._intervals[i]]
        flips = self._flips[i]
        # list(zip(...)) so Python 3 returns a list as shown in the
        # doctests above (identical on Python 2).
        return list(zip(letters,flips))
    def __eq__(self,other):
        r"""
        Test of equality

        ALGORITHM:

        not considering the alphabet used for the representation but just the
        order

        TESTS::

            sage: p1 = iet.Permutation('a b c','c b a',flips='a')
            sage: p2 = iet.Permutation('a b c','c b a',flips='b')
            sage: p3 = iet.Permutation('d e f','f e d',flips='d')
            sage: p1 == p1 and p2 == p2 and p3 == p3
            True
            sage: p1 == p2
            False
            sage: p1 == p3
            True
        """
        return (
            type(self) is type(other) and
            self._intervals == other._intervals and
            self._flips == other._flips)
    def __ne__(self,other):
        r"""
        Test of difference

        ALGORITHM:

        not considering the alphabet used for the representation

        TESTS::

            sage: p1 = iet.Permutation('a b c','c b a',flips='a')
            sage: p2 = iet.Permutation('a b c','c b a',flips='b')
            sage: p3 = iet.Permutation('d e f','f e d',flips='d')
            sage: p1 != p1 or p2 != p2 or p3 != p3
            False
            sage: p1 != p2
            True
            sage: p1 != p3
            False
        """
        return (
            type(self) is not type(other) or
            self._intervals != other._intervals or
            self._flips != other._flips)
    def _inversed(self):
        r"""
        Inversion of the permutation (called by tb_inverse).

        Swaps the two rows in place, invalidating the cached twin list
        and hash.

        .. TODO::

            Resolve properly the mutability problem associated to hash
            value and twin list.

        TESTS::

            sage: p = iet.Permutation('a','a',flips='a')
            sage: p.tb_inverse() #indirect doctest
            -a
            -a
            sage: p = iet.Permutation('a b','a b',flips='a')
            sage: p.tb_inverse() #indirect doctest
            -a b
            -a b
            sage: p = iet.Permutation('a b','a b',flips='b')
            sage: p.tb_inverse() #indirect doctest
            a -b
            a -b
            sage: p = iet.Permutation('a b','b a',flips='a')
            sage: p.tb_inverse() #indirect doctest
            b -a
            -a b
            sage: p = iet.Permutation('a b','b a',flips='b')
            sage: p.tb_inverse() #indirect doctest
            -b a
            a -b
        """
        # Drop caches that depend on the row order.
        if hasattr(self, '_twin'):
            delattr(self, '_twin')
        if self._hash is not None:
            self._hash = None
        self._intervals.reverse()
        self._flips.reverse()
    def _reversed(self):
        r"""
        Reverses the permutation (called by lr_inverse)

        Reverses each row in place, invalidating the cached twin list
        and hash.

        .. TODO::

            Resolve properly the mutability problem with _twin list
            and the hash value.

        TESTS::

            sage: p = iet.Permutation('a','a',flips='a')
            sage: p.lr_inverse() #indirect doctest
            -a
            -a
            sage: p = iet.Permutation('a b','a b',flips='a')
            sage: p.lr_inverse() #indirect doctest
            b -a
            b -a
            sage: p = iet.Permutation('a b','a b',flips='b')
            sage: p.lr_inverse() #indirect doctest
            -b a
            -b a
            sage: p = iet.Permutation('a b','b a',flips='a')
            sage: p.lr_inverse() #indirect doctest
            b -a
            -a b
            sage: p = iet.Permutation('a b','b a',flips='b')
            sage: p.lr_inverse() #indirect doctest
            -b a
            a -b
        """
        # Drop caches that depend on positions within the rows.
        if hasattr(self, '_twin'):
            delattr(self, '_twin')
        if self._hash is not None:
            # BUGFIX: was ``self._hash is None`` (a no-op comparison), which
            # left a stale cached hash after the in-place reversal.
            self._hash = None
        self._intervals[0].reverse()
        self._intervals[1].reverse()
        self._flips[0].reverse()
        self._flips[1].reverse()
class FlippedLabelledPermutationIET(
    FlippedLabelledPermutation,
    FlippedPermutationIET,
    LabelledPermutationIET):
    r"""
    Flipped labelled permutation from iet.

    EXAMPLES:

    Reducibility testing (does not depends of flips)::

        sage: p = iet.Permutation('a b c', 'c b a',flips='a')
        sage: p.is_irreducible()
        True
        sage: q = iet.Permutation('a b c d', 'b a d c', flips='bc')
        sage: q.is_irreducible()
        False

    Rauzy movability and Rauzy move::

        sage: p = iet.Permutation('a b c', 'c b a',flips='a')
        sage: p
        -a b c
        c b -a
        sage: p.rauzy_move(1)
        -c -a b
        -c b -a
        sage: p.rauzy_move(0)
        -a b c
        c -a b

    Rauzy diagrams::

        sage: d = iet.RauzyDiagram('a b c d','d a b c',flips='a')

    AUTHORS:

    - <NAME> (2009-09-29): initial version
    """
    def reduced(self):
        r"""
        The associated reduced permutation.

        OUTPUT:

        permutation -- the associated reduced permutation

        EXAMPLES::

            sage: p = iet.Permutation('a b c','c b a',flips='a')
            sage: q = iet.Permutation('a b c','c b a',flips='a',reduced=True)
            sage: p.reduced() == q
            True
        """
        from sage.dynamics.interval_exchanges.reduced import FlippedReducedPermutationIET
        return FlippedReducedPermutationIET(
            intervals=self.list(flips=False),
            flips=self.flips(),
            alphabet=self.alphabet())
    def __hash__(self):
        r"""
        ALGORITHM:

        Uses hash of string

        TESTS::

            sage: p =[]
            sage: p.append(iet.Permutation('a b','a b',flips='a'))
            sage: p.append(iet.Permutation('a b','a b',flips='b'))
            sage: p.append(iet.Permutation('a b','a b',flips='ab'))
            sage: p.append(iet.Permutation('a b','b a',flips='a'))
            sage: p.append(iet.Permutation('a b','b a',flips='b'))
            sage: p.append(iet.Permutation('a b','b a',flips='ab'))
            sage: h = list(map(hash, p))
            sage: for i in range(len(h)-1):
            ....:     if h[i] == h[i+1]:
            ....:         print("You choose a bad hash!")
        """
        # Cached; encodes each position as a signed integer string:
        # sign = flip, magnitude = letter rank + 1, with the bottom row
        # negated so the two rows cannot collide.
        if self._hash is None:
            f = self._flips
            i = self._intervals
            l = []
            l.extend([str(j*(1+k)) for j,k in zip(f[0],i[0])])
            l.extend([str(-j*(1+k)) for j,k in zip(f[1],i[1])])
            self._hash = hash(''.join(l))
        return self._hash
    def rauzy_move(self,winner=None,side=None):
        r"""
        Returns the Rauzy move.

        INPUT:

        - ``winner`` - 'top' (or 't' or 0) or 'bottom' (or 'b' or 1)

        - ``side`` - (default: 'right') 'right' (or 'r') or 'left' (or 'l')

        OUTPUT:

        permutation -- the Rauzy move of ``self``

        EXAMPLES:

        ::

            sage: p = iet.Permutation('a b','b a',flips='a')
            sage: p.rauzy_move('top')
            -a b
            b -a
            sage: p.rauzy_move('bottom')
            -b -a
            -b -a

        ::

            sage: p = iet.Permutation('a b c','c b a',flips='b')
            sage: p.rauzy_move('top')
            a -b c
            c a -b
            sage: p.rauzy_move('bottom')
            a c -b
            c -b a
        """
        winner = interval_conversion(winner)
        side = side_conversion(side)
        result = copy(self)
        winner_letter = result._intervals[winner][side]
        loser_letter = result._intervals[1-winner].pop(side)
        winner_flip = result._flips[winner][side]
        loser_flip = result._flips[1-winner].pop(side)
        # The loser's flip on BOTH rows becomes the product of the two
        # flips (a flipped winner reverses the loser's orientation).
        loser_twin = result._intervals[winner].index(loser_letter)
        result._flips[winner][loser_twin] = winner_flip * loser_flip
        loser_to = result._intervals[1-winner].index(winner_letter) - side
        # A flipped winner also mirrors the insertion side relative to
        # the winner's twin: +1 for a right move, -1 for a left move.
        if winner_flip == -1: loser_to += 1 + 2*side
        result._intervals[1-winner].insert(loser_to, loser_letter)
        result._flips[1-winner].insert(loser_to, winner_flip * loser_flip)
        return result
    def rauzy_diagram(self, **kargs):
        r"""
        Returns the Rauzy diagram associated to this permutation.

        For more information, try help(iet.RauzyDiagram)

        OUTPUT:

        RauzyDiagram -- the Rauzy diagram of ``self``

        EXAMPLES::

            sage: p = iet.Permutation('a b c', 'c b a',flips='a')
            sage: p.rauzy_diagram()
            Rauzy diagram with 3 permutations
        """
        return FlippedLabelledRauzyDiagram(self, **kargs)
class FlippedLabelledPermutationLI(FlippedLabelledPermutation,
                                   FlippedPermutationLI,
                                   LabelledPermutationLI):
    r"""
    Flipped labelled quadratic (or generalized) permutation.

    EXAMPLES:

    Reducibility testing::

        sage: p = iet.GeneralizedPermutation('a b b', 'c c a', flips='a')
        sage: p.is_irreducible()
        True

    Reducibility testing with associated decomposition::

        sage: p = iet.GeneralizedPermutation('a b c a', 'b d d c', flips='ab')
        sage: p.is_irreducible()
        False
        sage: test, decomp = p.is_irreducible(return_decomposition = True)
        sage: test
        False
        sage: decomp
        (['a'], ['c', 'a'], [], ['c'])

    Rauzy movability and Rauzy move::

        sage: p = iet.GeneralizedPermutation('a a b b c c', 'd d', flips='d')
        sage: p.has_rauzy_move(0)
        False
        sage: p.has_rauzy_move(1)
        True
        sage: p = iet.GeneralizedPermutation('a a b','b c c',flips='c')
        sage: p.has_rauzy_move(0)
        True
        sage: p.has_rauzy_move(1)
        True
    """
    def reduced(self):
        r"""
        The associated reduced permutation.

        OUTPUT:

        permutation -- the associated reduced permutation

        EXAMPLE::

            sage: p = iet.GeneralizedPermutation('a a','b b c c',flips='a')
            sage: q = iet.GeneralizedPermutation('a a','b b c c',flips='a',reduced=True)
            sage: p.reduced() == q
            True
        """
        from sage.dynamics.interval_exchanges.reduced import FlippedReducedPermutationLI
        return FlippedReducedPermutationLI(
            intervals=self.list(flips=False),
            flips=self.flips(),
            alphabet=self.alphabet())
    def right_rauzy_move(self, winner):
        r"""
        Perform a Rauzy move on the right (the standard one).

        INPUT:

        - ``winner`` - either 'top' or 'bottom' ('t' or 'b' for short)

        OUTPUT:

        permutation -- the Rauzy move of ``self``

        EXAMPLES:

        ::

            sage: p = iet.GeneralizedPermutation('a a b','b c c',flips='c')
            sage: p.right_rauzy_move(0)
            a a b
            -c b -c
            sage: p.right_rauzy_move(1)
            a a
            -b -c -b -c

        ::

            sage: p = iet.GeneralizedPermutation('a b b','c c a',flips='ab')
            sage: p.right_rauzy_move(0)
            a -b a -b
            c c
            sage: p.right_rauzy_move(1)
            b -a b
            c c -a
        """
        result = copy(self)
        winner_letter = result._intervals[winner][-1]
        winner_flip = result._flips[winner][-1]
        loser_letter = result._intervals[1-winner].pop(-1)
        loser_flip = result._flips[1-winner].pop(-1)
        # The loser's other occurrence may be on either row; its flip
        # becomes the product of the two flips.
        if loser_letter in result._intervals[winner]:
            loser_twin = result._intervals[winner].index(loser_letter)
            result._flips[winner][loser_twin] = loser_flip*winner_flip
        else:
            loser_twin = result._intervals[1-winner].index(loser_letter)
            result._flips[1-winner][loser_twin] = loser_flip*winner_flip
        # Insert the loser next to the winner's twin occurrence; a flip
        # shifts which side of the twin the loser lands on.
        if winner_letter in result._intervals[winner][:-1]:
            loser_to = result._intervals[winner].index(winner_letter)
            if winner_flip == -1: loser_to += 1
            result._intervals[winner].insert(loser_to, loser_letter)
            result._flips[winner].insert(loser_to, loser_flip*winner_flip)
        else:
            loser_to = result._intervals[1-winner].index(winner_letter)
            if loser_flip == 1: loser_to += 1
            result._intervals[1-winner].insert(loser_to, loser_letter)
            result._flips[1-winner].insert(loser_to, loser_flip*winner_flip)
        return result
    def left_rauzy_move(self, winner):
        r"""
        Perform a Rauzy move on the left.

        INPUT:

        - ``winner`` - either 'top' or 'bottom' ('t' or 'b' for short)

        OUTPUT:

        -- a permutation

        EXAMPLES:

        ::

            sage: p = iet.GeneralizedPermutation('a a b','b c c')
            sage: p.left_rauzy_move(0)
            a a b b
            c c
            sage: p.left_rauzy_move(1)
            a a b
            b c c

        ::

            sage: p = iet.GeneralizedPermutation('a b b','c c a')
            sage: p.left_rauzy_move(0)
            a b b
            c c a
            sage: p.left_rauzy_move(1)
            b b
            c c a a
        """
        # NOTE(review): unlike right_rauzy_move above, this method moves
        # entries of _intervals but never updates _flips, leaving the two
        # parallel structures out of sync when flips are present (the
        # doctests only exercise flip-free permutations) — looks like it
        # was copied from the unflipped class; confirm intended behavior.
        result = copy(self)
        winner_letter = result._intervals[winner][0]
        loser_letter = result._intervals[1-winner].pop(0)
        if winner_letter in result._intervals[winner][1:]:
            loser_to = result._intervals[winner][1:].index(winner_letter)+2
            result._intervals[winner].insert(loser_to, loser_letter)
        else:
            loser_to = result._intervals[1-winner].index(winner_letter)
            result._intervals[1-winner].insert(loser_to, loser_letter)
        return result
    def rauzy_diagram(self, **kargs):
        r"""
        Returns the associated Rauzy diagram.

        For more information, try help(RauzyDiagram)

        OUTPUT :

        -- a RauzyDiagram

        EXAMPLES::

            sage: p = iet.GeneralizedPermutation('a b b a', 'c d c d')
            sage: d = p.rauzy_diagram()
        """
        return FlippedLabelledRauzyDiagram(self, **kargs)
class LabelledRauzyDiagram(RauzyDiagram):
r"""
Template for Rauzy diagrams of labelled permutations.
.. WARNING::
DO NOT USE
"""
class Path(RauzyDiagram.Path):
r"""
Path in Labelled Rauzy diagram.
"""
def matrix(self):
r"""
Returns the matrix associated to a path.
The matrix associated to a Rauzy induction, is the linear
application that allows to recover the lengths of ``self``
from the lengths of the induced.
OUTPUT:
matrix -- a square matrix of integers
EXAMPLES:
::
sage: p = iet.Permutation('a1 a2','a2 a1')
sage: d = p.rauzy_diagram()
sage: g = d.path(p,'top')
sage: g.matrix()
[1 0]
[1 1]
sage: g = d.path(p,'bottom')
sage: g.matrix()
[1 1]
[0 1]
::
sage: p = iet.Permutation('a b c','c b a')
sage: d = p.rauzy_diagram()
sage: g = d.path(p)
sage: g.matrix() == identity_matrix(3)
True
sage: g = d.path(p,'top')
sage: g.matrix()
[1 0 0]
[0 1 0]
[1 0 1]
sage: g = d.path(p,'bottom')
sage: g.matrix()
[1 0 1]
[0 1 0]
[0 0 1]
"""
return self.composition(self._parent.edge_to_matrix)
def interval_substitution(self):
r"""
Returns the substitution of intervals obtained.
OUTPUT:
WordMorphism -- the word morphism corresponding to the interval
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: p0 = r.path(p,0)
sage: s0 = p0.interval_substitution()
sage: s0
WordMorphism: a->a, b->ba
sage: p1 = r.path(p,1)
sage: s1 = p1.interval_substitution()
sage: s1
WordMorphism: a->ab, b->b
sage: (p0 + p1).interval_substitution() == s1 * s0
True
sage: (p1 + p0).interval_substitution() == s0 * s1
True
"""
return self.right_composition(self._parent.edge_to_interval_substitution)
def orbit_substitution(self):
r"""
Returns the substitution on the orbit of the left extremity.
OUTPUT:
WordMorhpism -- the word morphism corresponding to the orbit
EXAMPLES::
sage: p = iet.Permutation('a b','b a')
sage: d = p.rauzy_diagram()
sage: g0 = d.path(p,'top')
sage: s0 = g0.orbit_substitution()
sage: s0
WordMorphism: a->ab, b->b
sage: g1 = d.path(p,'bottom')
sage: s1 = g1.orbit_substitution()
sage: s1
WordMorphism: a->a, b->ab
sage: (g0 + g1).orbit_substitution() == s0 * s1
True
sage: (g1 + g0).orbit_substitution() == s1 * s0
True
"""
return self.composition(self._parent.edge_to_orbit_substitution)
substitution = orbit_substitution # standard name
dual_substitution = interval_substitution # standard name
def is_full(self):
r"""
Tests the fullness.
A path is full if all intervals win at least one time.
OUTPUT:
boolean -- ``True`` if the path is full and ``False`` else
EXAMPLE::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram()
sage: g0 = r.path(p,'t','b','t')
sage: g1 = r.path(p,'b','t','b')
sage: g0.is_full()
False
sage: g1.is_full()
False
sage: (g0 + g1).is_full()
True
sage: (g1 + g0).is_full()
True
"""
return set(self._parent.letters()) == set(self.winners())
def edge_to_interval_substitution(self, p=None, edge_type=None):
r"""
Returns the interval substitution associated to an edge
OUTPUT:
WordMorphism -- the WordMorphism corresponding to the edge
EXAMPLE::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram()
sage: r.edge_to_interval_substitution(None,None)
WordMorphism: a->a, b->b, c->c
sage: r.edge_to_interval_substitution(p,0)
WordMorphism: a->a, b->b, c->ca
sage: r.edge_to_interval_substitution(p,1)
WordMorphism: a->ac, b->b, c->c
"""
if p is None and edge_type is None:
return WordMorphism(dict((a,a) for a in self.letters()))
function_name = self._edge_types[edge_type][0] + '_interval_substitution'
if not hasattr(self._element_class,function_name):
return WordMorphism(dict((a,a) for a in self.letters()))
arguments = self._edge_types[edge_type][1]
return getattr(p,function_name)(*arguments)
def edge_to_orbit_substitution(self, p=None, edge_type=None):
r"""
Returns the interval substitution associated to an edge
OUTPUT:
WordMorphism -- the word morphism corresponding to the edge
EXAMPLE::
sage: p = iet.Permutation('a b c','c b a')
sage: r = p.rauzy_diagram()
sage: r.edge_to_orbit_substitution(None,None)
WordMorphism: a->a, b->b, c->c
sage: r.edge_to_orbit_substitution(p,0)
WordMorphism: a->ac, b->b, c->c
sage: r.edge_to_orbit_substitution(p,1)
WordMorphism: a->a, b->b, c->ac
"""
if p is None and edge_type is None:
return WordMorphism(dict((a,a) for a in self.letters()))
function_name = self._edge_types[edge_type][0] + '_orbit_substitution'
if not hasattr(self._element_class,function_name):
return WordMorphism(dict((a,a) for a in self.letters()))
arguments = self._edge_types[edge_type][1]
return getattr(p,function_name)(*arguments)
def full_loop_iterator(self, start=None, max_length=1):
r"""
Returns an iterator over all full path starting at start.
INPUT:
- ``start`` - the start point
- ``max_length`` - a limit on the length of the paths
OUTPUT:
iterator -- iterator over full loops
EXAMPLE::
sage: p = iet.Permutation('a b','b a')
sage: r = p.rauzy_diagram()
sage: for g in r.full_loop_iterator(p,2):
....: print(g.matrix())
....: print("*****")
[1 1]
[1 2]
*****
[2 1]
[1 1]
*****
"""
from builtins import map
from six.moves import filter
g = self.path(start)
ifull = filter(
lambda x: x.is_loop() and x.is_full(),
self._all_path_extension(g,max_length))
return map(copy, ifull)
def full_nloop_iterator(self, start=None, length=1):
    r"""
    Returns an iterator over all full loops of given length.

    INPUT:

    - ``start`` - the initial permutation

    - ``length`` - the length to consider

    OUTPUT:

    iterator -- an iterator over the full loops of given length

    EXAMPLES::

        sage: p = iet.Permutation('a b','b a')
        sage: d = p.rauzy_diagram()
        sage: for g in d.full_nloop_iterator(p,2):
        ....:     print(g.matrix())
        ....:     print("*****")
        [1 1]
        [1 2]
        *****
        [2 1]
        [1 1]
        *****
    """
    from builtins import map
    from six.moves import filter
    # enumerate exactly the length-``length`` extensions, then keep the
    # full loops among them
    base_path = self.path(start)
    candidates = self._all_npath_extension(base_path, length)
    full_loops = filter(lambda g: g.is_loop() and g.is_full(), candidates)
    # copies are yielded so callers may mutate the paths freely
    return map(copy, full_loops)
def _permutation_to_vertex(self, p):
    r"""
    Translation of a labelled permutation to a hashable vertex key.

    INPUT:

    - ``p`` - a labelled Permutation

    TESTS::

        sage: p = iet.Permutation('a b c','c b a')
        sage: r = p.rauzy_diagram()
        sage: p in r #indirect doctest
        True
    """
    # freeze both interval rows into tuples so the pair can be used as
    # a dictionary key
    top, bottom = p._intervals
    return (tuple(top), tuple(bottom))
def _set_element(self, data):
    r"""
    Sets self._element from the vertex key ``data``.

    TESTS::

        sage: p = iet.Permutation('a b c','c b a')
        sage: r = p.rauzy_diagram()
        sage: r[p][0] == p.rauzy_move(0) #indirect doctest
        True
        sage: r[p][1] == p.rauzy_move(1) #indirect doctest
        True
    """
    # thaw the stored (top, bottom) tuples back into mutable lists
    top, bottom = data
    self._element._intervals = [list(top), list(bottom)]
class FlippedLabelledRauzyDiagram(FlippedRauzyDiagram, LabelledRauzyDiagram):
    r"""
    Rauzy diagram of flipped labelled permutations.

    Vertices additionally record the flip data of each interval row.
    """
    def _permutation_to_vertex(self, p):
        r"""
        Returns the hashable vertex key stored for ``p``.

        INPUT:

        - ``p`` - a Flipped labelled permutation

        TESTS::

            sage: p = iet.Permutation('a b c','c b a',flips='a')
            sage: r = p.rauzy_diagram()
            sage: p in r #indirect doctest
            True
        """
        # both the interval labels and the flips are frozen into tuples
        intervals = (tuple(p._intervals[0]), tuple(p._intervals[1]))
        flips = (tuple(p._flips[0]), tuple(p._flips[1]))
        return (intervals, flips)

    def _set_element(self, data):
        r"""
        Restores self._element from a stored vertex key.

        TESTS::

            sage: p = iet.Permutation('a b','b a',flips='a')
            sage: r = p.rauzy_diagram()
            sage: p in r #indirect doctest
            True
        """
        (top, bottom), (flips_top, flips_bottom) = data
        self._element._intervals = [list(top), list(bottom)]
        self._element._flips = [list(flips_top), list(flips_bottom)]
| StarcoderdataPython |
1970052 | <filename>export_pnml.py
import uuid
import xml.dom.minidom
class PNML():
    """Incrementally builds a PNML (Petri Net Markup Language) document.

    Places are added explicitly via :meth:`add_place`; transitions and
    arcs are derived from the places on demand.  Each place record is a
    dict with keys ``inp``/``outp`` (transition name lists), ``id``,
    ``inp_len``/``outp_len`` and ``name`` (the display label).
    """

    def __init__(self, net_id, net_name):
        self.net_id = net_id
        self.net_name = net_name
        self.place = []            # place records (see class docstring)
        self.transition_list = []  # unique transition names seen so far
        self.transition = []       # transition records built by get_transition()
        self.transition_dict = {}  # name -> id; the transition could occur just one time in pn
        self.arc = []              # arc records built by get_arc()
        self.get_pnml_base()

    def get_pnml_base(self):
        """Create the skeleton <pnml><net><name><text> document."""
        self.doc = xml.dom.minidom.Document()
        self.pnml = self.doc.createElement('pnml')
        self.net = self.doc.createElement('net')
        self.net.setAttribute('id', self.net_id)
        self.net.setAttribute('type', "http://www.yasper.org/specs/epnml-1.1")
        name = self.doc.createElement('name')
        text = self.doc.createElement('text')
        text.appendChild(self.doc.createTextNode(self.net_name))
        name.appendChild(text)
        self.net.appendChild(name)
        self.pnml.appendChild(self.net)
        self.doc.appendChild(self.pnml)

    def get_pnml_refresh(self):
        """Re-attach <net> and <pnml> (appendChild moves existing nodes,
        so calling this repeatedly does not duplicate them)."""
        self.pnml.appendChild(self.net)
        self.doc.appendChild(self.pnml)

    def add_place(self, input_transition, output_transition):
        """Add a place connecting ``input_transition`` -> ``output_transition``.

        Both arguments are lists of transition names; new names are
        merged into ``transition_list``.
        """
        place_id = str(uuid.uuid4())
        inter_place = self.doc.createElement('place')
        inter_place.setAttribute('id', place_id)
        name = self.doc.createElement('name')
        text = self.doc.createElement('text')
        # label looks like "([a,b],[c])" - inputs then outputs
        input_transition_str = '[%s]' % ','.join(str(i) for i in input_transition)
        output_transition_str = '[%s]' % ','.join(str(i) for i in output_transition)
        text_node = '(%s,%s)' % (input_transition_str, output_transition_str)
        text.appendChild(self.doc.createTextNode(text_node))
        name.appendChild(text)
        inter_place.appendChild(name)
        self.net.appendChild(inter_place)
        self.place.append({'inp': input_transition, 'outp': output_transition,
                           'id': place_id, 'inp_len': len(input_transition),
                           'outp_len': len(output_transition), 'name': text_node})
        # deduplicate transition names (order is unspecified, as before)
        self.transition_list = list(set(self.transition_list + input_transition + output_transition))

    def get_transition(self):
        """(Re)build one <transition> element per unique transition name."""
        self.transition = []
        self.transition_dict = {}
        for transition_name in self.transition_list:
            transition_id = str(uuid.uuid4())
            inter = {'id': transition_id, 'name': transition_name}
            inter_transition = self.doc.createElement('transition')
            inter_transition.setAttribute('id', transition_id)
            name = self.doc.createElement('name')
            text = self.doc.createElement('text')
            text.appendChild(self.doc.createTextNode(transition_name))
            name.appendChild(text)
            inter_transition.appendChild(name)
            self.net.appendChild(inter_transition)
            self.transition.append(inter)
            self.transition_dict[transition_name] = transition_id

    def get_arc_xml(self):
        """Emit an <arc> element (weight 1, type 'normal') per arc record."""
        for arc_record in self.arc:
            arc = self.doc.createElement('arc')
            arc.setAttribute('id', arc_record['id'])
            arc.setAttribute('source', arc_record['source'])
            arc.setAttribute('target', arc_record['target'])
            name = self.doc.createElement('name')
            text = self.doc.createElement('text')
            text.appendChild(self.doc.createTextNode('1'))
            name.appendChild(text)
            arc.appendChild(name)
            arctype = self.doc.createElement('arctype')
            text = self.doc.createElement('text')
            text.appendChild(self.doc.createTextNode('normal'))
            arctype.appendChild(text)
            arc.appendChild(arctype)
            self.net.appendChild(arc)

    def get_arc(self):
        """(Re)build the arc records from the places, then emit their XML.

        Requires :meth:`get_transition` to have populated
        ``transition_dict`` first.
        """
        self.arc = []
        for place_record in self.place:
            # input transition -> place
            for transition_name in place_record['inp']:
                self.arc.append({'source': self.transition_dict[transition_name],
                                 'target': place_record['id'],
                                 'id': str(uuid.uuid4())})
            # place -> output transition
            for transition_name in place_record['outp']:
                self.arc.append({'source': place_record['id'],
                                 'target': self.transition_dict[transition_name],
                                 'id': str(uuid.uuid4())})
        self.get_arc_xml()

    def get_np_info(self):
        """Return (places, transitions, arcs) after rebuilding the latter two."""
        self.get_transition()
        self.get_arc()
        return self.place, self.transition, self.arc

    def get_xml_string(self):
        """Return the whole net as a pretty-printed XML string."""
        self.get_transition()
        self.get_arc()
        self.get_pnml_refresh()
        # toprettyxml returns bytes when an encoding is given
        xml_string = self.doc.toprettyxml(indent='\t', encoding='ISO-8859-1')
        return xml_string.decode()
def gain_pnml(name, places):
    """Build a PNML net called ``name`` from ``places`` and return its XML.

    ``places`` is a list of dicts with 'input' and 'output' transition
    name lists.
    """
    net = PNML(str(uuid.uuid4()), name)
    for place in places:
        net.add_place(place['input'], place['output'])
    net.get_pnml_refresh()
    return net.get_xml_string()
# usage example: build a four-place net and dump it to example.pnml
places = [{'input': ['a'], 'output': ['b']}, {'input': ['a','b'], 'output': ['c']}, {'input': ['c'], 'output': ['d']},
          {'input': ['b'], 'output': ['d']}]
pnml_string = gain_pnml('example',places)
# NOTE(review): the file handle is never closed/flushed explicitly -
# consider a `with open(...)` block; left as-is here.
pnml_file = open('example.pnml', 'w')
pnml_file.write(pnml_string)
| StarcoderdataPython |
6427969 | <filename>HuaweiDialGrpc/producer_thread.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
功能:生产数据类,该类主要用于从路由器采集数据,包括dialin(Subscribe)和dialout(DataPublish)
版权信息:华为技术有限公司,版本所有(C)
修改记录:w30000618 创建
"""
import threading
import time
from concurrent import futures
import grpc
from data_item import DataItem
from global_args import GlobalArgs
from proto_py import huawei_grpc_dialin_pb2_grpc, huawei_grpc_dialin_pb2, huawei_grpc_dialout_pb2_grpc
from record_item import RecordItem
class DataPublish(threading.Thread):
    """ DataPublish dialout rpc method.

    Runs a gRPC *server* that the router connects to (dial-out mode);
    received telemetry is pushed onto ``data_queue`` by the servicer.
    """
    def __init__(self, t_name, data_queue, server_addr):
        # t_name: thread name; data_queue: queue shared with consumers;
        # server_addr: "host:port" to listen on
        threading.Thread.__init__(self, name=t_name)
        self.t_name = t_name
        self.data_queue = data_queue
        self.server_addr = server_addr
    def run(self):
        """Start the dial-out gRPC server and block until interrupted."""
        try:
            server = grpc.server(futures.ThreadPoolExecutor(max_workers=150))
            huawei_grpc_dialout_pb2_grpc.add_gRPCDataserviceServicer_to_server(
                MdtGrpcDialOutServicer(self.data_queue), server)
            # no TLS: plaintext listener
            server.add_insecure_port(self.server_addr)
            server.start()
            try:
                # server.start() does not block; sleep forever in chunks
                while True:
                    time.sleep(GlobalArgs._ONE_DAY_IN_SECONDS)
            except KeyboardInterrupt:
                server.stop(0)
        except Exception as e:
            print("dialout error, exception {0}".format(e))
class MdtGrpcDialOutServicer(huawei_grpc_dialout_pb2_grpc.gRPCDataserviceServicer):
    """Servicer for the dial-out stream; frames each message as a DataItem.

    Framing: a 4-byte big-endian payload length plus a 2-byte big-endian
    format marker (1 = JSON, 0 = GPB).
    """
    def __init__(self,data_queue):
        self.data_queue = data_queue
        return
    def dataPublish(self, request_iterator, context):
        """Consume the router's streamed telemetry and enqueue DataItems."""
        for _MdtDialoutArgs in request_iterator:
            # empty binary payload means the message came as JSON instead
            if (len(_MdtDialoutArgs.data) == 0):
                print("server MdtDialout JSON")
                jsonMark = 1
                jsonMarkByte = jsonMark.to_bytes(2, byteorder='big')
                json2Bytes = bytes(_MdtDialoutArgs.data_json, encoding="utf8")
                data_len = (len(json2Bytes)).to_bytes(4, byteorder='big')
                data_item = DataItem(data_len, json2Bytes, jsonMarkByte)
                self.data_queue.put(data_item)
            else:
                print("server Huawei Dialout GPB")
                gpbMark = 0
                gpbMarkByte = gpbMark.to_bytes(2, byteorder='big')
                data_len = (len(_MdtDialoutArgs.data)).to_bytes(4, byteorder='big')
                data_item = DataItem(data_len, _MdtDialoutArgs.data, gpbMarkByte)
                self.data_queue.put(data_item)
class Subscribe(threading.Thread):
    """Subscribe dialin rpc method.

    Acts as a gRPC *client* (dial-in mode): subscribes to telemetry
    paths on the router and enqueues each data reply as a DataItem,
    logging subscription metadata/errors into ``log_set``.
    """
    def __init__(self, t_name, log_set, data_queue, dialin_server, sub_args_dict):
        threading.Thread.__init__(self, name=t_name)
        self.log_set = log_set
        self.data_queue = data_queue
        self.dialin_server = dialin_server          # "host:port" of the router
        self.sub_args_dict = sub_args_dict          # see generate_subArgs_and_metadata
    def run(self):
        """Open the subscription stream and forward replies to the queue."""
        try:
            metadata, subreq = Subscribe.generate_subArgs_and_metadata(self.sub_args_dict)
            server = self.dialin_server
            channel = grpc.insecure_channel(server)
            stub = huawei_grpc_dialin_pb2_grpc.gRPCConfigOperStub(channel)
            sub_resps = stub.Subscribe(subreq, metadata=metadata)
            for sub_resp in sub_resps:
                data_is_valid = Subscribe.check_sub_reply_is_data(sub_resp)
                if (data_is_valid == True):
                    # framing: 4-byte big-endian length + 2-byte marker (0 = GPB)
                    data_len = (len(sub_resp.message)).to_bytes(4, byteorder='big')
                    dataMark = 0
                    dataMarkByte = dataMark.to_bytes(2, byteorder='big')
                    data_item = DataItem(data_len, sub_resp.message, dataMarkByte)
                    self.data_queue.put(data_item)
                    record = RecordItem(sub_resp.subscription_id, sub_resp.request_id, server, None, "record_id")
                    self.log_set.add(record)
                else:
                    record = RecordItem(None, None, server, sub_resp.message, "error")
                    self.log_set.add(record)
        except Exception as e:
            print("dialin error, exception {0}".format(e))
    @staticmethod
    def check_sub_reply_is_data(sub_resp):
        """Return True for data replies, False for status/error replies.

        NOTE(review): when response_code == "200" and message != "ok" no
        branch matches and the function implicitly returns None (falsy,
        so treated as "not data") - confirm that is intended.
        """
        print(sub_resp)
        resp_code = sub_resp.response_code
        # an empty response code marks a data-carrying reply
        if (resp_code == ""):
            return True;
        if (resp_code != "200" and resp_code != ""):
            return False;
        if (sub_resp.message == "ok"):
            return False;
    @staticmethod
    def generate_subArgs_and_metadata(sub_args_dict):
        """Build the SubsArgs request and gRPC metadata from a config dict.

        Expected keys: 'metadata', 'paths' (list of {'path', 'depth'}),
        'request_id', 'sample_interval'.
        """
        metadata, paths, request_id, sample_interval = sub_args_dict['metadata'], sub_args_dict['paths'], sub_args_dict[
            'request_id'], sub_args_dict['sample_interval']
        sub_req = huawei_grpc_dialin_pb2.SubsArgs()
        for path in paths:
            sub_path = huawei_grpc_dialin_pb2.Path(path=path['path'], depth=path['depth'])
            sub_req.path.append(sub_path)
        sub_req.encoding = 0        # 0 = GPB encoding
        sub_req.request_id = request_id
        sub_req.sample_interval = sample_interval
        return metadata, sub_req
| StarcoderdataPython |
5186222 | from lmnop import __version__
def test_version():
    """The package must report the placeholder version 0.0.0."""
    assert __version__ == "0.0.0"  # noqa: S101
| StarcoderdataPython |
246160 | <reponame>monday-lesley/ponyos<gh_stars>0
#!/usr/bin/python3
"""
Calculator for ToaruOS
"""
import subprocess
import sys
import cairo
import yutani
import text_region
import toaru_fonts
from button import Button
from menu_bar import MenuBarWidget, MenuEntryAction, MenuEntrySubmenu, MenuEntryDivider, MenuWindow
from about_applet import AboutAppletWindow
import yutani_mainloop
import ast
import operator as op
# Whitelisted AST operator node types mapped to their implementations.
# This is the entire set of operations the calculator can perform; note
# that ^ is Python's XOR, ** is exponentiation.
operators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
             ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor,
             ast.USub: op.neg}

app_name = "Calculator"
version = "1.0.0"
_description = f"<b>{app_name} {version}</b>\n© 2017 <NAME>\n\nSimple four-function calculator using Python.\n\n<color 0x0000FF>http://github.com/klange/toaruos</color>"

def eval_expr(expr):
    """Safely evaluate an arithmetic expression string.

    Only operators whitelisted in ``operators`` are executed, so this is
    safe on untrusted input (unlike ``eval``).

    >>> eval_expr('2^6')
    4
    >>> eval_expr('2**6')
    64
    >>> eval_expr('1 + 2*3**(4^5) / (6 + -7)')
    -5.0
    """
    return eval_(ast.parse(expr, mode='eval').body)

def eval_(node):
    """Recursively evaluate a whitelisted arithmetic AST node.

    Raises TypeError for any construct outside the supported subset.
    """
    # ast.Constant replaces the deprecated ast.Num (Python 3.8+); only
    # plain numeric literals are accepted.
    if isinstance(node, ast.Constant) and isinstance(node.value, (int, float, complex)):
        return node.value
    elif isinstance(node, ast.BinOp):  # <left> <operator> <right>
        return operators[type(node.op)](eval_(node.left), eval_(node.right))
    elif isinstance(node, ast.UnaryOp):  # <operator> <operand> e.g., -1
        return operators[type(node.op)](eval_(node.operand))
    else:
        raise TypeError("invalid operation")
class CalculatorWindow(yutani.Window):
    """Main calculator window: a text display plus a 5x4 button grid.

    The expression is accumulated in ``self.tr`` (a TextRegion) and
    evaluated with the module-level ``eval_expr``. ``self.error`` marks
    that the display currently shows an error message, in which case the
    next input clears it.
    """
    base_width = 200   # initial client-area width (pixels)
    base_height = 240  # initial client-area height (pixels)
    def __init__(self, decorator):
        super(CalculatorWindow, self).__init__(self.base_width + decorator.width(), self.base_height + decorator.height(), title=app_name, icon="calculator", doublebuffer=True)
        self.move(100,100)
        self.decorator = decorator
        # button callbacks close over ``self``; each receives the Button
        def add_string(button):
            self.add_string(button.text)
        def clear(button):
            self.clear_text()
        def calculate(button):
            self.calculate()
        # 5 rows x 4 columns; None leaves an empty cell in the grid
        self.buttons = [
            [Button("C",clear), None, Button("(",add_string), Button(")",add_string)],
            [Button("7",add_string), Button("8",add_string), Button("9",add_string), Button("/",add_string)],
            [Button("4",add_string), Button("5",add_string), Button("6",add_string), Button("*",add_string)],
            [Button("1",add_string), Button("2",add_string), Button("3",add_string), Button("-",add_string)],
            [Button("0",add_string), Button(".",add_string), Button("=",calculate), Button("+",add_string)],
        ]
        # menu actions
        def exit_app(action):
            # snapshot the values because closing mutates self.menus
            menus = [x for x in self.menus.values()]
            for x in menus:
                x.definitely_close()
            self.close()
            sys.exit(0)
        def about_window(action):
            AboutAppletWindow(self.decorator,f"About {app_name}","/usr/share/icons/48/calculator.png",_description,"calculator")
        def help_browser(action):
            subprocess.Popen(["help-browser.py","calculator.trt"])
        menus = [
            ("File", [
                MenuEntryAction("Exit","exit",exit_app,None),
            ]),
            ("Help", [
                MenuEntryAction("Contents","help",help_browser,None),
                MenuEntryDivider(),
                MenuEntryAction(f"About {app_name}","star",about_window,None),
            ]),
        ]
        self.menubar = MenuBarWidget(self,menus)
        # display region: right-aligned, single line with ellipsis
        self.tr = text_region.TextRegion(self.decorator.left_width()+5,self.decorator.top_height()+self.menubar.height,self.base_width-10,40)
        self.tr.set_font(toaru_fonts.Font(toaru_fonts.FONT_MONOSPACE,18))
        self.tr.set_text("")
        self.tr.set_alignment(1)
        self.tr.set_valignment(2)
        self.tr.set_one_line()
        self.tr.set_ellipsis()
        self.error = False          # display currently shows an error message
        self.hover_widget = None    # button under the cursor, if any
        self.down_button = None     # button pressed but not yet released
        self.menus = {}             # open menu windows
        self.hovered_menu = None
    def calculate(self):
        """Evaluate the displayed expression, showing errors in red."""
        if self.error or len(self.tr.text) == 0:
            self.tr.set_text("0")
            self.error = False
        try:
            self.tr.set_text(str(eval_expr(self.tr.text)))
        except Exception as e:
            # strip any trailing "(...)" detail from the message
            error = str(e)
            if "(" in error:
                error = error[:error.find("(")-1]
            self.tr.set_richtext(f"<i><color 0xFF0000>{e.__class__.__name__}</color>: {error}</i>")
            self.error = True
        self.draw()
        self.flip()
    def add_string(self, text):
        """Append ``text`` to the display, clearing any error first."""
        if self.error:
            self.tr.text = ""
            self.error = False
        self.tr.set_text(self.tr.text + text)
        self.draw()
        self.flip()
    def clear_text(self):
        """Clear the display (the "C" button)."""
        self.error = False
        self.tr.set_text("")
        self.draw()
        self.flip()
    def clear_last(self):
        """Backspace: drop the last character (or clear an error message)."""
        if self.error:
            self.error = False
            self.tr.set_text("")
        if len(self.tr.text):
            self.tr.set_text(self.tr.text[:-1])
        self.draw()
        self.flip()
    def draw(self):
        """Render background, display region, button grid and menu bar."""
        surface = self.get_cairo_surface()
        WIDTH, HEIGHT = self.width - self.decorator.width(), self.height - self.decorator.height()
        ctx = cairo.Context(surface)
        ctx.translate(self.decorator.left_width(), self.decorator.top_height())
        # grey client background
        ctx.rectangle(0,0,WIDTH,HEIGHT)
        ctx.set_source_rgb(204/255,204/255,204/255)
        ctx.fill()
        # white strip behind the display text
        ctx.rectangle(0,5+self.menubar.height,WIDTH,self.tr.height-10)
        ctx.set_source_rgb(1,1,1)
        ctx.fill()
        self.tr.resize(WIDTH-10, self.tr.height)
        self.tr.draw(self)
        # lay the button grid out to fill the remaining area
        offset_x = 0
        offset_y = self.tr.height + self.menubar.height
        button_height = int((HEIGHT - self.tr.height - self.menubar.height) / len(self.buttons))
        for row in self.buttons:
            button_width = int(WIDTH / len(row))
            for button in row:
                if button:
                    button.draw(self,ctx,offset_x,offset_y,button_width,button_height)
                offset_x += button_width
            offset_x = 0
            offset_y += button_height
        self.menubar.draw(ctx,0,0,WIDTH)
        self.decorator.render(self)
        self.flip()
    def finish_resize(self, msg):
        """Accept a resize, enforcing a 200x200 minimum."""
        if msg.width < 200 or msg.height < 200:
            self.resize_offer(max(msg.width,200),max(msg.height,200))
            return
        self.resize_accept(msg.width, msg.height)
        self.reinit()
        self.draw()
        self.resize_done()
        self.flip()
    def mouse_event(self, msg):
        """Route mouse input to decorations, menu bar, or button grid."""
        # NOTE(review): ``d`` and ``window`` are the module-level globals
        # assigned under __main__, not self - confirm intended.
        if d.handle_event(msg) == yutani.Decor.EVENT_CLOSE:
            window.close()
            sys.exit(0)
        # coordinates relative to the client area
        x,y = msg.new_x - self.decorator.left_width(), msg.new_y - self.decorator.top_height()
        w,h = self.width - self.decorator.width(), self.height - self.decorator.height()
        if x >= 0 and x < w and y >= 0 and y < self.menubar.height:
            self.menubar.mouse_event(msg, x, y)
            return
        redraw = False
        if self.down_button:
            # a press is in flight: fire the callback only if released
            # over the same button, otherwise cancel
            if msg.command == yutani.MouseEvent.RAISE or msg.command == yutani.MouseEvent.CLICK:
                if not (msg.buttons & yutani.MouseButton.BUTTON_LEFT):
                    if x >= self.down_button.x and \
                        x < self.down_button.x + self.down_button.width and \
                        y >= self.down_button.y and \
                        y < self.down_button.y + self.down_button.height:
                        self.down_button.focus_enter()
                        self.down_button.callback(self.down_button)
                        self.down_button = None
                        redraw = True
                    else:
                        self.down_button.focus_leave()
                        self.down_button = None
                        redraw = True
        else:
            if y > self.tr.height + self.menubar.height and y < h and x >= 0 and x < w:
                # map the cursor position to a grid cell
                row = int((y - self.tr.height - self.menubar.height) / (self.height - self.decorator.height() - self.tr.height - self.menubar.height) * len(self.buttons))
                col = int(x / (self.width - self.decorator.width()) * len(self.buttons[row]))
                button = self.buttons[row][col]
                if button != self.hover_widget:
                    # hover changed: update focus highlighting
                    if button:
                        button.focus_enter()
                        redraw = True
                    if self.hover_widget:
                        self.hover_widget.focus_leave()
                        redraw = True
                    self.hover_widget = button
                if msg.command == yutani.MouseEvent.DOWN:
                    if button:
                        button.hilight = 2
                        self.down_button = button
                        redraw = True
            else:
                # cursor left the grid entirely
                if self.hover_widget:
                    self.hover_widget.focus_leave()
                    redraw = True
                self.hover_widget = None
        if redraw:
            self.draw()
    def keyboard_event(self, msg):
        """Keyboard shortcuts: digits/operators, Enter '=' , c clear, q quit."""
        if msg.event.action != 0x01:
            return # Ignore anything that isn't a key down.
        if msg.event.key in b"0123456789.+-/*()":
            self.add_string(msg.event.key.decode('utf-8'))
        if msg.event.key == b"\n":
            self.calculate()
        if msg.event.key == b"c":
            self.clear_text()
        if msg.event.keycode == 8:
            # backspace keycode
            self.clear_last()
        if msg.event.key == b"q":
            self.close()
            sys.exit(0)
if __name__ == '__main__':
    # connect to the compositor, then create the decorated window;
    # ``d`` and ``window`` are module globals referenced by
    # CalculatorWindow.mouse_event
    yutani.Yutani()
    d = yutani.Decor()
    window = CalculatorWindow(d)
    window.draw()
    yutani_mainloop.mainloop()
| StarcoderdataPython |
9791182 | from django.contrib import admin
from django.urls import path, include
from accounts.views import Top
from . import views
# URL namespace for reversing, e.g. "ByeByeTODO:home"
app_name = 'ByeByeTODO'
urlpatterns = [
    # root redirects/serves the login entry point
    path('', views.tologin, name='tologin'),
    # path('', views.index, name='index'),
    #path('accounts/login/', views.LoginView.as_view(), name='login'),
    path('login/', views.login, name='login'),
    path('home/', views.home, name='home'),
    path('about/', views.about, name='about'),
]
| StarcoderdataPython |
6413072 | <gh_stars>0
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.text import slugify
from datetime import datetime
from realtor.models import Realtor
from django.urls import reverse
from autoslug import AutoSlugField
# Create your models here.
# Django choices tuples (stored value, human-readable label).
# Top-level listing categories.
PROPERTY_CATEGORY = (
    ("Lands", "Lands"),
    ("Apartments", "Apartments"),
    ("Commercials", "Commercials"),
    ("Vehicles", "Vehicles"),
)
# Finer-grained kinds within a category.
# NOTE(review): not referenced by any model field visible in this file -
# presumably used by forms elsewhere; verify before removing.
PROPERTY_SUB_CATEGORY = (
    ("Plot", "Plot"),
    ("Farm", "Farm"),
    ("Apartment", "Apartment"),
    ("House", "House"),
    ("Room", "Room"),
    ("Hostel", "Hostel"),
    ("Office Place", "Office Place"),
    ("Frame", "Frame"),
    ("Car", "Car"),
    ("Motorcycle", "Motorcycle"),
    ("Bajaji", "Bajaji"),
)
# Whether a listing is offered for rent or for sale.
PROPERTY_TYPE = (
    ("For Rent", "For Rent"),
    ("For Sale", "For Sale"),
)
class Property(models.Model):
    """A real-estate listing owned by a user and managed by a realtor."""
    realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING)
    title = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    city = models.CharField(max_length=200)
    district = models.CharField(max_length=200)
    zipcode = models.CharField(max_length=200)
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    description = models.TextField(max_length=500)
    # NOTE(review): max_length=501 looks like a typo (choices values are short)
    property_type = models.CharField(max_length=501, choices=PROPERTY_TYPE)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    # NOTE(review): blank=True without null=True on IntegerField only
    # relaxes form validation, not the DB column - confirm intended
    bedrooms = models.IntegerField(blank=True)
    rooms = models.IntegerField(blank=True)
    bathrooms = models.IntegerField(blank=True)
    garage = models.IntegerField(default=0)
    sqft = models.IntegerField()
    lot_size = models.DecimalField(max_digits=5, decimal_places=2)
    category = models.ForeignKey(
        'Category', on_delete=models.SET_NULL, null=True)
    photo_main = models.ImageField(
        upload_to='main_photo/%Y/%m/%d/', blank=True, null=True)
    #created = models.DateTimeField(default=timezone.now)
    is_published = models.BooleanField(default=True)
    featured = models.BooleanField(default=False)
    created = models.DateTimeField(default=datetime.now)
    slug = models.SlugField(blank=True, null=True)
    def save(self, *args, **kwargs):
        # auto-populate the slug from the title on first save
        if not self.slug and self.title:
            self.slug = slugify(self.title)
        super(Property, self).save(*args, **kwargs)
    class Meta:
        verbose_name = 'Property'
        verbose_name_plural = 'Properties'
    def __str__(self):
        return self.title
class PropertyImages(models.Model):
    """Additional gallery image attached to a Property."""
    property = models.ForeignKey(Property, on_delete=models.CASCADE)
    image = models.ImageField(
        upload_to='property/%Y/%m/%d/', blank=True, null=True)
    def __str__(self):
        # display the stored file name in the admin
        return self.image.name
    class Meta:
        verbose_name = 'Property Image'
        verbose_name_plural = 'Property Images'
class Category(models.Model):
    """Top-level listing category (choices from PROPERTY_CATEGORY)."""
    category_name = models.CharField(max_length=50, choices=PROPERTY_CATEGORY)
    # slug auto-generated from category_name by django-autoslug
    slug = AutoSlugField(populate_from='category_name', null=True)
    #image = models.ImageField(upload_to='category/', blank=True, null=True)
    class Meta:
        verbose_name = 'category'
        verbose_name_plural = 'categories'
    def __str__(self):
        return self.category_name
class SubCategory(models.Model):
    """Sub-category within a Category (choices from PROPERTY_SUB_CATEGORY)."""
    title = models.CharField(max_length=50, choices=PROPERTY_SUB_CATEGORY)
    slug = AutoSlugField(populate_from='title', null=True)
    created = models.DateTimeField(default=datetime.now)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    class Meta:
        verbose_name = 'sub category'
        verbose_name_plural = 'sub categories'
    def __str__(self):
        return self.title
| StarcoderdataPython |
208108 | import pytest
import tempfile
import zipfile
import zipfile_deflate64
from pathlib import Path
from skultrafast.quickcontrol import QC1DSpec, QC2DSpec, parse_str, QCFile
from skultrafast.data_io import get_example_path, get_twodim_dataset
def test_parse():
    """parse_str coerces raw strings to float/int/bool and comma lists."""
    assert (parse_str('-8000.000000') == -8000.0)
    assert (parse_str('75') == 75)
    assert (parse_str('TRUE') == True)
    assert (parse_str('FALSE') == False)
    # a comma-separated string becomes a list of parsed values
    flist = '-8000.000000,-7950.000000,-7900.000000,-7850.000000'
    res = parse_str(flist)
    assert (isinstance(res, list))
    assert (res[0] == -8000)
    assert (len(res) == 4)
@pytest.fixture(scope='session')
def datadir(tmp_path_factory):
    """Session-scoped temp dir with the quickcontrol example data extracted."""
    p = get_example_path('quickcontrol')
    tmp = tmp_path_factory.mktemp("data")
    # extraction relies on the zipfile_deflate64 import patching zipfile
    zipfile.ZipFile(p).extractall(tmp)
    return tmp
@pytest.fixture(scope='session')
def datadir2d(tmp_path_factory):
    """Session-scoped path to the 2D example dataset (downloaded/cached)."""
    p = get_twodim_dataset()
    return p
def test_info(datadir):
    """Smoke test: constructing a QCFile from the example data must not raise."""
    qc = QCFile(fname=datadir / '20201029#07')
def test_1d(datadir):
    """1D spectrum: parallel/perpendicular data shapes agree with axes."""
    qc = QC1DSpec(fname=datadir / '20201029#07')
    assert (qc.par_data.shape == qc.per_data.shape)
    # axis 1 is the delay axis, axis 2 the 128 detector pixels
    assert (qc.par_data.shape[1] == len(qc.t))
    assert (qc.par_data.shape[2] == 128)
    ds = qc.make_pol_ds()
    ds.plot.spec(1)
def test_2d(datadir2d):
    """2D spectrum: build a dataset from the first *320.info file."""
    infos = list(Path(datadir2d).glob('*320.info'))
    ds = QC2DSpec(infos[0])
    ds.make_ds()
| StarcoderdataPython |
1882887 | <reponame>grvkmrpandit/competitiveprogramming
def combine(left, right):
    """Merge two (matched_pairs, unmatched_open, unmatched_close) summaries.

    Opens left over from the left segment pair up with closes from the
    right segment; the rest stay unmatched.
    """
    newly_matched = min(left[1], right[2])
    matched = left[0] + right[0] + newly_matched
    open_remaining = left[1] + right[1] - newly_matched
    close_remaining = left[2] + right[2] - newly_matched
    return (matched, open_remaining, close_remaining)
def build(idx, l, r):
    """Build segment-tree node ``idx`` over string[l..r] (inclusive).

    Each node holds (matched_pairs, unmatched_open, unmatched_close) for
    its segment. Reads the module-level ``string`` and writes ``tree``.
    """
    if l == r:
        # Bug fix: the original did ``tree[1] += 1`` / ``tree[2] += 1``,
        # adding an int to the (0,0,0) tuple stored at tree[1]/tree[2]
        # (a TypeError). The leaf node itself must be assigned instead.
        if string[l] == '(':
            tree[idx] = (0, 1, 0)
        else:
            tree[idx] = (0, 0, 1)
    else:
        mid = (l + r) // 2
        build(2 * idx + 1, l, mid)
        build(2 * idx + 2, mid + 1, r)
        tree[idx] = combine(tree[2 * idx + 1], tree[2 * idx + 2])
def query(idx, x, y, l, r):
    """Return the combined (matched, open, close) summary of string[x..y].

    ``[x, y]`` and the node range ``[l, r]`` are both inclusive, as
    established by the complete-overlap test below. Reads ``tree``.
    """
    # Bug fix: the original no-overlap test was ``l >= y or x >= r``,
    # which wrongly rejects segments that touch the query boundary
    # (e.g. l == y) and breaks single-position queries; under inclusive
    # bounds the correct disjointness test is l > y or r < x.
    if l > y or r < x:
        return (0, 0, 0)
    if x <= l and r <= y:
        return tree[idx]
    mid = (l + r) // 2
    left = query(2 * idx + 1, x, y, l, mid)
    right = query(2 * idx + 2, x, y, mid + 1, r)
    return combine(left, right)
if __name__ == '__main__':
    # read the bracket string and the number of queries
    string=input()
    n=len(string)
    m=int(input())
    # 4n is the usual safe size; 5n leaves extra headroom
    tree=[(0,0,0) for i in range(5*n)]
    build(0,0,n-1)
    for i in range(m):
        # 1-based inclusive query bounds -> 0-based
        a,b=map(int,input().split())
        a=a-1
        b=b-1
        ans=query(0,a,b,0,n-1)
        # ans is (matched_pairs, unmatched_open, unmatched_close);
        # NOTE(review): competitive judges usually expect 2*ans[0]
        # (length of the longest correct subsequence) - confirm intent.
        print(ans)
| StarcoderdataPython |
6535542 | # coding=utf-8
# Copyright (C) 2015 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import sys
from pathlib import Path
from invoke import run, task
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(thread)-5d %(filename)s:%(lineno)s | %(funcName)s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
logging.disable(logging.NOTSET)
logging.debug('Loading %s', __name__)
@task
def bump(ctx, patch=True):
    """Bump the version: patch level by default, otherwise minor."""
    if patch:
        ctx.run("bumpversion patch --no-tag")
    else:
        ctx.run("bumpversion minor")
@task
def register_pypi(ctx):
    """Register the package on the production PyPI index (from master)."""
    ctx.run("git checkout master")
    ctx.run("python setup.py register -r pypi")
@task
def register_pypi_test(ctx):
    """Register the package on the PyPI test index (from master)."""
    ctx.run("git checkout master")
    ctx.run("python setup.py register -r pypitest")
@task
def upload_pypi(ctx):
    """Build an sdist from master and upload it to PyPI."""
    ctx.run("git checkout master")
    ctx.run("python setup.py sdist upload -r pypi")
@task
def sync(ctx):
    """
    Sync master and develop branches in both directions
    """
    # pull both branches from origin...
    ctx.run("git checkout develop")
    ctx.run("git pull origin develop --verbose")
    ctx.run("git checkout master")
    ctx.run("git pull origin master --verbose")
    # ...then merge each into the other so they end up identical
    ctx.run("git checkout develop")
    ctx.run("git merge master --verbose")
    ctx.run("git checkout master")
    ctx.run("git merge develop --verbose")
@task(sync, bump, upload_pypi)
def release(ctx):
    """Full release: runs sync, bump and upload_pypi first (pre-tasks),
    then pushes both branches back to origin."""
    ctx.run("git checkout develop")
    ctx.run("git merge master --verbose")
    ctx.run("git push origin develop --verbose")
    ctx.run("git push origin master --verbose")
| StarcoderdataPython |
3259293 | """ Implementation of Odd-Even Sort algorithm
"""
def oddeven(data):
    """Sort ``data`` in place using odd-even (brick) sort.

    A bubble-sort variant that alternates two phases until a full pass
    makes no swap: the odd phase compares pairs starting at odd indices,
    the even phase pairs starting at even indices.

    :param data: list of comparable elements, sorted in place
    :type data: list
    """
    # doc fix: the original docstring documented a parameter named
    # ``array`` that does not exist; the parameter is ``data``.
    is_sorted = False
    data_len = len(data)
    while not is_sorted:
        is_sorted = True
        # odd phase: (1,2), (3,4), ...
        for i in range(1, data_len - 1, 2):
            if data[i] > data[i + 1]:
                data[i], data[i + 1] = data[i + 1], data[i]
                is_sorted = False
        # even phase: (0,1), (2,3), ...
        for i in range(0, data_len - 1, 2):
            if data[i] > data[i + 1]:
                data[i], data[i + 1] = data[i + 1], data[i]
                is_sorted = False
def main():
    """ operational function: demo sort of a fixed sample list """
    arr = [34, 56, 23, 67, 3, 68]
    print(f"unsorted array: {arr}")
    oddeven(arr)
    print(f" sorted array: {arr}")
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6549265 | <filename>tanksworld/minimap_util.py
# ©2020 Johns Hopkins University Applied Physics Laboratory LLC.
import cv2
import numpy as np
import math, random, time
IMG_SZ = 128 # side length (pixels) of the output minimap image
UNITY_SZ = 100.0 # side length of the playable space in unity coordinates
SCALE = 120.0 # side length of the space when drawn on our image (pixels)
def point_offset_point(p_origin, angle, radius):
    """Return the point at polar offset (radius, angle) from ``p_origin``."""
    offset_x = p_origin[0] + math.cos(angle) * radius
    offset_y = p_origin[1] + math.sin(angle) * radius
    return offset_x, offset_y
def point_relative_point_heading(point, new_origin, heading):
    """Express ``point`` in a frame centred at ``new_origin`` and rotated
    by ``heading`` radians; returns [x, y] in the new frame."""
    # offset from the new origin, converted to polar form
    dx = point[0] - new_origin[0]
    dy = point[1] - new_origin[1]
    radius = math.sqrt(dx * dx + dy * dy)
    # rotate into the heading-aligned frame
    theta = math.atan2(dy, dx) - heading
    # back to cartesian coordinates
    return [radius * math.cos(theta), radius * math.sin(theta)]
def points_relative_point_heading(points, new_origin, heading):
    """Transform every point in ``points`` into the (new_origin, heading) frame."""
    transformed = []
    for point in points:
        transformed.append(point_relative_point_heading(point, new_origin, heading))
    return transformed
def draw_arrow(image, x, y, heading, health):
    """Draw a tank as an asteroid-style arrow at (x, y) on a grayscale image.

    The fill intensity encodes health: 100 hp -> 255, 0 hp -> 127.
    ``heading`` is in radians; returns the mutated image.
    """
    # value of the tank pixel encodes how much hp it has
    arrow_size = 3.0
    angle_dev = 2.0     # angular spread of the two back verts (radians)
    value = int((health/100.0)*128.0 + 127.0)
    # make vertices of an asteroid-like arrow shape
    h = heading - 1.57  # rotate so the nose points along the heading
    pnose = point_offset_point([x, y], h, 2.0*arrow_size)
    pleft = point_offset_point([x, y], h-angle_dev, arrow_size)
    porg = [x, y]
    pright = point_offset_point([x, y], h+angle_dev, arrow_size)
    verts = np.asarray([[pnose, pleft, porg, pright]], dtype=np.int32)
    # draw arrow onto the image and return
    cv2.fillPoly(image, verts, value)
    return image
def draw_bullet(image, x, y):
    """Draw a bullet as a small white circle outline at (x, y); returns image."""
    cv2.circle(image, (int(x),int(y)), 2, 255.0, thickness=1)
    return image
def draw_tanks_in_channel(tank_data, reference_tank):
    """Render one minimap channel with the given tanks (and their bullets)
    relative to ``reference_tank``.

    Each tank record is [x, y, heading, health(, bullet_x, bullet_y)];
    reference_tank[0:3] give the ego position and heading. Dead tanks
    (health <= 0) are skipped. Returns an (IMG_SZ, IMG_SZ, 1) uint8 image.
    """
    img = np.zeros((IMG_SZ, IMG_SZ, 1), np.uint8)
    # draw tanks
    for td in tank_data:
        if td[3] <= 0.0:
            continue
        # world position -> ego frame -> pixel coordinates (centered)
        rel_x, rel_y = point_relative_point_heading([td[0],td[1]], reference_tank[0:2], reference_tank[2])
        x = (rel_x/UNITY_SZ) * SCALE + float(IMG_SZ)*0.5
        y = (rel_y/UNITY_SZ) * SCALE + float(IMG_SZ)*0.5
        heading = td[2]
        health = td[3]
        # arrow orientation is relative to the reference tank's heading
        rel_heading = heading - reference_tank[2]
        img = draw_arrow(img, x, y, rel_heading, health)
        # draw bullet if present (x >= 900 is the "no bullet" sentinel)
        if len(td) > 4:
            bx = td[4]
            by = td[5]
            if bx < 900:
                rel_x, rel_y = point_relative_point_heading([bx, by], reference_tank[0:2], reference_tank[2])
                x = (rel_x/UNITY_SZ) * SCALE + float(IMG_SZ)*0.5
                y = (rel_y/UNITY_SZ) * SCALE + float(IMG_SZ)*0.5
                img = draw_bullet(img, x, y)
    return img
def barriers_for_player(barriers, reference_tank):
    """Render the walls plus internal barrier map into the ego frame.

    ``barriers`` is a barrier occupancy image for the arena interior
    (channel 0 is used); ``reference_tank`` is [x, y, heading, ...].
    Returns an (IMG_SZ, IMG_SZ, 1) uint8 channel where 255 marks solid.
    """
    img = np.zeros((IMG_SZ, IMG_SZ, 1), np.uint8)
    # constants
    wall_value = 255
    border_allowance_global = -1.0   # pushes the wall faces slightly outward
    unity32 = UNITY_SZ*1.5           # extent of the wall slabs past the arena
    unityhf = UNITY_SZ*0.5           # half the arena side
    # draw the four boundary walls: each is an axis-aligned slab in world
    # coordinates, transformed into the ego frame, scaled to pixels and
    # filled as a polygon
    wleft = [[-unity32, -unity32], [-unityhf-border_allowance_global, -unity32], [-unityhf-border_allowance_global, unity32], [-unity32, unity32]] #in world
    wleft_rel = np.asarray([points_relative_point_heading(wleft, reference_tank[0:2], reference_tank[2])])
    wleft_rel = (wleft_rel/UNITY_SZ) * SCALE + float(IMG_SZ)*0.5
    cv2.fillPoly(img, wleft_rel.astype(np.int32), wall_value)
    wright = [[unityhf+border_allowance_global, -unity32], [unity32, -unity32], [unity32, unity32], [unityhf+border_allowance_global, unity32]] #in world
    wright_rel = np.asarray([points_relative_point_heading(wright, reference_tank[0:2], reference_tank[2])])
    wright_rel = (wright_rel/UNITY_SZ) * SCALE + float(IMG_SZ)*0.5
    cv2.fillPoly(img, wright_rel.astype(np.int32), wall_value)
    wtop = [[-unity32, -unity32], [unity32, -unity32], [unity32, -unityhf-border_allowance_global], [-unity32, -unityhf-border_allowance_global]] #in world
    wtop_rel = np.asarray([points_relative_point_heading(wtop, reference_tank[0:2], reference_tank[2])])
    wtop_rel = (wtop_rel/UNITY_SZ) * SCALE + float(IMG_SZ)*0.5
    cv2.fillPoly(img, wtop_rel.astype(np.int32), wall_value)
    wbot = [[-unity32, unityhf+border_allowance_global], [unity32, unityhf+border_allowance_global], [unity32, unity32], [-unity32, unity32]] #in world
    wbot_rel = np.asarray([points_relative_point_heading(wbot, reference_tank[0:2], reference_tank[2])])
    wbot_rel = (wbot_rel/UNITY_SZ) * SCALE + float(IMG_SZ)*0.5
    cv2.fillPoly(img, wbot_rel.astype(np.int32), wall_value)
    # draw internal barriers: resize to SCALE x SCALE and binarise to 0/255
    res = cv2.resize(np.squeeze(barriers[:,:,0]), dsize=(int(SCALE),int(SCALE)), interpolation=cv2.INTER_CUBIC)
    threshold_indices = res > 0.05
    res[threshold_indices] = 1.0
    res *= 255.0
    # paste into a 2x larger canvas so translation/rotation cannot clip
    scl = int(SCALE)
    barrier_img = np.ones((scl*2, scl*2), np.uint8)
    barrier_img[(scl//2):3*scl//2, (scl//2):3*scl//2] = res
    # translate by the ego position (in pixels)
    dx = (reference_tank[0]/UNITY_SZ) * SCALE
    dy = (reference_tank[1]/UNITY_SZ) * SCALE
    M = np.float32([[1,0,-dx],[0,1,-dy]])
    barrier_img = cv2.warpAffine(barrier_img,M,(scl*2,scl*2))
    # rotate about the canvas center by the ego heading (degrees)
    ang = reference_tank[2]*(180.0/3.14)
    M = cv2.getRotationMatrix2D((float(scl*2)*0.5,float(scl*2)*0.5),ang,1)
    barrier_img = cv2.warpAffine(barrier_img,M,(scl*2,scl*2))
    # extract the central IMG_SZ x IMG_SZ window
    padd = (IMG_SZ-int(SCALE))//2
    barrier_img = barrier_img[(scl//2)-padd:(scl//2)+IMG_SZ-padd, (scl//2)-padd:(scl//2)+IMG_SZ-padd]
    # add channel axis to match img
    barrier_img = np.expand_dims(barrier_img, axis=2)
    # merge the walls and the barriers (elementwise max)
    ch = np.maximum(img, barrier_img)
    return ch
# expects state data chopped on a tank by tank basis
# ie. for 5 red, 5 blue, 2 neutral, expects a length 12 array
def minimap_for_player(tank_data_original, tank_idx, barriers):
    """Build the 4-channel (ally / neutral / enemy / barrier) minimap
    observation for the tank at `tank_idx`, centred on that tank."""
    barriers = np.flipud(barriers)
    # Negate y position (index 1) and y velocity (index 5) to move the
    # world coordinates into image coordinates.
    tank_data = [[td[0], -td[1], td[2], td[3], td[4], -td[5]]
                 for td in tank_data_original]
    my_data = tank_data[tank_idx]
    # Dead tanks (hp <= 0) observe an all-zero map.
    if my_data[3] <= 0.0:
        return np.zeros((IMG_SZ, IMG_SZ, 4), dtype=np.float32)
    # Indices 0-4 are one team, 5-9 the other, 10+ neutral; "ally" is
    # whichever team the observing tank belongs to.
    if tank_idx < 5:
        ally, enemy = tank_data[:5], tank_data[5:10]
    else:
        enemy, ally = tank_data[:5], tank_data[5:10]
    neutral = tank_data[10:]
    this_channel = draw_tanks_in_channel([my_data], my_data)  # computed but unused downstream
    ally_channel = draw_tanks_in_channel(ally, my_data)
    enemy_channel = draw_tanks_in_channel(enemy, my_data)
    neutral_channel = draw_tanks_in_channel(neutral, my_data)
    barriers_channel = barriers_for_player(barriers, my_data)
    # Stack channels, normalize 0-255 pixel values to 0-1 and move the
    # channel axis last: result is (IMG_SZ, IMG_SZ, 4).
    stacked = np.asarray(
        [ally_channel, neutral_channel, enemy_channel, barriers_channel]
    ).astype(np.float32) / 255.0
    return np.squeeze(np.array(stacked).transpose((3, 1, 2, 0)))
def display_cvimage(window_name, img):
    """Show `img` in a resizable OpenCV window named `window_name`.

    waitKey(1) pumps the GUI event loop briefly so the window repaints;
    it does not block.
    """
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.imshow(window_name, img)
    cv2.waitKey(1)
    #time.sleep(0.2)
def displayable_rgb_map(minimap):
    """Collapse a 4-channel (ally/neutral/enemy/barrier) minimap with values
    in [0, 1] into a BGR uint8 image suitable for cv2 display."""
    scaled = minimap * 255.0
    ally, neutral, enemy, barrier = (scaled[:, :, c] for c in range(4))
    # Barriers render white: max the barrier layer into every colour channel.
    red = np.maximum(ally, barrier)
    green = np.maximum(neutral, barrier)
    blue = np.maximum(enemy, barrier)
    bgr = np.asarray([blue, green, red]).astype(np.uint8)  # cv2 actually uses bgr
    return bgr.transpose((1, 2, 0))
if __name__ == "__main__":
    # Demo: 12 tanks (indices 0-4 ally, 5-9 enemy, 10-11 neutral).
    # Each record is [x, y, heading, hp, vx, vy].  minimap_for_player reads
    # indices 4 and 5, so records must have six elements -- the original
    # four-element records raised IndexError on td[4].
    tank_data = []
    for i in range(12):
        x = 500
        y = 500
        h = 0
        hp = 100.0
        tank_data.append([x, y, h, hp, 0.0, 0.0])
    # Place two allies and two enemies near the centre, facing each other.
    tank_data[0] = [-15, 15, 0, 100, 0, 0]
    tank_data[1] = [15, 20, 0, 100, 0, 0]
    tank_data[5] = [-15, -15, 3.14, 100, 0, 0]
    tank_data[6] = [15, -20, 3.14, 100, 0, 0]
    no_barriers = np.zeros((40, 40, 1))
    for i in range(1000):
        tank_data[0][2] += 0.1  # spin tank 0 so successive frames differ
        # tank_data[1][2] -= 0.1
        minimap0 = minimap_for_player(tank_data, 0, no_barriers)
        minimap1 = minimap_for_player(tank_data, 1, no_barriers)
        # BUG FIX: tank 5's map was stored in `minimap6` and then immediately
        # overwritten by tank 6's map; store it in `minimap5`.
        minimap5 = minimap_for_player(tank_data, 5, no_barriers)
        minimap6 = minimap_for_player(tank_data, 6, no_barriers)
        # display the channels
        # display_cvimage("allies2", minimap0[1])
        # display_cvimage("allies3", minimap1[1])
        # pause for a few seconds so we can view the images
        time.sleep(0.2)
| StarcoderdataPython |
11245925 | from .importer import *
from .settings import *
# Names re-exported from .importer / .settings via `from <package> import *`.
__all__ = [
    'DraftstarsCSVImporter', 'DraftstarsAFLSettings', 'DraftstarsGolfSettings', 'DraftstarsNBASettings'
] | StarcoderdataPython |
120327 | import os
import time
import random
from multiprocessing import Process, Queue
def _monte_carlo_processing(x_nums, fun, cons, bounds, random_times, q: Queue):
"""
monte_carlo 的子进程函数,完成随机试验,向 Queue 传递结果
"""
random.seed(time.time() + os.getpid())
pb = 0
xb = []
for i in range(random_times):
x = [random.randint(bounds[i][0], bounds[i][1]) for i in range(x_nums)] # 产生一行x_nums列的区间[0, 99] 上的随机整数
rf = fun(x)
rg = cons(x)
if all((a < 0 for a in rg)): # 若 rg 中所有元素都小于 0,即符合约束条件
if pb < rf:
xb = x
pb = rf
q.put({"fun": pb, "x": xb})
def monte_carlo(x_nums, fun, cons, bounds, random_times=10 ** 5):
    """
    Find a satisfactory solution of an integer program by Monte Carlo search.

    For linear or non-linear integer programs, random sampling can produce a
    satisfactory (usually not optimal) solution with bounded work.  Every
    variable must be given an explicit finite range.

    Problem model::

        Maximize:   fun(x)
        Subject to: cons(x) < 0   (element-wise)
                    (x are integers)

    BUG FIX: the workers record the feasible point with the *largest*
    objective value (consistent with the example below), but the results
    were previously combined with ``<``, returning the worst of the
    per-worker bests; the combination now uses ``>``.

    Parameters
    ----------
    :param x_nums: `int`, number of elements of the unknown vector x
    :param fun: `(x: list) -> float`, objective function to maximize
    :param cons: `(x: list) -> list`, constraint values required to be < 0
    :param bounds: `list`, inclusive (low, high) range for each x[i]
    :param random_times: `int`, total number of random trials

    Returns
    -------
    :return: {"x": [...], "fun": ...} or None if no worker reported a result
        - x: best point found
        - fun: best objective value found

    Examples
    --------
    Find a satisfactory solution of::

        Max  x_0 + x_1
        s.t. 2 * x_0 + x_1 <= 6
             4 * x_0 + 5 * x_1 <= 20
             (x_0, x_1 integers)

    >>> fun = lambda x: x[0] + x[1]
    >>> cons = lambda x: [2 * x[0] + x[1] - 6, 4 * x[0] + 5 * x[1] - 20]
    >>> monte_carlo(2, fun, cons, [(0, 100), (0, 100)])
    {'fun': 4, 'x': [1, 3]}
    """
    result_queue = Queue()
    # BUG FIX: os.cpu_count() may return None; fall back to 1 before adding
    # the two extra workers (the old `if cpus < 1` check could not catch it).
    cpus = (os.cpu_count() or 1) + 2
    sub_times = random_times // cpus
    processes = [
        Process(target=_monte_carlo_processing,
                args=(x_nums, fun, cons, bounds, sub_times, result_queue))
        for _ in range(cpus)
    ]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    # All workers have exited, so the queue contents are final.
    best = None
    while not result_queue.empty():
        data = result_queue.get()
        if best is None or data["fun"] > best["fun"]:
            best = dict(data)
    return best
def _test1():
    """Two-variable linear demo; the best feasible value is 4 at x = [1, 3]."""
    def objective(x):
        return x[0] + x[1]
    def constraints(x):
        return [2 * x[0] + x[1] - 6, 4 * x[0] + 5 * x[1] - 20]
    print(monte_carlo(2, objective, constraints, [(0, 100), (0, 100)]))
def _test2():
    """Two-variable linear demo with a weighted objective."""
    def objective(x):
        return 40 * x[0] + 90 * x[1]
    def constraints(x):
        return [9 * x[0] + 7 * x[1] - 56, 7 * x[0] + 20 * x[1] - 70]
    print(monte_carlo(2, objective, constraints, [(0, 100), (0, 100)]))
def _test3():
    """Five-variable quadratic objective with four linear constraints."""
    def objective(x: list) -> int:
        return (x[0] ** 2 + x[1] ** 2 + 3 * x[2] ** 2
                + 4 * x[3] ** 2 + 2 * x[4] ** 2
                - 8 * x[0] - 2 * x[1] - 3 * x[2] - x[3] - 2 * x[4])
    def constraints(x: list) -> list:
        return [
            sum(x) - 400,
            x[0] + 2 * x[1] + 2 * x[2] + x[3] + 6 * x[4] - 800,
            2 * x[0] + x[1] + 6 * x[2] - 200,
            x[2] + x[3] + 5 * x[4] - 200,
        ]
    print(monte_carlo(5, objective, constraints, [(0, 99)] * 5, random_times=10 ** 6))
if __name__ == "__main__":
    # Run the three demo problems; each prints its best-found solution.
    _test1()
    _test2()
    _test3()
| StarcoderdataPython |
1877641 | import helpfunctions as hlp
import numpy as np
import scipy.stats as stat
import scipy.special as spec
import nestedFAPF2 as nsmc
from optparse import OptionParser
# Command-line options.  Passing the type *objects* int/float is accepted by
# optparse (it converts them to the type names internally).
parser = OptionParser()
parser.add_option("-d", type=int, help="State dimension")
parser.add_option("--tauPhi", type=float, help="Measurement precision")
parser.add_option("-N", type=int, help="Particles")
# NOTE(review): optparse's parse_args returns (options, args); the names
# below are swapped, so `args` is really the options object.  It works, but
# the naming is misleading.
(args, options) = parser.parse_args()
# Log measurement density: Gaussian in (x - y) with precision tauPhi.
# (tauPhi is assigned below; it is only looked up when logPhi is called.)
def logPhi(x,y): return -0.5*tauPhi*(x-y)**2
tauPhi = args.tauPhi
d=args.d
filename = 'simulatedData/d'+str(d)+'tauPhi'+str(tauPhi)+'y.txt'
# Observations; the indexing y[i, t] below implies shape (d, T).
y = np.loadtxt(filename)
T = y.shape[1]
# Model parameters: AR coefficient and precisions of the transition (tauRho)
# and spatial coupling (tauPsi) terms.
a = 0.5
tauPsi = 1.
tauRho = 1.
N = args.N
# Resampling threshold: resample when the effective sample size drops below N/2.
NT = N/2
Xcur = np.zeros( (N, d) )
Xprev = np.zeros( (N, d) )
logW = np.zeros(N)
w = np.ones( N )
ancestors = np.zeros( N )
ESS = N*np.ones( T*d )
filename = './results/d'+str(d)+'_N'+str(N)+'tauPhi'+str(tauPhi)+'_nipsSMC.csv'
# Truncate any previous results file; rows are appended per time step below.
f = open(filename, 'w')
f.close()
for t in range(T):
    # At t == 0 this reads ESS[-1] == N, so no resampling on the first step.
    if ESS[t*d-1] < NT:
        ancestors = hlp.resampling(w,scheme='sys')
        Xprev = Xprev[ancestors,:]
        w = np.ones( N )
    # Propagate and weight the first spatial dimension.
    Xcur[:, 0] = a*Xprev[:, 0] + (1/np.sqrt(tauRho))*np.random.normal(size=N)
    logW = logPhi(Xcur[:,0],y[0,t])
    maxLogW = np.max(logW)  # subtract max for numerical stability
    w *= np.exp(logW - maxLogW)
    w /= np.sum(w)
    ESS[t*d] = 1/np.sum(w**2)
    for i in np.arange(1,d):
        # Resampling
        if ESS[t*d+i-1] < NT:
            ancestors = hlp.resampling(w,scheme='sys')
            Xprev = Xprev[ancestors,:]
            Xcur[:,:i] = Xcur[ancestors,:i]
            w = np.ones( N )
        # Propagate
        tau = tauRho + tauPsi
        mu = (tauRho*a*Xprev[:, i] + tauPsi*Xcur[:,i-1])/tau
        Xcur[:,i] = mu + (1/np.sqrt(tau))*np.random.normal(size=N)
        # Weighting
        logW = logPhi(Xcur[:,i],y[i,t])
        maxLogW = np.max(logW)
        w *= np.exp(logW - maxLogW)
        w /= np.sum(w)
        ESS[t*d+i] = 1/np.sum(w**2)
    # NOTE(review): this aliases rather than copies -- Xprev and Xcur are the
    # same array afterwards.  The update order above appears to read each
    # Xprev column before it is overwritten, but verify this is intentional.
    Xprev = Xcur
    # Append weighted first and second moments for this time step.
    f = open(filename, 'a')
    tmpVec = np.r_[t+1, np.sum(np.tile(w,(d,1)).T*Xcur,axis=0), np.sum(np.tile(w,(d,1)).T*Xcur**2,axis=0)]
    np.savetxt(f, tmpVec.reshape((1,len(tmpVec))),delimiter=',')
    f.close()
230177 | # https://leetcode.com/problems/minimum-operations-to-make-the-array-increasing/
# Return the minimum number of operations needed to make nums strictly increasing.
# An array nums is strictly increasing if nums[i] < nums[i+1] for all 0 <= i < nums.length - 1.
# An array of length 1 is trivially strictly increasing.
import pytest
class Solution:
    def minOperations(self, nums: list[int]) -> int:
        """Return the minimum number of +1 increments needed to make `nums`
        strictly increasing.

        Greedy: scan left to right; whenever an element is not strictly
        greater than its (already adjusted) predecessor, raise it to one past
        the predecessor and count the raise.  BUG FIX: unlike the original,
        the caller's list is no longer mutated -- the running predecessor is
        tracked locally instead.
        """
        ops = 0
        prev = nums[0] if nums else 0
        for value in nums[1:]:
            if value <= prev:
                ops += prev + 1 - value  # raise `value` to prev + 1
                prev = prev + 1
            else:
                prev = value
        return ops
# Parametrized sanity checks for Solution.minOperations.
@pytest.mark.parametrize(
    ("nums", "expected"), [([1, 1, 1], 3), ([1, 2, 3], 0), ([1, 5, 2, 3], 8)]
)
def test_basic(nums: list[int], expected: int):
    assert expected == Solution().minOperations(nums)
| StarcoderdataPython |
273894 | <reponame>Tian99/Robust-eye-gaze-tracker
#!/usr/bin/env python
"""Video eye tracking
Usage:
vid_track <vid.mov> <behave.csv> [--box <x,y,w,h>] [--dur <len_secs>] [--start <start_secs>] [--method <method>] [--fps <fps>]
vid_track methods
vid_track (-h | --help)
vid_track --version
Options:
--box POS initial pos of box containing pupil. csv like x,y,w,h. no spaces. [default: 64,46,70,79]
--dur SECS Only run for SECS of the video [default: 9e9]
--method METH Eye tracking method [default: kcf]
--start SECS time to start [default: 0]
--fps FPS frames per second [default: 60]
-h --help Show this screen.
--version Show version.
Example:
./cli.py input/run1.mov input/10997_20180818_mri_1_view.csv --start 6 --dur 6
"""
from docopt import docopt
from tracker import auto_tracker
from extraction import extraction
if __name__ == '__main__':
    args = docopt(__doc__, version='VidTrack 0.1')
    # print(args); exit()
    # Initial bounding box around the pupil, given as "x,y,w,h".
    init_box = tuple([int(x) for x in args['--box'].split(',')])
    print(init_box)
    fps = int(args['--fps'])
    # Convert the second-based CLI options to frame counts.
    start_frame = int(args['--start']) * fps
    # BUG FIX: the documented default duration is "9e9", which int() rejects
    # with ValueError; parse via float() so both "6" and "9e9" are accepted.
    max_frames = int(float(args['--dur']) * fps) + start_frame
    tracker_name = args["--method"]
    track = auto_tracker(args['<vid.mov>'], init_box,
                         write_img=False,
                         tracker_name=tracker_name, max_frames=max_frames,
                         start_frame=start_frame)
    track.set_events(args['<behave.csv>'])
    track.run_tracker()
    track.annotated_plt()
track.annotated_plt()
| StarcoderdataPython |
11233004 | <gh_stars>1-10
import sys
import os
def redirect(stdout=None, stderr=None):
    """Redirect sys.stdout / sys.stderr to files.

    Each stream is redirected to the explicitly given path, falling back to
    the STDOUT / STDERR environment variable; a stream with neither stays
    untouched.  The files are opened for writing and intentionally left open
    for the lifetime of the process.
    """
    for path, name in ((stdout, 'stdout'), (stderr, 'stderr')):
        target = path or os.getenv(name.upper())
        if target:
            setattr(sys, name, open(target, 'w'))
| StarcoderdataPython |
4834184 | #!/usr/bin/env python3
# mc_gibbs_lj.py
#------------------------------------------------------------------------------------------------#
# This software was written in 2016/17 #
# by <NAME> <<EMAIL>>/<<EMAIL>> #
# and <NAME> <<EMAIL>> ("the authors"), #
# to accompany the book "Computer Simulation of Liquids", second edition, 2017 ("the text"), #
# published by Oxford University Press ("the publishers"). #
# #
# LICENCE #
# Creative Commons CC0 Public Domain Dedication. #
# To the extent possible under law, the authors have dedicated all copyright and related #
# and neighboring rights to this software to the PUBLIC domain worldwide. #
# This software is distributed without any warranty. #
# You should have received a copy of the CC0 Public Domain Dedication along with this software. #
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #
# #
# DISCLAIMER #
# The authors and publishers make no warranties about the software, and disclaim liability #
# for all uses of the software, to the fullest extent permitted by applicable law. #
# The authors and publishers do not recommend use of this software for any purpose. #
# It is made freely available, solely to clarify points made in the text. When using or citing #
# the software, you should not imply endorsement by the authors or publishers. #
#------------------------------------------------------------------------------------------------#
"""Monte Carlo, Gibbs ensemble."""
def calc_variables ( ):
    """Calculates all variables of interest.

    They are collected and returned as a list, for use in the main program.

    Note: reads the simulation state (n1, n2, box1, box2, total1, total2 and
    the move/swap/volume acceptance ratios) from the module's globals.
    """
    # In this example we simulate using the cut (but not shifted) potential
    # The values of < p_c >, < e_c > and < density > should be consistent (for this potential)
    # For simplicity, long-range corrections are not applied here to give estimates of
    # < e_f > and < p_f > for the full (uncut) potential, but this is straightforward to do.
    # The value of the cut-and-shifted potential is not used, in this example
    # (Unused imports of numpy, math, potential_lrc and pressure_lrc removed.)
    from averages_module import VariableType
    from lrc_module import pressure_delta
    # Preliminary calculations (n1,n2,r1,r2,etc are taken from the calling program)
    vol1 = box1**3 # Volume
    vol2 = box2**3 # Volume
    rho1 = n1 / vol1 # Density
    rho2 = n2 / vol2 # Density
    # Variables of interest, of class VariableType, containing three attributes:
    #   .val: the instantaneous value
    #   .nam: used for headings
    #   .method: indicating averaging method
    # If not set below, .method adopts its default value of avg
    # The .nam and some other attributes need only be defined once, at the start of the program,
    # but for clarity and readability we assign all the values together below
    # Move, swap, volume exchange acceptance ratios
    m1_r = VariableType ( nam = 'Move ratio (1)', val = m1_ratio, instant = False )
    m2_r = VariableType ( nam = 'Move ratio (2)', val = m2_ratio, instant = False )
    x12_r = VariableType ( nam = 'Swap ratio (1->2)', val = x12_ratio, instant = False )
    x21_r = VariableType ( nam = 'Swap ratio (2->1)', val = x21_ratio, instant = False )
    v_r = VariableType ( nam = 'Volume ratio', val = v_ratio, instant = False )
    # Number of particles
    n_1 = VariableType ( nam = 'Number (1)', val = float(n1) )
    n_2 = VariableType ( nam = 'Number (2)', val = float(n2) )
    # Density
    density_1 = VariableType ( nam = 'Density (1)', val = rho1 )
    density_2 = VariableType ( nam = 'Density (2)', val = rho2 )
    # Internal energy per atom for simulated, cut, potential
    # Ideal gas contribution plus cut (but not shifted) PE divided by N
    e1_c = VariableType ( nam = 'E/N cut (1)', val = 1.5*temperature + total1.pot/n1 )
    e2_c = VariableType ( nam = 'E/N cut (2)', val = 1.5*temperature + total2.pot/n2 )
    # Pressure for simulated, cut, potential
    # Delta correction plus ideal gas contribution plus total virial divided by V
    p1_c = VariableType ( nam = 'P cut (1)', val = pressure_delta(rho1,r_cut) + rho1*temperature + total1.vir/vol1 )
    p2_c = VariableType ( nam = 'P cut (2)', val = pressure_delta(rho2,r_cut) + rho2*temperature + total2.vir/vol2 )
    # Collect together into a list for averaging
    return [ m1_r, m2_r, x12_r, x21_r, v_r, n_1, n_2, density_1, density_2, e1_c, e2_c, p1_c, p2_c ]
# Takes in a pair of configurations of atoms (positions)
# Cubic periodic boundary conditions
# Conducts Gibbs ensemble Monte Carlo at the given temperature, total volume and total N
# To avoid some inconvenient tests, we disallow configurations in which either box is empty
# Uses no special neighbour lists
# Reads several variables and options from standard input using JSON format
# Leave input empty "{}" to accept supplied defaults
# Positions r are divided by box length after reading in
# However, input configuration, output configuration, most calculations, and all results
# are given in simulation units defined by the model
# For example, for Lennard-Jones, sigma = 1, epsilon = 1
# Note that long-range corrections are not included in the acceptance/rejection
# of creation and destruction moves
# Despite the program name, there is nothing here specific to Lennard-Jones
# The model is defined in mc_lj_module
import json
import sys
import numpy as np
import math
from config_io_module import read_cnf_atoms, write_cnf_atoms
from averages_module import run_begin, run_end, blk_begin, blk_end, blk_add
from maths_module import random_translate_vector, metropolis
from mc_lj_module import introduction, conclusion, potential, potential_1, PotentialType
# Configuration file naming: cnf1.inp / cnf2.inp are read, cnf1.out / cnf2.out
# are written, and numbered snapshots cnf1.NNN / cnf2.NNN are saved per block.
cnf1_prefix = 'cnf1.'
cnf2_prefix = 'cnf2.'
inp_tag = 'inp'
out_tag = 'out'
sav_tag = 'sav'
print('mc_gibbs_lj')
print('Monte Carlo, Gibbs ensemble')
print('Simulation uses cut (but not shifted) potential')
# Read parameters in JSON format (supply "{}" on stdin to accept defaults)
try:
    nml = json.load(sys.stdin)
except json.JSONDecodeError:
    print('Exiting on Invalid JSON format')
    sys.exit()
# Set default values, check keys and typecheck values
defaults = {"nblock":10, "nstep":1000, "nswap":20, "temperature":1.0,
            "r_cut":2.5, "dr_max":0.15, "dv_max":10.0}
for key, val in nml.items():
    if key in defaults:
        assert type(val) == type(defaults[key]), key+" has the wrong type"
    else:
        print('Warning', key, 'not in ',list(defaults.keys()))
# Set parameters to input values or defaults
nblock = nml["nblock"] if "nblock" in nml else defaults["nblock"]
nstep = nml["nstep"] if "nstep" in nml else defaults["nstep"]
nswap = nml["nswap"] if "nswap" in nml else defaults["nswap"]
temperature = nml["temperature"] if "temperature" in nml else defaults["temperature"]
r_cut = nml["r_cut"] if "r_cut" in nml else defaults["r_cut"]
dr_max = nml["dr_max"] if "dr_max" in nml else defaults["dr_max"]
dv_max = nml["dv_max"] if "dv_max" in nml else defaults["dv_max"]
introduction()
# Seed from OS entropy: runs are not reproducible by design.
np.random.seed()
# Write out parameters
print( "{:40}{:15d}  ".format('Number of blocks', nblock) )
print( "{:40}{:15d}  ".format('Number of steps per block', nstep) )
print( "{:40}{:15d}  ".format('Swap attempts per step', nswap) )
print( "{:40}{:15.6f}".format('Specified temperature', temperature) )
print( "{:40}{:15.6f}".format('Potential cutoff distance', r_cut) )
print( "{:40}{:15.6f}".format('Maximum displacement', dr_max) )
print( "{:40}{:15.6f}".format('Maximum volume change', dv_max) )
# Read in initial configurations
n1, box1, r1 = read_cnf_atoms ( cnf1_prefix+inp_tag )
n2, box2, r2 = read_cnf_atoms ( cnf2_prefix+inp_tag )
print( "{:40}{:15d}{:15d} ".format('Number of particles', n1, n2 ) )
print( "{:40}{:15.6f}{:15.6f}".format('Simulation box length', box1, box2 ) )
print( "{:40}{:15.6f}{:15.6f}".format('Density', n1/box1**3, n2/box2**3 ) )
r1 = r1 / box1 # Convert positions to box units
r2 = r2 / box2 # Convert positions to box units
r1 = r1 - np.rint ( r1 ) # Periodic boundaries
r2 = r2 - np.rint ( r2 ) # Periodic boundaries
# Initial energy and overlap check
total1 = potential ( box1, r_cut, r1 )
assert not total1.ovr, 'Overlap in initial configuration 1'
total2 = potential ( box2, r_cut, r2 )
assert not total2.ovr, 'Overlap in initial configuration 2'
# Initialize arrays for averaging and write column headings
m1_ratio = 0.0
m2_ratio = 0.0
x12_ratio = 0.0
x21_ratio = 0.0
v_ratio = 0.0
run_begin ( calc_variables() )
# Initialize histograms
nh = 300
rho_min, rho_max = 0.0, 0.9
eng_min, eng_max = -3.3, 1.2
rho_vals = np.zeros ( (nstep,2), dtype = np.float_ ) # Stores density values for both boxes
eng_vals = np.zeros ( (nstep,2), dtype = np.float_ ) # Stores energy values for both boxes
rho_hist = np.zeros ( nh, dtype = np.float_ ) # Density histogram
eng_hist = np.zeros ( nh, dtype = np.float_ ) # Energy histogram
for blk in range(1,nblock+1): # Loop over blocks
    blk_begin()
    for stp in range(nstep): # Loop over steps
        # --- Single-particle translation moves in box 1 ---
        m_acc = 0
        for i in range(n1): # Loop over atoms in system 1
            rj = np.delete(r1,i,0) # Array of all the other atoms
            partial_old = potential_1 ( r1[i,:], box1, r_cut, rj ) # Old atom potential, virial etc
            assert not partial_old.ovr, 'Overlap in current configuration'
            ri = random_translate_vector ( dr_max/box1, r1[i,:] ) # Trial move to new position (in box=1 units)
            ri = ri - np.rint ( ri ) # Periodic boundary correction
            partial_new = potential_1 ( ri, box1, r_cut, rj ) # New atom potential, virial etc
            if not partial_new.ovr: # Test for non-overlapping configuration
                delta = partial_new.pot - partial_old.pot # Use cut (but not shifted) potential
                delta = delta / temperature
                if metropolis ( delta ): # Accept Metropolis test
                    total1 = total1 + partial_new - partial_old # Update total values
                    r1[i,:] = ri # Update position
                    m_acc = m_acc + 1 # Increment move counter
        m1_ratio = m_acc / n1
        # --- Single-particle translation moves in box 2 ---
        m_acc = 0
        for i in range(n2): # Loop over atoms in system 2
            rj = np.delete(r2,i,0) # Array of all the other atoms
            partial_old = potential_1 ( r2[i,:], box2, r_cut, rj ) # Old atom potential, virial etc
            assert not partial_old.ovr, 'Overlap in current configuration'
            ri = random_translate_vector ( dr_max/box2, r2[i,:] ) # Trial move to new position (in box=1 units)
            ri = ri - np.rint ( ri ) # Periodic boundary correction
            partial_new = potential_1 ( ri, box2, r_cut, rj ) # New atom potential, virial etc
            if not partial_new.ovr: # Test for non-overlapping configuration
                delta = partial_new.pot - partial_old.pot # Use cut (but not shifted) potential
                delta = delta / temperature
                if metropolis ( delta ): # Accept Metropolis test
                    total2 = total2 + partial_new - partial_old # Update total values
                    r2[i,:] = ri # Update position
                    m_acc = m_acc + 1 # Increment move counter
        m2_ratio = m_acc / n2
        # --- Particle swap (transfer) moves between the boxes ---
        x12_try = 0
        x12_acc = 0
        x21_try = 0
        x21_acc = 0
        for iswap in range(nswap):
            ri = np.random.rand(3) # Three uniform random numbers in range (0,1)
            ri = ri - 0.5 # Now in range (-0.5,+0.5) for box=1 units
            if np.random.rand() < 0.5: # Try swapping 1->2
                x12_try = x12_try + 1
                if n1>1: # Disallow n1->0
                    i = np.random.randint(n1) # Choose atom at random in system 1
                    rj = np.delete(r1,i,0) # Array of all the other atoms
                    partial_old = potential_1 ( r1[i,:], box1, r_cut, rj ) # Old atom potential, virial, etc
                    assert not partial_old.ovr, 'Overlap found on particle removal'
                    partial_new = potential_1 ( ri, box2, r_cut, r2 ) # New atom potential, virial, etc
                    if not partial_new.ovr: # Test for non-overlapping configuration
                        delta = ( partial_new.pot - partial_old.pot ) / temperature # Use cut (not shifted) potential
                        delta = delta - np.log ( box2**3 / ( n2+1 ) ) # Creation in 2
                        delta = delta + np.log ( box1**3 / n1 ) # Destruction in 1
                        if metropolis ( delta ): # Accept Metropolis test
                            r2 = np.append ( r2, ri[np.newaxis,:], 0 ) # Add new particle to r2 array
                            n2 = r2.shape[0] # New value of N2
                            r1 = np.copy(rj) # Delete particle from r1 array
                            n1 = r1.shape[0] # New value of N1
                            total1 = total1 - partial_old # Update total values
                            total2 = total2 + partial_new # Update total values
                            x12_acc = x12_acc + 1 # Increment 1->2 move counter
            else: # Try swapping 2->1
                x21_try = x21_try + 1
                if n2>1: # Disallow n2->0
                    i = np.random.randint(n2) # Choose atom at random in system 2
                    rj = np.delete(r2,i,0) # Array of all the other atoms
                    partial_old = potential_1 ( r2[i,:], box2, r_cut, rj ) # Old atom potential, virial, etc
                    assert not partial_old.ovr, 'Overlap found on particle removal'
                    partial_new = potential_1 ( ri, box1, r_cut, r1 ) # New atom potential, virial, etc
                    if not partial_new.ovr: # Test for non-overlapping configuration
                        delta = ( partial_new.pot - partial_old.pot ) / temperature # Use cut (not shifted) potential
                        delta = delta - np.log ( box1**3 / ( n1+1 ) ) # Creation in 1
                        delta = delta + np.log ( box2**3 / n2 ) # Destruction in 2
                        if metropolis ( delta ): # Accept Metropolis test
                            r1 = np.append ( r1, ri[np.newaxis,:], 0 ) # Add new particle to r1 array
                            n1 = r1.shape[0] # New value of N1
                            r2 = np.copy(rj) # Delete particle from r2 array
                            n2 = r2.shape[0] # New value of N
                            total1 = total1 + partial_new # Update total values
                            total2 = total2 - partial_old # Update total values
                            x21_acc = x21_acc + 1 # Increment 2->1 move counter
        x12_ratio = x12_acc/x12_try if x12_try>0 else 0.0
        x21_ratio = x21_acc/x21_try if x21_try>0 else 0.0
        # Volume move (total volume conserved: dv removed from 1, added to 2)
        v_ratio = 0.0
        dv = dv_max * ( 2.0*np.random.rand() - 1.0 ) # Uniform on (-dv_max,+dv_max)
        vol1_old = box1**3 # Old volume
        vol2_old = box2**3 # Old volume
        vol1_new = vol1_old - dv # New volume
        vol2_new = vol2_old + dv # New volume
        box1_new = vol1_new**(1.0/3.0) # New box length
        box2_new = vol2_new**(1.0/3.0) # New box length
        assert min(box1_new,box2_new)>2.0*r_cut, 'Box length too small'
        total1_new = potential ( box1_new, r_cut, r1 )
        total2_new = potential ( box2_new, r_cut, r2 )
        if not ( total1_new.ovr or total2_new.ovr ): # Test for non-overlapping configurations
            delta = total1_new.pot + total2_new.pot - total1.pot - total2.pot
            delta = delta / temperature
            delta = delta - n1*np.log(vol1_new/vol1_old) # Volume scaling in system 1
            delta = delta - n2*np.log(vol2_new/vol2_old) # Volume scaling in system 2
            if metropolis ( delta ): # Accept Metropolis test
                total1 = total1_new # Update total values
                total2 = total2_new # Update total values
                box1 = box1_new # Update box lengths
                box2 = box2_new # Update box lengths
                v_ratio = 1.0 # Set move counter
        blk_add ( calc_variables() ) # Accumulate block averages
        rho_vals[stp,:] = [n1/box1**3,n2/box2**3]
        eng_vals[stp,:] = [1.5*temperature+total1.pot/n1,1.5*temperature+total2.pot/n2]
    blk_end(blk) # Output block averages
    sav_tag = str(blk).zfill(3) if blk<1000 else 'sav' # Number configuration by block
    write_cnf_atoms ( cnf1_prefix+sav_tag, n1, box1, r1*box1 ) # Save configuration
    write_cnf_atoms ( cnf2_prefix+sav_tag, n2, box2, r2*box2 ) # Save configuration
    # Increment histograms (both boxes' values are pooled into one histogram)
    rho_h, rho_bins = np.histogram ( rho_vals, bins=nh, range=(rho_min,rho_max) )
    eng_h, eng_bins = np.histogram ( eng_vals, bins=nh, range=(eng_min,eng_max) )
    rho_hist = rho_hist + rho_h
    eng_hist = eng_hist + eng_h
run_end ( calc_variables() )
# Write out histograms
# Normalization: 2 samples per step (one per box) times nstep*nblock steps,
# divided by the bin width.  rho_bins/eng_bins come from the histogram calls
# made in the final block iteration above (bin edges are identical each time).
norm = 2*nstep*nblock*(rho_bins[1]-rho_bins[0])
rho_hist = rho_hist / norm
norm = 2*nstep*nblock*(eng_bins[1]-eng_bins[0])
eng_hist = eng_hist / norm
with open("his.out","w") as f:
    for k in range(nh):
        # Write bin centres alongside the normalized histogram values.
        rho = (rho_bins[k]+rho_bins[k+1])/2.0
        eng = (eng_bins[k]+eng_bins[k+1])/2.0
        print("{:15.6f}{:15.6f}{:15.6f}{:15.6f}".format(rho,rho_hist[k],eng,eng_hist[k]),file=f)
write_cnf_atoms ( cnf1_prefix+out_tag, n1, box1, r1*box1 ) # Save configuration
write_cnf_atoms ( cnf2_prefix+out_tag, n2, box2, r2*box2 ) # Save configuration
conclusion()
| StarcoderdataPython |
def test_demo():
    """
    Assign a value to `x` after an assert
    Testsuite will want to ensure that we print `x = 1`, which was the value at
    the time of the assert
    """
    x = 1
    # Intentionally failing assertion: the harness under test must report the
    # value x had *here* (1), not the reassignment below.  Do not "fix" this.
    assert x == 2
    x = 2
| StarcoderdataPython |
106014 | from .LevelTile import LevelTile
from GLOBAL_DATA import Level_Tile_Data as LTD
from ..Minigames.Lockpick import Lockpick
class DoorTile(LevelTile):
    """A level tile representing a door that may be closed and/or locked."""
    # Class-level defaults, overridden per instance where applicable.
    _closed = True
    _lock_level = 0
    _lockpicking_minigame = None
    def __init__(self, appearance, lock_level=0):
        super().__init__(appearance)
        # The appearance code decides whether the door starts open or closed;
        # any other code leaves the class default (closed) in place.
        if appearance == LTD._OPDOOR_CODE:
            self._closed = False
        elif appearance == LTD._CLDOOR_CODE:
            self._closed = True
        self._lock_level = lock_level
        # Locked doors get a lockpicking minigame whose width grows with the
        # lock level: level 1 -> Lockpick(3, 3), level 2 -> Lockpick(4, 3).
        if lock_level in (1, 2):
            self._lockpicking_minigame = Lockpick(2 + lock_level, 3)
    def get_tile_char(self):
        """Map character: '+' when closed, '\\' when open."""
        return '+' if self._closed else '\\'
    def get_lockpicking_minigame(self):
        """Return the door's Lockpick instance, or None if unlocked."""
        return self._lockpicking_minigame
    def get_color(self):
        """Colour keyed by lock level."""
        return LTD.door_lock_level_colors[self._lock_level]
    def set_closed(self, closed=True):
        """Open or close the door, resetting any lockpicking progress."""
        self._closed = closed
        minigame = self._lockpicking_minigame
        if minigame is not None:
            minigame.reset_state()
    def get_closed(self):
        return self._closed
    def get_passable(self):
        """A door can be walked through only while open."""
        return not self._closed
    def get_opaque(self):
        """A closed door blocks line of sight."""
        return self._closed
| StarcoderdataPython |
9654805 | import pytest
from pytest import fixture
from command_line.main import SingleFileExperimentFactory, CLI
from methods import Method
from models import Sample, Gene
from .utilities import parsing_error
from .utilities import parsing_output
from .utilities import parse
def make_samples(samples_dict):
    """Create samples from dict representation"""
    samples = []
    for name, values in samples_dict.items():
        samples.append(Sample.from_names(name, values))
    return samples
# Reference Sample objects matching the fixture files t.tsv / c.tsv created
# by the `test_files` fixture; the parsing tests compare against these.
expected_cases = make_samples({
    'Tumour_1': {'TP53': 7, 'BRCA2': 7},
    'Tumour_2': {'TP53': 6, 'BRCA2': 9},
})
expected_controls = make_samples({
    'Control_1': {'TP53': 6, 'BRCA2': 6},
    'Control_2': {'TP53': 6, 'BRCA2': 7},
})
@fixture
def test_files(tmpdir):
    """Write the small expression-matrix fixture files into `tmpdir`
    (create_files also chdirs into it, so tests can use bare filenames)."""
    # Here I assume that all the files are in TSV format (for now, we may want to change this in the future)
    # create temporary files
    files = {
        # the ".ext" extensions are here just to visually
        # mark that the strings represent some file names.
        'c.tsv': (
            'Gene Control_1 Control_2',
            'TP53 6 6',
            'BRCA2 6 7',
        ),
        't.tsv': (
            'Gene Tumour_1 Tumour_2',
            'TP53 7 6',
            'BRCA2 7 9',
        ),
        't_2.tsv': (
            'Gene Tumour_3 Tumour_4',
            'TP53 7 6',
            'BRCA2 6 8',
        ),
        'control_without_headers.tsv': (
            'TP53 5 6',
            'BRCA2 4 8',
        ),
        'merged.tsv': (
            'Gene Control_1 Control_2 Tumour_1 Tumour_2',
            'TP53 6 6 7 6',
            'BRCA2 6 7 7 9',
        )
    }
    create_files(tmpdir, files)
def create_files(tmpdir, files):
    """Write each {filename: lines} entry into `tmpdir` and chdir into it."""
    tmpdir.chdir()
    for name, lines in files.items():
        tmpdir.join(name).write('\n'.join(lines))
class DummyMethod(Method):
    # Minimal no-op Method so the CLI's mandatory method-selection argument
    # can be satisfied in tests (selected as "dummy" by p_parse below).
    name = 'dummy'
    help = ''
    def run(self, experiment):
        pass
def p_parse(command_line):
    """Parse with prefix"""
    # Some method is always obligatory, so every parser invocation is
    # prefixed with the dummy method-selection command.
    full_command = 'dummy ' + command_line
    try:
        return parse(full_command)
    except (Exception, SystemExit) as e:
        # Echo the failing command to ease debugging, then re-raise.
        print(command_line)
        raise e
def test_simple_files_loading(test_files):
    """Only general tests, no details here"""
    # one file for case, one for control
    opts = p_parse('case t.tsv control c.tsv')
    assert len(opts.case.sample_collection.samples) == 2
    # two files for case, one for control
    opts = p_parse('control c.tsv case t.tsv t_2.tsv')
    assert len(opts.case.sample_collection.samples) == 4
    with parsing_error(match='Neither data nor \(case & control\) have been provided!'):
        p_parse('')
    # Giving only one side must produce an error naming the *other*,
    # missing side.
    sample_collections = {'case': 'Control', 'control': 'Case'}
    for sample_collection, name in sample_collections.items():
        with parsing_error(match=f'{name} has not been provided!'):
            p_parse(f'{sample_collection} c.tsv')
def test_select_samples(test_files):
    """Selecting by --columns (index) and by --samples (name) must be
    equivalent, and bad selections must produce descriptive errors."""
    # lets select only first samples from both files
    commands = [
        'case t.tsv --columns 0 control c.tsv --columns 0',
        'case t.tsv --samples Tumour_1 control c.tsv --samples Control_1'
    ]
    for command in commands:
        opts = p_parse(command)
        assert opts.control.sample_collection.samples == expected_controls[:1]
        assert opts.case.sample_collection.samples == expected_cases[:1]
    # get both tumour samples from file t.tsv and
    # the first sample (Tumour_3) from file t_2.tsv
    commands = [
        'control c.tsv case t.tsv t_2.tsv --columns 0,1 0',
        'case t.tsv t_2.tsv --samples Tumour_1,Tumour_2 Tumour_3 control c.tsv'
    ]
    for command in commands:
        opts = p_parse(command)
        assert len(opts.case.sample_collection.samples) == 3
    # lets try to grab a sample which is not in the file
    expected_message = (
        "Samples {'Control_1'} are not available in t.tsv file.\n"
        "Following samples were found: Tumour_1, Tumour_2."
    )
    with parsing_error(match=expected_message):
        p_parse('case t.tsv --samples Control_1 control c.tsv')
    with parsing_error(match='columns for 2 files provided, expected for 1'):
        # the user should use --columns 0,1 instead
        p_parse('control c.tsv case t.tsv --columns 0 1')
    with parsing_error(match='columns for 1 files provided, expected for 2'):
        p_parse('control c.tsv case t.tsv t_2.tsv --columns 1')
def test_columns_purpose_deduction(test_files):
    """When only --control (or only --case) is given for a merged file, all
    the remaining columns are assigned to the other collection; ranges and
    comma lists must behave identically."""
    # all the other columns (id >= 2) are cases
    commands = [
        'data merged.tsv --control :2',
        'data merged.tsv --control 0,1',
        'data merged.tsv --case 2:',
        'data merged.tsv --case 2,3'
    ]
    for command in commands:
        opts = p_parse(command)
        assert opts.control.sample_collection.samples == expected_controls
        assert opts.case.sample_collection.samples == expected_cases
def test_non_tab_delimiter(tmpdir):
    """Files given with an explicit --delimiter must still parse into the
    expected samples (c.tsv here is comma-separated)."""
    create_files(tmpdir, {
        'c.tsv': (
            'Gene,Control_1,Control_2',
            'TP53,6,6',
            'BRCA2,6,7',
        ),
        't.tsv': (
            'Gene Tumour_1 Tumour_2',
            'TP53 7 6',
            'BRCA2 7 9',
        ),
    })
    opts = p_parse('case t.tsv control c.tsv --delimiter ,')
    assert opts.control.sample_collection.samples == expected_controls
    assert opts.case.sample_collection.samples == expected_cases
def test_file_with_description(test_files, tmpdir):
    """A "Description" column requires -d, otherwise the parser warns."""
    # NOTE: the whitespace inside these literals is significant (separators).
    create_files(tmpdir, {
        'control_with_descriptions.tsv': (
            'Gene Description Control_1 Control_2',
            'TP53 Tumour protein 53 6 6',
            'BRCA2 Breast cancer type 2 s. protein 6 7',
        )
    })
    expected_warning = (
        'First line of your file contains "description" column, '
        'but you did not provide "--description_column" argument.'
    )
    # user forgot: the description column is then mis-read as a third sample
    with pytest.warns(UserWarning, match=expected_warning):
        opts = p_parse('case t.tsv control control_with_descriptions.tsv')
    assert len(opts.control.sample_collection.samples) == 3
    # user remembered: two samples, and descriptions attached to the genes
    opts = p_parse('case t.tsv control control_with_descriptions.tsv -d')
    assert len(opts.control.sample_collection.samples) == 2
    assert set(opts.control.sample_collection.samples[0].genes) == {
        Gene('TP53', description='Tumour protein 53'),
        Gene('BRCA2', description='Breast cancer type 2 s. protein')
    }
def test_custom_sample_names(test_files):
    """--header renames control samples on a per-file basis."""
    parsed = p_parse(
        'case t.tsv control c.tsv t_2.tsv --header my_control their_control'
    )
    # case should not be affected anyhow there
    assert parsed.case.sample_collection.samples == expected_cases
    loaded_controls = parsed.control.sample_collection.samples
    # are two files loaded? (each have two samples)
    assert len(loaded_controls) == 4
    expected_names = {
        'my_control_1', 'my_control_2',
        'their_control_1', 'their_control_2'
    }
    observed_names = {sample.name for sample in loaded_controls}
    assert observed_names == expected_names
def test_merged_file(test_files):
    """A single merged file can provide both groups via column selectors."""
    # advanced columns purpose inferring/deduction is tested separately
    # in `test_columns_purpose_deduction`
    # merged.tsv has two controls and two tumours, in this order
    single_sample_invocations = (
        'data merged.tsv --case 2 --control 0',
        'data merged.tsv --case 2,2 --control 0,0',
    )
    for invocation in single_sample_invocations:
        parsed = p_parse(invocation)
        assert parsed.control.sample_collection.samples == expected_controls[:1]
        assert parsed.case.sample_collection.samples == expected_cases[:1]
    all_sample_invocations = (
        'data merged.tsv --case 2: --control :2',
        'data merged.tsv --case 2,3 --control 0:2',
        'data merged.tsv --case 2-4 --control 0-2'
    )
    for invocation in all_sample_invocations:
        parsed = p_parse(invocation)
        assert parsed.control.sample_collection.samples == expected_controls
        assert parsed.case.sample_collection.samples == expected_cases
    # omitting both groups is an error...
    with parsing_error(match='Neither --case nor --control provided'):
        p_parse('data merged.tsv')
    # ...and so is mixing merged-file mode with separate case/control files
    with parsing_error(match='Cannot handle data and case/control at once'):
        p_parse('data merged.tsv --case 1 --control 2 control c.tsv')
def test_general_help(capsys):
    """--help advertises every method; unknown options are rejected."""
    with parsing_output(capsys) as text:
        parse('--help')
    # every registered analysis method must be mentioned on the help screen
    assert all(method in text.std for method in Method.members)
    with parsing_error(match='unrecognized arguments: --controll'):
        p_parse('data merged.tsv --case 1 --controll 2 control c.tsv')
def test_shows_usage_when_no_args(capsys):
    """With no CLI arguments the parser prints usage instead of raising."""
    # if there are no arguments provided, the parser should
    # show the usage summary (and do not raise any errors)
    with parsing_output(capsys) as captured:
        parse(None)
    assert 'usage' in captured.err
def test_sub_parsers_help(capsys):
    """Sub-parser help substitutes {parser_name} and honours description/epilog."""
    # do we get the `name` substitution right?
    SingleFileExperimentFactory.__doc__ = 'Description of {parser_name}'
    cli = CLI()
    assert cli.all_subparsers['data'].parser_name == 'data'
    with parsing_output(capsys) as text:
        parse('data --help')
    assert 'Description of data' in text.std
    # is custom sub-parser screen displayed and description used included in it?
    SingleFileExperimentFactory.description = 'A dummy description'
    SingleFileExperimentFactory.epilog = 'A dummy epilog'
    with parsing_output(capsys) as text:
        parse('data --help')
    lines = text.std.split('\n')
    midpoint = len(lines) // 2
    first_half = lines[:midpoint]
    second_half = lines[midpoint:]
    # is description in the text and is it in the first 50% of lines?
    assert any(
        SingleFileExperimentFactory.description in line
        for line in first_half
    )
    # is the epilog in the text and is it in the last 50% of lines?
    assert any(
        SingleFileExperimentFactory.epilog in line
        for line in second_half
    )
| StarcoderdataPython |
331860 | <gh_stars>0
import os

import numpy as np
import win32com.client as win32
class Simulation():
    """Thin wrapper around an Aspen Plus COM automation session for a
    flowsheet with a single column block named "B1".

    NOTE(review): ``AspenSimulation`` is a *class* attribute, so every
    ``Simulation`` instance shares the same COM dispatch object -- confirm
    this is intentional before creating more than one instance.
    """

    AspenSimulation = win32.gencache.EnsureDispatch("Apwn.Document")

    def __init__(self, PATH, VISIBILITY):
        """Load the Aspen archive at *PATH*; *VISIBILITY* toggles the GUI window."""
        self.AspenSimulation.InitFromArchive2(os.path.abspath(PATH))
        self.AspenSimulation.Visible = VISIBILITY

    @property
    def BLK(self):
        """COM node holding all flowsheet blocks."""
        return self.AspenSimulation.Tree.Elements("Data").Elements("Blocks")

    def BLK_NumberOfStages(self, NStages):
        """Set the number of column stages (NSTAGE)."""
        self.BLK.Elements("B1").Elements("Input").Elements("NSTAGE").Value = NStages

    def BLK_FeedLocation(self, Feed_Location, Feed_Name):
        """Set the feed stage of stream *Feed_Name*."""
        self.BLK.Elements("B1").Elements("Input").Elements("FEED_STAGE").Elements(Feed_Name).Value = Feed_Location

    def BLK_Pressure(self, Pressure):
        """Set the column pressure input (PRES1)."""
        self.BLK.Elements("B1").Elements("Input").Elements("PRES1").Value = Pressure

    def BLK_RefluxRatio(self, RfxR):
        """Set the reflux ratio (BASIS_RR)."""
        self.BLK.Elements("B1").Elements("Input").Elements("BASIS_RR").Value = RfxR

    def BLK_ReboilerRatio(self, RblR):
        """Set the boilup/reboiler ratio (BASIS_BR)."""
        self.BLK.Elements("B1").Elements("Input").Elements("BASIS_BR").Value = RblR

    @property
    def STRM(self):
        """COM node holding all flowsheet streams."""
        return self.AspenSimulation.Tree.Elements("Data").Elements("Streams")

    def STRM_Temperature(self, Name, Temp):
        """Set the temperature input of stream *Name*."""
        self.STRM.Elements(Name).Elements("Input").Elements("TEMP").Elements("MIXED").Value = Temp

    def STRM_Pressure(self, Name, Pressure):
        """Set the pressure input of stream *Name*."""
        self.STRM.Elements(Name).Elements("Input").Elements("PRES").Elements("MIXED").Value = Pressure

    def STRM_Flowrate(self, Name, Chemical, Flowrate):
        """Set the inlet flow rate of *Chemical* in stream *Name*."""
        self.STRM.Elements(Name).Elements("Input").Elements("FLOW").Elements("MIXED").Elements(
            Chemical).Value = Flowrate

    def STRM_Get_Outputs(self, Name, Chemicals):
        """Return the outlet mole flows of the first six *Chemicals* of stream *Name*.

        Replaces six copy-pasted COMP_n locals with a comprehension; also
        generalizes to shorter chemical lists (the original required exactly
        six entries and raised IndexError otherwise).
        """
        STRM_COMP = self.STRM.Elements(Name).Elements("Output").Elements("MOLEFLOW").Elements("MIXED")
        return [STRM_COMP.Elements(chemical).Value for chemical in Chemicals[:6]]

    def STRM_Get_Temperature(self, Name):
        """Return the temperature *input* of stream *Name*."""
        return self.STRM.Elements(Name).Elements("Input").Elements("TEMP").Elements("MIXED").Value

    def STRM_Get_Pressure(self, Name):
        """Return the pressure *input* of stream *Name*."""
        return self.STRM.Elements(Name).Elements("Input").Elements("PRES").Elements("MIXED").Value

    def BLK_Get_NStages(self):
        """Return the configured number of stages."""
        return self.BLK.Elements("B1").Elements("Input").Elements("NSTAGE").Value

    def BLK_Get_FeedLocation(self, Name):
        """Return the feed stage of stream *Name*."""
        return self.BLK.Elements("B1").Elements("Input").Elements("FEED_STAGE").Elements(Name).Value

    def BLK_Get_Pressure(self):
        """Return the column pressure input (PRES1)."""
        return self.BLK.Elements("B1").Elements("Input").Elements("PRES1").Value

    def BLK_Get_RefluxRatio(self):
        """Return the reflux ratio (BASIS_RR)."""
        return self.BLK.Elements("B1").Elements("Input").Elements("BASIS_RR").Value

    def BLK_Get_ReboilerRatio(self):
        """Return the boilup/reboiler ratio (BASIS_BR)."""
        return self.BLK.Elements("B1").Elements("Input").Elements("BASIS_BR").Value

    def BLK_Get_Condenser_Duty(self):
        """Return the condenser duty output."""
        return self.BLK.Elements("B1").Elements("Output").Elements("COND_DUTY").Value

    def BLK_Get_Reboiler_Duty(self):
        """Return the reboiler duty output."""
        return self.BLK.Elements("B1").Elements("Output").Elements("REB_DUTY").Value

    def BLK_Get_Column_Diameter_Setting(self):
        """Return the column diameter *input* setting (CA_DIAM).

        Bug fix: this method was previously also named
        ``BLK_Get_Column_Diameter`` and was silently shadowed (unreachable)
        by the later estimator of the same name; it has been renamed so both
        behaviours remain accessible.
        """
        return self.BLK.Elements("B1").Elements("Input").Elements("CA_DIAM").Value

    def _BLK_Get_Stage_Profile(self, element, N_stages):
        """Return the per-stage values of output *element* for stages 1..N_stages."""
        node = self.BLK.Elements("B1").Elements("Output").Elements(element)
        return [node.Elements(str(i)).Value for i in range(1, N_stages + 1)]

    def BLK_Get_Column_Stage_Molar_Weights(self, N_stages):
        """Return the per-stage vapor molar weights (MW_GAS)."""
        return self._BLK_Get_Stage_Profile("MW_GAS", N_stages)

    def BLK_Get_Column_Stage_Temperatures(self, N_stages):
        """Return the per-stage temperatures (B_TEMP)."""
        return self._BLK_Get_Stage_Profile("B_TEMP", N_stages)

    def BLK_Get_Column_Stage_Vapor_Flows(self, N_stages):
        """Return the per-stage vapor flows (VAP_FLOW)."""
        return self._BLK_Get_Stage_Profile("VAP_FLOW", N_stages)

    def BLK_Get_Column_Diameter(self, N_stages):
        """Estimate the column diameter from per-stage vapor loading.

        Computes an effective diameter per stage from vapor flow, temperature
        and molar weight, and returns 1.1x the largest value.
        Bug fix: ``numpy`` was used here but never imported; it is now
        imported at module level as ``np``.
        """
        P = 1       # pressure -- hard-coded; presumably bar (converted via *1e5) -- TODO confirm
        f = 1.6     # capacity (f-)factor of the correlation
        R = 8.314   # universal gas constant
        V = self.BLK_Get_Column_Stage_Vapor_Flows(N_stages)
        M = self.BLK_Get_Column_Stage_Molar_Weights(N_stages)
        T = self.BLK_Get_Column_Stage_Temperatures(N_stages)
        Effective_Diameter = []
        # NOTE(review): the loop stops at N_stages - 1, excluding the last
        # stage from the sizing -- confirm this is intentional.
        for i in range(0, N_stages - 1):
            Effective_Diameter += [
                np.sqrt((4 * V[i]) / (3.1416 * f) * np.sqrt(R * (T[i] + 273.15) * M[i] * 1000 / (P * 1e5)))]
        Diameter = 1.1 * max(Effective_Diameter)
        return Diameter

    def Run(self):
        """Run the Aspen engine on the currently loaded flowsheet."""
        self.AspenSimulation.Engine.Run2()
49508 | <gh_stars>0
import logging
import vispy.io
from .Canvas import Canvas
from ... import draw
import numpy as np
from ..Scene import DEFAULT_DIRECTIONAL_LIGHTS
logger = logging.getLogger(__name__)
def set_orthographic_projection(camera, left, right, bottom, top, near, far):
    """Fill *camera* in place with an orthographic projection matrix.

    The matrix maps the box [left, right] x [bottom, top] x [near, far] to
    clip coordinates, with the translation terms stored in row 3 (the matrix
    is laid out for right-multiplication by row vectors).
    """
    width = right - left
    height = top - bottom
    depth = far - near
    camera[:] = 0
    # diagonal: scale each axis to [-1, 1]
    camera[0, 0] = 2/width
    camera[1, 1] = 2/height
    camera[2, 2] = -2/depth
    # row 3: recenter the box on the origin
    camera[3, 0] = -(right + left)/width
    camera[3, 1] = -(top + bottom)/height
    camera[3, 2] = -(far + near)/depth
    camera[3, 3] = 1
class Scene(draw.Scene):
    __doc__ = (draw.Scene.__doc__ or '') + """
    This Scene supports the following features:
    * *pan*: If enabled, mouse movement will translate the scene instead of rotating it
    * *directional_light*: Add directional lights. The given value indicates the magnitude*direction normal vector.
    * *ambient_light*: Enable trivial ambient lighting. The given value indicates the magnitude of the light (in [0, 1]).
    * *translucency*: Enable order-independent transparency rendering
    * *fxaa*: Enable fast approximate anti-aliasing
    * *ssao*: Enable screen space ambient occlusion
    * *additive_rendering*: Enable additive rendering. This mode is good for visualizing densities projected through the viewing direction. Takes an optional 'invert' argument to invert the additive rendering (i.e., black-on-white instead of white-on-black).
    * *outlines*: Enable cartoony outlines. The given value indicates the width of the outlines (start small, perhaps 1e-5 to 1e-3).
    * *static*: Enable static rendering. When possible (when vispy is using a non-notebook backend), display a statically-rendered image of a scene instead of the live webGL version when `Scene.show()` is called.
    """

    def __init__(self, *args, canvas_kwargs=None, **kwargs):
        """Create the scene and its backing vispy Canvas.

        :param canvas_kwargs: optional dict of keyword arguments forwarded to
            the `Canvas` constructor
        """
        # Bug fix: `canvas_kwargs` previously used a mutable default (`{}`),
        # which is shared across all calls; use a None sentinel instead.
        if canvas_kwargs is None:
            canvas_kwargs = {}
        self.camera = np.eye(4, dtype=np.float32)
        self._zoom = 1
        self._pixel_scale = 1
        self._clip_scale = 1
        self._translation = [0, 0, 0]
        # Must exist before the superclass constructor runs: the property
        # setters below consult self._canvas during base-class initialization.
        self._canvas = None
        super(Scene, self).__init__(*args, **kwargs)
        self._canvas = Canvas(self, **canvas_kwargs)

    @property
    def zoom(self):
        """Magnification factor applied to the projection."""
        return self._zoom

    @zoom.setter
    def zoom(self, value):
        self._zoom = value
        self._update_camera()

    @property
    def pixel_scale(self):
        """Scale factor between scene units and screen pixels."""
        return self._pixel_scale

    @pixel_scale.setter
    def pixel_scale(self, value):
        self._pixel_scale = value
        if self._canvas is not None:
            self._canvas.size = self.size_pixels.astype(np.uint32)
        self._update_camera()

    @property
    def clip_scale(self):
        """Multiplier for the depth of the near/far clipping volume."""
        return self._clip_scale

    @clip_scale.setter
    def clip_scale(self, value):
        self._clip_scale = value
        self._update_camera()

    @property
    def size(self):
        """Width and height of the viewing area, in scene units."""
        return self._size

    @size.setter
    def size(self, value):
        self._size[:] = value
        self._update_camera()

    def add_primitive(self, primitive):
        """Add a primitive, then propagate camera state and enabled features to it."""
        super(Scene, self).add_primitive(primitive)
        self._update_camera()
        # Re-enable all active features so the new primitive picks up their
        # configuration as well.
        for feature in list(self.enabled_features):
            self.enable(feature, **self.get_feature_config(feature))

    def enable(self, name, auto_value=None, **parameters):
        """Enable an optional rendering feature.
        :param name: Name of the feature to enable
        :param auto_value: Shortcut for features with single-value configuration. If given as a positional argument, will be given the default configuration name 'value'.
        :param parameters: Keyword arguments specifying additional configuration options for the given feature
        """
        if auto_value is not None:
            parameters['value'] = auto_value
        # Lighting features are applied directly to the primitives here;
        # everything else is delegated to the canvas and the base class.
        if name == 'directional_light':
            lights = parameters.get('value', DEFAULT_DIRECTIONAL_LIGHTS)
            lights = np.atleast_2d(lights).astype(np.float32)
            for prim in self._primitives:
                prim.diffuseLight = lights
        elif name == 'ambient_light':
            light = parameters.get('value', .25)
            for prim in self._primitives:
                prim.ambientLight = light
        if self._canvas is not None:
            if name in self._canvas._VALID_FEATURES:
                self._canvas._enable_feature(**{name: parameters})
        super(Scene, self).enable(name, **parameters)

    def disable(self, name, strict=True):
        """Disable a feature, also notifying the canvas when it owns the feature."""
        if self._canvas is not None:
            if name in self._canvas._VALID_FEATURES:
                self._canvas._disable_feature(name)
        super(Scene, self).disable(name, strict=strict)

    def _update_camera(self):
        """Recompute the orthographic projection from size, zoom and clip scale."""
        (width, height) = self.size.astype(np.float32)
        # The scene diagonal sets the depth of the clipping volume.
        dz = np.sqrt(np.sum(self.size**2))*self._clip_scale
        # Center the scene within the [1, 1 + dz] depth range.
        translation = self.translation
        translation[2] = -(2 + dz)/2
        self.translation = translation
        set_orthographic_projection(
            self.camera,
            -width/2, width/2,
            -height/2, height/2,
            1, 1 + dz)
        if self._canvas is not None:
            self._canvas.clip_planes = (1, 1 + dz)
        # Apply zoom to the x/y scaling terms only.
        self.camera[[0, 1], [0, 1]] *= self._zoom
        for prim in self._primitives:
            prim.camera = self.camera

    def save(self, filename):
        """Render and save an image of this Scene.
        :param filename: target filename to save the image into
        """
        if self._canvas is not None:
            img = self._canvas.render()
            vispy.io.write_png(filename, img)

    def show(self):
        """Display this Scene object."""
        cfg = self.get_feature_config('static')
        if cfg and cfg.get('value', False):
            # Imported lazily: these are only needed for static notebook output.
            import imageio
            import io
            import IPython.display
            import vispy.app
            vispy_backend = vispy.app.use_app().backend_name
            if 'webgl' not in vispy_backend:
                target = io.BytesIO()
                img = self._canvas.render()
                imageio.imwrite(target, img, 'png')
                return IPython.display.Image(data=target.getvalue())
            msg = ('vispy has already loaded the {} backend, ignoring static'
                   ' feature. Try manually selecting a desktop vispy backend '
                   'before importing plato, for example:\n import vispy.app; '
                   'vispy.app.use_app("pyglet")'.format(vispy_backend))
            logger.warning(msg)
        return self._canvas.show()

    def render(self):
        """Have vispy redraw this Scene object."""
        self._canvas.update()
| StarcoderdataPython |
9665488 | <reponame>phohenecker/exp-base<filename>examples/basic_setup/main.py
# -*- coding: utf-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# BSD 2-Clause License #
# #
# Copyright (c) 2021, <NAME> #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# 1. Redistributions of source code must retain the above copyright notice, this #
# list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE #
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""This is a very simple example that illustrates the basic setup of an experiment using the package `expbase`.
This basic setup includes the following steps:
1. define a config class,
2. implement a `TrainingExecutor`,
3. implement an `EvaluationExecutor`, and
4. launch the experiment.
"""
import expbase as xb
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2021, <NAME>"
__license__ = "BSD-2-Clause"
__version__ = "0.1.0"
__date__ = "08 May 2021"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
# ==================================================================================================================== #
# CONFIG #
# ==================================================================================================================== #
class MyConfig(xb.BaseConfig):
    """Example experiment configuration that adds one option, ``my_conf``,
    which expbase exposes as the command-line argument ``--my-conf``."""
    # Value used when the user does not provide --my-conf on the command line.
    DEFAULT_MY_CONF = "Unknown"
    #  CONSTRUCTOR  ####################################################################################################
    def __init__(self):
        """Create the configuration with ``my_conf`` set to its default."""
        super().__init__()
        self._my_conf = self.DEFAULT_MY_CONF
    #  PROPERTIES  #####################################################################################################
    @property
    def my_conf(self) -> str:
        """str: This is some configuration of your experiment that the user defines as a command-line arg."""
        return self._my_conf
    @my_conf.setter
    def my_conf(self, my_conf: str) -> None:
        # Coerce to str so the attribute always satisfies the property's type.
        self._my_conf = str(my_conf)
# ==================================================================================================================== #
# TRAINING EXECUTOR #
# ==================================================================================================================== #
class MyTrainingExecutor(xb.TrainingExecutor):
    """Toy training executor: narrates the expbase training-side API via print
    statements and delivers one dummy checkpoint to trigger evaluation."""
    def _run_training(self) -> None:
        """Demonstrate config access, the results directory, and checkpoint
        delivery (``self._deliver_ckpt``) -- no actual training happens."""
        print("This is where the actual training procedure is implemented.")
        print("The user-defined config is accessed via self._conf.")
        print(f"For example, the config my_conf (with arg --my-conf) was set to '{self._conf.my_conf}'.")
        print()
        print("Every now and then (usually after every training epoch), we create a training checkpoint,")
        print("which should be stored in the results directory.")
        print("The path of the results directory is stored in the config at self._conf.results_dir.")
        print("For this experiment, the results directory was chosen to be:")
        print(self._conf.results_dir)
        print()
        print("To deliver a checkpoint, and kick of evaluation, we use self._deliver_ckpt.")
        print("As an example, we deliver a checkpoint 'test.ckpt'.")
        print("(Usually, we would of course create the checkpoint file in the results directory first).")
        # Hands the checkpoint name to expbase, which launches the evaluator.
        self._deliver_ckpt("test.ckpt")
        print()
        print("Done.")
# ==================================================================================================================== #
# EVALUATION EXECUTOR #
# ==================================================================================================================== #
class MyEvaluationExecutor(xb.EvaluationExecutor):
    """Toy evaluation executor: prints which checkpoint it was launched for."""
    def _run_evaluation(self) -> None:
        """Demonstrate checkpoint access (``self._ckpt``) -- no actual
        evaluation happens."""
        print("This is where the evaluation procedure is implemented.")
        print("The checkpoint that the EvaluationExecutor was launched for is stored in self._ckpt.")
        print(f"In this particular case, the processed checkpoint is '{self._ckpt}'.")
        print()
        print("Done.")
# ==================================================================================================================== #
# MAIN #
# ==================================================================================================================== #
def main():
    """Assemble the example experiment from its components and launch it."""
    xb.Experiment(
        MyTrainingExecutor,
        MyEvaluationExecutor,
        MyConfig,
        "run.sh",  # the name of the app printed in its synopsis
        # Bug fix: help text previously read "This is in example".
        "This is an example that illustrates the basic setup of an experiment using expbase."  # the app's help text
    ).run()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
5116945 | from sqlalchemy.orm import relationship
from .weak_entities import event_has_host
from .. import db, flask_bcrypt
# Based on 'User' example https://github.com/cosmic-byte/flask-restplus-boilerplate
class Host(db.Model):
    """ Event hosts """
    __tablename__ = "host"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Timestamp of account creation; set by the application (no DB default).
    registered_on = db.Column(db.DateTime, nullable=False)
    admin = db.Column(db.Boolean, nullable=False, default=False)
    username = db.Column(db.String(50), unique=True)
    # bcrypt digest of the password; the plain text is never stored.
    password_hash = db.Column(db.String(100))
    # Many-to-many: events this host organises (via the `event_has_host` table).
    events = relationship(
        "Event",
        secondary=event_has_host,
        back_populates="hosts")

    @property
    def password(self):
        """Write-only field: reading the password is deliberately forbidden."""
        raise AttributeError('password: write-only field')

    @password.setter
    def password(self, password):
        # Hash on assignment so callers can simply write `host.password = ...`.
        self.password_hash = flask_bcrypt.generate_password_hash(password).decode('utf-8')

    def check_password(self, password):
        """Return True when *password* matches the stored bcrypt hash."""
        return flask_bcrypt.check_password_hash(self.password_hash, password)

    def __repr__(self):
        # Bug fix: the repr previously said "User" -- a leftover from the
        # boilerplate this model was based on; this is the Host model.
        return "<Host '{}'>".format(self.username)
| StarcoderdataPython |
8029436 | <reponame>jrood-nrel/percept
import sys
sys.path.insert(0,"../build/build.dir/packages/PyTrilinos/src/stk/PyPercept")
from math import *
from numpy import *
import unittest
import time
import print_table
from PerceptMesh import *
class StringFunctionUnitTests(unittest.TestCase):
def setUp(self):
self.testpoints = [ [0.1234, -0.5678, 0.9, 0.812],
[0.1234e-3, -0.5678e-5, 0.9e+8, 0.812e-4],
[.101, 102., 10201.0, 0.0122],
[0.003, -100001.1, 44.1, 3.0]
]
self.testpoints_fd = [ [0.1234, -0.5678, 0.9, 0.812],
[0.1234e-3, -0.5678e-5, 0.9e-3, 0.812e-4],
[101.0, 102.0, 10.2, 0.0122],
[0.003, .002, -0.0011, 0.0]
]
def test_stringFunction_xy_basic(self):
x=1.234
y=2.345
z=0.0
sf = StringFunction(" x - y ")
input_array = array([x, y, z])
time = 0.0
output_array = sf.value(input_array, time)
print output_array
eval_print(x, y, z, time, sf)
def test_stringFunction_xy_basic_1(self):
sfx = StringFunction("x")
sfy = StringFunction("y")
sfxy = StringFunction("x-y")
x = 1.234
y = 5.678
z = 0.0
t = 0.0
xy = x-y
eval_print(1,2,3,0, sfxy)
vx = eval_func(x, y, z, t, sfx)
print "x = ", x, "vx = ", vx
vy = eval_func(x, y, z, t, sfy)
vxy = eval_func(x, y, z, t, sfxy)
print "y = ", y, "vy = ", vy
print "xy = ", xy, "vxy = ", vxy
self.assertEqual(y, vy)
self.assertEqual(xy, vxy)
def test_stringFunction_xy_basic_2(self):
sftestNA = StringFunction("x", "sftestNA", Dimensions(3), Dimensions(2, 3))
sftestNA.setDomainDimensions(Dimensions(3))
sftest = StringFunction("x", "sftestNA", Dimensions(3), Dimensions(2, 3))
sftest_domain = sftest.getNewDomain()
sftest_codomain = sftest.getNewCodomain()
sfx = StringFunction("x", "sfx")
sfy = StringFunction("y")
sfxy = StringFunction("x-y")
x = 1.234
y = 5.678
z = 0.0
t = 0.0
xy = x-y
eval_print(1,2,3,0, sfxy)
vx = eval_func(x,y,z,t, sfx)
print "x = ", x, "vx = ", vx
vy = eval_func(x, y, z, t, sfy)
print "y = ", y, "vy = ", vy
vxy = eval_func(x, y, z, t, sfxy)
print "xy = ", xy, "vxy = ", vxy
self.assertEqual(x, vx)
self.assertEqual(y, vy)
self.assertEqual(xy, vxy)
def test_stringFunction_test_alias(self):
sfx = StringFunction("x", "sfx", Dimensions(3), Dimensions(1))
sfy = StringFunction("y", "sfy", Dimensions(3), Dimensions(1))
sfxy = StringFunction("x-y", "sfxy", Dimensions(3), Dimensions(1))
sfembedded = StringFunction("sfxy", "sfembedded", Dimensions(3), Dimensions(1))
x = 1.234
y = 5.678
z = 0.0
t = 0.0
xy = x-y
eval_print(1,2,3,0, sfxy)
vx = eval_func(x,y,z,t, sfx)
print "x = ", x, "vx = ", vx
vy = eval_func(x, y, z, t, sfy)
print "y = ", y, "vy = ", vy
vxy = eval_func(x, y, z, t, sfxy)
print "xy = ", xy, "vxy = ", vxy
self.assertEqual(x, vx)
self.assertEqual(y, vy)
self.assertEqual(xy, vxy)
print "sfembedded = ...", sfembedded
eval_print(1,2,3,0,sfembedded)
print "sfembedded = ", eval_func(x,y,z,t,sfembedded)
vxy1 = eval_func(x,y,z,t,sfembedded)
sfembedded.add_alias("sfalias")
sftestalias = StringFunction("sfalias", "sftestalias")
vxy2 = eval_func(x,y,z,t,sftestalias)
print "sftestalias = ", vxy2
def test_stringFunction_vector_valued(self):
x = 1.234
y = 5.678
z = 3.456
t = 0.0
didCatch = 0
try:
sfv0 = StringFunction("v[0]=x; v[1]=y; v[2]=z; x", "sfv", Dimensions(1,4), Dimensions(1,3))
eval_vec3_print(1,2,3,0, sfv0)
except:
didCatch = 1
print "TEST::function::stringFunctionVector: expected to catch this since dom/codomain dimensions should be rank-1"
sfv = StringFunction("v[0]=x*y*z; v[1]=y; v[2]=z; x", "sfv", Dimensions(3), Dimensions(3))
eval_vec3_print(1.234, 2.345e-3, 3.456e+5, 0.0, sfv)
vec = eval_vec3(x,y,z,t,sfv)
print "x = ", x
print "y = ", y
print "z = ", z
print "val = ", (vec[0]*vec[1]*vec[2])
self.assertEqual(vec[0], (x*y*z))
self.assertEqual(vec[1], (y))
self.assertEqual(vec[2], (z))
def test_stringFunction_constants(self):
x = 1.234
y = 5.678
z = 3.456
t = 0.0
myC = 4.5678
# dummy return value sf_myC, but could be used in table printing, or other pythonic uses
sf_myC = StringFunction(str(myC), "myC", Dimensions(3), Dimensions(1));
# alternative
# sf_myC = StringFunction("4.5678", "myC", Dimensions(3), Dimensions(1));
# this string function refers to the other through "myC"
sfv = StringFunction("x+myC", "sfv", Dimensions(3), Dimensions(1))
#eval_print(x,y,z, 0.0, sfv)
vec = eval_func(x,y,z,t,sfv)
print "x = ", x
print "y = ", y
print "z = ", z
print "constants test val = ", vec, " expected = ", (myC + x)
self.assertEqual(vec, (myC + x))
# more...
myConstants = {"C":1.234,"rho":1.e-5}
sf_myC1 = []
for cname, cvalue in myConstants.items(): # note: this could become a python function
sf_myC1.append( StringFunction(str(cvalue),cname,Dimensions(3),Dimensions(1)) )
sfv1 = StringFunction("x + C*rho", "sfv1", Dimensions(3), Dimensions(1))
#eval_print(x,y,z, 0.0, sfv1)
vec = eval_func(x,y,z,t,sfv1)
expected = (x + myConstants["C"]*myConstants["rho"])
print "constants test val1 = ", vec, " expected = ", expected
self.assertEqual(vec, expected)
def test_stringFunction_arithmetic_ops(self):
for xyzt in self.testpoints:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
sfx = StringFunction("x")
sfy = StringFunction("y")
sfxy = StringFunction("x-y")
sfxy2 = sfx - sfy
xy = x - y
vxy = eval_func(x,y,z,t,sfxy)
vxy2 = eval_func(x,y,z,t,sfxy2)
sfx1 = StringFunction("x")
sfy2 = StringFunction("y")
vx = eval_func(x,y,z,t, sfx1)
vy = eval_func(x,y,z,t, sfy2)
print "vxy2 = ", vxy2, " == vxy = ", vxy
print "xy = ", xy, " == vxy = ", vxy
print "x = ", x, " == vx = ", vx
print "y = ", y, " == y = ", vy
self.assertEqual(x, vx)
self.assertEqual(y, vy)
self.assertEqual(xy, vxy)
self.assertEqual(vxy2, vxy)
sfxy_minus = sfx - sfy
xy_minus = x - y
vxy_minus = eval_func(x,y,z,t,sfxy_minus)
vxy1_minus = eval_func(x,y,z,t,sfxy_minus)
print "xy_minus = ", xy_minus, " == vxy_minus = ", vxy_minus
print "xy_minus = ", xy_minus, " == vxy1_minus = ", vxy1_minus
self.assertEqual(xy_minus, vxy_minus)
self.assertEqual(vxy_minus, vxy1_minus)
sfxy_plus = sfx + sfy
xy_plus = x + y
vxy_plus = eval_func(x,y,z,t,sfxy_plus)
vxy1_plus = eval_func(x,y,z,t,sfxy_plus)
print "xy_plus = ", xy_plus, " == vxy_plus = ", vxy_plus
print "xy_plus = ", xy_plus, " == vxy1_plus = ", vxy1_plus
self.assertEqual(xy_plus, vxy_plus)
self.assertEqual(vxy_plus, vxy1_plus)
sfxy_mult = sfx * sfy
xy_mult = x * y
vxy_mult = eval_func(x,y,z,t,sfxy_mult)
vxy1_mult = eval_func(x,y,z,t,sfxy_mult)
print "xy_mult = ", xy_mult, " == vxy_mult = ", vxy_mult
print "xy_mult = ", xy_mult, " == vxy1_mult = ", vxy1_mult
self.assertEqual(xy_mult, vxy_mult)
self.assertEqual(vxy_mult, vxy1_mult)
sfxy_div = sfx / sfy
xy_div = x / y
vxy_div = eval_func(x,y,z,t,sfxy_div)
vxy1_div = eval_func(x,y,z,t,sfxy_div)
print "xy_div = ", xy_div, " == vxy_div = ", vxy_div
print "xy_div = ", xy_div, " == vxy1_div = ", vxy1_div
self.assertEqual(xy_div, vxy_div)
self.assertEqual(vxy_div, vxy1_div)
def test_stringFunction_derivative(self):
for xyzt in self.testpoints:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
sfxy = StringFunction("x-y")
dsfxy_y = StringFunction("-1")
dy = array([["y"]])
#input_array = array([x, y, z])
print "dy= " , dy , " dy.ndim= " , dy.ndim, " dy.dtype= " , dy.dtype, " dy.itemsize= ", dy.itemsize , " dy.size= " , dy.size
#sys.exit(1)
dsfxy_y_1 = sfxy.derivative_test(dy)
dvxy = eval_func(x,y,z,t,dsfxy_y_1)
dvxy1 = eval_func(x,y,z,t,dsfxy_y)
print "dvxy = ", dvxy, " == dvxy1 = ", dvxy1
print "-1.0 = -1 == dvxy = ", dvxy
self.assertEqual(dvxy, dvxy1)
self.assertEqual(-1, dvxy)
print dsfxy_y_1
def test_stringFunction_derivative_1(self):
for xyzt in self.testpoints:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
print "here 1"
eps = 1.e-6
eps_loc = eps*(fabs(x)+fabs(y)+fabs(z)+fabs(t))/4.0
sfxy = StringFunction("x-y")
dsfxy_grad = StringFunction("v[0]=1; v[1]= -1; v[2]=0", "test", Dimensions(3), Dimensions(3))
dxyz = array([["x"],["y"],["z"]]) #new simpler user-interface
#dxyz = array([["x","y","z"]]) #new simpler user-interface
print "dxyz.shape= " , dxyz.shape
grad = array(["1","-1","0"])
sfxy.set_gradient_strings(grad)
dsfxy_grad_1 = sfxy.derivative_test(dxyz)
dsfxy_grad_fd = sfxy.derivative_test_fd(dxyz, eps_loc)
dsfxy_grad_2 = sfxy.derivative(dxyz)
dvxy1 = eval_vec3(x,y,z,t,dsfxy_grad_1)
dvxy_fd = eval_vec3(x,y,z,t,dsfxy_grad_fd)
dvxy2 = eval_vec3(x,y,z,t,dsfxy_grad_2)
dvxy = eval_vec3(x,y,z,y,dsfxy_grad)
i = 0
while i < 3:
self.assertEqual(dvxy[i], dvxy1[i])
self.assertEqual(dvxy[i], dvxy2[i])
self.assertAlmostEqual(dvxy[i], dvxy_fd[i])
i = i + 1
self.assertEqual(dvxy[0], 1.0)
self.assertEqual(dvxy[1], -1.0)
def test_stringFunction_derivative_2(self):
for xyzt in self.testpoints_fd:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
eps = 1.e-10
eps_loc = eps*(fabs(x)+fabs(y)+fabs(z)+fabs(t))/4.0
sf = StringFunction(" sin(x*y*z*z) " )
grad = array(["y*z*z*cos(x*y*z*z)", "x*z*z*cos(x*y*z*z)", "2*x*y*z*cos(x*y*z*z)"])
gradv = "v[0]="+grad[0]+"; v[1]="+grad[1]+" ; v[2]="+grad[2]+";"
dsf_grad = StringFunction(gradv, "test", Dimensions(3), Dimensions(3))
#dxyz = array([["x","y","z"]])
dxyz = array([["x"],["y"],["z"]]) #new simpler user-interface
sf.set_gradient_strings(grad)
dsf_grad_fd = sf.derivative_test_fd(dxyz, eps_loc)
dsf_grad_2 = sf.derivative(dxyz)
dv_fd = eval_vec3(x,y,z,t,dsf_grad_fd)
dv2 = eval_vec3(x,y,z,t,dsf_grad_2)
dv = eval_vec3(x,y,z,t,dsf_grad)
i = 0
while i < 3:
print "dv2[i] = ", dv2[i], " == dv[i] = ", dv[i]
self.assertEqual(dv[i], dv2[i])
if fabs(dv[i]-dv_fd[i]) > 0.5*(fabs(dv_fd[i])+fabs(dv[i]))*1.e-6:
print "\n i = ", i, "x= ", x, "y= ", y, "z= ", z, "expected= ", dv[i], "actual = ", dv_fd[i]
self.assertAlmostEqual(dv[i], dv_fd[i], delta = 1.e-1)
i = i + 1
def test_stringFunction_multiplePoints(self):
points = zeros(shape=(4,3))
output = zeros(shape=(4,1))
output_expect = zeros(shape=(4,1))
sf1 = StringFunction("x+y*z")
i = 0
for xyzt in self.testpoints:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
points[i][0] = x
points[i][1] = y
points[i][2] = z
vx = eval_func(x,y,z,t,sf1)
output_expect[i][0] = vx
print "x+y*z = ", x+y*z, " == vx = ", vx
self.assertEqual((x+y*z), vx)
i = i + 1
sf2 = StringFunction(str(sf1.getFunctionString()), "sf2", Dimensions(3), Dimensions(1))
output = sf2.value(points, output, 0.0)
i = 0
while i < 4:
print "output_expect(i, 0) = ", output_expect[i][0] , " == output(i, 0) = ", output[i][0]
self.assertEqual(output_expect[i][0], output[i][0])
i = i + 1
def test_stringFunction_expressions(self):
x = 0.1234
y = -0.5678
z = 0.9
t = 0.812
global PI
global E
PI = pi
E = e
sf1 = StringFunction("x+y")
ve = x + y
v = eval_func(x,y,z,t,sf1)
print "x = ", x, "y = ", y, "v = ", v, "ve = ", ve
EXPR_TO_TEST1 = "(exp(x)+log(x)+log10(x)+pow(x,y)+sqrt(x)+erfc(x)+erf(x)+acos(x)+asin(x)+atan(x)+atan2(x,z)+cos(x)+cosh(x)+sin(x)+sinh(x)+tan(x)+tanh(x)+abs(y)+fabs(y))"
EXPR_TO_TEST2 = "(x/y*z-t+(4*x)-(1.23e-3/z))"
EXPR_TO_TEST3 = "(4 % 2)"
EXPR_TO_TEST4 = "(-z)"
EXPR_TO_TEST5 = "(exp(E))"
EXPR_TO_TEST6 = "(PI)"
def DO_SF_TEST(expr,x,y,z,t):
sf = StringFunction(expr)
v_loc = eval_func(x,y,z,t,sf)
if isinf(v_loc): #this is kind of wierd but Python doesn't handle infinite values like C++ and otherwise generates OverflowError
ve_loc = v_loc
else:
ve_loc = eval(expr)
print "ve_loc = ", ve_loc, " == v_loc = ", v_loc
self.assertEqual(ve_loc, v_loc)
DO_SF_TEST(EXPR_TO_TEST1,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST2,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST3,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST4,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST5,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST6,x,y,z,t)
for xyzt in self.testpoints:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
DO_SF_TEST(EXPR_TO_TEST1,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST2,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST3,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST4,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST5,x,y,z,t)
DO_SF_TEST(EXPR_TO_TEST6,x,y,z,t)
def test_stringFunction_timing(self):
numIt = 1024
EXPR_TO_TEST1A = "(exp(x)+log(x)+log10(x)+pow(x,y)+sqrt(x)+erfc(x)+erf(x)+acos(x)+asin(x))"
EXPR_TO_TEST1B = "(atan(x)+atan2(x,z)+cos(x)+cosh(x)+sin(x)+sinh(x)+tan(x)+tanh(x)+abs(y)+fabs(y))"
EXPR_TO_TEST2 = "(x/y*z-t+(4*x)-(1.23e-3/z))"
EXPR_TO_TEST8 = "(sin(x+y))"
def DO_SF_TIMING_TEST_CPP(expr, numIt, x, y, z, t):
val = 0.0
for it in range(numIt):
try:
val = val + eval(expr)
except:
pass
def DO_SF_TIMING_TEST_STRING(expr, numIt, x, y, z, t):
sf = StringFunction(expr)
val = 0.0
for it in range(numIt):
try:
val = val + eval_func(x,y,z,t,sf)
except:
pass
def TIME_IT1(expr, numIt, x, y, z, t):
t_cpp = time.time()
DO_SF_TIMING_TEST_CPP(expr, numIt, x, y, z, t)
t_cpp = time.time() - t_cpp
t_string = time.time()
DO_SF_TIMING_TEST_STRING(expr, numIt, x, y, z, t)
t_string = time.time() - t_string
ratio = t_string/t_cpp
time_it = [expr, t_cpp, t_string, ratio]
return time_it
print ""
i = 0
for xyzt in self.testpoints:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
print "\n Timings for ", numIt, " iterations for point #", i, " x,y,z,t= ", x, y, z, t, " \n"
headers = ["expression", "cpp time", "string time", "ratio"]
t1 = TIME_IT1(EXPR_TO_TEST1A, numIt, x, y, z, t)
t2 = TIME_IT1(EXPR_TO_TEST1B, numIt, x, y, z, t)
t3 = TIME_IT1(EXPR_TO_TEST2, numIt, x, y, z, t)
t4 = TIME_IT1(EXPR_TO_TEST8, numIt, x, y, z, t)
table = [headers, t1, t2, t3, t4]
out = sys.stdout
print_table.print_table(out, table)
i = i + 1
if __name__ == "__main__":
    # Build a suite from the StringFunction test case and run it verbosely.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(StringFunctionUnitTests))
| StarcoderdataPython |
11226024 | # -*- coding: utf-8 -*-
# Copyright (C) 2020 <NAME>
# This code is licensed under the BSD 3-Clause license (see LICENSE for details).
import logzero
import PyQt5.QtCore
class AbstractIPC(object):
    """
    Encapsulates code that both the producer and the consumer requires.
    """
    def __init__(self, id, key_file_path=None, log=True):
        """
        Creates the underlying system resources.
        :param id: Unique system-wide unique name (str) of shared memory; this name is also used to create the unique
        names for the two system-semapores `$id + "_sem_full"` and `$id + "_sem_empty"`
        :param key_file_path: Optional file path to a file typically named "shared_memory.key" whose first line is used
        as the unique ID/name of the shared memory IF this file exists, can be empty (the default) which then uses `id`
        (first parameter)
        :param log: `True` to enable logging using `logzero.logger`, `False` otherwise
        """
        self._log = log
        self._transaction_started = False
        if self._log:
            logzero.logger.info("Using PyQt v" + PyQt5.QtCore.PYQT_VERSION_STR + " and Qt v" +
                                PyQt5.QtCore.QT_VERSION_STR)
        # Prefer the key read from the key file; fall back to the given id.
        self._file_key = self._load_key(key_file_path)
        self._shared_memory = PyQt5.QtCore.QSharedMemory(self._file_key if self._file_key else str(id))
        if self._log:
            logzero.logger.debug("Creating shared memory with key=\"" + self._shared_memory.key() + "\" (" +
                                 ("loaded from file)" if self._file_key else "hardcoded)"))
        # Classic producer/consumer semaphore pair: "empty" starts at 1
        # (buffer free), "full" starts at 0 (nothing produced yet).
        self._sem_empty = PyQt5.QtCore.QSystemSemaphore(str(id) + "_sem_empty", 1, PyQt5.QtCore.QSystemSemaphore.Create)
        self._sem_full = PyQt5.QtCore.QSystemSemaphore(str(id) + "_sem_full", 0, PyQt5.QtCore.QSystemSemaphore.Create)
    def _load_key(self, path):
        """
        Alternatively loads the shared memory's name from a file named `SHARED_MEMORY_KEY_FILE`.
        :param path: Path to file whose first line contains the unique name; the rest is ignored
        :return: Unique name of shared memory or None if such a file did not exist or could not be read
        """
        if path is None:
            return None
        try:
            with open(path) as fp:
                line = fp.readline().strip()
                if fp.readline() and self._log:
                    # logging.Logger.warn is deprecated; use warning().
                    logzero.logger.warning("Ignoring residual lines in " + path)
                return line
        except (OSError, UnicodeError):
            # BUG FIX: this used to be a bare `except: pass`, which also
            # swallowed programming errors. A missing/unreadable/undecodable
            # key file still falls back to the hardcoded id, but everything
            # else now propagates.
            if self._log:
                logzero.logger.debug("Could not read key file " + path)
        return None
| StarcoderdataPython |
397060 | <filename>hashtable/two_sum.py<gh_stars>0
#Question:
#Given an array of integers, return indices of the two numbers such that they add up to a specific target.
#You may assume that each input would have exactly one solution, and you may not use the same element twice.
#Example:
#Given nums = [2, 7, 11, 15], target = 9,
#Because nums[0] + nums[1] = 2 + 7 = 9,
#return [0, 1].
class Solution(object):
    """Single-pass two-sum using a value -> index hash map."""

    def twoSum(self, nums, target):
        """Return [i, j] with i < j such that nums[i] + nums[j] == target.

        Exactly one solution is assumed to exist; the same element is never
        used twice. Runs in O(n) time and O(n) space.
        """
        seen = {}  # value already visited -> its index
        for idx, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], idx]
            seen[value] = idx
if __name__ == "__main__":
    sol = Solution()
    # FIX: was a Python-2-only `print` statement; print() with a single
    # argument behaves identically on Python 2 and 3.
    print(sol.twoSum([2, 7, 11, 15], 9))
| StarcoderdataPython |
4802513 | # Copyright (c) 2021, TNO
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This script downloads a list of available maps on Zenodo.
# Please ensure that the necessary packages included in the requirements.txt are installed correctly
# TODO (1) create and fill in (line 203) your personal Zenodo access token
# TODO (2) fill in (line 206) the absolute path of the output folder e.g. r'C:\Users\me\output'
import time
import pandas as pd
import requests
from path import Path
def get_list_of_all_available_maps(ACCESS_TOKEN, search_query):
    """
    Search the Zenodo 'ucare' community for augmented-emission-map records and
    scrape the metadata block of every contained '*map.txt' file.

    :param ACCESS_TOKEN: personal Zenodo API access token
    :param search_query: free-text query used to find the map records
    :return: pandas.DataFrame with one row per parsed emission-map file
    """
    # --- search Zenodo for search query ---------------------------------------
    response = requests.get('https://zenodo.org/api/records',
                            params={'q': search_query, 'communities': 'ucare', 'size': 500,
                                    'access_token': ACCESS_TOKEN})
    response = response.json()
    hits = response['hits']
    hits = hits['hits']
    list_out = []
    emission_maps = pd.DataFrame()
    for i in range(0, len(hits), 1):
        # --- per search result get record data --------------------------------
        engine = hits[i]
        create_date = engine['created'].split('T')[0]
        metadata = engine['metadata']
        title = metadata['title']
        doi = engine['doi']
        updated = engine['updated'].split('T')[0]
        revision = engine['revision']
        files = engine['files']
        # Token-bucket state for the rate limiter below.
        t_last_check = time.time()
        t_allowance = 1
        for k in range(0, len(files), 1):
            file = files[k]
            aem_file_name = file['key']
            link = file['links']
            link = link['self']
            # --- read and check file name -------------------------------------
            taxonomycode = aem_file_name.split('.')[0]
            if 'map.txt' in aem_file_name:
                version = ''
                creator = ''
                if '-v' in aem_file_name:
                    version = aem_file_name.split('-v')[1]
                    version = version.split('.')[0]
                    version = 'v{}'.format(version)
                    creator = aem_file_name.split('-v')[0].split('.')[1]
                if not '-v' in aem_file_name:
                    creator = aem_file_name.split('.')[1]
                if creator == 'map':
                    creator = 'not in filename'
                print(taxonomycode)
                splitResult = taxonomycode.split('_')  # split on underscores
                if not len(splitResult) == 5:
                    print('--- invalid file name')
                    continue
                fuel_type = splitResult[0]
                euro_class = splitResult[1]
                engine_displacement = splitResult[2]
                power = splitResult[3]
                alliance = splitResult[4]
                # --- add rate limiter: can only request 60 per minute ---------
                t_current = time.time()
                t_time_passed = t_current - t_last_check
                t_last_check = t_current
                t_allowance += t_time_passed
                if t_allowance > 1:
                    t_allowance = 1
                if t_allowance < 1:
                    time.sleep(1)
                # --- get AEM map.txt file to scrape ---------------------------
                response = requests.get(link)
                t_allowance -= 1
                data = response.text
                # --- initiate scrape storing variables ---
                maps = ''
                ID = ''
                TOTAL_KM = ''
                TOTAL_TIME = ''
                NUM_VEHICLES = ''
                pollutants_coldstart = ''
                pollutants_deterioration = ''
                notes = []
                if '�' in data:
                    # Replacement characters indicate a cold-start file encoding quirk.
                    data = data.replace('�', '')
                    print('--- cold start')
                # --- start scraping -------------------------------------------
                for line in data.splitlines():
                    if 'AVAILABLE MAPS' in line:
                        line_maps = line.strip()
                        [i_map, n_maps] = line_maps.split(': ')
                        maps = n_maps
                    if 'ID' in line:
                        line_id = line.strip()
                        [i_id, n_id] = line_id.split(':')
                        ID = n_id
                    if 'NOTES' in line:
                        # BUG FIX: this NOTES check used to appear twice in the
                        # loop body, so every note was appended to `notes` twice.
                        line_label = line.strip()
                        a_note = line_label.split('NOTES: ')[1].strip('[').strip(']')
                        notes.append(a_note)
                    if 'TOTAL KM' in line:
                        line_km = line.strip()
                        [i_km, n_km] = line_km.split(':')
                        TOTAL_KM = n_km
                    if 'TOTAL TIME [h]' in line:
                        line_time = line.strip()
                        [i_time, n_time] = line_time.split(':')
                        n_res = n_time.replace('hours of data', '')
                        TOTAL_TIME = n_res
                    if 'NUMBER OF VEHICLES' in line:
                        line_veh = line.strip()
                        [i_veh, n_veh] = line_veh.split(':')
                        NUM_VEHICLES = n_veh
                    if 'AVAILABLE COLD START' in line:
                        line_maps = line.strip()
                        [i_map, n_maps] = line_maps.split(':')
                        pollutants_coldstart = n_maps
                    if 'AVAILABLE DETERIORATION' in line:
                        line_maps = line.strip()
                        [i_map, n_maps] = line_maps.split(':')
                        pollutants_deterioration = n_maps
                # --- check if engine code has been correctly parsed ---
                if ID == '':
                    # FIX: replaced leftover debug print('wait') with a
                    # meaningful message; the row is still recorded.
                    print('--- could not parse ID from ' + aem_file_name)
                    ID = 'error parsing file'
                # --- create entry for final output ----------------------------
                out_dict = {'filename': title, 'doi': doi, 'taxonomycode': ID, 'creator': creator,
                            'version_filename': version, 'createdate': create_date,
                            'updated': updated, 'revision': revision, 'available_basemaps': maps,
                            'total_km': TOTAL_KM, 'total_time': TOTAL_TIME,
                            'number_of_vehicles': NUM_VEHICLES, 'cold_start': pollutants_coldstart,
                            'deterioration': pollutants_deterioration, 'notes': notes,
                            'fuel_type': fuel_type, 'euro_class': euro_class,
                            'engine_displacement': engine_displacement, 'power': power,
                            'alliance': alliance, 'link': link}
                list_out.append(out_dict)
    # Build the DataFrame once, after all records are collected (it was
    # needlessly rebuilt on every iteration before).
    emission_maps = pd.DataFrame(list_out)
    return emission_maps
if __name__ == '__main__':
    # TODO (1) create an access token on Zenodo (profile -> applications -> new token):
    ACCESS_TOKEN = '<PASSWORD>'  # <insert access code here>
    output_folder = Path(r'path')  # TODO (2) <insert path here>
    string_to_search = 'Augmented emission maps '
    df_emission_maps = get_list_of_all_available_maps(ACCESS_TOKEN, string_to_search)
    # Save a csv file in the supplied output folder, stamped with the access date.
    timestamp = time.strftime("%d-%m-%Y_%H%M%S", time.localtime(time.time()))
    csv_name = 'all_maps_on_Zenodo_{}.csv'.format(timestamp)
    df_emission_maps.to_csv(output_folder / csv_name, index=False)
    print(f'AEM list saved to {output_folder}')
| StarcoderdataPython |
3302822 | <filename>hexa-beta/userRegistrationScreen.py
__author__ = 'guru'
#state 40 = enter the phone number
#state 61 = phone number already exists; ask for a new number
#state 100 = asks the user to place their 1st finger on the FPS
#state 101 = asks the user to place the same finger on the FPS again
#state 20 = scanning your finger now
#state 30 = remove your finger and place it again
#state 41 = sorry, the user's fingerprint already exists
#state 50 = processing, please wait
#state 60 = initial deposit info
#state 70 = full info
#screen = 0 vendor, screen = 1 customer.
import GLCD as g
currentState = 0  # last screen state drawn; lets stateNN() skip a full redraw when unchanged
fontWidth = 6  # horizontal pixels per character cell on the GLCD
lineLength = 21  # characters that fit on one GLCD text line
def state40(phoneNumber=' '):
    """Render the 'enter mobile number' screen (state 40) on both displays.

    When state 40 is already on screen, only the typed digits are refreshed.
    """
    global currentState
    if currentState == 40:
        # Layout already drawn; just refresh the number field.
        num(phoneNumber)
        return
    currentState = 40
    digits = str(phoneNumber).ljust(10)
    number_col = 7 * fontWidth - 1  # x offset right after the "Mob.No:" label
    # Vendor display (0): prompt plus the number being typed.
    g.clearDisplay(0)
    g.displayText("Please Enter The".center(lineLength), 2, 0, 0)
    g.displayText("Mobile Number".center(lineLength), 3, 0, 0)
    g.displayText("Mob.No:".ljust(lineLength), 4, 0, 0)
    g.displayText(digits, 4, number_col, 0)
    # Customer display (1): just the number line.
    g.clearDisplay(1)
    g.displayText("Mob.No:".ljust(lineLength), 4, 0, 1)
    g.displayText(digits, 4, number_col, 1)
def num(phoneNumber=" "):
    """Refresh only the phone-number field on both displays (state 40)."""
    padded = str(phoneNumber).ljust(10)
    for screen in (0, 1):  # 0 = vendor, 1 = customer
        g.displayText(padded, 4, 7 * fontWidth - 1, screen)
def state61():
    """Show the 'mobile number already registered' screens (state 61)."""
    global currentState
    currentState = 61
    # Vendor display (0)
    g.clearDisplay(0)
    g.displayText("Mobile Number is".center(lineLength), 3, 0, 0)
    g.displayText("Already Regd.".center(lineLength), 4, 0, 0)
    # Customer display (1)
    g.clearDisplay(1)
    g.displayText("No. Already Exists!".center(lineLength), 3, 0, 1)
    g.displayText("Pls Try a New No.".center(lineLength), 4, 0, 1)
def state100():
    """Prompt for the first fingerprint capture (state 100)."""
    global currentState
    currentState = 100
    # Vendor display (0)
    g.clearDisplay(0)
    g.displayText("Registering".center(lineLength), 0, 0, 0)
    g.displayText("Waiting For 1st Finger".center(lineLength), 3, 0, 0)
    # Customer display (1)
    g.clearDisplay(1)
    g.displayText("Registering".center(lineLength), 0, 0, 1)
    g.displayText("Place Ur 1st Finger".center(lineLength), 3, 0, 1)
def state101():
    """Ask the user to place the same finger a second time (state 101)."""
    global currentState
    currentState = 101
    # Vendor display (0)
    g.clearDisplay(0)
    g.displayText("Registering".center(lineLength), 0, 0, 0)
    g.displayText("Waiting for ".center(lineLength), 3, 0, 0)
    g.displayText("the finger again".center(lineLength), 6, 0, 0)
    # Customer display (1)
    g.clearDisplay(1)
    g.displayText("Registering".center(lineLength), 0, 0, 1)
    g.displayText("Pls Remove ur Finger".center(lineLength), 3, 0, 1)
    g.displayText("Place the same Finger".center(lineLength), 5, 0, 1)
    g.displayText("Again".center(lineLength), 6, 0, 1)
def state20():
    """Show the 'scanning finger' screens (state 20)."""
    global currentState
    currentState = 20
    # Vendor display (0)
    g.clearDisplay(0)
    g.displayText("Scanning User's".center(lineLength), 2, 0, 0)
    g.displayText("Finger...".center(lineLength), 3, 0, 0)
    # Customer display (1)
    g.clearDisplay(1)
    g.displayText("Scanning Your".center(lineLength), 2, 0, 1)
    g.displayText("Finger...".center(lineLength), 3, 0, 1)
def state30():
    """Ask the user to lift and replace their finger (state 30).

    Only the customer display is updated; the vendor screen is left as-is.
    """
    global currentState
    currentState = 30
    g.clearDisplay(1)
    g.displayText("Remove ur Finger".center(lineLength), 2, 0, 1)
    g.displayText("And".center(lineLength), 3, 0, 1)
    g.displayText("Place It Again".center(lineLength), 4, 0, 1)
def state41():
    """Report that the fingerprint is already enrolled (state 41)."""
    global currentState
    currentState = 41
    # Vendor display (0)
    g.clearDisplay(0)
    g.displayText("Finger Print".center(lineLength), 3, 0, 0)
    g.displayText("Already Exists".center(lineLength), 4, 0, 0)
    # Customer display (1)
    g.clearDisplay(1)
    g.displayText("Sorry! :(".center(lineLength), 2, 0, 1)
    g.displayText("Finger Print".center(lineLength), 3, 0, 1)
    g.displayText("Already Exists".center(lineLength), 4, 0, 1)
def state50():
    """Show 'Processing... Please Wait' on both displays (state 50)."""
    global currentState
    currentState = 50
    # Vendor display (0).
    # BUG FIX: the vendor text was written to screen index 1, leaving the
    # freshly cleared vendor display blank and drawing the customer text
    # twice; per the module header, 0 = vendor and 1 = customer.
    g.clearDisplay(0)
    string = ("{:^%d}" % lineLength).format("Processing...")
    g.displayText(string, 2, 0, 0)
    string = ("{:^%d}" % lineLength).format("Please Wait")
    g.displayText(string, 3, 0, 0)
    # Customer display (1).
    g.clearDisplay(1)
    string = ("{:^%d}" % lineLength).format("Processing...")
    g.displayText(string, 2, 0, 1)
    string = ("{:^%d}" % lineLength).format("Please Wait")
    g.displayText(string, 3, 0, 1)
def state60(accountBalance=" "):
    """Render the 'initial deposit amount' screen (state 60).

    When state 60 is already on screen, only the amount digits are refreshed.
    The vendor display is intentionally left untouched (its update code was
    disabled by the original author).
    """
    global currentState
    if currentState == 60:
        s60num(accountBalance)
        return
    currentState = 60
    # Customer display (1)
    g.clearDisplay(1)
    g.displayText("How Much do You Like".center(lineLength), 3, 0, 1)
    g.displayText("To Deposit Rs.".ljust(lineLength), 4, 0, 1)
    g.displayText(str(accountBalance).ljust(4), 4, 14 * fontWidth - 1, 1)
def s60num(accountBalance):
    """Refresh only the deposit-amount digits on the customer display.

    The vendor-display update is disabled, mirroring state60.
    """
    g.displayText(str(accountBalance).ljust(4), 4, 14 * fontWidth - 1, 1)
def state70(phoneNumber="0000000000", accountBalance="000.00"):
    """Show the registration-success summary on both displays (state 70)."""
    global currentState
    # BUG FIX: every other state updates currentState, but state70 did not.
    # Without it, returning afterwards to a previously shown state (e.g.
    # state60) wrongly skipped the full redraw because currentState still
    # held the stale value.
    currentState = 70
    # Vendor display (0)
    g.clearDisplay(0)
    string = ("{:^%d}" % lineLength).format("Registration")
    g.displayText(string, 1, 0, 0)
    string = ("{:^%d}" % lineLength).format("Successful")
    g.displayText(string, 2, 0, 0)
    string = ("{:<%d}" % lineLength).format("Mob.No:")
    g.displayText(string, 4, 0, 0)
    g.displayText(phoneNumber, 4, (7 * fontWidth - 1), 0)
    string = ("{:<%d}" % lineLength).format("Balance:Rs.")
    g.displayText(string, 5, 0, 0)
    g.displayText(accountBalance, 5, (11 * fontWidth - 1), 0)
    # Customer display (1)
    g.clearDisplay(1)
    string = ("{:^%d}" % lineLength).format("Successfully Regd.")
    g.displayText(string, 0, 0, 1)
    string = ("{:<%d}" % lineLength).format("Mob.No:")
    g.displayText(string, 4, 0, 1)
    g.displayText(phoneNumber, 4, (7 * fontWidth - 1), 1)
    string = ("{:<%d}" % lineLength).format("Balance:Rs.")
    g.displayText(string, 5, 0, 1)
    g.displayText(accountBalance, 5, (11 * fontWidth - 1), 1)
    string = ("{:^%d}" % lineLength).format("THANK YOU!")
    g.displayText(string, 7, 0, 1)
26209 | <reponame>wangjing1215/COM-DEV
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'command_list.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(531, 473)
Dialog.setStyleSheet("QDialog {\n"
" background-color:#ddedff;\n"
"}\n"
"QTextEdit {\n"
" border-width: 1px;\n"
" border-style: solid;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QPlainTextEdit {\n"
" border-width: 1px;\n"
" border-style: solid;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QToolButton {\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: rgb(0,0,0);\n"
" padding: 2px;\n"
" background-color: rgb(255,255,255);\n"
"}\n"
"QToolButton:hover{\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(197, 197, 197), stop:1 rgb(227, 227, 227));\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(197, 197, 197));\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: rgb(0,0,0);\n"
" padding: 2px;\n"
" background-color: rgb(255,255,255);\n"
"}\n"
"QToolButton:pressed{\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: rgb(0,0,0);\n"
" padding: 2px;\n"
" background-color: rgb(142,142,142);\n"
"}\n"
"QPushButton{\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: rgb(0,0,0);\n"
" padding: 2px;\n"
" background-color: rgb(255,255,255);\n"
"}\n"
"QPushButton::default{\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: rgb(0,0,0);\n"
" padding: 2px;\n"
" background-color: rgb(255,255,255);\n"
"}\n"
"QPushButton:hover{\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(197, 197, 197), stop:1 rgb(227, 227, 227));\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(197, 197, 197));\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: rgb(0,0,0);\n"
" padding: 2px;\n"
" background-color: rgb(255,255,255);\n"
"}\n"
"QPushButton:pressed{\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: rgb(0,0,0);\n"
" padding: 2px;\n"
" background-color: rgb(142,142,142);\n"
"}\n"
"QPushButton:disabled{\n"
" border-style: solid;\n"
" border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
" border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
" border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
" border-width: 1px;\n"
" border-radius: 5px;\n"
" color: #808086;\n"
" padding: 2px;\n"
" background-color: rgb(142,142,142);\n"
"}\n"
"QLineEdit {\n"
" border-width: 1px; border-radius: 4px;\n"
" border-style: solid;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QLabel {\n"
" color: #000000;\n"
"}\n"
"QLCDNumber {\n"
" color: rgb(0, 113, 255, 255);\n"
"}\n"
"QProgressBar {\n"
" text-align: center;\n"
" color: rgb(240, 240, 240);\n"
" border-width: 1px; \n"
" border-radius: 10px;\n"
" border-color: rgb(230, 230, 230);\n"
" border-style: solid;\n"
" background-color:rgb(207,207,207);\n"
"}\n"
"QProgressBar::chunk {\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
" border-radius: 10px;\n"
"}\n"
"QMenuBar {\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(207, 209, 207, 255), stop:1 rgba(230, 229, 230, 255));\n"
"}\n"
"QMenuBar::item {\n"
" color: #000000;\n"
" spacing: 3px;\n"
" padding: 1px 4px;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(207, 209, 207, 255), stop:1 rgba(230, 229, 230, 255));\n"
"}\n"
"\n"
"QMenuBar::item:selected {\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
" color: #FFFFFF;\n"
"}\n"
"QMenu::item:selected {\n"
" border-style: solid;\n"
" border-top-color: transparent;\n"
" border-right-color: transparent;\n"
" border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
" border-bottom-color: transparent;\n"
" border-left-width: 2px;\n"
" color: #000000;\n"
" padding-left:15px;\n"
" padding-top:4px;\n"
" padding-bottom:4px;\n"
" padding-right:7px;\n"
"}\n"
"QMenu::item {\n"
" border-style: solid;\n"
" border-top-color: transparent;\n"
" border-right-color: transparent;\n"
" border-left-color: transparent;\n"
" border-bottom-color: transparent;\n"
" border-bottom-width: 1px;\n"
" color: #000000;\n"
" padding-left:17px;\n"
" padding-top:4px;\n"
" padding-bottom:4px;\n"
" padding-right:7px;\n"
"}\n"
"QTabWidget {\n"
" color:rgb(0,0,0);\n"
" background-color:#000000;\n"
"}\n"
"QTabWidget::pane {\n"
" border-color: rgb(223,223,223);\n"
" background-color:rgb(226,226,226);\n"
" border-style: solid;\n"
" border-width: 2px;\n"
" border-radius: 6px;\n"
"}\n"
"QTabBar::tab:first {\n"
" border-style: solid;\n"
" border-left-width:1px;\n"
" border-right-width:0px;\n"
" border-top-width:1px;\n"
" border-bottom-width:1px;\n"
" border-top-color: rgb(209,209,209);\n"
" border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
" border-bottom-color: rgb(229,229,229);\n"
" border-top-left-radius: 4px;\n"
" border-bottom-left-radius: 4px;\n"
" color: #000000;\n"
" padding: 3px;\n"
" margin-left:0px;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(247, 247, 247, 255), stop:1 rgba(255, 255, 255, 255));\n"
"}\n"
"QTabBar::tab:last {\n"
" border-style: solid;\n"
" border-width:1px;\n"
" border-top-color: rgb(209,209,209);\n"
" border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
" border-right-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
" border-bottom-color: rgb(229,229,229);\n"
" border-top-right-radius: 4px;\n"
" border-bottom-right-radius: 4px;\n"
" color: #000000;\n"
" padding: 3px;\n"
" margin-left:0px;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(247, 247, 247, 255), stop:1 rgba(255, 255, 255, 255));\n"
"}\n"
"QTabBar::tab {\n"
" border-style: solid;\n"
" border-top-width:1px;\n"
" border-bottom-width:1px;\n"
" border-left-width:1px;\n"
" border-top-color: rgb(209,209,209);\n"
" border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
" border-bottom-color: rgb(229,229,229);\n"
" color: #000000;\n"
" padding: 3px;\n"
" margin-left:0px;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(247, 247, 247, 255), stop:1 rgba(255, 255, 255, 255));\n"
"}\n"
"QTabBar::tab:selected, QTabBar::tab:last:selected, QTabBar::tab:hover {\n"
" border-style: solid;\n"
" border-left-width:1px;\n"
" border-right-color: transparent;\n"
" border-top-color: rgb(209,209,209);\n"
" border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
" border-bottom-color: rgb(229,229,229);\n"
" color: #FFFFFF;\n"
" padding: 3px;\n"
" margin-left:0px;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"\n"
"QTabBar::tab:selected, QTabBar::tab:first:selected, QTabBar::tab:hover {\n"
" border-style: solid;\n"
" border-left-width:1px;\n"
" border-bottom-width:1px;\n"
" border-top-width:1px;\n"
" border-right-color: transparent;\n"
" border-top-color: rgb(209,209,209);\n"
" border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
" border-bottom-color: rgb(229,229,229);\n"
" color: #FFFFFF;\n"
" padding: 3px;\n"
" margin-left:0px;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"\n"
"QCheckBox {\n"
" color: #000000;\n"
" padding: 2px;\n"
"}\n"
"QCheckBox:disabled {\n"
" color: #808086;\n"
" padding: 2px;\n"
"}\n"
"\n"
"QCheckBox:hover {\n"
" border-radius:4px;\n"
" border-style:solid;\n"
" padding-left: 1px;\n"
" padding-right: 1px;\n"
" padding-bottom: 1px;\n"
" padding-top: 1px;\n"
" border-width:1px;\n"
" border-color: transparent;\n"
"}\n"
"QCheckBox::indicator:checked {\n"
"\n"
" height: 10px;\n"
" width: 10px;\n"
" border-style:solid;\n"
" border-width: 1px;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
" color: #000000;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QCheckBox::indicator:unchecked {\n"
"\n"
" height: 10px;\n"
" width: 10px;\n"
" border-style:solid;\n"
" border-width: 1px;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
" color: #000000;\n"
"}\n"
"QRadioButton {\n"
" color: 000000;\n"
" padding: 1px;\n"
"}\n"
"QRadioButton::indicator:checked {\n"
" height: 10px;\n"
" width: 10px;\n"
" border-style:solid;\n"
" border-radius:5px;\n"
" border-width: 1px;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
" color: #a9b7c6;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QRadioButton::indicator:!checked {\n"
" height: 10px;\n"
" width: 10px;\n"
" border-style:solid;\n"
" border-radius:5px;\n"
" border-width: 1px;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
" color: #a9b7c6;\n"
" background-color: transparent;\n"
"}\n"
"QStatusBar {\n"
" color:#027f7f;\n"
"}\n"
"QSpinBox {\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QDoubleSpinBox {\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QTimeEdit {\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QDateTimeEdit {\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QDateEdit {\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"\n"
"QToolBox {\n"
" color: #a9b7c6;\n"
" background-color:#000000;\n"
"}\n"
"QToolBox::tab {\n"
" color: #a9b7c6;\n"
" background-color:#000000;\n"
"}\n"
"QToolBox::tab:selected {\n"
" color: #FFFFFF;\n"
" background-color:#000000;\n"
"}\n"
"QScrollArea {\n"
" color: #FFFFFF;\n"
" background-color:#000000;\n"
"}\n"
"QSlider::groove:horizontal {\n"
" height: 5px;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
"}\n"
"QSlider::groove:vertical {\n"
" width: 5px;\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
"}\n"
"QSlider::handle:horizontal {\n"
" background: rgb(253,253,253);\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: rgb(207,207,207);\n"
" width: 12px;\n"
" margin: -5px 0;\n"
" border-radius: 7px;\n"
"}\n"
"QSlider::handle:vertical {\n"
" background: rgb(253,253,253);\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: rgb(207,207,207);\n"
" height: 12px;\n"
" margin: 0 -5px;\n"
" border-radius: 7px;\n"
"}\n"
"QSlider::add-page:horizontal {\n"
" background: rgb(181,181,181);\n"
"}\n"
"QSlider::add-page:vertical {\n"
" background: rgb(181,181,181);\n"
"}\n"
"QSlider::sub-page:horizontal {\n"
" background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
"}\n"
"QSlider::sub-page:vertical {\n"
" background-color: qlineargradient(spread:pad, y1:0.5, x1:1, y2:0.5, x2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
"}\n"
"QScrollBar:horizontal {\n"
" max-height: 20px;\n"
" border: 1px transparent grey;\n"
" margin: 0px 20px 0px 20px;\n"
"}\n"
"QScrollBar:vertical {\n"
" max-width: 20px;\n"
" border: 1px transparent grey;\n"
" margin: 20px 0px 20px 0px;\n"
"}\n"
"QScrollBar::handle:horizontal {\n"
" background: rgb(253,253,253);\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: rgb(207,207,207);\n"
" border-radius: 7px;\n"
" min-width: 25px;\n"
"}\n"
"QScrollBar::handle:horizontal:hover {\n"
" background: rgb(253,253,253);\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: rgb(147, 200, 200);\n"
" border-radius: 7px;\n"
" min-width: 25px;\n"
"}\n"
"QScrollBar::handle:vertical {\n"
" background: rgb(253,253,253);\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: rgb(207,207,207);\n"
" border-radius: 7px;\n"
" min-height: 25px;\n"
"}\n"
"QScrollBar::handle:vertical:hover {\n"
" background: rgb(253,253,253);\n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-color: rgb(147, 200, 200);\n"
" border-radius: 7px;\n"
" min-height: 25px;\n"
"}\n"
"QScrollBar::add-line:horizontal {\n"
" border: 2px transparent grey;\n"
" border-top-right-radius: 7px;\n"
" border-bottom-right-radius: 7px;\n"
" background: rgba(34, 142, 255, 255);\n"
" width: 20px;\n"
" subcontrol-position: right;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::add-line:horizontal:pressed {\n"
" border: 2px transparent grey;\n"
" border-top-right-radius: 7px;\n"
" border-bottom-right-radius: 7px;\n"
" background: rgb(181,181,181);\n"
" width: 20px;\n"
" subcontrol-position: right;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::add-line:vertical {\n"
" border: 2px transparent grey;\n"
" border-bottom-left-radius: 7px;\n"
" border-bottom-right-radius: 7px;\n"
" background: rgba(34, 142, 255, 255);\n"
" height: 20px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::add-line:vertical:pressed {\n"
" border: 2px transparent grey;\n"
" border-bottom-left-radius: 7px;\n"
" border-bottom-right-radius: 7px;\n"
" background: rgb(181,181,181);\n"
" height: 20px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:horizontal {\n"
" border: 2px transparent grey;\n"
" border-top-left-radius: 7px;\n"
" border-bottom-left-radius: 7px;\n"
" background: rgba(34, 142, 255, 255);\n"
" width: 20px;\n"
" subcontrol-position: left;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:horizontal:pressed {\n"
" border: 2px transparent grey;\n"
" border-top-left-radius: 7px;\n"
" border-bottom-left-radius: 7px;\n"
" background: rgb(181,181,181);\n"
" width: 20px;\n"
" subcontrol-position: left;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:vertical {\n"
" border: 2px transparent grey;\n"
" border-top-left-radius: 7px;\n"
" border-top-right-radius: 7px;\n"
" background: rgba(34, 142, 255, 255);\n"
" height: 20px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:vertical:pressed {\n"
" border: 2px transparent grey;\n"
" border-top-left-radius: 7px;\n"
" border-top-right-radius: 7px;\n"
" background: rgb(181,181,181);\n"
" height: 20px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::left-arrow:horizontal {\n"
" border: 1px transparent grey;\n"
" border-top-left-radius: 3px;\n"
" border-bottom-left-radius: 3px;\n"
" width: 6px;\n"
" height: 6px;\n"
" background: white;\n"
"}\n"
"QScrollBar::right-arrow:horizontal {\n"
" border: 1px transparent grey;\n"
" border-top-right-radius: 3px;\n"
" border-bottom-right-radius: 3px;\n"
" width: 6px;\n"
" height: 6px;\n"
" background: white;\n"
"}\n"
"QScrollBar::up-arrow:vertical {\n"
" border: 1px transparent grey;\n"
" border-top-left-radius: 3px;\n"
" border-top-right-radius: 3px;\n"
" width: 6px;\n"
" height: 6px;\n"
" background: white;\n"
"}\n"
"QScrollBar::down-arrow:vertical {\n"
" border: 1px transparent grey;\n"
" border-bottom-left-radius: 3px;\n"
" border-bottom-right-radius: 3px;\n"
" width: 6px;\n"
" height: 6px;\n"
" background: white;\n"
"}\n"
"QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {\n"
" background: none;\n"
"}\n"
"QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\n"
" background: none;\n"
"}")
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label = QtWidgets.QLabel(Dialog)
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
self.comboBox = QtWidgets.QComboBox(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth())
self.comboBox.setSizePolicy(sizePolicy)
self.comboBox.setObjectName("comboBox")
self.horizontalLayout_2.addWidget(self.comboBox)
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
self.lineEdit = QtWidgets.QLineEdit(Dialog)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout_2.addWidget(self.lineEdit)
self.pushButton_4 = QtWidgets.QPushButton(Dialog)
self.pushButton_4.setObjectName("pushButton_4")
self.horizontalLayout_2.addWidget(self.pushButton_4)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.listWidget = QtWidgets.QListWidget(Dialog)
self.listWidget.setObjectName("listWidget")
self.verticalLayout.addWidget(self.listWidget)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_2 = QtWidgets.QPushButton(Dialog)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.pushButton_3 = QtWidgets.QPushButton(Dialog)
self.pushButton_3.setObjectName("pushButton_3")
self.horizontalLayout.addWidget(self.pushButton_3)
spacerItem = QtWidgets.QSpacerItem(268, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "分类"))
self.label_2.setText(_translate("Dialog", "搜索"))
self.pushButton_4.setText(_translate("Dialog", "查找"))
self.pushButton_2.setText(_translate("Dialog", "导入"))
self.pushButton_3.setText(_translate("Dialog", "导出"))
self.pushButton.setText(_translate("Dialog", "添加"))
| StarcoderdataPython |
# RTの基本データ。
from typing import Optional
from discord.ext import commands
class Colors:
    """Embed colours used by the bot."""
    normal = 0x0066ff
    unknown = 0x80989b
    error = 0xeb6ea5
    player = 0x2ca9e1
    queue = 0x007bbb


data = {
    # Command prefixes per deployment flavour.
    "prefixes": {
        "test": [
            "r2!", "R2!", "r2.", "R2.",
            "りっちゃん2 ", "りっちゃん2 ", "r2>"
        ],
        "production": [
            "rt!", "Rt!", "RT!", "rt.", "Rt.",
            "RT.", "りつ!", "りつ."
        ],
        "sub": [
            "rt#", "りつちゃん ", "りつたん ", "りつ ",
            "りつちゃん ", "りつたん ", "りつ ", "Rt#", "RT#"
        ],
        "alpha": ["r3!", "r3>"]
    },
    # Only real colour attributes: a bare dir(Colors) also returns dunder
    # names such as __module__, which must not leak into the colour table.
    "colors": {
        name: getattr(Colors, name)
        for name in dir(Colors)
        if not name.startswith("_")
    },
    # Discord user ids with bot-admin rights.
    "admins": [
        634763612535390209, 266988527915368448,
        667319675176091659, 693025129806037003
    ]
}
# Alternate embed palette used when the bot runs as the RT-chan persona.
RTCHAN_COLORS = {
    "normal": 0xa6a5c4,
    "player": 0x84b9cb,
    "queue": 0xeebbcb
}
def is_admin(user_id: Optional[int] = None):
    """Check whether a user is a bot administrator.

    Called with no argument, returns a discord.py command check that
    validates the command author; called with an int user id, evaluates
    that id immediately and returns the result.
    """
    def predicate(ctx):
        # An explicit int user id overrides the context author.
        target = user_id if isinstance(user_id, int) else ctx.author.id
        return target in data["admins"]

    if user_id is None:
        return commands.check(predicate)
    return predicate(user_id)
# Japanese display names for Discord permission flags.
# (The trailing dataset artifact that corrupted the closing brace of this
# dict has been removed so the module parses again.)
PERMISSION_TEXTS = {
    "administrator": "管理者",
    "view_audit_log": "監査ログを表示",
    "manage_guild": "サーバー管理",
    "manage_roles": "ロールの管理",
    "manage_channels": "チャンネルの管理",
    "kick_members": "メンバーをキック",
    "ban_members": "メンバーをBAN",
    "create_instant_invite": "招待を作成",
    "change_nickname": "ニックネームの変更",
    "manage_nicknames": "ニックネームの管理",
    "manage_emojis": "絵文字の管理",
    "manage_webhooks": "ウェブフックの管理",
    "manage_events": "イベントの管理",
    "manage_threads": "スレッドの管理",
    "use_slash_commands": "スラッシュコマンドの使用",
    "view_guild_insights": "テキストチャンネルの閲覧&ボイスチャンネルの表示",
    "send_messages": "メッセージを送信",
    "send_tts_messages": "TTSメッセージを送信",
    "manage_messages": "メッセージの管理",
    "embed_links": "埋め込みリンク",
    "attach_files": "ファイルを添付",
    "read_message_history": "メッセージ履歴を読む",
    "mention_everyone": "@everyone、@here、全てのロールにメンション",
    "external_emojis": "外部の絵文字の使用",
    "add_reactions": "リアクションの追加",
    "connect": "接続",
    "speak": "発言",
    "stream": "動画",
    "mute_members": "メンバーをミュート",
    "deafen_members": "メンバーのスピーカーをミュート",
    "move_members": "メンバーを移動",
    "use_voice_activation": "音声検出を使用",
    "priority_speaker": "優先スピーカー"
}
#!/usr/bin/python
# Written for Python version 2.7
# Hello World server in Python
# Binds REP socket to tcp://*:5555
# Expects b"Hello" from client, replies with b"World"
import time
import zmq
# NOTE: Python 2 syntax (print statements) — run with python2.7 as the
# header comments state.
print 'pyzmq version: ', zmq.pyzmq_version()
# One ZeroMQ context per process; a REP socket answers each request with
# exactly one reply, in strict recv/send alternation.
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")  # listen on every interface, port 5555
print "Server running."
while True:
    message = socket.recv() #Wait for next request from client
    print("Received request: %s" % message)
    socket.send(b"World") # Send reply back to client
    time.sleep(.1)  # simulate a little work before the next recv
| StarcoderdataPython |
3589733 | <reponame>TristanGomez44/openseg.pytorch
import numpy as np
from PIL import Image
import glob
# Convert every .npy dump under results/ into PNG image(s).
for path in glob.glob("results/*npy"):
    npFile = np.load(path)
    if len(npFile.shape) == 4:
        # 4D array: drop the batch dimension and move channels last
        # (assumes NCHW layout — TODO confirm against the producer).
        npFile = npFile[0].transpose(1,2,0)
    else:
        if npFile.shape[0] == 1:
            # Singleton leading dimension (e.g. 1xHxW): squeeze it.
            npFile = npFile[0]
    if npFile.dtype != "uint8":
        # PIL wants 8-bit data; astype truncates, it does not rescale.
        npFile = npFile.astype("uint8")
    if npFile.shape[0] == 4:
        # Leading dimension of 4: save the four slices as separate PNGs.
        # NOTE(review): after the transpose above, shape[0] is the image
        # height, not a channel count — confirm this branch is intended.
        for i in range(npFile.shape[0]):
            print(npFile[i].shape,npFile[i].dtype)
            im = Image.fromarray(npFile[i])
            im.save(path.replace(".npy","_{}.png".format(i)))
    else:
        print(npFile.shape,npFile.dtype)
        im = Image.fromarray(npFile)
        # NOTE(review): replaces the first "npy" occurrence anywhere in the
        # path, not just the extension — fine for results/*.npy names.
        im.save(path.replace("npy","png"))
| StarcoderdataPython |
import numpy as np

if __name__ == '__main__':
    # Read "rows cols" from stdin and print a rows x cols identity matrix.
    # (Dataset artifacts fused onto the import and print lines have been
    # stripped so the script parses again.)
    dim = tuple(map(int, input().split()))
    # Print a space in front of non-negative numbers (expected output style).
    np.set_printoptions(sign=' ')
    print(np.eye(dim[0], dim[1]))
8050872 | from bson.objectid import ObjectId
import gridfs
import Controllers.Common as common
import Controllers.Constants as cons
from Controllers.FilesModel import Files
from Controllers.ECGModel import ECG
def connect_gridfs(db):
    """Return a GridFS handle bound to the given pymongo database."""
    return gridfs.GridFS(db)
def save_ecg_property(ecg_col, ecg_property: ECG):
    """Insert one ECG property document into `ecg_col`.

    Returns the ObjectId of the inserted document.
    """
    json_doc = common.parse_json(ecg_property.__dict__)
    output = ecg_col.insert_one(json_doc)
    if output:
        # Log the generated id itself; the original printed the whole
        # InsertOneResult wrapper under the "ecg_id" label.
        print('ecg_id: ' + str(output.inserted_id))
    return output.inserted_id
def save_ecg_file(db, file: Files, final_ecg_property: ECG):
    """Store the raw ECG file in GridFS and attach ECG metadata to fs.files.

    Returns the GridFS file ObjectId.
    """
    # `with` guarantees the handle is closed; the original opened the file
    # and never closed it.
    with open(file.file_path, "rb") as file_data:
        data = file_data.read()
    fs = connect_gridfs(db)
    result = fs.put(data=data, file_name=file.file_name)
    output = fs.get(result)
    file_id = output._id
    # Attach the application-level metadata to the GridFS files document.
    db.fs.files.update(
        {cons.ECG_ID_SHORT: file_id},
        {cons.CONS_SET_STR: {
            cons.ECG_ID: file.ecg_id,
            cons.FILE_ECG_FILE_NAME_EXT: file.file_name_ext,
            cons.ECG_CHANNEL: final_ecg_property.channel
        }})
    if file_id:
        print('file_id: ' + str(file_id))
        print('file_path: ' + file.file_path)
    return file_id
def find_by_query(ecg_col, query_type, field_name, list_item):
    """Run find() with a single {field: {operator: operand}} filter,
    e.g. find_by_query(col, "$in", "ecg_id", ids)."""
    return ecg_col.find({field_name: {query_type: list_item}})
def find(col):
    """Return a cursor over every document in the collection."""
    return col.find()
def find_with_aggregate(my_col, query_data):
    """Run a two-stage aggregation: a $match stage followed by a $lookup.

    `query_data` maps the stage keys (cons.CONS_QUERY_MATCH_QUERY and
    cons.CONS_QUERY_LOOKUP_QUERY) to the corresponding stage bodies, e.g.
    {"$match": {"_id": ObjectId(...)},
     "$lookup": {"from": "ecg", "localField": "source",
                 "foreignField": "source", "as": "ecg"}}.
    """
    pipeline = [
        {cons.CONS_QUERY_MATCH_QUERY: query_data[cons.CONS_QUERY_MATCH_QUERY]},
        {cons.CONS_QUERY_LOOKUP_QUERY: query_data[cons.CONS_QUERY_LOOKUP_QUERY]},
    ]
    return my_col.aggregate(pipeline)
def retrieve_ecg_file(db, list_selected_ecg_id):
    """Fetch the GridFS payloads for the given ecg ids as Files objects."""
    matches = find_by_query(
        db.fs.files, cons.CONS_QUERYIN_STR, cons.ECG_ID, list_selected_ecg_id)
    fs = connect_gridfs(db)
    return [
        Files(
            file_name=doc[cons.ECG_FILE_NAME],
            file_name_ext=doc[cons.FILE_ECG_FILE_NAME_EXT],
            output_data=fs.get(doc[cons.FILE_ID_SHORT]).read(),
            ecg_id=doc[cons.ECG_ID],
            channel=doc[cons.ECG_CHANNEL])
        for doc in matches
    ]
| StarcoderdataPython |
# Author : <NAME>
# Project3 Differential Astar Turtlebot
import pygame
import numpy as np
import sys
import os
import math
from time import time
from pygame.locals import *
import argparse
# Colors
sky_blue = [135,206,235]  # display background
red = [255,0,0]           # final path cells
lime = [0,255,0]          # obstacles
white = [255,255,255]     # explored nodes
# Robot Parameters
radius=0.076/2  # wheel radius [m]; used by movement()
l = 0.230       # wheel base (distance between wheels) [m]
done = False    # pygame window-close flag
#-------------------------------------#
# Helper Functions #
#-------------------------------------#
def goal_achieved(node, goal, d):
    """Return 1 when `node` lies strictly inside a circle of radius `d`
    around `goal`, else 0."""
    dist_sq = (node[0] - goal[0]) ** 2 + (node[1] - goal[1]) ** 2
    return 1 if dist_sq < d ** 2 else 0
def movement(node,ul,ur,theta):
    """Integrate differential-drive kinematics from `node` over 400 steps
    of 5 ms (2 s total) with wheel speeds ul/ur and initial heading theta.

    Uses the module globals `radius` (wheel radius) and `l` (wheel base).
    Returns ([x, y] floored to 2 decimals, final heading).

    NOTE(review): n_nd accumulates the pose at every one of the 400
    integration steps (a running sum over the trajectory) rather than
    keeping only the final (x, y); the planner's map scale is tuned around
    this, so confirm the scaling is intended before reusing this function.
    """
    n_nd=[0,0]
    x=node[0]
    y=node[1]
    for i in range(0,400):
        # Standard differential-drive kinematics, dt = 0.005 s.
        d_theta = (radius/l)*(ur-ul)*0.005
        dx = (radius/2)*(ul+ur)*(math.cos(theta))*0.005
        dy = (radius/2)*(ul+ur)*(math.sin(theta))*0.005
        x = x + dx
        y = y + dy
        theta = theta + d_theta
        n_nd[0]=(n_nd[0]+x)
        n_nd[1]=(n_nd[1]+y)
    # Truncate (floor) to two decimals so equal nodes compare equal.
    n_nd = [ ((math.floor(n_nd[0] * 100)) / 100.0), ((math.floor(n_nd[1] * 100)) / 100.0) ]
    return n_nd,theta
def checkPoint(node, points):
    """Return True when `node` lies within 0.1 of any point in `points`.

    Used to test whether a candidate node is already covered by the
    closed (visited) list. An empty `points` yields False.
    """
    # any() replaces the original loop, which also carried an unused
    # `flag` local.
    return any(
        (node[0] - point[0]) ** 2 + (node[1] - point[1]) ** 2 - 0.1 ** 2 < 0
        for point in points
    )
# Creating Map with obstacles
def Map(x,y, resolution, d):
    """Return 1 when cell (x, y) is blocked, else 0.

    `d` is the inflation margin (robot radius + clearance); `resolution`
    scales the 100x100 world into grid cells.  Obstacles: the outer walls,
    four circular pillars of radius 10, and three 15x15 squares.
    """
    # Outer walls, inflated by d.
    if (x< (d/resolution)) or (x >(100-d)/resolution) or (y<(d/resolution)) or (y>(100-d)/resolution):
        return 1
    # Circular pillars (center, top-right, bottom-right, bottom-left).
    for cx, cy in ((50, 50), (70, 80), (70, 20), (30, 20)):
        if ((x-math.ceil(cx/resolution))**2+(y-math.ceil(cy/resolution))**2)<math.ceil((10+d)/resolution)**2:
            return 1
    # Axis-aligned square obstacles.
    if ((22.5-d/resolution) <= x <= (37.5+d/resolution) and (72.5-d/resolution) <= y <= (87.5+d/resolution)):
        return 1
    if ((2.5-d/resolution) <= x <= (15+d/resolution) and (42.5-d/resolution) <= y <= (57.5+d/resolution)):
        return 1
    if ((82.5-d/resolution) <= x <= (97.5+d/resolution) and (42.5-d/resolution) <= y <= (57.5+d/resolution)):
        return 1
    return 0
# To display obstacles
def obstacle_disp_pts():
    """Circle obstacles in *display* coordinates (pygame's y axis points
    down), each as [center_x, center_y, radius]."""
    center = [50, 50, 10]
    top_right = [70, 20, 10]
    bottom_right = [70, 80, 10]
    bottom_left = [30, 80, 10]
    return center, top_right, bottom_right, bottom_left
# Heuristic
def heuristic(node, goal):
    """Euclidean distance from `node` to `goal` (the A* h-value)."""
    dx = node[0] - goal[0]
    dy = node[1] - goal[1]
    return math.sqrt(dx**2 + dy**2)
def main(Args):
    """Plan a differential-drive path with A* and optionally animate it.

    Args (an argparse.Namespace) supplies: start, goal (world coordinates,
    scaled by 10/resolution into map cells), rpm1/rpm2 (wheel speeds),
    clearance, resolution, scale (display zoom), pygame (truthy -> show
    the pygame animation) and theta (initial heading).  Exits via
    sys.exit() on invalid input or when no path exists.
    """
    print("Differential A-star Algorithm")

    # ----- unpack CLI arguments ------------------------------------------
    resolution = Args.resolution
    rpm1 = Args.rpm1
    rpm2 = Args.rpm2
    clearance = Args.clearance
    scale = Args.scale
    pygame_flag = Args.pygame
    theta_i = Args.theta
    start = [10 * m / resolution for m in Args.start]
    goal = [10 * n / resolution for n in Args.goal]

    margin = radius + clearance  # obstacle inflation used everywhere below

    # ----- input validation ----------------------------------------------
    if (Map(goal[0], goal[1], resolution, margin)
            or Map(start[0], start[1], resolution, margin)):
        sys.exit(" Error: goal point or start point lies within the obstacles")
    if (start[0] not in range(0, 100 * scale + 1) or goal[0] not in range(0, 100 * scale + 1)
            or start[1] not in range(0, 100 * scale + 1) or goal[1] not in range(0, 100 * scale + 1)):
        sys.exit("Error: Entered point outside the workspace")

    # ----- open / closed lists (parallel arrays) -------------------------
    point_node = [start]                                 # parent of each open node
    c_nd = [start]                                       # open node [x, y]
    cst = [0]                                            # g(n): cost to come
    heuristic_node = [round(heuristic(start, goal), 2)]  # f(n) = g + h
    theta = [theta_i]                                    # heading on arrival
    act = [[0, 0]]                                       # wheel action used to arrive
    vp_nd, vc_nd, v_cst, vh_nd, v_theta, v_act = [], [], [], [], [], []  # closed list

    # Eight differential-drive motion primitives as (ul, ur, step_cost).
    # NOTE: the original code applied (rpm1, rpm1) twice (costs 0.7 then
    # 0.6) and never tried (rpm2, rpm1); the duplicate is replaced by the
    # missing (rpm2, rpm1) turn so both turn directions are available.
    action_set = [
        (0, rpm1, 0.9),
        (0, rpm2, 0.8),
        (rpm1, rpm2, 0.6),
        (rpm1, rpm1, 0.7),
        (rpm2, rpm2, 0.5),
        (rpm2, rpm1, 0.6),
        (rpm2, 0, 0.8),
        (rpm1, 0, 0.9),
    ]

    def expand(x, ndx):
        """Apply every motion primitive at open node x; add/relax neighbours.

        This replaces eight near-identical copy-pasted blocks from the
        original implementation.
        """
        for ul, ur, step in action_set:
            nd, new_theta = movement(ndx, ul, ur, theta[x])
            if Map(nd[0], nd[1], resolution, margin) == 1:
                continue  # collides with an obstacle or the walls
            if checkPoint(nd, vc_nd):
                continue  # already closed (within 0.1 of a visited node)
            # Search the open list from the back for a duplicate of nd.
            for p in range(len(c_nd) - 1, -1, -1):
                if nd == c_nd[p]:
                    if cst[p] >= cst[x] + step:  # cheaper route found: relax
                        point_node[p] = ndx
                        cst[p] = round(cst[x] + step, 3)
                        heuristic_node[p] = round(cst[x] + step + heuristic(nd, goal), 2)
                        act[p] = [ul, ur]
                        theta[p] = new_theta
                    break
            else:  # nd is new: append it to the open list
                point_node.append(ndx)
                c_nd.append(nd)
                theta.append(new_theta)
                cst.append(round(step + cst[x], 3))
                heuristic_node.append(round(step + cst[x] + heuristic(nd, goal), 2))
                act.append([ul, ur])

    # ----- A* exploration -------------------------------------------------
    print("Exploration Started")
    start_time = time()
    x = 0
    ndx = start
    found = False
    while True:
        expand(x, ndx)
        # Move the expanded node from the open list to the closed list.
        vp_nd.append(point_node.pop(x))
        vc_nd.append(c_nd.pop(x))
        v_cst.append(cst.pop(x))
        vh_nd.append(heuristic_node.pop(x))
        v_theta.append(theta.pop(x))
        v_act.append(act.pop(x))
        if goal_achieved(vc_nd[-1], goal, margin) == 1:
            found = True
            break
        if not c_nd:
            # Open list exhausted before reaching the goal.  The original
            # kept looping here (popping from empty lists); bail out so the
            # "Path not available" message below is actually reachable.
            break
        x = heuristic_node.index(min(heuristic_node))  # node with best f(n)
        ndx = c_nd[x][:]
    if not found:
        sys.exit("Path not available")

    # ----- backtrack the optimal path (goal -> start) ---------------------
    # Walk parent links through the closed list, scanning from the back.
    sequence = [vc_nd[-1]]
    node = vp_nd[-1]
    sequence.append(node)
    i = 1
    while node != start:
        if vc_nd[-i] == node:
            node = vp_nd[-i]
            sequence.append(node)
        i += 1
    # (The original also rebuilt the wheel-action sequence here for a
    # removed gazebo mode; that dead computation is dropped.)

    vc_nd = np.array(vc_nd) * resolution
    sequence = np.array(sequence) * resolution
    end_time = time()
    print("Time taken {} seconds to solve".format(end_time - start_time))

    def animate():
        """Draw the map, the explored nodes and the final path with pygame."""
        print("Pygame Display Output")
        pygame.init()
        screen = pygame.display.set_mode([100 * scale, 100 * scale])
        pygame.display.set_caption("Output")
        finished = False
        while not finished:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    finished = True
            screen.fill(sky_blue)
            # Obstacles, in display coordinates (y axis points down).
            for cx, cy, r in obstacle_disp_pts():
                pygame.draw.circle(screen, lime, (cx * scale, cy * scale), r * scale)
            pygame.draw.rect(screen, lime, [22.5 * scale, 12.5 * scale, 15 * scale, 15 * scale])
            pygame.draw.rect(screen, lime, [2.5 * scale, 42.5 * scale, 15 * scale, 15 * scale])
            pygame.draw.rect(screen, lime, [82.5 * scale, 42.5 * scale, 15 * scale, 15 * scale])
            pygame.display.flip()
            # Explored nodes (world -> display requires the y flip).
            for node in vc_nd:
                pygame.event.get()
                pygame.time.wait(1)
                pygame.draw.rect(screen, white,
                                 [node[0] * scale, 100 * scale - node[1] * scale,
                                  resolution * scale, resolution * scale])
                pygame.display.flip()
            # Optimal path, drawn from start to goal.
            for node in sequence[::-1]:
                pygame.time.wait(1)
                pygame.draw.rect(screen, red,
                                 [node[0] * scale, 100 * scale - node[1] * scale,
                                  resolution * scale, resolution * scale])
                pygame.display.flip()
            pygame.time.wait(10000)
            finished = True
        pygame.quit()

    if pygame_flag:
        animate()
if __name__ == "__main__":
    # CLI entry point: collect the planner parameters and run main().
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--start', type=float, nargs="+", default= [1,3], help='Initial position, Default: (1,3)')
    Parser.add_argument('--goal', type=float, nargs="+", default= [5,3], help='Goal position, Default: (5,3)')
    Parser.add_argument('--scale',type = int,default=5,help="Display scale")
    # NOTE(review): --pygame is parsed as a plain string, so even
    # "--pygame False" is truthy and still enables the display; pass an
    # empty string to disable it.
    Parser.add_argument('--pygame',default='True',help="Flag to show Pygame Simulation")
    # Parser.add_argument('--gazebo',default='False',help="Flag to show Gazebo simulation")
    Parser.add_argument('--clearance',type = float,default=1,help="Clearance to maintain from obstacle(in meter")
    Parser.add_argument('--resolution',type = float,default=1,help="resolution of the map")
    Parser.add_argument('--rpm1',type = int,default=5,help="rpm1")
    Parser.add_argument('--rpm2',type = int,default=10,help="rpm2")
    Parser.add_argument('--theta',type = int,default=0,help="theta")
    Args = Parser.parse_args()
    main(Args)
| StarcoderdataPython |
1933078 | <filename>flow_apps/base/river/core/classworkflowobject.py
from django.contrib.contenttypes.models import ContentType
from river.driver.mssql_driver import MsSqlDriver
from river.driver.orm_driver import OrmDriver
from river.models import State, TransitionApprovalMeta, Workflow, app_config, TransitionMeta
class ClassWorkflowObject(object):
    """Class-level workflow helper bound to a (model class, state field) pair."""

    def __init__(self, wokflow_object_class, field_name):
        self.wokflow_object_class = wokflow_object_class
        self.field_name = field_name
        self.workflow = Workflow.objects.filter(field_name=self.field_name, content_type=self._content_type).first()
        self._cached_river_driver = None

    @property
    def _river_driver(self):
        # Lazily build (and memoize) the backend-specific driver.
        if not self._cached_river_driver:
            driver_class = MsSqlDriver if app_config.IS_MSSQL else OrmDriver
            self._cached_river_driver = driver_class(self.workflow, self.wokflow_object_class, self.field_name)
        return self._cached_river_driver

    def get_on_approval_objects(self, as_user):
        """Workflow objects that `as_user` can currently approve."""
        pending_ids = list(self.get_available_approvals(as_user).values_list('object_id', flat=True))
        return self.wokflow_object_class.objects.filter(pk__in=pending_ids)

    def get_available_approvals(self, as_user):
        """Approvals `as_user` may act on, delegated to the driver."""
        return self._river_driver.get_available_approvals(as_user)

    @property
    def initial_state(self):
        """The workflow's initial state, or None when no workflow exists."""
        workflow = Workflow.objects.filter(content_type=self._content_type, field_name=self.field_name).first()
        return workflow.initial_state if workflow else None

    @property
    def final_states(self):
        """States appearing only as transition destinations (terminal states)."""
        transitions = TransitionMeta.objects.filter(workflow=self.workflow).values_list("source_state", "destination_state")
        sources = {src for src, _ in transitions}
        destinations = {dst for _, dst in transitions}
        return State.objects.filter(pk__in=destinations - sources)

    @property
    def _content_type(self):
        return ContentType.objects.get_for_model(self.wokflow_object_class)
| StarcoderdataPython |
337074 | import base64
def encrypt(private):
    """Encode a text string with URL-safe Base64 and return it as text."""
    encoded_bytes = base64.urlsafe_b64encode(private.encode())
    return encoded_bytes.decode()
def decrypt(public):
    """Decode a URL-safe Base64 text string back to the original text.

    Inverse of encrypt(). (A dataset artifact fused onto the return line
    made the original unparseable; it has been removed.)
    """
    private = base64.urlsafe_b64decode(public.encode()).decode()
    return private
# Generated by Django 3.2.10 on 2022-01-01 19:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a DB-level CHECK constraint ensuring
    an Order references at most one of bundle / course / record (or none
    of them). Do not edit the generated operations by hand."""
    dependencies = [
        ('orders', '0020_OrderAuthorMigrationFix'),
    ]
    operations = [
        migrations.AddConstraint(
            model_name='order',
            constraint=models.CheckConstraint(check=models.Q(models.Q(models.Q(('bundle__isnull', True), ('course__isnull', False), ('record__isnull', True)), models.Q(('bundle__isnull', True), ('course__isnull', True), ('record__isnull', False)), models.Q(('bundle__isnull', False), ('course__isnull', True), ('record__isnull', True)), models.Q(('bundle__isnull', True), ('course__isnull', True), ('record__isnull', True)), _connector='OR')), name='only_one_or_zero_item_type_is_allowed'),
        ),
    ]
| StarcoderdataPython |
def displayHero():
    """Print a short hero greeting.

    (Dataset artifacts fused onto both original lines made the file
    unparseable; they have been stripped.)
    """
    print("I am hero")
9629115 | from Recognizers_POMDP_Antie_Sim import *
class PomdpAntiePeriphSimRecognizer(PomdpAntieSimRecognizer):
    # Peripheral-vision variant of the simulated recognizer.
    def recNeedTurn(cls,desc,viewCache):
        # NOTE(review): the `... and False` makes this always return False,
        # i.e. a turn is never deemed necessary; the len(viewCache) < 4
        # guard is currently dead code — confirm this is an intentional
        # stub before relying on it.
        return (len(viewCache)<4 and
                False
                )
    # Old-style classmethod registration (pre-decorator syntax).
    recNeedTurn = classmethod(recNeedTurn)
1866394 | <reponame>juntaoy/dali-md<filename>nn_md.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import json
import threading
import numpy as np
import tensorflow as tf
import util
import md_ops
class NNMD(object):
    """Abstract base class for neural mention-detection models (TF1 graph mode).

    Builds the shared graph plumbing: a PaddingFIFOQueue fed by a background
    thread, the clipped-gradient training op with exponential LR decay, and
    common tensor utilities. Subclasses must implement
    ``add_model_specific_valuables``, ``tensorize_example`` and
    ``get_predictions_and_loss``.
    """

    def __init__(self, config):
        self.config = config
        # Subclass declares the (dtype, shape) of every queued input tensor.
        input_props = self.add_model_specific_valuables(config)
        self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
        dtypes, shapes = zip(*input_props)
        # Examples are enqueued by the background thread and dequeued here.
        queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
        self.enqueue_op = queue.enqueue(self.queue_input_tensors)
        self.input_tensors = queue.dequeue()
        self.predictions, self.loss = self.get_predictions_and_loss(self.input_tensors)
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self.reset_global_step = tf.assign(self.global_step, 0)
        # Exponentially decayed learning rate, global-norm gradient clipping.
        learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
                                                   self.config["decay_frequency"], self.config["decay_rate"],
                                                   staircase=True)
        trainable_params = tf.trainable_variables()
        gradients = tf.gradients(self.loss, trainable_params)
        gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
        optimizers = {
            "adam": tf.train.AdamOptimizer,
            "sgd": tf.train.GradientDescentOptimizer
        }
        optimizer = optimizers[self.config["optimizer"]](learning_rate)
        self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)

    def add_model_specific_valuables(self, config):
        """Return a list of (dtype, shape) pairs describing the input tensors."""
        raise NotImplementedError

    def tensorize_example(self, example, is_training):
        """Convert one JSON example into the tuple of numpy input tensors."""
        raise NotImplementedError

    def get_predictions_and_loss(self, inputs):
        """Build the model graph; return (predictions, scalar loss)."""
        raise NotImplementedError

    def get_top_mentions(self, num_words, candidate_starts, candidate_ends, candidate_mention_scores):
        """Select the top-scoring candidate spans.

        With 'high_f1' selection, all spans are ranked and then filtered by a
        sigmoid-score threshold; otherwise ('high_recall', the default) the
        top ``top_span_ratio * num_words`` spans are kept unfiltered.
        Returns (top_span_starts, top_span_ends).
        """
        if self.config['mention_selection_method'] == 'high_f1':
            k = num_words  # will be filtered later by the score threshold
        else:  # default is high_recall
            k = tf.to_int32(tf.floor(tf.to_float(num_words) * self.config["top_span_ratio"]))
        top_span_indices = md_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
                                                tf.expand_dims(candidate_starts, 0),
                                                tf.expand_dims(candidate_ends, 0),
                                                tf.expand_dims(k, 0),
                                                True)  # [1, k]
        top_span_indices.set_shape([1, None])
        top_span_indices = tf.squeeze(top_span_indices, 0)  # [k]
        top_span_starts = tf.gather(candidate_starts, top_span_indices)  # [k]
        top_span_ends = tf.gather(candidate_ends, top_span_indices)  # [k]
        top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices)  # [k]
        if self.config['mention_selection_method'] == 'high_f1':
            # Keep only spans whose sigmoid score clears the configured threshold.
            sigmoid_span_mention_scores = tf.nn.sigmoid(top_span_mention_scores)
            threshold_mask = tf.greater_equal(sigmoid_span_mention_scores, self.config['mention_selection_threshold'])
            top_span_starts = tf.boolean_mask(top_span_starts, threshold_mask)
            top_span_ends = tf.boolean_mask(top_span_ends, threshold_mask)
        return top_span_starts, top_span_ends

    def start_enqueue_thread(self, session, cross_validate=-1, nfold=1):
        """Start a daemon thread that endlessly enqueues shuffled training examples.

        When ``cross_validate >= 0``, lines whose index modulo ``nfold``
        equals ``cross_validate`` are held out (they form the validation fold).
        """
        with open(self.config["train_path"]) as f:
            if cross_validate < 0:
                train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
            else:
                train_examples = [json.loads(jsonline) for i, jsonline in enumerate(f.readlines()) if i % nfold != cross_validate]

        def _enqueue_loop():
            # Runs forever; reshuffles the training set each epoch.
            while True:
                random.shuffle(train_examples)
                for example in train_examples:
                    tensorized_example = self.tensorize_example(example, is_training=True)
                    feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
                    session.run(self.enqueue_op, feed_dict=feed_dict)

        enqueue_thread = threading.Thread(target=_enqueue_loop)
        enqueue_thread.daemon = True
        enqueue_thread.start()

    def tensorize_mentions(self, mentions):
        """Split a list of (start, end) mentions into two parallel numpy arrays."""
        starts, ends = [], []
        if len(mentions) > 0:
            for m in mentions:
                starts.append(m[0])
                ends.append(m[1])
        return np.array(starts), np.array(ends)

    def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends):
        """Return a boolean vector: True where a candidate span equals a gold span."""
        same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0))  # [num_labeled, num_candidates]
        same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0))  # [num_labeled, num_candidates]
        same_span = tf.logical_and(same_start, same_end)  # [num_labeled, num_candidates]
        candidate_labels = tf.reduce_any(same_span, axis=0)  # [num_candidates]
        return candidate_labels

    def get_dropout(self, dropout_rate, is_training):
        """Return the keep probability: 1 - rate while training, 1.0 otherwise."""
        return 1 - (tf.to_float(is_training) * dropout_rate)

    def sigmoid_loss(self, span_scores, span_labels):
        """Summed sigmoid cross-entropy between span logits and binary labels."""
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.to_float(span_labels), logits=span_scores)
        loss = tf.reduce_sum(loss)
        return loss

    def get_mention_scores(self, span_emb, dropout):
        """Score each span embedding with a shared feed-forward network."""
        with tf.variable_scope("mention_scores"):
            return util.ffnn(span_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, dropout)  # [k, 1]

    def lstm_contextualize(self, text_emb, text_len, text_len_mask, lstm_dropout, flatten_emb=True):
        """Run stacked bidirectional LSTMs (with highway gates) over the text.

        Returns either the flattened per-word embeddings (default) or the
        padded [num_sentences, max_sentence_length, emb] tensor.
        """
        num_sentences = tf.shape(text_emb)[0]
        current_inputs = text_emb  # [num_sentences, max_sentence_length, emb]
        for layer in range(self.config["contextualization_layers"]):
            with tf.variable_scope("layer_{}".format(layer)):
                with tf.variable_scope("fw_cell"):
                    cell_fw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, lstm_dropout)
                with tf.variable_scope("bw_cell"):
                    cell_bw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, lstm_dropout)
                # Tile the learned initial state across the sentence batch.
                state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
                state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
                (fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw=cell_fw,
                    cell_bw=cell_bw,
                    inputs=current_inputs,
                    sequence_length=text_len,
                    initial_state_fw=state_fw,
                    initial_state_bw=state_bw)
                text_outputs = tf.concat([fw_outputs, bw_outputs], 2)  # [num_sentences, max_sentence_length, emb]
                text_outputs = tf.nn.dropout(text_outputs, lstm_dropout)
                if layer > 0:
                    # Highway connection mixing this layer's output with its input.
                    highway_gates = tf.sigmoid(util.projection(text_outputs, util.shape(text_outputs, 2)))  # [num_sentences, max_sentence_length, emb]
                    text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs
                current_inputs = text_outputs
        if flatten_emb:
            return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
        else:
            return text_outputs

    def flatten_emb_by_sentence(self, emb, text_len_mask):
        """Flatten [num_sentences, max_len(, emb)] to per-word rows, dropping padding."""
        num_sentences = tf.shape(emb)[0]
        max_sentence_length = tf.shape(emb)[1]
        emb_rank = len(emb.get_shape())
        if emb_rank == 2:
            flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
        elif emb_rank == 3:
            flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])
        else:
            raise ValueError("Unsupported rank: {}".format(emb_rank))
        return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))

    def load_eval_data(self):
        """Lazily load and tensorize the eval set.

        NOTE(review): assumes ``self.eval_data`` was initialized to None
        elsewhere (e.g. by a subclass) — confirm before reuse.
        """
        if self.eval_data is None:
            def load_line(line):
                example = json.loads(line)
                return self.tensorize_example(example, is_training=False), example
            with open(self.config["eval_path"]) as f:
                self.eval_data = [load_line(l) for l in f.readlines()]
            print("Loaded {} eval examples.".format(len(self.eval_data)))

    def evaluate(self, session):
        """Compute mention precision/recall/F1 over the eval set.

        Returns (tf summary proto, mention recall).

        NOTE(review): divides by (tp+fn) and (tp+fp); raises
        ZeroDivisionError when there are no gold or no predicted mentions.
        """
        self.load_eval_data()
        tp, fn, fp = 0, 0, 0
        for example_num, (tensorized_example, example) in enumerate(self.eval_data):
            feed_dict = {i: t for i, t in zip(self.input_tensors, tensorized_example)}
            top_span_starts, top_span_ends = session.run(self.predictions, feed_dict=feed_dict)
            # Gold mentions are the union of all cluster members.
            gold_mentions = set([(m[0], m[1]) for cl in example["clusters"] for m in cl])
            pred_mentions = set([(s, e) for s, e in zip(top_span_starts, top_span_ends)])
            tp += len(gold_mentions & pred_mentions)
            fn += len(gold_mentions - pred_mentions)
            fp += len(pred_mentions - gold_mentions)
            if example_num % 10 == 0:
                print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
        m_r = float(tp) / (tp + fn)
        m_p = float(tp) / (tp + fp)
        m_f1 = 2.0 * m_r * m_p / (m_r + m_p)
        print("Mention F1: {:.2f}%".format(m_f1 * 100))
        print("Mention recall: {:.2f}%".format(m_r * 100))
        print("Mention precision: {:.2f}%".format(m_p * 100))
        summary_dict = {}
        summary_dict["Mention F1"] = m_f1
        summary_dict["Mention recall"] = m_r
        summary_dict["Mention precision"] = m_p
        return util.make_summary(summary_dict), m_r
1830475 | <filename>sopel_modules/SpiceBot/Server.py<gh_stars>1-10
# coding=utf8
from __future__ import unicode_literals, absolute_import, division, print_function
"""
This is the SpiceBot Server system.
"""
from .Config import config as botconfig
class BotServer():
    """Tracks server-supplied IRC connection metadata.

    Stores the connect host, RPL_MYINFO (004) fields and RPL_ISUPPORT (005)
    tokens, pre-seeded with conservative defaults from the bot config.

    Fixes over the previous revision:
      * numeric ISUPPORT values (e.g. ``MODES=4``) are now stored as ints —
        previously ``int(raw_value)`` was computed and then discarded;
      * tokens whose value contains extra ``=`` or ``:`` characters no
        longer crash the unpacking (``split`` is bounded / ``partition``).
    """

    def __init__(self):
        self.linenumber = 0
        # Host we connected to vs. host the server reports about itself.
        self.dict = {
            "host_connect": botconfig.core.host,
            "host": botconfig.core.host,
        }
        # RPL_ISUPPORT (005) tokens, pre-seeded with safe defaults.
        self.isupport = {
            "NETWORK": botconfig.core.host,
            "TARGMAX": {
                "KICK": 1,
                'NOTICE': 1,
                'PRIVMSG': 1,
            },
        }
        # RPL_MYINFO (004) fields.
        self.myinfo = {
            "servername": botconfig.core.host,
            "version": None,
            "usermodes": [],
            "channelmodes": []
        }

    def rpl_welcome(self, trigger):
        """Record the real server name from the 001 welcome reply."""
        self.dict["host"] = str(trigger.sender).lower()

    def parse_reply_myinfo(self, trigger):
        """Record server name, version and mode letters from RPL_MYINFO (004)."""
        self.myinfo["servername"] = trigger.args[1]
        self.myinfo["version"] = trigger.args[2]
        self.myinfo["usermodes"] = list(trigger.args[3])
        self.myinfo["channelmodes"] = list(trigger.args[4])

    def parse_reply_isupport(self, trigger):
        """Parse RPL_ISUPPORT (005) tokens into ``self.isupport``.

        Handles three token shapes: bare flags (``EXCEPTS``), scalar values
        (``MODES=4``) and comma-separated ``name:value`` lists
        (``TARGMAX=KICK:1,NOTICE:1``).
        """
        # Numeric 005 is also used for RPL_BOUNCE; only parse real ISUPPORT.
        if trigger.args[-1] != 'are supported by this server':
            return
        for param in trigger.args[1:-1]:
            if '=' not in param:
                # Bare flag token, e.g. EXCEPTS.
                self.isupport[str(param)] = None
                continue
            # Bounded split: values may legally contain further '=' chars.
            key, raw_value = param.split('=', 1)
            if ',' not in raw_value:
                # Scalar value: store digits as ints, everything else as str.
                if str(raw_value).isdigit():
                    self.isupport[str(key)] = int(raw_value)
                else:
                    self.isupport[str(key)] = raw_value
            else:
                # Comma-separated list of "name" or "name:value" entries.
                if not isinstance(self.isupport.get(str(key)), dict):
                    self.isupport[str(key)] = {}
                for setting in str(raw_value).split(','):
                    name, _, value = setting.partition(':')
                    if not value:
                        # "name" or "name:" — no usable value.
                        self.isupport[str(key)][str(name)] = None
                    elif value.isdigit():
                        self.isupport[str(key)][str(name)] = int(value)
                    else:
                        self.isupport[str(key)][str(name)] = value
server = BotServer()
| StarcoderdataPython |
4815356 | from google.cloud import secretmanager
from google.api_core.exceptions import NotFound
import structlog
logger = structlog.get_logger()
def get_secret_list(project_id, secret_id) -> list:
    """
    Secrets are managed by Google Secret Manager.
    Return the decoded payloads of every currently ENABLED version of
    the secret named *secret_id* in the given *project_id*.
    """
    logger.info(f"Checking for versions of {secret_id}")

    # Client and fully-qualified resource name of the secret.
    client = secretmanager.SecretManagerServiceClient()
    parent = client.secret_path(project_id, secret_id)

    payloads = []
    try:
        for version in client.list_secret_versions(request={"parent": parent}):
            if version.state.name != "ENABLED":
                continue
            logger.info(f"Getting secret { version.name} from Secret Manager")
            response = client.access_secret_version(request={"name": version.name})
            payloads.append(response.payload.data.decode("UTF-8"))
    except NotFound as e:
        logger.exception("Secret not found", secret_id=secret_id, error=str(e))

    if not payloads:
        logger.error(f"No enabled versions of {secret_id}")
    return payloads
| StarcoderdataPython |
155502 | <reponame>egonrian/google-research
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Outputs the overall validation accuracy on the 2016 and 2018 validation sets.
Also outputs accuracy on train set and train-style valid set.
"""
import collections
import csv
import os
import re
import time
from absl import app
from absl import flags
from absl import logging
import gin
import gin.tf
import models
import rocstories_sentence_embeddings
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
import utils
# Alias for TensorFlow's GFile-based filesystem API.
gfile = tf.io.gfile

FLAGS = flags.FLAGS

# Command-line flags: where checkpoints live, where results go, and how
# evaluation runs (one-shot, all-current, or continuous watching).
flags.DEFINE_string('base_dir', '/tmp/model',
                    'Base directory containing checkpoints and .gin config.')
flags.DEFINE_string('checkpoint_name', None,
                    'Specific checkpoint to run one-time eval on. If set, '
                    'state of FLAGS.continuous is ignored.')
flags.DEFINE_string('output_dir', None,
                    'Directory in which to save evaluation results.')
flags.DEFINE_bool('continuous', False,
                  'If True, infintely loops over base_dir looking for new '
                  'checkpoints. If False, only loops once.')
flags.DEFINE_bool('sharded_eval', False,
                  'If True, break the dataset into shards and perform eval '
                  'separately on each. This is intended to be used to be able '
                  'to compute error bounds on accuracies.')
flags.DEFINE_float('timeout', 9600, 'If greater than 0, time out after this '
                   'many seconds.')
flags.DEFINE_string('data_dir', None, 'Where to look for TFDS datasets.')

tf.enable_v2_behavior()

# Metric names copied from the eval metrics into the per-checkpoint CSV.
METRICS_TO_SAVE = [
    # Acc of predicting 5th sentence out of 2000 from valid set.
    'valid_nolabel_acc',
    # Acc of predicting 5th sentence out of 2000 from train set.
    'train_subset_acc',
    'valid_spring2016_acc',  # Acc on 2016 Story Cloze task.
    'valid_winter2018_acc',  # Acc on 2018 Story Cloze task.
]
@gin.configurable('dataset')
def prepare_datasets(dataset_name=gin.REQUIRED,
                     shuffle_input_sentences=False,
                     num_eval_examples=2000,
                     batch_size=32):
    """Create batched, properly-formatted datasets from the TFDS datasets.

    Args:
      dataset_name: Name of TFDS dataset.
      shuffle_input_sentences: Unused during evaluation; kept for gin
        compatibility with the training configuration.
      num_eval_examples: Number of examples used during evaluation. For the
        nolabel splits this is also the number of distractor candidates.
      batch_size: Batch size.

    Returns:
      Tuple of (dict mapping split name to Dataset, dict of embedding
      matrices for the nolabel splits).
    """
    del shuffle_input_sentences  # unused at eval time

    split_spec = {
        'valid_nolabel': 'train[:2%]',
        'train_nolabel': 'train[2%:4%]',
        'valid2018': rocstories_sentence_embeddings.VALIDATION_2018,
        'valid2016': rocstories_sentence_embeddings.VALIDATION_2016,
    }
    datasets = tfds.load(
        dataset_name,
        data_dir=FLAGS.data_dir,
        split=split_spec,
        download=False)

    # The two "nolabel" splits are reshaped into the training-style format,
    # which also yields a candidate-embedding matrix per split.
    emb_matrices = {}
    for split in ('valid_nolabel', 'train_nolabel'):
        datasets[split], emb_matrices[split] = utils.build_train_style_dataset(
            datasets[split], batch_size, False,
            num_examples=num_eval_examples, is_training=False)

    # The official Story Cloze sets offer only two candidate endings per
    # example, so no embedding matrices are involved; use the full sets
    # (num_eval_examples is ignored here on purpose).
    for split in ('valid2018', 'valid2016'):
        datasets[split] = utils.build_validation_dataset(datasets[split])

    return datasets, emb_matrices
def eval_single_checkpoint(
        ckpt_name, output_path, model, datasets, embedding_matrices):
    """Run quantitative evaluation for one checkpoint and write a CSV row.

    Skips the checkpoint entirely if *output_path* already exists.

    Fix: the checkpoint name must embed the epoch and step as
    "...ep<epoch>_step<step>..."; a name that does not match now raises a
    descriptive ValueError instead of crashing with an opaque
    AttributeError on the None returned by re.search.
    """
    if gfile.exists(output_path):
        logging.info('Skipping already exists: "%s"', output_path)
        return
    metrics = model.create_metrics()
    logging.info('Evaluating: "%s"', ckpt_name)
    utils.do_evaluation(model, metrics, datasets, embedding_matrices)

    # Checkpoint names are expected to embed epoch/step as "ep<E>_step<S>".
    path_search = re.search(r'ep(\w+)_step(\w+)', ckpt_name)
    if path_search is None:
        raise ValueError(
            'Checkpoint name %r does not contain "ep<epoch>_step<step>"'
            % ckpt_name)
    epoch = int(path_search.group(1))
    step = int(path_search.group(2))

    to_write = collections.OrderedDict()
    to_write['checkpoint'] = ckpt_name
    to_write['epoch'] = epoch
    to_write['step'] = step
    for metric in metrics.values():
        if metric.name in METRICS_TO_SAVE:
            # Log to the active TF summary writer as well as the CSV row.
            tf.summary.scalar(metric.name, metric.result(), step=step)
            to_write[metric.name] = metric.result().numpy()
        metric.reset_states()

    # Save the results to a one-row CSV file.
    with gfile.GFile(output_path, 'w') as f:
        writer = csv.DictWriter(f, fieldnames=to_write.keys())
        writer.writeheader()
        writer.writerow(to_write)
def do_eval(checkpoint_paths, eval_dir, datasets,
            embedding_matrices, sharded_eval=False):
    """Run quantitative eval for every checkpoint in *checkpoint_paths*."""
    # Infer model dimensions from the 2018 validation dataset's shapes.
    output_shapes = tf.compat.v1.data.get_output_shapes(datasets['valid2018'])
    num_input_sentences = output_shapes[0][1]
    embedding_dim = output_shapes[0][2]

    for checkpoint_path in sorted(checkpoint_paths):
        checkpoint_name = os.path.splitext(os.path.basename(checkpoint_path))[0]
        logging.info('Processing checkpoint %s', checkpoint_name)
        model = models.build_model(
            num_input_sentences=num_input_sentences,
            embedding_dim=embedding_dim)
        restore_status = tf.train.Checkpoint(model=model).restore(
            checkpoint_path).expect_partial()
        restore_status.assert_nontrivial_match()

        if not sharded_eval:
            eval_path = os.path.join(eval_dir, '%s_metrics.csv' % checkpoint_name)
            eval_single_checkpoint(
                checkpoint_name, eval_path, model, datasets, embedding_matrices)
            continue

        # Sharded evaluation: split every dataset into 10 shards and write
        # one metrics file per shard (used to compute error bounds).
        num_shards = 10
        for shard_index in range(num_shards):
            shard_datasets = {
                name: ds.shard(num_shards, shard_index)
                for name, ds in datasets.items()
            }
            output_path = os.path.join(
                eval_dir, '%s_metrics_shard.%02d.csv' % (checkpoint_name, shard_index))
            eval_single_checkpoint(
                checkpoint_name, output_path, model,
                shard_datasets, embedding_matrices)
def create_single_results_file(eval_dir):
    """Merge per-checkpoint metric files into a single all_metrics.csv.

    Each per-checkpoint file is expected to hold a header line followed by
    one data line. Fix: files with fewer than two lines are now skipped —
    the previous implementation called ``next(f)`` unconditionally and
    raised StopIteration on empty or truncated files.
    """
    header = ''
    data_lines = []
    for fpath in gfile.glob(os.path.join(eval_dir, '*metrics*.csv')):
        if 'all_metrics' in fpath:
            continue  # skip a previously merged output file
        with gfile.GFile(fpath, 'r') as f:
            lines = f.readlines()
        if len(lines) < 2:
            continue  # empty or truncated metrics file
        header = lines[0]
        data_lines.append(lines[1])
    if data_lines:
        merged_metrics_file_path = os.path.join(eval_dir, 'all_metrics.csv')
        with gfile.GFile(merged_metrics_file_path, 'w') as f:
            f.write(header)
            for data_line in data_lines:
                f.write(data_line)
def run_eval():
    """Evaluate the ROCStories next-sentence prediction model."""
    base_dir = FLAGS.base_dir
    eval_dir = FLAGS.output_dir if FLAGS.output_dir else os.path.join(base_dir, 'eval')
    gfile.makedirs(eval_dir)

    datasets, embedding_matrices = prepare_datasets()

    if FLAGS.checkpoint_name is not None:
        # One-shot evaluation of a single named checkpoint.
        logging.info('Evaluating single checkpoint: %s', FLAGS.checkpoint_name)
        do_eval([os.path.join(base_dir, FLAGS.checkpoint_name)], eval_dir,
                datasets, embedding_matrices, FLAGS.sharded_eval)
    elif not FLAGS.continuous:
        # Evaluate every checkpoint currently on disk, then merge results.
        logging.info('Evaluating all checkpoints currently in %s', base_dir)
        index_files = gfile.glob(os.path.join(base_dir, '*ckpt*.index'))
        checkpoint_paths = [p.replace('.index', '') for p in index_files]
        do_eval(checkpoint_paths, eval_dir, datasets,
                embedding_matrices, FLAGS.sharded_eval)
        create_single_results_file(eval_dir)
    else:
        # Continuous mode: watch base_dir for new checkpoints until timeout.
        logging.info('Continuous evaluation in %s', base_dir)
        checkpoint_iter = tf.train.checkpoints_iterator(
            base_dir, timeout=FLAGS.timeout)
        summary_writer = tf.summary.create_file_writer(
            os.path.join(base_dir, 'summaries_eval'))
        with summary_writer.as_default():
            for checkpoint_path in checkpoint_iter:
                do_eval([checkpoint_path], eval_dir, datasets,
                        embedding_matrices, FLAGS.sharded_eval)
            # After the iterator finishes, merge everything evaluated so far.
            create_single_results_file(eval_dir)
            logging.info('Results written to %s', eval_dir)
def main(argv):
    del argv
    # The train job writes config.gin into base_dir; when running eval
    # concurrently, poll for up to ~10 minutes (10 tries x 60 s) before
    # giving up.
    gin_config_path = os.path.join(FLAGS.base_dir, 'config.gin')
    attempts = 0
    while not gfile.exists(gin_config_path):
        attempts += 1
        if attempts >= 10:
            raise ValueError('Could not find config.gin in "%s"' % FLAGS.base_dir)
        time.sleep(60)
    gin.parse_config_file(gin_config_path, skip_unknown=True)
    gin.finalize()
    run_eval()
if __name__ == '__main__':
    # absl entry point: parses the flags defined above, then calls main(argv).
    app.run(main)
| StarcoderdataPython |
6436787 | # coding=utf-8
import requests
import time,datetime
#import json
#import smtplib
#import hashlib
#import pymysql
#from datetime import datetime
import pandas as pd
N =5
keys = ['牛奶','床',]
#获得即时数据
def get_real_time_data():
    """Fetch the latest smzdm.com homepage deals and append them to a CSV.

    Fix: the module-level ``import json`` is commented out, so the previous
    ``json.loads(r.text)`` raised NameError at runtime; the response is now
    parsed with requests' built-in ``Response.json()``, which needs no
    extra import and is behaviorally equivalent here.
    """
    c_time = int(time.time())
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Host': 'www.smzdm.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
    }
    url = 'https://www.smzdm.com/homepage/json_more?timesort=' + str(c_time) + '&p=1'
    r = requests.get(url=url, headers=headers)
    # Parse the JSON payload and keep only the fields of interest.
    payload = r.json()  # replaces json.loads(r.text); json was never imported
    data = pd.DataFrame(payload['data'])
    data = data[['article_id', 'article_title', 'article_price', 'article_date', 'article_link']]
    # Stamp each row with the capture time.
    data['time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Append to the CSV on disk.
    # NOTE(review): mode='a' re-writes the column header on every call;
    # consider header=False after the first write if that becomes a problem.
    data.to_csv('D:/data_1/smzdm.csv', mode='a')
# Initial fetch so the CSV exists before post-processing.
get_real_time_data()

file_load = pd.read_csv('D:/data_1/smzdm.csv')
# Drop duplicate articles (keep the first occurrence).
file_load = file_load.drop_duplicates(subset='article_id', keep='first', inplace=False)
# Coerce the capture timestamp column to datetime.
file_load['time'] = pd.to_datetime(file_load['time'], errors='coerce')
# Keep only rows captured within the last N days.
N = 5
file_load = file_load[file_load['time'] + datetime.timedelta(days=N) > datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')]
# Collect titles containing any of the watched keywords.
result_titles = []
for key in keys:
    for title in file_load['article_title']:
        if type(title) is str:
            if title.find(key) != -1:
                result_titles.append(title)
# Join the matched titles back to their full rows.
result_title = pd.DataFrame(result_titles, columns=['article_title'])
result = result_title.join(file_load.set_index('article_title'), on='article_title', how='left')
# Deduplicate, reorder the columns, and persist the matches.
result = result.drop_duplicates(subset='article_id', keep='first', inplace=False)
result = result[['article_title', 'article_id', 'article_price', 'article_date', 'article_link', 'time']]
result.to_csv('D:/data_1/smzdm.csv')
fila = pd.read_csv('D:/data_1/smzdm.csv')
print(fila)

# Poll every 5 minutes: 24 outer x 20 inner iterations (~40 hours total).
# NOTE(review): original indentation was lost; the trailing read/print is
# reconstructed at the outer-loop level — confirm against the source repo.
for n in range(24):
    for n in range(20):
        print('开始运行程序')
        get_real_time_data()
        print('结束程序')
        time.sleep(300)
    fila = pd.read_csv('D:/data_1/smzdm.csv')
    print(fila)
| StarcoderdataPython |
4848678 | # Copyright (c) 2005-2006 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
import unittest
from Products.AlphaFlow.tests.AlphaFlowTestCase import AlphaFlowTestCase
from Products.AlphaFlow.interfaces import \
IProcess, IProcessVersion
from Products.AlphaFlow.process import Process, ProcessVersion
class ProcessTest(AlphaFlowTestCase):
    """Tests for the AlphaFlow Process/ProcessVersion content types."""

    # (interface, implementation) pairs whose conformance the base test
    # case verifies automatically.
    interfaces_to_test = [(IProcess, Process),
                          (IProcessVersion, ProcessVersion),
                          ]

    def test_process_revert(self):
        """revert() must require an editable version and be safe to repeat."""
        self.portal['foo'] = Process('foo')
        # Acquisition wrapping
        process = self.portal['foo']
        # No version exists yet: revert must fail.
        self.assertRaises(Exception, process.revert)
        base_version = process.editable(ProcessVersion())
        # An un-updated editable version still cannot be reverted.
        self.assertRaises(Exception, process.revert)
        process.update()
        self.assertEquals(None, process.editable())
        # Reverting with nothing editable is a no-op.
        process.revert()
        self.assertEquals(None, process.editable())
        # A new editable version based on the old one can be discarded.
        new_version = process.editable(base_version.getId())
        process.revert()
        self.assertEquals(None, process.editable())
        # Repeated revert remains a no-op.
        process.revert()
        self.assertEquals(None, process.editable())
def test_suite():
    """Build the unittest suite for this module's test cases."""
    return unittest.TestSuite([unittest.makeSuite(ProcessTest)])
| StarcoderdataPython |
4941254 | import os # to read environment variables and deal with directory stuff
import sys # for sys.exit only?
import argparse # for taking arguments
import httpx # to make http requests to JIRA and Slack
import json # for working with responses from Jira, etc.
import untangle # to pull out specific data from the workflow xmls
import parsedatetime # for parsing relative and human readable time descriptions
from git import Repo,Git # for git operations
from shutil import rmtree # for deleting work dir
from urllib.parse import quote_plus # converting workflow names to URL properly
from datetime import datetime,timedelta # working with dates and comparing them
from dotenv import load_dotenv # to set environment variables, mainly
# Pull configuration (git remote/branch/key path, Jira credentials) from .env.
load_dotenv()

# Command-line interface.
parser = argparse.ArgumentParser(description="Script to capture changes to workflows in Jira")
parser.add_argument('--firstrun', help="Gets ALL workflows and attempts to make initial commit", action='store_true')
parser.add_argument('--nocleanup', help="prevents deleting work dir after run", action='store_true')
args = parser.parse_args()

# SSH command forcing git to use the configured deploy key.
git_ssh_command = 'ssh -i %s' % os.getenv("gitkeypath")
def setupWorkdir():
    """Prepare ./work as a git checkout and return the Repo object.

    On --firstrun a fresh repository is initialised on the configured
    branch; otherwise the configured remote is cloned and the branch
    checked out. Any stale ./work directory is removed first.
    """
    if os.path.isdir("./work"):
        rmtree("./work")
    ssh_env = {"GIT_SSH_COMMAND": git_ssh_command}
    if args.firstrun:
        os.makedirs("work")
        repo = Repo.init("./work", env=ssh_env)
        repo.git.checkout('-b', os.getenv("gitbranch"))
    else:
        repo = Repo.clone_from(os.getenv("gitremote"), "./work", env=ssh_env)
        repo.git.checkout(os.getenv("gitbranch"))
    return repo
def commitChanges(repo):
    """Stage everything in ./work; commit and push when there are changes.

    On --firstrun: makes the initial commit, creates the 'origin' remote
    and the configured branch, and pushes with the deploy-key SSH command.
    On later runs a commit is made only when the index differs from HEAD,
    with a message listing each changed workflow and its last author.
    """
    repo.git.add(".")
    if (args.firstrun):
        repo.index.commit("initial commit")
        origin = repo.create_remote('origin', os.getenv("gitremote"))
        repo.create_head(os.getenv("gitbranch"))
        origin.push(os.getenv("gitbranch"), env={"GIT_SSH_COMMAND": git_ssh_command})
    elif (len(repo.index.diff("HEAD")) > 0):  # there exists changes
        changedFiles = repo.git.diff("HEAD", name_only=True).splitlines()
        commitMessage = "Updated Workflow Count: " + str(len(changedFiles)) + "\n\n"
        for f in changedFiles:
            # Workflow name is the file name minus ".xml"; the second <meta>
            # element is read as the change author — NOTE(review): confirm
            # against Jira's workflow XML schema.
            o = untangle.parse("./work/" + f)
            commitMessage += '"' + f[:-4] + '"' + " by " + o.workflow.meta[1].cdata + "\n"
        repo.index.commit(commitMessage)
        origin = repo.remote('origin')
        origin.push()
def cleanup():
    """Delete the ./work checkout unless --nocleanup was given."""
    if not args.nocleanup:
        rmtree("./work")
def getWorkflows():
    """Download the XML of recently modified Jira workflows into ./work.

    Queries the Jira workflow list, keeps workflows modified within the
    last day (all of them on --firstrun), then authenticates via websudo
    and fetches each workflow's XML export to ./work/<name>.xml.

    Fixes over the previous revision (redaction artifacts):
      * ``websudo_data`` contained a bare ``<PASSWORD>`` placeholder,
        which is a Python syntax error; it now sends the Jira password.
      * the anti-XSRF header value was likewise redacted; Atlassian form
        posts require ``X-Atlassian-Token: no-check``.
    """
    jira_url_base = os.getenv("jirabaseurl")
    jira_auth_user = os.getenv("jirauser")
    # NOTE(review): the env-var name was redacted upstream ("<PASSWORD>");
    # "jirapassword" matches the jirabaseurl/jirauser naming — confirm
    # against the .env file.
    jira_auth_password = os.getenv("jirapassword")

    workflows = []
    with httpx.Client(auth=(jira_auth_user, jira_auth_password)) as jiraclient:
        workflowList = jiraclient.get(jira_url_base + 'rest/api/2/workflow', timeout=10.0)
        workflows = json.loads(workflowList.text)

    # Jira may render lastModifiedDate as a relative, human-readable string;
    # parse each form back into a datetime. (Largely avoidable by disabling
    # relativized dates in Jira's configuration.)
    updated_workflows = []
    current_time = datetime.now()
    timeparser = parsedatetime.Calendar()
    for wf in workflows:
        modified_datetime = datetime.now()
        if args.firstrun:
            updated_workflows.append(wf['name'])
            continue
        elif wf['default']:
            # There is only one default workflow, and it is read only.
            continue
        elif "lastModifiedDate" not in wf:
            # Skip workflows that have never been modified.
            continue
        elif "now" in wf['lastModifiedDate']:  # the "Just now" case
            modified_datetime = datetime.now() - timedelta(minutes=1)
        elif "ago" in wf['lastModifiedDate']:  # "X minutes ago" / "Y hours ago"
            if "minute" in wf['lastModifiedDate'] or "hour" in wf['lastModifiedDate']:
                timestruct, status = timeparser.parse(wf['lastModifiedDate'])
                modified_datetime = datetime(*timestruct[:6])
            # otherwise keep the "now" default (treated as recently modified)
        elif "Yesterday" in wf['lastModifiedDate']:  # "Yesterday H:MM PM"
            timestruct, status = timeparser.parse(wf['lastModifiedDate'])
            modified_datetime = datetime(*timestruct[:6])
        else:
            modified_datetime = datetime.strptime(wf['lastModifiedDate'], '%d/%b/%y %I:%M %p')
        delta = current_time - modified_datetime
        if delta.days < 1:
            updated_workflows.append(wf['name'])

    # websudo is required to reach the admin-only workflow XML export pages;
    # "no-check" bypasses Atlassian's XSRF form-token requirement.
    jiraheaders = {"X-Atlassian-Token": "no-check"}
    with httpx.Client(auth=(jira_auth_user, jira_auth_password), headers=jiraheaders) as jirasudoclient:
        # Prime the session before elevating to websudo.
        myself = jirasudoclient.get(jira_url_base + 'rest/api/2/myself')
        websudo_headers = {"content-type": "application/x-www-form-urlencoded"}
        for wf in updated_workflows:
            websudo_data = {
                'webSudoPassword': jira_auth_password,
                'webSudoDestination': "/secure/admin/workflows/ViewWorkflowXml.jspa?workflowMode=live&workflowName=" + quote_plus(wf),
            }
            websudo = jirasudoclient.post(jira_url_base + 'secure/admin/WebSudoAuthenticate.jspa', headers=websudo_headers, data=websudo_data)
            # "/" is not a legal file-name character; mirror it to "_".
            with open("./work/" + wf.replace("/", "_") + ".xml", 'w') as file:
                file.write(websudo.text)
if __name__ == '__main__':
    try:
        # Full run: prepare the work dir, snapshot workflows, commit, clean up.
        repo = setupWorkdir()
        getWorkflows()
        commitChanges(repo)
        cleanup()
    except KeyboardInterrupt:
        print('keyboard interrupt')
| StarcoderdataPython |
1886142 | import numpy as np
import sys
import time
sys.path.append('../build')
from _C_flare import SparseGP_DTC, DotProductKernel, B2_Calculator, \
StructureDescriptor
# Load training data: AIMD trajectory frames for NiTi.
NiTi_data = np.load('NiTi_AIMD.npz')
positions = NiTi_data['positions']
forces = NiTi_data['forces']
cells = NiTi_data['cells']
stresses = NiTi_data['stresses']
energies = NiTi_data['energies']
species = [0, 1, 1, 0] * 8  # Ti, Ni, Ni, Ti

# Set up descriptor: B2 descriptor paired with a dot-product kernel.
cutoff = 5.0
sigma = 2.0
power = 2
kernel = DotProductKernel(sigma, power, 0)
cutoff_function = "quadratic"
many_body_cutoffs = [cutoff]
radial_basis = "chebyshev"
radial_hyps = [0., cutoff]
cutoff_hyps = []
# NOTE(review): meaning/order of these three settings (species count and
# basis sizes, presumably) should be confirmed against the _C_flare docs.
descriptor_settings = [2, 8, 3]
descriptor_calculator = \
    B2_Calculator(radial_basis, cutoff_function, radial_hyps, cutoff_hyps,
                  descriptor_settings, 0)

# Make test structure from a single trajectory frame.
test_frame = 3000
test_cell = cells[test_frame]
test_positions = positions[test_frame]

# Time descriptor calculation: rebuild the structure descriptor repeatedly
# and record the wall-clock time of each construction.
iterations = 100
store_times = np.zeros(iterations)
for n in range(iterations):
    time1 = time.time()
    test_structure = StructureDescriptor(
        test_cell, species, test_positions, cutoff,
        many_body_cutoffs, [descriptor_calculator]
    )
    time2 = time.time()
    store_times[n] = time2 - time1

print('mean: %.4f ms' % (np.mean(store_times) * 1e3))
print('std: %.4f ms' % (np.std(store_times) * 1e3))

# Historical results for reference:
# 8/29/20 (c741e3e3464b6f0dc3a4337e4e2c7b25574ed822)
# Laptop: 18.9(6) ms
# Tempo: 14.4(9) ms
1977615 | import unittest
import psp0
class PSP0TestCase(unittest.TestCase):
    """Tests for psp0's cons-cell linked list (nested (value, rest) tuples).

    Variable naming follows Spanish convention: ``esperado`` = expected,
    ``observado`` = observed.
    """

    def test_construir(self):
        """construir builds a (value, rest) cell and rejects non-numeric values."""
        esperado = (1, None)
        observado = psp0.construir(1)
        self.assertEqual(esperado, observado)
        esperado = (1.5, None)
        observado = psp0.construir(1.5)
        self.assertEqual(esperado, observado)
        esperado = (1, (2, None))
        observado = psp0.construir(1, psp0.construir(2))
        self.assertEqual(esperado, observado)
        # Non-numeric head values are rejected.
        self.assertRaises(ValueError, psp0.construir, "a")

    def test_lista(self):
        """lista chains its arguments into cells; no arguments yields None."""
        esperado = None
        observado = psp0.lista()
        self.assertEqual(esperado, observado)
        esperado = (1, None)
        observado = psp0.lista(1)
        self.assertEqual(esperado, observado)
        esperado = (1, (2, None))
        observado = psp0.lista(1, 2)
        self.assertEqual(esperado, observado)

    def test_primero(self):
        """primero returns the head of the list."""
        l = psp0.lista(1, 2, 3)
        esperado = 1
        observado = psp0.primero(l)
        self.assertEqual(esperado, observado)

    def test_resto(self):
        """resto returns the tail of the list."""
        l = psp0.lista(1, 2, 3)
        esperado = psp0.lista(2, 3)
        observado = psp0.resto(l)
        self.assertEqual(esperado, observado)

    def test_vacia(self):
        """vacia is True only for the empty list."""
        l = psp0.lista()
        esperado = True
        observado = psp0.vacia(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(1)
        esperado = False
        observado = psp0.vacia(l)
        self.assertEqual(esperado, observado)

    def test_largo(self):
        """largo counts the elements of the list."""
        l = psp0.lista()
        esperado = 0
        observado = psp0.largo(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(1, 2, 3)
        esperado = 3
        observado = psp0.largo(l)
        self.assertEqual(esperado, observado)

    def test_sumar(self):
        """sumar totals the elements; the empty list sums to 0."""
        l = psp0.lista()
        esperado = 0
        observado = psp0.sumar(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(1, 2, 3)
        esperado = 6
        observado = psp0.sumar(l)
        self.assertEqual(esperado, observado)

    def test_mapear(self):
        """mapear applies a function element-wise, building a new list."""
        l = psp0.lista(1, 2, 3)
        esperado = psp0.lista(1, 4, 9)
        observado = psp0.mapear(lambda x: x * x, l)
        self.assertEqual(esperado, observado)
        esperado = psp0.lista(0, 1, 2)
        observado = psp0.mapear(lambda x: x - 1, l)
        self.assertEqual(esperado, observado)

    def test_promedio(self):
        """promedio returns the arithmetic mean (0 for the empty list)."""
        l = psp0.lista()
        esperado = 0
        observado = psp0.promedio(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(5)
        esperado = 5
        observado = psp0.promedio(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(1, 1.5)
        esperado = 1.25
        observado = psp0.promedio(l)
        self.assertEqual(esperado, observado)
        # Data sets from the PSP0 exercise (expected values are exact).
        l = psp0.lista(186, 699, 132, 272, 291, 331, 199, 1890, 788, 1601)
        esperado = 638.9
        observado = psp0.promedio(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(160, 591, 114, 229, 230, 270, 128, 1657, 624, 1503)
        esperado = 550.6
        observado = psp0.promedio(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(15.0, 69.9, 6.5, 22.4, 28.4, 65.9, 19.4, 198.7, 38.8, 138.2)
        esperado = 60.31999999999999
        observado = psp0.promedio(l)
        self.assertEqual(esperado, observado)

    def test_desviacion(self):
        """desviacion returns the standard deviation (0 for empty/singleton)."""
        l = psp0.lista()
        esperado = 0
        observado = psp0.desviacion(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(5)
        esperado = 0
        observado = psp0.desviacion(l)
        self.assertEqual(esperado, observado)
        # Data sets from the PSP0 exercise (expected values are exact).
        l = psp0.lista(186, 699, 132, 272, 291, 331, 199, 1890, 788, 1601)
        esperado = 625.6339806770231
        observado = psp0.desviacion(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(160, 591, 114, 229, 230, 270, 128, 1657, 624, 1503)
        esperado = 572.0268447469149
        observado = psp0.desviacion(l)
        self.assertEqual(esperado, observado)
        l = psp0.lista(15.0, 69.9, 6.5, 22.4, 28.4, 65.9, 19.4, 198.7, 38.8, 138.2)
        esperado = 62.25583060601187
        observado = psp0.desviacion(l)
        self.assertEqual(esperado, observado)
| StarcoderdataPython |
6699016 | <reponame>django-doctor/lite-api<gh_stars>0
from django.utils import timezone
from rest_framework import serializers
from rest_framework.fields import CharField
from rest_framework.relations import PrimaryKeyRelatedField
from api.applications.enums import (
ApplicationExportType,
ApplicationExportLicenceOfficialType,
)
from api.applications.libraries.get_applications import get_application
from api.applications.models import BaseApplication, ApplicationDenialReason, ApplicationDocument
from api.applications.serializers.document import ApplicationDocumentSerializer
from api.cases.enums import CaseTypeSubTypeEnum
from api.cases.models import CaseType
from api.core.helpers import get_value_from_enum
from api.core.serializers import KeyValueChoiceField
from api.gov_users.serializers import GovUserSimpleSerializer
from lite_content.lite_api import strings
from api.organisations.models import Organisation, Site, ExternalLocation
from api.organisations.serializers import OrganisationDetailSerializer, ExternalLocationSerializer, SiteListSerializer
from api.parties.serializers import PartySerializer
from api.staticdata.denial_reasons.models import DenialReason
from api.staticdata.statuses.enums import CaseStatusEnum
from api.staticdata.statuses.libraries.get_case_status import (
get_status_value_from_case_status_enum,
get_case_status_by_status,
)
from api.staticdata.statuses.models import CaseStatus
from api.users.libraries.notifications import get_exporter_user_notification_individual_count
from api.users.models import ExporterUser
class TinyCaseTypeSerializer(serializers.ModelSerializer):
    """Minimal case-type representation: exposes only the read-only sub_type."""
    sub_type = KeyValueChoiceField(choices=CaseTypeSubTypeEnum.choices)
    class Meta:
        model = CaseType
        fields = ("sub_type",)
        read_only_fields = fields
class GenericApplicationListSerializer(serializers.Serializer):
    """Lightweight read-only serializer for application list endpoints."""
    id = serializers.UUIDField()
    name = serializers.CharField()
    case_type = TinyCaseTypeSerializer()
    status = serializers.SerializerMethodField()
    updated_at = serializers.DateTimeField()
    reference_code = serializers.CharField()
    export_type = serializers.SerializerMethodField()
    def get_status(self, instance):
        # {key, value} pair; implicitly None when the application has no status.
        if instance.status:
            return {
                "key": instance.status.status,
                "value": get_status_value_from_case_status_enum(instance.status.status),
            }
    def get_export_type(self, instance):
        # Only some application types carry a non-empty export_type; None otherwise.
        if hasattr(instance, "export_type") and getattr(instance, "export_type"):
            return {
                "key": instance.export_type,
                "value": get_value_from_enum(instance.export_type, ApplicationExportType),
            }
class GenericApplicationViewSerializer(serializers.ModelSerializer):
    """Detailed read serializer for an application, shared across case sub-types."""
    name = CharField(
        max_length=100,
        required=True,
        allow_blank=False,
        allow_null=False,
        error_messages={"blank": strings.Applications.Generic.MISSING_REFERENCE_NAME_ERROR},
    )
    case_type = serializers.SerializerMethodField()
    export_type = serializers.SerializerMethodField()
    status = serializers.SerializerMethodField()
    organisation = OrganisationDetailSerializer()
    case = serializers.SerializerMethodField()
    exporter_user_notification_count = serializers.SerializerMethodField()
    is_major_editable = serializers.SerializerMethodField(required=False)
    goods_locations = serializers.SerializerMethodField()
    case_officer = GovUserSimpleSerializer()
    submitted_by = serializers.SerializerMethodField()
    class Meta:
        model = BaseApplication
        fields = (
            "id",
            "name",
            "organisation",
            "case_type",
            "export_type",
            "created_at",
            "updated_at",
            "submitted_at",
            "submitted_by",
            "status",
            "case",
            "exporter_user_notification_count",
            "reference_code",
            "is_major_editable",
            "goods_locations",
            "case_officer",
            "foi_reason",
        )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Requesting exporter user / organisation come in via the context;
        # both are optional (absent for non-exporter callers).
        self.exporter_user = kwargs.get("context").get("exporter_user") if "context" in kwargs else None
        self.organisation_id = kwargs.get("context").get("organisation_id") if "context" in kwargs else None
        # Notification counts only apply to exporter users; drop the field otherwise.
        if not isinstance(self.exporter_user, ExporterUser):
            self.fields.pop("exporter_user_notification_count")
    def get_submitted_by(self, instance):
        # Full name of the submitting user, or "" for unsubmitted drafts.
        return f"{instance.submitted_by.first_name} {instance.submitted_by.last_name}" if instance.submitted_by else ""
    def get_export_type(self, instance):
        # Re-fetch as the concrete application subclass; export_type only
        # exists on some application types.
        instance = get_application(instance.pk)
        if hasattr(instance, "export_type"):
            return {
                "key": instance.export_type,
                "value": get_value_from_enum(instance.export_type, ApplicationExportType),
            }
    def get_status(self, instance):
        # {key, value} pair; implicitly None when no status is set.
        if instance.status:
            return {
                "key": instance.status.status,
                "value": get_status_value_from_case_status_enum(instance.status.status),
            }
    def get_case_type(self, instance):
        # Imported locally to avoid a circular import with api.cases.
        from api.cases.serializers import CaseTypeSerializer
        return CaseTypeSerializer(instance.case_type).data
    def get_case(self, instance):
        # The related case shares the application's primary key.
        return instance.pk
    def get_exporter_user_notification_count(self, instance):
        return get_exporter_user_notification_individual_count(
            exporter_user=self.exporter_user, organisation_id=self.organisation_id, case=instance,
        )
    def get_is_major_editable(self, instance):
        return instance.is_major_editable()
    def get_goods_locations(self, application):
        # Sites take precedence; fall back to external locations, else {}.
        sites = Site.objects.filter(sites_on_application__application=application)
        if sites:
            serializer = SiteListSerializer(sites, many=True)
            return {"type": "sites", "data": serializer.data}
        external_locations = ExternalLocation.objects.filter(external_locations_on_application__application=application)
        if external_locations:
            serializer = ExternalLocationSerializer(external_locations, many=True)
            return {"type": "external_locations", "data": serializer.data}
        return {}
    def get_destinations(self, application):
        # Not listed in Meta.fields above; presumably consumed by subclasses
        # that expose a "destinations" field — confirm against subclasses.
        if getattr(application, "end_user", None):
            serializer = PartySerializer(application.end_user.party)
            return {"type": "end_user", "data": serializer.data}
        else:
            return {"type": "end_user", "data": ""}
    def get_additional_documents(self, instance):
        # Not listed in Meta.fields above; presumably used by subclasses.
        documents = ApplicationDocument.objects.filter(application=instance).order_by("created_at")
        return ApplicationDocumentSerializer(documents, many=True).data
class GenericApplicationCreateSerializer(serializers.ModelSerializer):
    """Create serializer shared by all application types.

    The case type and owning organisation are injected server-side rather
    than taken from user input; new applications always start as DRAFT.
    """
    def __init__(self, case_type_id, **kwargs):
        super().__init__(**kwargs)
        # Inject server-supplied values into the submitted data so the
        # related fields below validate against them.
        # NOTE(review): `self.context` is used as an Organisation instance
        # here (hence `.id`), not the usual DRF context dict — confirm
        # against the calling views.
        self.initial_data["case_type"] = case_type_id
        self.initial_data["organisation"] = self.context.id
    name = CharField(
        max_length=100,
        required=True,
        allow_blank=False,
        allow_null=False,
        error_messages={"blank": strings.Applications.Generic.MISSING_REFERENCE_NAME_ERROR},
    )
    case_type = PrimaryKeyRelatedField(
        queryset=CaseType.objects.all(), error_messages={"required": strings.Applications.Generic.NO_LICENCE_TYPE},
    )
    organisation = PrimaryKeyRelatedField(queryset=Organisation.objects.all())
    class Meta:
        model = BaseApplication
        fields = (
            "id",
            "name",
            "case_type",
            "organisation",
        )
    def create(self, validated_data):
        # Every new application starts in DRAFT regardless of input.
        validated_data["status"] = get_case_status_by_status(CaseStatusEnum.DRAFT)
        return super().create(validated_data)
class GenericApplicationUpdateSerializer(serializers.ModelSerializer):
    """Serializer for editing an application's name/status and recording
    denial reasons."""
    name = CharField(
        max_length=100,
        required=True,
        allow_blank=False,
        allow_null=False,
        error_messages={"blank": strings.Applications.Generic.MISSING_REFERENCE_NAME_ERROR},
    )
    reasons = serializers.PrimaryKeyRelatedField(queryset=DenialReason.objects.all(), many=True, write_only=True)
    reason_details = serializers.CharField(required=False, allow_blank=True)
    status = serializers.PrimaryKeyRelatedField(queryset=CaseStatus.objects.all())
    class Meta:
        model = BaseApplication
        fields = (
            "name",
            "status",
            "reasons",
            "reason_details",
        )
    def update(self, instance, validated_data):
        """
        Update and return an existing `Application` instance, given the validated data.
        """
        # NOTE(review): these manual assignments look redundant — the
        # super().update() call below applies validated_data too. Confirm
        # before removing.
        instance.name = validated_data.get("name", instance.name)
        instance.status = validated_data.get("status", instance.status)
        # NOTE(review): clearance_level is not declared in Meta.fields, so it
        # is presumably never present in validated_data here — confirm.
        instance.clearance_level = validated_data.get("clearance_level", instance.clearance_level)
        # Remove any previous denial reasons
        if validated_data.get("status") == get_case_status_by_status(CaseStatusEnum.FINALISED):
            ApplicationDenialReason.objects.filter(application=get_application(instance.id)).delete()
            instance.last_closed_at = timezone.now()
        instance = super().update(instance, validated_data)
        return instance
class GenericApplicationCopySerializer(serializers.ModelSerializer):
    """
    Serializer for copying applications that can handle any application type
    This is only used to verify the fields are correct that the user passes in, we then process the rest of the
    copy after validation
    """
    name = serializers.CharField(allow_null=False, allow_blank=False)
    have_you_been_informed = serializers.CharField(required=False, allow_null=True, allow_blank=True)
    reference_number_on_information_form = serializers.CharField(
        required=False, allow_null=True, allow_blank=True, max_length=255
    )
    class Meta:
        model = BaseApplication
        fields = (
            "name",
            "have_you_been_informed",
            "reference_number_on_information_form",
        )
    def __init__(self, context=None, *args, **kwargs):
        """Tighten field requirements for standard (SIEL) applications.

        For standard applications the exporter must answer
        have_you_been_informed; if the answer is YES, a reference number
        becomes mandatory as well.
        """
        # `data` may be absent when the serializer is instantiated without
        # input (e.g. for output); fall back to an empty dict instead of
        # calling .get() on None, which raised AttributeError previously.
        data = kwargs.get("data") or {}
        if context and context.get("application_type").sub_type == CaseTypeSubTypeEnum.STANDARD:
            self.fields["have_you_been_informed"] = KeyValueChoiceField(
                required=True,
                choices=ApplicationExportLicenceOfficialType.choices,
                error_messages={"required": strings.Goods.INFORMED},
            )
            if data.get("have_you_been_informed") == ApplicationExportLicenceOfficialType.YES:
                self.fields["reference_number_on_information_form"] = serializers.CharField(
                    required=True, allow_blank=True, max_length=255
                )
        super().__init__(*args, **kwargs)
| StarcoderdataPython |
5103668 | # This program deletes the element 'Lithium' from a given tuple of elements
# Collection of elements
# Collection of elements: alternating (name, atomic number) pairs.
elements = (
    "Hydrogen", 1,
    "Carbon", 6,
    "Lithium", 3,
    "Titanium", 22,
    "Iron", 26,
    "Helium", 2,
    "Potassium", 19,
    "Calcium", 20,
)
# -1 is a sentinel meaning 'Lithium' was not found.
index = -1
print("Before deleting 'Lithium'", elements)
# Look for 'Lithium'; record its position and stop at the first match.
for i, element in enumerate(elements):
    if element == "Lithium":
        index = i
        break
# If found, rebuild the tuple without the name and its atomic number.
# (The original sliced with the loop variable `i`, which only worked by
# coincidence because the loop breaks at the match; slice with `index`.)
if index != -1:
    elements = elements[:index] + elements[index + 2:]
print("After deleting 'Lithium'", elements)
9626837 | <gh_stars>0
# -*- coding: utf-8 -*-
import os
import json
import xmltodict
from io import BytesIO
from module.core.service.crawler import CrawlHandler
from ontology import Ontology, Document
from command import Command
from queue import Scanner
from error import *
class Mp4Handler(CrawlHandler):
    """Crawl handler that extracts MP4 metadata by shelling out to mediainfo."""
    def __init__(self, resolver, node):
        CrawlHandler.__init__(self, resolver, node)
    def preprocess_mediainfo(self, element):
        """Recursively normalise a parsed mediainfo XML tree in place.

        Drops bulky keys (embedded cover art), splits '/'-separated
        multi-value fields into lists, and parses 'key=value / ...'
        encoder settings into a dict. Returns the (mutated) element.
        """
        if isinstance(element, dict):
            if '@type' in element:
                # NOTE(review): these per-track-type branches are all
                # placeholders — no type-specific handling is implemented yet.
                if element['@type'] == 'General':
                    pass
                elif element['@type'] == 'Video':
                    if element['Format'] == 'JPEG':
                        pass
                    else:
                        pass
                elif element['@type'] == 'Audio':
                    pass
                elif element['@type'] == 'Text':
                    if element['Format'] == 'Timed Text':
                        pass
                    elif element['Format'] == 'Apple text':
                        pass
            # list() snapshot so keys can be deleted while iterating.
            for k in list(element.keys()):
                if k in [ 'Cover_Data' ]:
                    # Embedded cover art is large and not useful as metadata.
                    del element[k]
                elif k in [ 'Actor', 'Director', 'ScreenplayBy', 'Channel_s_', 'ChannelPositions', 'Format_Profile' ]:
                    # '/'-separated multi-value fields become lists.
                    element[k] = [ v.strip() for v in element[k].split('/') ]
                elif k in [ 'Encoded_Library_Settings']:
                    # 'key=value / key=value' encoder settings become a dict.
                    element[k] = dict([ v.strip().split('=') for v in element[k].split('/') ])
                else:
                    element[k] = self.preprocess_mediainfo(element[k])
        elif isinstance(element, list):
            for index, o in enumerate(element):
                element[index] = self.preprocess_mediainfo(o)
        return element
    def crawl(self, query):
        """Run mediainfo on the query's file and interpret each stream track."""
        self.log.debug('crawling %s', query.location['path'])
        mediainfo = Command('mediainfo', query.context)
        mediainfo.ontology['mediainfo full'] = True
        mediainfo.ontology['mediainfo output'] = 'XML'
        mediainfo.ontology['mediainfo language'] = 'raw'
        mediainfo.ontology['positional'] = [ query.location['path'] ]
        mediainfo.execute()
        if mediainfo.returncode == 0 and mediainfo.output is not None:
            if mediainfo.output:
                # print(mediainfo.output)
                content = BytesIO(mediainfo.output.encode('utf8'))
                document = xmltodict.parse(content)
                document = self.preprocess_mediainfo(document)
                if 'Mediainfo' in document and 'File' in document['Mediainfo']:
                    document = document['Mediainfo']['File']
                    # NOTE(review): bare print() calls below look like leftover
                    # debugging output — consider routing through self.log.
                    print(self.env.to_json(document))
                    for track in document['track']:
                        stream_type = self.env.enumeration['mediainfo stream type'].search(track['@type'])
                        print(stream_type.node['namespace'])
                        stream = Ontology(self.env, stream_type.node['namespace'])
                        stream.interpret(track, 'mediainfo')
                        print(self.env.to_json(stream))
| StarcoderdataPython |
4964575 | from . import RegexMatcher
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    import sys
    def usage():
        # Print CLI help and exit with a conventional usage-error code.
        print(r"Usage: python -m vhdre <entity-name> <regex> ... [-- <test-string> ...]")
        print(r"")
        print(r"Generates a file by the name <entity-name>.vhd in the working directory")
        print(r"which matches against the given regular expressions. If one or more test")
        print(r"strings are provided, a testbench by the name <entity-name>_tb.vhd is")
        print(r"also generated. To insert a unicode code point, use {0xHHHHHH:u}. To")
        print(r"insert a raw byte (for instance to check error handling) use {0xHH:b}.")
        print(r"{{ and }} can be used for matching { and } literally.")
        sys.exit(2)
    # At least an entity name and one regex are required.
    if len(sys.argv) < 3:
        usage()
    # Figure out where the -- is (if it exists). The scan starts at argv[3]
    # because argv[1]/argv[2] are always the entity name and first regex.
    # NOTE(review): no break here, so the LAST "--" wins — confirm intended.
    split = len(sys.argv)
    for i, arg in enumerate(sys.argv[3:]):
        if arg == "--":
            split = i + 3
    # Generate the matcher.
    matcher = RegexMatcher(*sys.argv[1:split])
    # Generate the main file.
    vhd = str(matcher)
    with open(sys.argv[1] + ".vhd", "w") as f:
        f.write(vhd)
    # Generate the testbench if desired.
    vectors = sys.argv[split + 1:]
    if vectors:
        vhd_tb = matcher.testbench(vectors)
        with open(sys.argv[1] + "_tb.vhd", "w") as f:
            f.write(vhd_tb)
241523 | import csv
import datetime
from django.utils.safestring import mark_safe
from django.contrib import admin
from django.http import HttpResponse
from .models import Employee, Customer, Assignment, Task, Leave
def export_to_csv(modeladmin, request, queryset):
    """Admin action: export the selected queryset as a CSV attachment.

    Writes one header row of verbose field names, then one row per object.
    Many-to-many and reverse (one-to-many) relations are skipped because
    they do not map to a single cell.
    """
    opts = modeladmin.model._meta
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment;'\
        'filename={}.csv'.format(opts.verbose_name)
    writer = csv.writer(response)
    fields = [field for field in opts.get_fields() if not field.many_to_many
              and not field.one_to_many]
    # Write a first row with header information
    writer.writerow([field.verbose_name for field in fields])
    # Write data rows
    for obj in queryset:
        data_row = []
        for field in fields:
            value = getattr(obj, field.name)
            # Format date/datetime values uniformly as dd/mm/YYYY.
            # datetime.date also matches datetime.datetime instances, so
            # plain DateFields are formatted too (the original check for
            # datetime.datetime missed them).
            if isinstance(value, datetime.date):
                value = value.strftime('%d/%m/%Y')
            data_row.append(value)
        writer.writerow(data_row)
    return response
export_to_csv.short_description = 'Export to CSV'
@admin.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
    """Admin listing for customers, with CSV export."""
    list_display = ['id','user', 'residence', 'phone','id_no']
    list_filter = ['created','gender','residence']
    list_per_page = 20
    actions = [export_to_csv]
    # fields = ('cust_type','name', 'address', 'phone','id_number' )
    # def save_model(self, request, obj, form, change):
    # if not change:
    # obj.added_by = request.user
    # obj.save()
@admin.register(Employee)
class EmployeeAdmin(admin.ModelAdmin):
    """Admin listing for employees, with CSV export."""
    list_display = ['id','user', 'residence', 'phone','id_no']
    list_filter = ['created','gender','residence']
    list_per_page = 20
    actions = [export_to_csv]
@admin.register(Assignment)
class AssignmentAdmin(admin.ModelAdmin):
    """Admin listing for task assignments, with CSV export."""
    list_display = ['id','assignee', 'task']
    list_filter = ['created',]
    list_per_page = 20
    actions = [export_to_csv]
@admin.register(Task)
class TaskAdmin(admin.ModelAdmin):
    """Admin listing for tasks, with CSV export."""
    list_display = ['id','title', 'due_date','paid','done','amount','owner']
    list_filter = ['created',]
    list_per_page = 20
    actions = [export_to_csv]
@admin.register(Leave)
class LeaveAdmin(admin.ModelAdmin):
    """Admin listing for employee leave records, with CSV export."""
    list_display = ['id','employee','from_date','to_date']
    list_filter = ['created',]
    list_per_page = 20
    actions = [export_to_csv]
11321339 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Figure 1: Isotropic Hernquist DF.
Created: May 2021
Author: <NAME>
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import copy
from os.path import exists
sys.path.append("../src")
from constants import M_sun, kpc, G, pi
from hernquist import calc_DF_iso
from ml import load_flow, calc_DF_ensemble
def get_f_exact(rgrid, vgrid, M, a):
    """Get exact Hernquist DF evaluated on r/v grids.

    rgrid/vgrid are square meshgrids of radius and speed bin centres;
    M and a are the Hernquist mass and scale radius. Returns the expected
    fraction of particles per (r, v) bin, shape (N_bins, N_bins).
    """
    # deproject grids into 6D: place each (r, v) sample along the x-axis
    N_bins = rgrid.shape[0]
    dr = np.diff(rgrid[0, :], axis=0)[0]
    dv = np.diff(vgrid[:, 0], axis=0)[0]
    x = rgrid.reshape(N_bins**2)
    vx = vgrid.reshape(N_bins**2)
    zeroes = np.zeros_like(x)
    q = np.stack((x, zeroes, zeroes), axis=-1)
    p = np.stack((vx, zeroes, zeroes), axis=-1)
    # evaluate DF
    f_exact = calc_DF_iso(q, p, M, a).reshape(N_bins, N_bins)
    # renormalise; expected fraction of particles in each bin
    f_exact = 16 * pi**2 * f_exact * rgrid**2 * vgrid**2 * dr * dv
    return f_exact
def get_f_data(r_bin_edges, v_bin_edges):
    """Get mock Hernquist sample histogrammed into r v grids.

    Bin edges are in SI units; they are converted to kpc and km/s to match
    the units used in the histogram. Returns the fraction of the 10^6
    mock particles per bin, shape (N_bins, N_bins).
    """
    # load data
    data = np.load('../data/hq_iso_orig.npz')
    pos = data['pos']
    vel = data['vel']
    # get histogram
    r = np.linalg.norm(pos, axis=-1)
    v = np.linalg.norm(vel, axis=-1)
    bins = [r_bin_edges / kpc, v_bin_edges / 1000]
    H, _, _ = np.histogram2d(r / kpc, v / 1000, bins=bins)
    # transpose so rows index speed, matching the meshgrid orientation;
    # divide by the 10^6 sample size to get a fraction
    f_data = H.T / 1e+6
    return f_data
def get_f_model(rgrid, vgrid, M, a):
    """Get reconstructed Hernquist DF evaluated on r/v grids.

    Evaluates an ensemble of 30 trained normalising flows and returns the
    expected fraction of particles per (r, v) bin, shape (N_bins, N_bins).
    """
    # deproject grids into 6D: place each (r, v) sample along the x-axis
    N_bins = rgrid.shape[0]
    dr = np.diff(rgrid[0, :], axis=0)[0]
    dv = np.diff(vgrid[:, 0], axis=0)[0]
    x = rgrid.reshape(N_bins**2)
    vx = vgrid.reshape(N_bins**2)
    zeroes = np.zeros_like(x)
    q = np.stack((x, zeroes, zeroes), axis=-1)
    p = np.stack((vx, zeroes, zeroes), axis=-1)
    # units: flows were trained on position/velocity rescaled by u_q/u_p,
    # so the DF value must be divided by u_f = u_q^3 u_p^3
    u_q = 10 * a
    u_p = np.sqrt(2 * G * M / a)
    u_f = u_q**3 * u_p**3
    # load flows
    n_flows = 30
    flows = []
    for i in range(n_flows):
        fname = f"../nflow_models/hq_iso_orig/{i}_best.pth"
        flows.append(load_flow(fname, 6, 8, 64))
    # evaluate DF
    f_model = calc_DF_ensemble(q, p, u_q, u_p, flows).reshape(N_bins, N_bins)
    # renormalise; expected fraction of particles in each bin
    f_model = 16 * pi**2 * f_model * rgrid**2 * vgrid**2 * dr * dv / u_f
    return f_model
if __name__ == '__main__':
    # Hernquist params and scaling units
    M = 1e+10 * M_sun
    a = 5 * kpc
    # NOTE(review): u_pos and u_vel are unused in this script body
    # (get_f_model recomputes its own units) — candidates for removal.
    u_pos = 10 * a
    u_vel = np.sqrt(2 * G * M / a)
    # grid dims
    r_max = 5.5 * a
    v_max = np.sqrt(2 * G * M / a)
    N_bins = 128
    # check if plot data exists, otherwise generate (the generate branch is
    # expensive: it evaluates the flow ensemble on the full grid)
    dfile = "fig1_data.npz"
    if not exists(dfile):
        # define r/v bins in which to evaluate DF
        r_bin_edges = np.linspace(0, r_max, N_bins + 1)
        v_bin_edges = np.linspace(0, v_max, N_bins + 1)
        r_cen = 0.5 * (r_bin_edges[1:] + r_bin_edges[:-1])
        v_cen = 0.5 * (v_bin_edges[1:] + v_bin_edges[:-1])
        rgrid, vgrid = np.meshgrid(r_cen, v_cen)
        dr = r_max / N_bins
        dv = v_max / N_bins
        # f_ref: normalising value, the DF at (r=a, v=v_max/4)
        x0 = np.array([a, 0, 0])
        v0 = np.array([v_max / 4, 0, 0])
        f_ref = calc_DF_iso(x0, v0, M, a)
        f_ref = 16 * pi**2 * f_ref * a**2 * (v_max / 4)**2 * dr * dv
        # get various DFs
        f_exact = get_f_exact(rgrid, vgrid, M, a) / f_ref
        f_data = get_f_data(r_bin_edges, v_bin_edges) / f_ref
        f_model = get_f_model(rgrid, vgrid, M, a) / f_ref
        # calculate residuals (empty bins divide by zero; suppress warnings)
        with np.errstate(divide='ignore', invalid='ignore'):
            res = np.divide((f_model - f_exact), f_exact)
        # save data file
        np.savez(
            dfile, f_exact=f_exact, f_data=f_data, f_model=f_model, res=res
        )
    else:
        # load data file
        data = np.load(dfile)
        f_exact = data['f_exact']
        f_model = data['f_model']
        f_data = data['f_data']
        res = data['res']
    # set up figure
    fig = plt.figure(figsize=(6.9, 3), dpi=150)
    left = 0.065
    right = 0.98
    bottom = 0.125
    top = 0.83
    dX = (right - left) / 4
    dY = (top - bottom)
    CdY = 0.05
    # plot settings
    plt.rcParams['text.usetex'] = True
    plt.rcParams['font.family'] = 'serif'
    plt.rcParams['font.size'] = 9
    plt.rcParams['ytick.labelsize'] = 8
    plt.rcParams['xtick.labelsize'] = 8
    labels = ['Exact', 'Data', 'Model', 'Residuals']
    cmap = copy.copy(plt.cm.bone)
    cmap.set_under('white')
    vmin = 0.00001
    vmax = 1.3
    extent = [0, r_max / a, 0, 1]
    iargs1 = {'origin': 'lower', 'cmap': cmap, 'vmin': vmin, 'vmax': vmax,
              'extent': extent, 'aspect': 'auto'}
    iargs2 = {'origin': 'lower', 'extent': extent, 'vmin': -0.75, 'vmax': 0.75,
              'cmap': 'Spectral_r', 'aspect': 'auto'}
    # loop over panels: exact, data, model share one colour scale;
    # the residual panel uses its own diverging scale
    for i in range(4):
        # set up axes
        ax = fig.add_axes([left + i * dX, top - dY, dX, dY])
        # get relevant DF
        if i == 0:
            f = np.copy(f_exact)
        elif i == 1:
            f = np.copy(f_data)
        elif i == 2:
            f = np.copy(f_model)
        else:
            f = np.copy(res)
        # plot DF
        if i == 3:
            im1 = ax.imshow(res, **iargs2)
        else:
            im0 = ax.imshow(f, **iargs1)
        # text
        ax.text(0.97, 0.96, labels[i], ha='right', va='top',
                transform=ax.transAxes)
        # ticks, axis labels etc.
        ax.tick_params(top=True, right=True, direction='inout')
        if i == 0:
            ax.set_ylabel(r"$v\ /\ v_\mathrm{esc}(r=0)$")
        else:
            ax.tick_params(labelleft=False)
        if i == 2:
            ax.set_xlabel(r"$r\ /\ a$")
            ax.xaxis.set_label_coords(0, -0.1)
    # colourbars
    cax0 = fig.add_axes([left, top, 3 * dX, CdY])
    cax1 = fig.add_axes([left + 3 * dX, top, dX, CdY])
    plt.colorbar(im0, cax=cax0, orientation='horizontal')
    plt.colorbar(im1, cax=cax1, orientation='horizontal')
    cax0.set_xlabel(r"$F / F_\mathrm{ref}$")
    cax1.set_xlabel(r"Model / Exact - 1")
    for cax in [cax0, cax1]:
        cax.xaxis.set_ticks_position('top')
        cax.xaxis.set_label_position('top')
    # save
    fig.savefig("fig1_iso.pdf")
| StarcoderdataPython |
3390624 | <reponame>Mathiasn21/Traffic_Sign_ML_final_project
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from data_loading import signs
# Load training data from source
data, labels = signs.training_data()
classes = 43  # Total number of traffic sign classes
# Splitting data into training and test data. Does shuffle before split in order to increase randomness in the data.
# Specifying the random state allows for reproducibility.
# NOTE(review): naming is inconsistent (x_train lowercase vs X_test
# uppercase); unifying would require touching every use below.
x_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=42)
# Convert labels into one hot encoding
y_train = to_categorical(y_train, classes)
y_test = to_categorical(y_test, classes)
# Function used to build and compile CNN models with varying optimizers
def build_models_diff_optimizers():
    """Build and compile one identical CNN per optimizer.

    Returns (model_array, opt_names): the compiled Keras models and the
    matching human-readable optimizer names, in the same order.
    """
    # Array consisting of various optimizers
    optimizer_arr = ['adam', 'sgd', 'rmsprop', 'adadelta']
    # Array consisting of various optimizer names
    opt_names = ['Adam', 'SGD', 'RMSprop', 'Adadelta']
    model_array = []  # Array utilized for storing compiled CNN models
    # Build and compile CNN models, varying the optimizer
    for optimizer in optimizer_arr:
        model = Sequential()
        model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu', input_shape=x_train.shape[1:]))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(.25))
        model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))
        model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Dropout(.5))
        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        # Softmax output over the 43 traffic-sign classes.
        model.add(Dense(classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        model_array.append(model)
    return model_array, opt_names
def train_models(models, opt_names):
    """
    Fit each compiled CNN, report its test-set accuracy, and plot the
    training accuracy/loss curves of all models for comparison.
    (Original docstring incorrectly said "varying activation functions";
    the models vary only in optimizer — see build_models_diff_optimizers.)

    :param models: object - Array consisting of compiled CNN models.
    :param opt_names: object - Array of optimizer names, same order as models.
    """
    epochs = 16
    # Load test data from source
    test_data, test_labels = signs.test_data()
    # Array utilized for storing histories gotten from fitting a CNN model.
    histories = []
    for i, model in enumerate(models):
        # Fit the CNN model using training data and labels.
        history = model.fit(x_train, y_train, batch_size=32, epochs=epochs, validation_data=(X_test, y_test))
        pred = np.argmax(model.predict(test_data), axis=-1) # Select highest probability for all classifications
        histories.append(history) # Append new history stats to the histories array
        # Check accuracy with the test data
        print(accuracy_score(test_labels, pred))
    plt.figure(0)
    for i, hist in enumerate(histories):
        # plotting graphs of accuracy from various optimizers
        plt.plot(hist.history['accuracy'], label=opt_names[i])
    plt.title('Accuracy - Optimizers')
    plt.xlabel('epochs')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()
    plt.figure(1)
    for i, hist in enumerate(histories):
        # plotting graphs of loss from various optimizers
        plt.plot(hist.history['loss'], label=opt_names[i])
    plt.title('Loss - Optimizers')
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.legend()
    plt.show()
# Entry point: build one CNN per optimizer, then train each and plot the
# accuracy/loss comparison. Runs at import time (no __main__ guard).
models, opt_names = build_models_diff_optimizers()
train_models(models, opt_names)
| StarcoderdataPython |
3475752 | <gh_stars>0
from app.controllers.get_all_class_descs import * # pylint: disable=C0413
from app.controllers.get_all_gyms import * # pylint: disable=C0413
from app.controllers.get_all_gymclasses import * # pylint: disable=C0413
from app.controllers.get_all_instructors import * # pylint: disable=C0413
from app.controllers.get_all_tags import * # pylint: disable=C0413
from app.controllers.get_class_descs_by_get_param import * # pylint: disable=C0413
from app.controllers.get_favorite_gymclasses import * # pylint: disable=C0413
from app.controllers.get_gym_by_id import * # pylint: disable=C0413
from app.controllers.get_gym_class_by_id import * # pylint: disable=C0413
from app.controllers.get_gym_class_instance_by_id import * # pylint: disable=C0413
from app.controllers.get_gym_class_instances import * # pylint: disable=C0413
from app.controllers.get_gym_class_instances_by_date import * # pylint: disable=C0413
from app.controllers.get_instructor_by_id import * # pylint: disable=C0413
from app.controllers.get_quote import * # pylint: disable=C0413
from app.controllers.search_gym_classes import * # pylint: disable=C0413
from app.controllers.toggle_favorite import * # pylint: disable=C0413
# Registry of all controller instances exposed by the app; consumers
# iterate this list to wire up request handlers.
controllers = [
    GetAllClassDescsController(),
    GetAllGymsController(),
    GymClassesController(),
    GetAllTagsController(),
    GetAllInstructorsController(),
    GetClassDescsByGetParamController(),
    GetFavoriteGymClassesController(),
    GetGymByIdController(),
    GetGymClassByIdController(),
    GetGymClassInstanceByIdController(),
    GetGymClassInstancesController(),
    GetGymClassInstancesByDate(),
    GetInstructorByIdController(),
    GetQuoteController(),
    SearchGymClassesController(),
    ToggleFavoriteController()
]
| StarcoderdataPython |
3299350 | <reponame>cahorn/tmonitor<filename>tmonitor/schedule.py
import datetime
import crontab
import re
cron = crontab.CronTab(user=True)
def add(args):
    """Add a new monitor poll to the schedule.

    Builds a `tmonitor` command line from `args` (a dict of flag -> value),
    stamps the cron entry with an identifiable "[tmonitor]" comment, and
    installs it in the user's crontab.
    """
    # Construct monitor poll command. Scheduling flags and 'list' are
    # consumed here, so they are excluded from the generated command line.
    cmd_args = ['tmonitor']
    for flag, arg in args.items():
        if flag not in ['boot', 'hourly', 'daily', 'weekly', 'list'] \
                and arg is not None:
            cmd_args.append("--{0}".format(flag))
            cmd_args.append("'{0}'".format(arg))
    command = " ".join(cmd_args)
    # Construct an identifiable comment (timestamp makes entries unique
    # and lets list() recognise tmonitor-owned jobs).
    comment = "[tmonitor] {0}".format(datetime.datetime.now())
    # Schedule the new poll; exactly one of the cadence flags is applied.
    job = cron.new(command=command, comment=comment)
    if args['weekly'] is not None:
        job.dow.on(args['weekly'])
    if args['daily'] is not None:
        job.hour.on(args['daily'])
    if args['hourly']:
        job.minute.on(0)
    if args['boot']:
        job.every_reboot()
    cron.write()
def list():
    """List all scheduled monitor polls.

    NOTE(review): this shadows the builtin `list` within this module;
    renaming (e.g. list_polls) would be safer but changes the public API.
    """
    for job in cron:
        # Only show jobs created by add(), identified by their comment tag.
        if job.comment.startswith("[tmonitor]"):
            print(job)
| StarcoderdataPython |
3474588 | <filename>scripts/sensitivity_analysis/npi_leaveout.py
"""
:code:`npi_leaveout.py`
Leave our specified NPIs. Useful to study the sensitivity of the results to the inclusion of particular NPIs.
"""
import pymc3 as pm
from epimodel import EpidemiologicalParameters
from epimodel.preprocessing.data_preprocessor import preprocess_data
import argparse
from scripts.sensitivity_analysis.utils import *
# CLI: --npis takes the indices of the NPIs to leave out; the shared
# model/sampling options come from add_argparse_arguments.
argparser = argparse.ArgumentParser()
argparser.add_argument('--npis', nargs='+', dest='npis', type=int)
add_argparse_arguments(argparser)
if __name__ == '__main__':
    args, extras = argparser.parse_known_args()
    data = preprocess_data(get_data_path(), last_day='2020-05-30')
    data.mask_reopenings()
    # Zero out the activation of every NPI selected for leave-out, and
    # accumulate the indices into the output filename.
    output_string = ''
    for npi_index in args.npis:
        data.ActiveCMs[:, npi_index, :] = 0
        output_string = f'{output_string}{npi_index}'
    output_string = f'{output_string}.txt'
    ep = EpidemiologicalParameters()
    model_class = get_model_class_from_str(args.model_type)
    # Build dict: defaults from the epi parameters, overridden by any
    # extra command-line model arguments.
    bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)}
    with model_class(data) as model:
        model.build_model(**bd)
    with model.model:
        model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14,
                                target_accept=0.96, init='adapt_diag')
    save_cm_trace(output_string, model.trace.CMReduction, args.exp_tag,
                  generate_base_output_dir(args.model_type, parse_extra_model_args(extras)))
    if model.country_specific_effects:
        # Additionally save a trace with country-level noise folded in
        # (log-normal perturbation scaled by CMAlphaScales).
        output_string = output_string.replace('.txt', '-cs.txt')
        nS, nCMs = model.trace.CMReduction.shape
        full_trace = np.exp(
            np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs)) * model.trace.CMAlphaScales)
        save_cm_trace(output_string, full_trace, args.exp_tag,
                      generate_base_output_dir(args.model_type, parse_extra_model_args(extras)))
| StarcoderdataPython |
5110645 | import pytest
import numpy as np
import pandas as pd
from marketcrush import strategies
@pytest.fixture(params=strategies.strategies.keys())
def strategy(request):
    """Parametrised fixture: yields every registered strategy class in turn."""
    return strategies.strategies[request.param]
@pytest.fixture(params=[strategies.MACrossOverDayTrade,
                        strategies.TrendFollowerDayTrade])
def strategy_day_trade(request):
    """Parametrised fixture: yields each day-trade strategy class in turn."""
    return request.param
def test_generate_filtered_trading_signals(gbm, config_test, strategy):
    """Entry signals on a GBM price series include both longs (+1) and shorts (-1).

    gbm/config_test fixtures are presumably defined in conftest.py — not shown here.
    """
    df = pd.DataFrame({'close': gbm})
    strat = strategy(**config_test.strategy)
    filtered_signals = strat.enter_trades(df)
    assert np.max(filtered_signals) == 1
    assert np.min(filtered_signals) == -1
def test_strategy(ohlc_data, config_test, strategy):
    """Smoke test: backtest runs and total profit stays above -100k."""
    strat = strategy(**config_test.strategy)
    profit_df = strat.backtest(ohlc_data)
    assert profit_df.sum()['total_profit'] > - 1.0e5
def test_day_trade(ohlc_data, config_test, strategy_day_trade):
    """Smoke test for day-trade strategies: same bound as test_strategy."""
    strat = strategy_day_trade(**config_test.strategy)
    profit_df = strat.backtest(ohlc_data)
    assert profit_df.sum()['total_profit'] > - 1.0e5
| StarcoderdataPython |
3512829 | <filename>test/test_phones.py<gh_stars>0
import re
from model.contact import Contact
def test_main_page_db(app, db):
    """Contacts shown on the home page must match the database contents.

    Both lists are sorted by the same key (Contact.id_or_max) so they can
    be compared pairwise; phones and emails are merged the same way the
    home page renders them.
    """
    contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
    contacts_from_db = sorted(db.get_contact_list(), key=Contact.id_or_max)
    assert len(contacts_from_home_page) == len(contacts_from_db)
    # Idiom fix: pair the two sorted lists with zip instead of indexing
    # via range(len(...)).
    for shown, stored in zip(contacts_from_home_page, contacts_from_db):
        assert shown.all_phones == merge_phones_like_on_home_page(stored)
        assert shown.all_emails == merge_emails_like_on_home_page(stored)
        assert shown.firstname == stored.firstname
        assert shown.lastname == stored.lastname
        assert shown.address == stored.address
def test_phones_on_home_page(app):
    """The first contact's home-page row must agree with its edit form."""
    shown = app.contact.get_contact_list()[0]
    edited = app.contact.get_contact_info_from_edit_page(0)
    assert shown.all_phones_from_home_page == merge_phones_like_on_home_page(edited)
    assert shown.all_emails == merge_emails_like_on_home_page(edited)
    assert shown.firstname == edited.firstname
    assert shown.lastname == edited.lastname
    assert shown.address == edited.address
def test_pfones_on_contact_view_page(app):
    """Phone fields on the view page must match the edit page.

    NOTE(review): "pfones" looks like a typo for "phones"; the name is
    kept unchanged so the pytest test id stays stable.
    """
    viewed = app.contact.get_contact_from_view_page(0)
    edited = app.contact.get_contact_info_from_edit_page(0)
    assert viewed.home == edited.home
    assert viewed.work == edited.work
    assert viewed.mobile == edited.mobile
def clear(s):
    """Strip phone formatting characters '(', ')', space and '-' from *s*."""
    return s.translate(str.maketrans("", "", "() -"))
def merge_phones_like_on_home_page(contact):
    """Join the contact's phone numbers the way the home page shows them.

    None entries are dropped, formatting characters are stripped via
    clear(), empty results are skipped, and the remaining numbers are
    joined with newlines in home/mobile/work order.
    """
    raw = [contact.home, contact.mobile, contact.work]
    cleaned = [clear(p) for p in raw if p is not None]
    return "\n".join(p for p in cleaned if p != "")
def merge_emails_like_on_home_page(contact):
    """Join the contact's non-empty e-mail fields with newlines,
    in email/email2/email3 order, dropping None and "" entries."""
    emails = [contact.email, contact.email2, contact.email3]
    return "\n".join(e for e in emails if e is not None and e != "")
| StarcoderdataPython |
4914704 | <filename>cards98/run_game_with_Qlearning.py
from game.cards98 import GameCards98
from reinforced.rl_agent import RLAgent
class Game(GameCards98):
    """Cards-98 game variant whose moves are selected programmatically
    (see agent_input) instead of by a human player."""
    def main_loop(self): # override
        '''Run the game until an end condition is reached.

        Returns the (status, comment) pair produced by end_condition().
        '''
        while True:
            self.hand_fill()
            status, comment = self.end_condition()
            if status is not None:
                print('\n')
                return status, comment
            self.display_table()
            # Let the agent choose self.hand_ind / self.pile_ind.
            self.agent_input()
            self.play_card(self.hand_ind, self.pile_ind)
            self.print_move_reward()
    def agent_input(self):
        # Placeholder policy: always play the first hand card onto the
        # first pile.  NOTE(review): RLAgent is imported at file top but
        # never used here -- presumably meant to drive this choice;
        # confirm intended wiring.
        self.hand_ind = 0
        self.pile_ind = 0
    def print_move_reward(self):
        # Report the last move and the reward it earned (attributes set
        # by play_card in the parent class).
        print("Card played {card} -> Pile {pile}".format(card=self.last_card_played,
                                                         pile=self.pile_ind + 1))
        print("Reward: {reward}".format(reward=self.score_gained))
        print(' _' * 80)
# NOTE(review): runs at import time; consider an `if __name__ == "__main__":`
# guard if this module is ever imported rather than executed directly.
app = Game()
app.start_game()
| StarcoderdataPython |
9724671 |
import os
import re
import fnmatch
from harmony import hashers
from harmony import serialization
from harmony.serialization import FileSerializable
class Ruleset(FileSerializable):
    """Ordered list of file-matching rules for a harmony repository.

    Each rule is a dict with a 'match' mapping (matcher name -> pattern),
    a 'commit' flag, an 'action' ('continue' to keep evaluating later
    rules, 'stop' to finish), and optionally extra keys such as 'hasher'.
    Rules are evaluated in order by get_rule().
    """
    # Location of the serialized ruleset relative to the repository.
    RELATIVE_PATH = 'rules'
    # Attributes persisted by FileSerializable.
    _state = (
        'rules',
    )
    class FileInfo:
        # Plain record; iterate_files() sets absolute_filename,
        # relative_filename and rule attributes on instances.
        pass
    @classmethod
    def init(class_, path):
        """Create a fresh ruleset on disk with sane default rules."""
        r = super().init(path)
        # When starting from scratch, create a few basic rules
        # that ensure a minimal sanity of the repository state
        # (especially not to track .harmony)
        r.add_rule(
            match = {},
            hasher = hashers.DEFAULT,
            commit = True,
            action = 'continue'
        )
        r.add_rule(
            match = {'path': '/.harmony/**'},
            commit = False,
            action = 'stop'
        )
        r.add_rule(
            match = {'filename': ['*.swp', '*.bak', '*~', '*.pyc']},
            commit = False,
            action = 'stop'
        )
        r.save()
        return r
    @classmethod
    def load(class_, path):
        """Load a ruleset from disk, refusing an empty one."""
        # If we load an empty ruleset that would mean we treat .harmony
        # the same as the rest of the working dir, that probably means
        # something is very wrong.
        r = super().load(path)
        assert len(r.rules) > 0
        return r
    @staticmethod
    def match_path(path, pattern):
        """Glob-match *path* against *pattern*, where '**' spans any
        number of path components.  A leading '/' is stripped (the
        pattern is treated as anchored either way)."""
        anchored = pattern.startswith('/')
        if anchored:
            pattern = pattern[1:]
        path_elements = path.split(os.path.sep)
        pattern_elements = pattern.split(os.path.sep)
        def match_recursive(i_pattern, i_path):
            # Pattern exhausted: match only if the path is exhausted too.
            if i_pattern >= len(pattern_elements):
                return i_path >= len(path_elements)
            e_pattern = pattern_elements[i_pattern]
            if i_path >= len(path_elements):
                # NOTE(review): returning True when the remaining pattern
                # element is NOT '**' means e.g. pattern 'a/b' matches
                # path 'a' -- this looks inverted; confirm intent before
                # relying on this edge case.
                return e_pattern != '**'
            e_path = path_elements[i_path]
            if e_pattern == '**':
                # If the pattern ended with **, everything that comes,
                # matches, so we are done
                if i_pattern == len(pattern_elements) - 1:
                    return True
                # Now try all possible chains of subdirectories for '**'
                i = i_path
                while i < len(path_elements):
                    if match_recursive(i_pattern + 1, i):
                        return True
                    i += 1
                return False
            else:
                match = fnmatch.fnmatch(e_path, e_pattern)
                if not match:
                    return False
                return match_recursive(i_pattern + 1, i_path + 1)
        return match_recursive(0, 0)
        # TODO: this should more behave like rsyncs matching
        #pattern = pattern.replace('.', '\\.')
        #pattern = pattern.replace('?', '.')
        #pattern, _ = re.subn(r'(?<!\*)\*(?!\*)', '[^/]*', pattern)
        #pattern, _ = re.subn(r'\*\*', '.*', pattern)
        #pattern += '$'
        #m = re.match(pattern, path)
        #return m is not None
    @staticmethod
    def match_directory(path, pattern):
        """Return True if any directory component of *path* (everything
        but the last element) matches *pattern*."""
        path_elements = path.split(os.path.sep)
        for e in path_elements[:-1]:
            if fnmatch.fnmatch(e, pattern):
                return True
        return False
    @staticmethod
    def match_filename(path, pattern):
        """Return True if the basename of *path* matches *pattern*
        (a single glob or a list of globs)."""
        path_elements = path.split(os.path.sep)
        if not isinstance(pattern, list):
            pattern = [pattern]
        for p in pattern:
            if fnmatch.fnmatch(path_elements[-1], p):
                return True
        return False
    def __init__(self, path, rules = None):
        super().__init__(path)
        self.rules = rules if rules else []
        # Dispatch table: 'match' key name -> matcher function.
        self.matchers = {
            'path': Ruleset.match_path,
            'dirname': Ruleset.match_directory,
            'filename': Ruleset.match_filename,
        }
    def iterate_committable_files(self, working_directory):
        """Yield FileInfo for files whose effective rule allows commit."""
        for file_info in self.iterate_files(working_directory):
            if file_info.rule['commit']:
                yield file_info
    def iterate_files(self, working_directory):
        """Walk *working_directory* and yield a FileInfo (with the
        effective rule attached) for every file found."""
        # TODO: do this with pathlib
        working_directory = str(working_directory)
        for root, dirs, files in os.walk(working_directory):
            for filename in files:
                absfn = os.path.join(root, filename)
                relfn = os.path.relpath(absfn, working_directory)
                rule = self.get_rule(relfn)
                file_info = Ruleset.FileInfo()
                file_info.absolute_filename = absfn
                file_info.relative_filename = relfn
                file_info.rule = rule
                yield file_info
    def get_rule(self, relfn):
        """Compute the effective rule dict for relative filename *relfn*.

        Rules are applied in order; each matching rule's keys are merged
        into the result.  Evaluation stops at the first matching rule
        whose action is not 'continue'.  Defaults to {'commit': True}.
        """
        result = {
            'commit': True
        }
        for rule in self.rules:
            matches = True
            # A rule matches only if every matcher in its 'match' dict
            # accepts the filename.
            for matcher, parameters in rule['match'].items():
                if not self.matchers[matcher](relfn, parameters):
                    matches = False
                    break
            if matches:
                result.update(rule)
                if rule['action'] == 'continue':
                    continue
                break
        return result
    def add_rule(self, **kws):
        """Append a rule built from the given keyword arguments."""
        self.rules.append(kws)
| StarcoderdataPython |
307122 | <filename>challenges/platformids.py
# Mapping of short platform codes to competitive-programming site domains.
# NOTE(review): "ICPC" maps to icfpcontest.org (the ICFP programming
# contest), which looks like a code/domain mismatch -- verify intent.
# NOTE(review): "EOL" (e-olimp.com) and "ELY" (e-olymp.com) appear to be
# two spellings of the same site -- confirm both entries are wanted.
platforms = {
    "UA": "usaco.org",
    "CC": "codechef.com",
    "EOL":"e-olimp.com",
    "CH24": "ch24.org",
    "HR": "hackerrank.com",
    "HE": "hackerearth.com",
    "ICPC": "icfpcontest.org",
    "GCJ": "google.com/codejam",
    "DE24": "deadline24.pl",
    "IOI": "stats.ioinformatics.org",
    "PE": "projecteuler.net",
    "SN": "contests.snarknews.info",
    "CG": "codingame.com",
    "CF": "codeforces.com",
    "ELY": "e-olymp.com",
    "DMOJ": "dmoj.ca",
    "MARA": "marathon24.com",
    "IPSC": "ipsc.ksp.sk",
    "UVA": "uva.onlinejudge.org",
    "OPEN": "opener.itransition.com",
    "HSIN": "hsin.hr/coci",
    "CFT": "ctftime.org",
    "KA": "kaggle.com",
    "TC": "topcoder.com",
    "FBHC": "facebook.com/hackercup"
} | StarcoderdataPython |
100356 | <reponame>mv20100/phd_code
from PyDAQmx import *
import numpy as np
import ctypes, time
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from collections import deque
class SyncAIAO(object):
    """Synchronized NI-DAQmx analog output/input sweep (Python 2 code).

    Drives a triangle wave on one analog output channel while reading one
    or more analog input channels at the same sample clock; the AO task is
    armed on the AI start trigger so output and input samples line up.
    A rolling buffer of the last `mean` sweeps is kept for averaging.
    """
    # Class-level defaults (overridable on instances/subclasses):
    compatibility_mode = 0 # Set this to 1 on some PC (Mouss)
    trigName = "ai/StartTrigger"  # trigger terminal used to arm the AO task
    timeout = 10.0  # seconds, for DAQmx read/write calls
    mean = 64  # number of past sweeps kept for averaging
    sampling_rate = 1e5  # Hz
    numSamp=2000  # samples per sweep (one full up/down ramp)
    nbSampCroppedFactor=0.5  # fraction of each sweep dropped from plots
    vpp=1.  # peak-to-peak amplitude of the output ramp (V)
    offset = 0.  # DC offset of the output ramp (V)
    def __init__(self,device="Dev2",outChan="ao1",inChanList=["ai0"],inRange=(-10.,10.),outRange=(-10.,10.)):
        # NOTE(review): inChanList=["ai0"] is a mutable default argument,
        # shared between instances created without an explicit list.
        self.device = device
        self.outChan = outChan
        self.inChanList = inChanList
        self.inRange = inRange
        self.outRange = outRange
        self.running = False
        self.initialize()
    def initialize(self):
        """(Re)allocate acquisition buffers and build the output waveform."""
        # Snapshot the class-level settings so a running acquisition is not
        # affected by later changes.
        self._sampling_rate = self.sampling_rate
        self._numSamp = self.numSamp
        self.nbSampCropped = int(self.nbSampCroppedFactor * self._numSamp)
        self.AImean = np.zeros(self._numSamp*len(self.inChanList),dtype=np.float64)
        # Ring buffer of the last `mean` sweeps (one row per sweep).
        self.AIdata = np.zeros((self.mean,self._numSamp*len(self.inChanList)),dtype=np.float64)
        self.ptr = 0
        self.deque = deque([],self.mean)
        # Triangle wave: linear up-ramp followed by a down-ramp.
        self.AOdata = self.offset + np.hstack([np.linspace(-self.vpp/2.,self.vpp/2.,self._numSamp/2,dtype=np.float64,endpoint=False),
                                               np.linspace(self.vpp/2.,-self.vpp/2.,self._numSamp/2,dtype=np.float64,endpoint=False)])
        self.counter=0
        self.totalAI=0
        self.AItaskHandle = None
        self.AOtaskHandle = None
    def makeInputStr(self):
        """Build the comma-separated physical AI channel list string."""
        return ",".join([self.device+"/"+inChan for inChan in self.inChanList])
    def makeOutputStr(self):
        """Build the physical AO channel string, e.g. "Dev2/ao1"."""
        return self.device+"/"+self.outChan
    def getNthFullChanName(self,index):
        """Full name of the index-th input channel, e.g. "Dev2/ai0"."""
        return self.device+"/"+self.inChanList[index]
    def getNthChanAIdata(self,index):
        """Return (x, y) of the latest sweep for channel *index*,
        with the cropped tail removed."""
        return self.AOdata[0:self._numSamp-self.nbSampCropped],self.AIdata[self.ptr,index*self._numSamp:(index+1)*self._numSamp-self.nbSampCropped]
    def getNthChanAImean(self,index):
        """Return (x, y) of the averaged sweeps for channel *index*."""
        return self.AOdata[0:self._numSamp-self.nbSampCropped],self.AImean[index*self._numSamp:(index+1)*self._numSamp-self.nbSampCropped]
    def start(self):
        """Configure both DAQmx tasks, register callbacks and start them."""
        assert not self.running
        self.running = True
        self.initialize()
        def EveryNCallback(taskHandle, everyNsamplesEventType, nSamples, callbackData):
            # Called by DAQmx each time a full sweep of AI samples is ready;
            # reads it into the ring buffer and updates the running mean.
            # global AItaskHandle, totalAI, AIdata, ptr
            readAI = c_int32()
            self.ptr=(self.ptr+1)%self.mean
            self.deque.append(self.ptr)
            DAQmxReadAnalogF64(self.AItaskHandle,self._numSamp,self.timeout,DAQmx_Val_GroupByChannel,self.AIdata[self.ptr],self._numSamp*len(self.inChanList),byref(readAI),None)
            self.AImean=np.mean(self.AIdata[self.deque],axis=0)
            self.totalAI = self.totalAI + readAI.value
            self.counter=self.counter+1
            # print self.totalAI
            return int(0)
        def DoneCallback(taskHandle, status, callbackData):
            # Called by DAQmx when the AI task finishes (e.g. on error).
            self.clearTasks()
            return int(0)
        self.AItaskHandle = TaskHandle()
        self.AOtaskHandle = TaskHandle()
        self.totalAI=0
        # Continuous AI acquisition clocked at the sample rate.
        DAQmxCreateTask(None,byref(self.AItaskHandle))
        DAQmxCreateAIVoltageChan(self.AItaskHandle,self.makeInputStr(), None, DAQmx_Val_Cfg_Default, self.inRange[0],self.inRange[1], DAQmx_Val_Volts, None)
        DAQmxCfgSampClkTiming(self.AItaskHandle,None, self._sampling_rate, DAQmx_Val_Rising, DAQmx_Val_ContSamps, self._numSamp)
        # Continuous AO generation armed on the AI start trigger so both
        # tasks run sample-synchronous.
        DAQmxCreateTask(None,byref(self.AOtaskHandle))
        DAQmxCreateAOVoltageChan(self.AOtaskHandle,self.makeOutputStr(),None,self.outRange[0],self.outRange[1],DAQmx_Val_Volts,None)
        DAQmxCfgSampClkTiming(self.AOtaskHandle,None,self._sampling_rate,DAQmx_Val_Rising,DAQmx_Val_ContSamps,self._numSamp)
        DAQmxCfgDigEdgeStartTrig(self.AOtaskHandle,self.trigName,DAQmx_Val_Rising)
        # The first CFUNCTYPE argument (task handle) is declared as either
        # c_void_p or c_ulong depending on the machine (see compatibility_mode).
        if self.compatibility_mode == 0:
            EveryNCallbackCWRAPPER = CFUNCTYPE(c_int32,c_void_p,c_int32,c_uint32,c_void_p)
        else:
            EveryNCallbackCWRAPPER = CFUNCTYPE(c_int32,c_ulong,c_int32,c_uint32,c_void_p)
        # Keep a reference on self so the ctypes wrapper is not garbage
        # collected while DAQmx still holds the raw pointer.
        self.everyNCallbackWrapped = EveryNCallbackCWRAPPER(EveryNCallback)
        DAQmxRegisterEveryNSamplesEvent(self.AItaskHandle,DAQmx_Val_Acquired_Into_Buffer,self._numSamp,0,self.everyNCallbackWrapped,None)
        if self.compatibility_mode == 0:
            DoneCallbackCWRAPPER = CFUNCTYPE(c_int32,c_void_p,c_int32,c_void_p)
        else:
            DoneCallbackCWRAPPER = CFUNCTYPE(c_int32,c_ulong,c_int32,c_void_p)
        self.doneCallbackWrapped = DoneCallbackCWRAPPER(DoneCallback)
        DAQmxRegisterDoneEvent(self.AItaskHandle,0,self.doneCallbackWrapped,None)
        # Preload one sweep of output data, then start AO (waits for the
        # AI start trigger) before starting AI.
        DAQmxWriteAnalogF64(self.AOtaskHandle, self._numSamp, 0, self.timeout, DAQmx_Val_GroupByChannel, self.AOdata, None, None)
        DAQmxStartTask(self.AOtaskHandle)
        DAQmxStartTask(self.AItaskHandle)
        print "Starting acquisition"
    def clearTasks(self):
        """Stop and release both DAQmx tasks if they exist."""
        if self.AItaskHandle:
            DAQmxStopTask(self.AItaskHandle)
            DAQmxClearTask(self.AItaskHandle)
            self.AItaskHandle = None
        if self.AOtaskHandle:
            DAQmxStopTask(self.AOtaskHandle)
            DAQmxClearTask(self.AOtaskHandle)
            self.AOtaskHandle = None
    def stop(self):
        """Stop the acquisition and drive the output back to 0 V."""
        if self.running:
            self.clearTasks()
            self.setZero()
            self.running = False
    def setZero(self):
        """Write a single 0 V sample to the output channel."""
        print "Setting output to 0 V"
        clearTaskHandle = TaskHandle()
        DAQmxCreateTask("", byref(clearTaskHandle))
        DAQmxCreateAOVoltageChan(clearTaskHandle, self.makeOutputStr(), None, self.outRange[0],self.outRange[1], DAQmx_Val_Volts, None)
        DAQmxWriteAnalogF64(clearTaskHandle,1,1,self.timeout,DAQmx_Val_GroupByChannel,np.array([0.]),None,None)
        DAQmxStartTask(clearTaskHandle)
        DAQmxClearTask(clearTaskHandle)
    def __del__(self):
        self.stop()
if __name__=="__main__":
    # Demo: live-plot one AI channel with pyqtgraph while SyncAIAO sweeps.
    app = QtGui.QApplication([])
    win = pg.GraphicsWindow()
    win.resize(1000,600)
    win.setWindowTitle('Pyqtgraph : Live NIDAQmx data')
    pg.setConfigOptions(antialias=True)
    outChan="ao2"
    inChanList=["ai20"]
    syncAiAo = SyncAIAO(device = "Dev1", inChanList=inChanList,outChan=outChan)
    p = win.addPlot(title="Live plot")
    p.addLegend()
    # One pen color per input channel.
    colors = ['m','y','c']
    assert len(colors)>=len(inChanList)
    curves = []
    for idx,inChan in enumerate(inChanList):
        curve = p.plot(pen=colors[idx],name=syncAiAo.getNthFullChanName(idx))
        curves.append(curve)
    def update():
        # Refresh every curve with the latest sweep.
        for idx,curve in enumerate(curves):
            x, y = syncAiAo.getNthChanAIdata(idx)
            curve.setData(x=x, y=y)
        if syncAiAo.counter == 1:
            p.enableAutoRange('xy', False) ## stop auto-scaling after the first data set is plotted
    # Poll for new data every 50 ms on the GUI timer.
    timer = QtCore.QTimer()
    timer.timeout.connect(update)
    timer.start(50)
    syncAiAo.start()
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        ret = QtGui.QApplication.instance().exec_()
    print "Closing"
    syncAiAo.stop()
    sys.exit(ret)
| StarcoderdataPython |
5128222 | <gh_stars>0
# testing spectral-net using data imported from xhm
# jianhong, 1/19
# %%
import pandas as pd
import numpy as np
import scipy.io as sio
import tensorflow as tf
import keras.backend as K
# %% load types
# types.mat: worker-type and firm-type assignment matrices.
types = sio.loadmat('types.mat')
i2x = types['i2x']
# num
n = {}
n['i'] = len(i2x)
k = {}
k['i'] = np.max(i2x)
print('There are {} workers in {} types.'.format(n['i'], k['i']))
j2cj = types['j2cj']
n['j'], _ = j2cj.shape
cj2y = types['cj2y']
_, n['f'] = cj2y.shape
k['f'] = np.max(cj2y)
print('There are {} firms in {} types.'.format(n['f'], k['f']))
# %% load affinity matrix
a = sio.loadmat('a.mat')
A = {}
A['ii'] = np.array(a['A_ii'])
print('The shape of the affinity matrix A: {}'.format(A['ii'].shape))
# %% load sufficient stats
ss = sio.loadmat('ss.mat')
G = ss['G_if']
H = ss['H_if']
H_ss = ss['H_ss_if']
W_s = ss['W_s_if']
W_ss = ss['W_ss_if']
W_ssb = ss['W_ssb_if']
W_ssw = ss['W_ssw_if']
S = {}
# NOTE(review): W_ss is loaded above but not stored in S['if'] -- verify
# whether it was meant to be included.
S['if'] = {'G': G, 'H': H, 'H_ss': H_ss,
           'W_s': W_s, 'W_ssb': W_ssb, 'W_ssw': W_ssw}
print('The shape of the matrix S: {}'.format(G.shape))
# %% load estimators
e = sio.loadmat('e.mat')
W_m = e['W_m_if'] # average wage (i,f)
W_se = e['W_se_if'] # std err for average wage (i,f)
z_se = e['z_se_if'] # std err for match spec shock (i,f)
E = {}
E['if'] = {'W_m': W_m, 'W_se': W_se, 'z_se': z_se}
# NOTE(review): W_se is rebound here to a different array (e['W_se']);
# the t-stat below therefore uses the rebound value, not e['W_se_if'].
W_se = e['W_se']
E['11'] = {'W_se': W_se}
print(W_se)
w = W_m / W_se # t stat
print(w[:50,0])
print('The shape of the matrix E: {}'.format(W_m.shape))
# %%
# Alternative affinity: count, per (i,j) pair, the firm columns where the
# two workers' t-stats differ by less than 2.
A2 = np.sum((np.abs(w[:1000,np.newaxis,:1000] - w[:1000,:1000])<2),axis=-1)
# %%
print(A['ii'][:10,:10]*256)
print(A2[:10,:10])
#%%
| StarcoderdataPython |
9641965 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SkyAlchemy
Copyright ©2016 <NAME>
Licensed under the terms of the MIT License.
See LICENSE for details.
@author: <NAME>
"""
# Built-in imports
from __future__ import division, unicode_literals, print_function
import sys
import os
import os.path as osp
import subprocess
import ctypes
import logging
from importlib import reload
reload(logging) # Needed inside Spyder IDE
import argparse
import base64
from io import BytesIO
from collections import OrderedDict
try:
from queue import PriorityQueue # PY3
except ImportError:
from Queue import PriorityQueue # PY2
import operator
import math
#%% Setup PyQt's v2 APIs. Must be done before importing PyQt or PySide
import rthook
#%% Third-party imports
from qtpy import QtCore, QtGui, QtWidgets, uic
import six
from jinja2 import Environment, FileSystemLoader
#%% Application imports
import savegame
import skyrimdata
from skyrimdata import db
import alchemy
#%% Global functions
# PyInstaller utilities
def frozen(filename):
    """Resolve *filename* for both normal and PyInstaller-frozen runs.

    When running from a PyInstaller bundle (``sys.frozen`` is set), data
    files live under the temporary ``sys._MEIPASS`` directory; otherwise
    the name is returned unchanged.
    """
    if getattr(sys, 'frozen', False):
        return osp.join(sys._MEIPASS, filename)
    return filename
# I usually need some sort of file/dir opening function
# Platform-specific helpers: show_file() reveals a path in the file
# manager, open_default_program() opens it with its default application.
if sys.platform == 'darwin':
    def show_file(path):
        subprocess.Popen(['open', '--', path])
    def open_default_program(path):
        # NOTE(review): 'start' is a Windows shell builtin; on macOS this
        # probably should be 'open' -- confirm on a Mac before changing.
        subprocess.Popen(['start', path])
elif sys.platform.startswith('linux'):
    # BUG FIX: sys.platform is 'linux' on Python 3 ('linux2' only on
    # Python 2), so the old equality test against 'linux2' never matched
    # and these helpers were left undefined on Linux.
    def show_file(path):
        subprocess.Popen(['xdg-open', '--', path])
    def open_default_program(path):
        subprocess.Popen(['xdg-open', path])
elif sys.platform == 'win32':
    def show_file(path):
        subprocess.Popen(['explorer', '/select,', path])
    open_default_program = os.startfile
#%% Simple thread example (always useful to avoid locking the GUI)
class SavegameThread(QtCore.QThread):
    """Savegame worker thread.

    Concentrates all intensive processing (data loading, savegame parsing,
    recipe combination) to avoid GUI freezes.  Jobs arrive as priority
    tuples on `queue`; results are delivered back to the GUI via the Qt
    signals declared below.
    """
    newJob = QtCore.Signal(str, int)        # (job name, progress maximum)
    jobStatus = QtCore.Signal(int)          # progress value for current job
    generalData = QtCore.Signal(str)        # rendered HTML summary
    inventoryItem = QtCore.Signal(int, int) # (count, ingredient formid)
    recipeItem = QtCore.Signal(int, alchemy.Recipe)  # (index, recipe)
    def __init__(self, queue, *args, **kwargs):
        # Enqueue the initial data-load job before the thread starts.
        queue.put((1, 'load'))
        self.queue = queue
        # NOTE(review): `running` is set here and in stop(), but run()
        # only exits on the 'stop' job -- the flag itself is never read.
        self.running = True
        super(SavegameThread, self).__init__(*args, **kwargs)
        # Setup Jinja templating
        self.env = Environment(loader=FileSystemLoader(frozen('data')))
        # Record-type code -> localized inventory category label.
        self.inv_types = OrderedDict([
            ('ARMO', self.tr("Armor")),
            ('WEAP', self.tr("Weapons")),
            ('ALCH', self.tr("Potions")),
            ('INGR', self.tr("Ingredients")),
            ('SCRL', self.tr("Scrolls")),
            ('MISC', self.tr("Miscellaneous")),
            ('BOOK', self.tr("Books")),
            ('AMMO', self.tr("Ammunition")),
            ('SLGM', self.tr("Soul gems")),
            ('KEYM', self.tr("Keys")),
            ('Other', self.tr("Other")),
        ])
    def __del__(self):
        # Block until the thread's run() has returned.
        self.wait()
    def run(self):
        """Job loop: pop (priority, name, *args) tuples until 'stop'."""
        while True:
            job = self.queue.get()
            prio, job, data = job[0], job[1], job[2:]
            print("SavegameThread job:", job)
            if job == 'stop':
                break
            else:
                self.newJob.emit(job, 0)
            if job == 'load':
                skyrimdata.loadData()
                # Empty job name signals "job finished" to the GUI.
                self.newJob.emit("", 0)
            elif job == 'savegame':
                filename = data[0]
                # Progress maximum is the file size in bytes.
                self.newJob.emit("savegame", os.stat(filename).st_size)
                sg = savegame.Savegame(filename, load_now=False)
                for status in sg.loadGame():
                    self.jobStatus.emit(status)
                sg.populate_ids()
                html = self.dict2html(sg.d)
                self.sg = sg
                self.generalData.emit(html.encode("ascii", "xmlcharrefreplace").decode())
                # Push the player's ingredient inventory to the GUI table.
                for count, formid in sg.player_ingrs():
                    self.inventoryItem.emit(count, formid)
                self.newJob.emit("", 0)
            elif job == 'combs':
                alch_skill, fortify_alch, perks, model_ingrs = data[0]
                n = len(model_ingrs) + 1
                # Number of 3-ingredient combinations, C(n, 3).
                if n > 3:
                    f = math.factorial
                    ncombs = f(n)//(f(3)*f(n-3))
                else:
                    ncombs = 1
                self.newJob.emit("combs", ncombs)
                ingr_formids = set()
                for formid, name, count, value, weight, hformid in model_ingrs:
                    ingr_formids.add(formid)
                ingrs = []
                for ingr_id, ingr in skyrimdata.db['INGR'].items():
                    if ingr_id in ingr_formids:
                        ingrs.append(ingr)
                rf = alchemy.RecipeFactory(ingrs)
                recipe_iter = rf.calcRecipesIter(alch_skill, fortify_alch, perks)
                for i, recipe in enumerate(recipe_iter):
                    self.recipeItem.emit(i, recipe)
                    # if i > 100:
                    # break
                self.newJob.emit("", 0)
    def stop(self):
        """Request shutdown with a highest-priority 'stop' job."""
        self.running = False
        self.queue.put((0, 'stop'))
    def dict2html(self, dic):
        """Render the savegame dict *dic* into the localized HTML summary.

        Falls back to the en_US template if no template exists for the
        current locale.  The screenshot is embedded as base64 BMP data,
        and the inventory is grouped and weighed per category.
        """
        template_filename = 'general_'+QtCore.QLocale().name()+'.html'
        if not osp.exists(osp.join(frozen('data'), template_filename)):
            template_filename = 'general_en_US.html'
        buf = BytesIO()
        dic['screenshotImage'].save(buf, format="BMP")
        template = self.env.get_template(template_filename)
        inventory = {v: [] for v in self.inv_types.values()}
        inventory_weight = {v: 0 for v in self.inv_types.values()}
        for inv_item in dic['inventory']:
            # TODO: fix torch (MISC ID 0x0001D4EC)
            if inv_item.item.type not in {'C', 'F'} and inv_item.itemcount>0:
                item_ref = inv_item.item.name
                type_ = self.inv_types.get(inv_item.item.name.type,
                                           self.inv_types['Other'])
                inventory[type_].append((item_ref.FullName,
                                         getattr(item_ref, "Value", 0.0),
                                         getattr(item_ref, "Weight", 0.0),
                                         inv_item.itemcount))
                inventory_weight[type_] += (getattr(item_ref, "Weight", 0.0) *
                                            inv_item.itemcount)
        for item_list in inventory.values():
            item_list.sort()
        total_weight = sum(inventory_weight.values())
        html = template.render(d=dic, screenshotData=
                               base64.b64encode(buf.getvalue()).decode(),
                               inventory=inventory,
                               inventory_weight=inventory_weight,
                               total_weight=total_weight)
        return html
#%% QTableView model
class IngrTable(QtCore.QAbstractTableModel):
    """Qt table model listing the player's ingredients.

    Each row is stored as a tuple
    ``(formid, name, count, value, weight, formid_hex)``; the leading
    ``formid`` is an internal key and never displayed -- ``data()`` and
    ``sort()`` shift the requested column by one to skip it.
    """
    def __init__(self, ingrs=None, parent=None):
        """Create the model.

        BUG FIX: the original signature used a mutable default argument
        (``ingrs=[]``), which is shared by every instance constructed
        without an explicit list.  ``None`` now stands for "empty".
        """
        super(IngrTable, self).__init__(parent)
        self.ingrs = ingrs if ingrs is not None else []
        self.layoutChanged.emit()
        self.headers = [self.tr("Name"),
                        self.tr("#"),
                        self.tr("Value"),
                        self.tr("Weight"),
                        self.tr("FormID")]
        # Current sort state; reapplied after every insertion.
        self.sort_col = 0
        self.sort_order = QtCore.Qt.AscendingOrder
    def rowCount(self, parent=None):
        return len(self.ingrs)
    def columnCount(self, parent=None):
        return len(self.headers)
    def addItem(self, formid, count):
        """Append ingredient *formid* (held *count* times) and re-sort.

        The re-sort emits layoutAboutToBeChanged/layoutChanged itself,
        which also notifies the view about the appended row.
        """
        self.layoutAboutToBeChanged.emit()
        ingr = db['INGR'][formid]
        self.ingrs.append((formid, ingr.FullName, count, ingr.Value, ingr.Weight,
                           "{:08X}".format(formid)))
        self.sort(self.sort_col, self.sort_order)
    def data(self, index, role):
        """Return display text for *index* (DisplayRole only)."""
        if not index.isValid():
            return None
        elif role != QtCore.Qt.DisplayRole:
            return None
        # +1 skips the hidden formid element of the row tuple.
        return self.ingrs[index.row()][index.column() + 1]
    def headerData(self, col, orientation, role):
        """Column titles horizontally; 1-based row numbers vertically."""
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            return self.headers[col]
        if orientation == QtCore.Qt.Vertical and role == QtCore.Qt.DisplayRole:
            return col + 1
        return None
    def sort(self, Ncol, order):
        """Sort table by given column number, remembering the sort state."""
        self.sort_col = Ncol
        self.sort_order = order
        self.layoutAboutToBeChanged.emit()
        # +1 skips the hidden formid element of the row tuple.
        self.ingrs = sorted(self.ingrs, key=operator.itemgetter(Ncol + 1))
        if order == QtCore.Qt.DescendingOrder:
            self.ingrs.reverse()
        self.layoutChanged.emit()
    def clear(self):
        """Remove all rows."""
        self.layoutAboutToBeChanged.emit()
        self.ingrs = []
        self.layoutChanged.emit()
#%% QTableView model
class RecipeTable(QtCore.QAbstractTableModel):
    """Qt table model listing computed alchemy recipes.

    Each row is stored as a tuple
    ``(recipe, name, value, "?", ingredients, effects)``; the leading
    ``recipe`` object is hidden -- ``data()`` and ``sort()`` shift the
    requested column by one to skip it.  The weight column is a "?"
    placeholder.
    """
    def __init__(self, recipes=None, parent=None):
        """Create the model.

        BUG FIX: the original signature used a mutable default argument
        (``recipes=[]``), which is shared by every instance constructed
        without an explicit list.  ``None`` now stands for "empty".
        """
        super(RecipeTable, self).__init__(parent)
        self.recipes = recipes if recipes is not None else []
        self.layoutChanged.emit()
        self.headers = [self.tr("Name"),
                        self.tr("Value"),
                        self.tr("Weight"),
                        self.tr("Ingredients"),
                        self.tr("Effects")]
        # Current sort state; resort() reapplies it on demand.
        self.sort_col = 0
        self.sort_order = QtCore.Qt.AscendingOrder
    def rowCount(self, parent=None):
        return len(self.recipes)
    def columnCount(self, parent=None):
        return len(self.headers)
    def addItem(self, recipe):
        """Append *recipe* as a display row.

        Intentionally emits no layout signals (recipes arrive in bulk);
        callers refresh the view afterwards, e.g. via resort().
        """
        effects = ', '.join(["{}: {}".format(ef.MGEF.FullName, ef.Description)
                             for ef in recipe.effects])
        ingrs = ', '.join([ingr.FullName for ingr in recipe.ingrs])
        self.recipes.append((recipe, recipe.Name, recipe.Value, "?", ingrs,
                             effects))
    def data(self, index, role):
        """Return display text for *index* (DisplayRole only)."""
        if not index.isValid():
            return None
        elif role != QtCore.Qt.DisplayRole:
            return None
        # +1 skips the hidden recipe object of the row tuple.
        return self.recipes[index.row()][index.column() + 1]
    def headerData(self, col, orientation, role):
        """Column titles horizontally; 1-based row numbers vertically."""
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            return self.headers[col]
        if orientation == QtCore.Qt.Vertical and role == QtCore.Qt.DisplayRole:
            return col + 1
        return None
    def resort(self):
        """Reapply the remembered sort state (after bulk addItem calls)."""
        self.layoutAboutToBeChanged.emit()
        self.sort(self.sort_col, self.sort_order)
        self.layoutChanged.emit()
    def sort(self, Ncol, order):
        """Sort table by given column number, remembering the sort state."""
        self.sort_col = Ncol
        self.sort_order = order
        self.layoutAboutToBeChanged.emit()
        # +1 skips the hidden recipe object of the row tuple.
        self.recipes = sorted(self.recipes, key=operator.itemgetter(Ncol + 1))
        if order == QtCore.Qt.DescendingOrder:
            self.recipes.reverse()
        self.layoutChanged.emit()
    def clear(self):
        """Remove all rows."""
        self.layoutAboutToBeChanged.emit()
        self.recipes = []
        self.layoutChanged.emit()
#%% Main window class
class WndMain(QtWidgets.QMainWindow):
    """Main application window: savegame selection, inventory table,
    recipe search and result table, driven by a SavegameThread worker."""
    ### Initialization
    def __init__(self, *args, **kwargs):
        super(WndMain, self).__init__(*args, **kwargs)
        # Setup settings storage
        self.settings = QtCore.QSettings("settings.ini",
                                         QtCore.QSettings.IniFormat)
        # Initialize UI (open main window)
        self.initUI()
        # Logging setup
        logger = logging.getLogger()
        logging.basicConfig(level=logging.DEBUG,
            format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S')
        logger.name = "<app_name>"
        self.logger = logger
        # Threading setup: jobs go out via the priority queue, results
        # come back through the thread's signals.
        self.queue = PriorityQueue()
        self.thread = SavegameThread(self.queue, self)
        # self.thread.finished.connect(self.on_thread_finished)
        self.thread.newJob.connect(self.on_thread_newJob)
        self.thread.jobStatus.connect(self.on_thread_jobStatus)
        self.thread.generalData.connect(self.on_thread_generalData)
        self.thread.inventoryItem.connect(self.on_thread_inventoryItem)
        self.thread.recipeItem.connect(self.on_thread_recipeItem)
        self.thread.start()
        savegames = savegame.getSaveGames()
        # Sort by last modified time first
        savegames = sorted(savegames,
                           key=lambda f: osp.getmtime(f),
                           reverse=True)
        if len(savegames):
            self.comboSavegames.addItem(self.tr("Select savegame"))
            # self.comboSavegames.setCurrentIndex(0)
            for f in savegames:
                self.comboSavegames.addItem(osp.basename(f), f)
            # self.open_savegame(savegames[0])
        # Setup Jinja templating
        self.env = Environment(loader=FileSystemLoader(frozen('data')))
        # NOTE(review): initialized as a list here but replaced with a
        # set in on_thread_newJob('combs'); on_thread_recipeItem calls
        # .add() on it, which only exists on the set -- confirm intent.
        self.recipes = []
    def initUI(self):
        """Load the .ui layout, restore geometry, and build the status
        bar widgets and table models."""
        ui_file = frozen(osp.join('data', 'wndmain.ui'))
        uic.loadUi(ui_file, self)
        # Load window geometry and state
        self.restoreGeometry(self.settings.value("geometry", ""))
        self.restoreState(self.settings.value("windowState", ""))
        # Status bar
        statusBar = self.statusBar()
        self.progressBar = QtWidgets.QProgressBar(statusBar)
        statusBar.addPermanentWidget(self.progressBar, 0)
        self.progressCancel = QtWidgets.QPushButton(statusBar)
        self.progressCancel.setText(self.tr("Cancel"))
        self.progressCancel.setEnabled(False)
        statusBar.addPermanentWidget(self.progressCancel, 0)
        # Table models
        self.tableIngrModel = IngrTable([], self.tableIngr)
        self.tableIngr.setModel(self.tableIngrModel)
        self.tableIngr.selectionModel().selectionChanged.connect(
            self.on_tableIngr_selectionChanged)
        self.tableIngr.sortByColumn(0, QtCore.Qt.AscendingOrder)
        self.tableRecipeModel = RecipeTable([], self.tableRecipes)
        self.tableRecipes.setModel(self.tableRecipeModel)
        # self.tableRecipes.selectionModel().selectionChanged.connect(
        #         self.on_tableRecipes_selectionChanged)
        self.tableRecipes.sortByColumn(0, QtCore.Qt.AscendingOrder)
        self.show()
    ### Function overrides:
    def closeEvent(self, e):
        # Write window geometry and state to config file
        self.settings.setValue("geometry", self.saveGeometry())
        self.settings.setValue("windowState", self.saveState())
        e.accept()
    ### Qt slots
    @QtCore.Slot()
    def on_thread_finished(self):
        # Debug helper; not connected by default (see __init__).
        QtWidgets.QMessageBox.information(self,
            self.tr("Information"),
            self.tr("Thread finished."))
    @QtCore.Slot(int)
    def on_comboSavegames_currentIndexChanged(self, index):
        # itemData is None for the "Select savegame" placeholder entry.
        filename = self.comboSavegames.itemData(index)
        if filename is not None:
            self.open_savegame(filename)
    ### Core functionality
    def open_savegame(self, filename):
        """Queue a savegame-parsing job for the worker thread."""
        self.queue.put((2, 'savegame', filename))
    @QtCore.Slot(str, int)
    def on_thread_newJob(self, job, maximum):
        """React to job start/finish: reset progress widgets per job.
        An empty job name means "previous job finished"."""
        if job == 'load':
            self.statusBar().showMessage(self.tr("Loading data..."))
            self.progressBar.setMaximum(0)
            self.progressBar.setMinimum(0)
            self.progressCancel.setEnabled(False)
        elif job == '':
            self.progressBar.setMaximum(1)
            self.progressCancel.setEnabled(False)
            self.statusBar().clearMessage()
        elif job == 'savegame':
            self.tableIngr.model().clear()
            self.progressComb.setValue(0)
            self.statusBar().showMessage(self.tr("Loading savegame..."))
            self.progressBar.setMaximum(maximum)
        elif job == 'combs':
            # Reset recipe accumulation before results stream in.
            self.recipes = set()
            self.progressComb.setValue(0)
            self.statusBar().showMessage(self.tr("Combining ingredients..."))
            self.progressComb.setMaximum(maximum)
            self.progressBar.setMaximum(maximum)
    @QtCore.Slot(int)
    def on_thread_jobStatus(self, status):
        self.progressBar.setValue(status)
    @QtCore.Slot(str)
    def on_thread_generalData(self, html):
        self.textGeneral.setHtml(html)
    @QtCore.Slot(int, int)
    def on_thread_inventoryItem(self, count, formid):
        """Add an ingredient row and update the combination maximum
        C(n, 3) for the combination progress bar."""
        model = self.tableIngr.model()
        model.addItem(formid, count)
        n = model.rowCount() + 1
        if n > 3:
            f = math.factorial
            self.progressComb.setMaximum(f(n)//(f(3)*f(n-3)))
    @QtCore.Slot(QtCore.QItemSelection, QtCore.QItemSelection)
    def on_tableIngr_selectionChanged(self, selected, deselected):
        """Render the selected ingredient's detail template into the
        side panel (falls back to en_US when no localized template)."""
        formid = self.tableIngrModel.ingrs[selected.indexes()[0].row()][0]
        ingr = skyrimdata.db['INGR'][formid]
        template_filename = 'ingr_'+QtCore.QLocale().name()+'.html'
        if not osp.exists(osp.join(frozen('data'), template_filename)):
            template_filename = 'ingr_en_US.html'
        template = self.env.get_template(template_filename)
        val_weight = ingr.Value/ingr.Weight
        count = self.tableIngrModel.ingrs[selected.indexes()[0].row()][2]
        weight_count = ingr.Weight * count
        html = template.render(ingr=ingr, val_weight=val_weight, count=count,
                               weight_count=weight_count)
        self.textIngr.setHtml(html)
    @QtCore.Slot()
    def on_btnSearchComb_clicked(self):
        """Collect skill/perk settings from the UI and queue a recipe
        combination job."""
        self.recipes = []
        self.tableRecipes.model().clear()
        alch_skill = self.spinAlchSkill.value()
        fortify_alch = self.spinAlchFortify.value()
        # Alchemist perk rank (0-5) -> perk form id (0 means no perk).
        perks = set([[0, 0xbe127, 0xc07ca, 0xc07cb, 0xc07cc, 0xc07cd]
                     [self.spinAlchemist.value()]])
        if self.chkPhysician.isChecked():
            perks.add(0x58215)
        if self.chkBenefactor.isChecked():
            perks.add(0x58216)
        if self.chkPoisoner.isChecked():
            perks.add(0x58217)
        if self.chkPurity.isChecked():
            perks.add(0x5821d)
        self.queue.put((4, 'combs', [alch_skill, fortify_alch, perks,
                                     self.tableIngr.model().ingrs]))
    @QtCore.Slot(int, alchemy.Recipe)
    def on_thread_recipeItem(self, i, recipe):
        """Receive one computed recipe; keep and display valid ones."""
        self.progressBar.setValue(i)
        if recipe.valid:
            model = self.tableRecipes.model()
            model.addItem(recipe)
            self.recipes.add(recipe)
            self.progressComb.setValue(len(self.recipes))
#%% Main execution
# Runs when executing script directly (not importing).
if __name__ == '__main__':
    ### Properly register window icon
    myappid = u'br.com.dapaixao.skyalchemy.1.0'
    ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
    ### Grab existing QApplication
    # Only one QApplication is allowed per process. This allows running inside
    # Qt-based IDEs like Spyder.
    existing = QtWidgets.qApp.instance()
    if existing:
        app = existing
    else:
        app = QtWidgets.QApplication(sys.argv)
    ### Parsing command-line arguments/options
    parser = argparse.ArgumentParser()
    parser.add_argument("--lang", nargs="?",
                        help="Language to run (override system language)")
    try:
        args = parser.parse_args()
    # NOTE(review): bare except also swallows SystemExit from argparse's
    # own --help handling; consider `except SystemExit:` instead.
    except:
        parser.print_help()
        sys.exit(-1)
    lang = args.lang or QtCore.QLocale.system().name()
    ### Setup internationalization/localization (i18n/l10n)
    translator = QtCore.QTranslator()
    if translator.load(frozen(osp.join("data", "main_{}.qm".format(lang)))):
        QtWidgets.qApp.installTranslator(translator)
    QtCore.QLocale().setDefault(QtCore.QLocale(lang))
    ### Create main window and run
    wnd = WndMain()
    if existing:
        self = wnd # Makes it easier to debug with Spyder's F9 inside a class
    else:
        sys.exit(app.exec_())
| StarcoderdataPython |
1973896 | from dataclasses import dataclass
from bindings.ows.nil_value_type import NilValueType
# XML namespace for the OWS 2.0 bindings in this module.
__NAMESPACE__ = "http://www.opengis.net/ows/2.0"
@dataclass
class NilValue(NilValueType):
    """Dataclass binding for the OWS 2.0 "nilValue" XML element."""
    class Meta:
        # Serialized element name and its XML namespace.
        name = "nilValue"
        namespace = "http://www.opengis.net/ows/2.0"
| StarcoderdataPython |
9740801 | # List of current season shows (fetched 12/30/2018).
#
# If any show on this list returns blank, the scraper is probably
# broken, and we should be noisy about it.
#
# curl -s https://horriblesubs.info/current-season/ | \
# grep -Po '/shows/[^"]+' | \
# sed 's:$:/:' | \
# while read show_url; do \
# echo "('$show_url', $(curl -sL https://horriblesubs.info$show_url | grep -Po '(?<=hs_showid = )\d+')),"
# sleep 1
# done
KNOWN_SHOWS = [
    # (show_url, hs_showid) pairs, as scraped from horriblesubs.info
    # (see the curl pipeline in the header comment above).
    ('/shows/ace-attorney-s2/', 1173),
    ('/shows/akanesasu-shoujo/', 1155),
    ('/shows/anima-yell/', 1181),
    ('/shows/bakumatsu/', 1163),
    ('/shows/banana-fish/', 1112),
    ('/shows/beelzebub-jou-no-okinimesu-mama/', 1190),
    ('/shows/black-clover/', 959),
    ('/shows/bonobono/', 694),
    ('/shows/boruto-naruto-next-generations/', 869),
    ('/shows/cardfight-vanguard-2018/', 1101),
    ('/shows/chuukan-kanriroku-tonegawa/', 1109),
    ('/shows/conception/', 1186),
    ('/shows/dakaretai-otoko-1-i-ni-odosarete-imasu/', 1168),
    ('/shows/detective-conan/', 97),
    ('/shows/double-decker-doug-and-kirill/', 1147),
    ('/shows/fairy-tail-final-season/', 1179),
    ('/shows/gaikotsu-shotenin-honda-san/', 1183),
    ('/shows/gakuen-basara/', 1172),
    ('/shows/gegege-no-kitarou-2018/', 1058),
    ('/shows/goblin-slayer/', 1175),
    ('/shows/golden-kamuy/', 1090),
    ('/shows/gurazeni/', 1074),
    ('/shows/himote-house/', 1182),
    ('/shows/hinomaru-sumo/', 1166),
    ('/shows/irozuku-sekai-no-ashita-kara/', 1171),
    ('/shows/jingai-san-no-yome/', 1158),
    ('/shows/jojos-bizarre-adventure-golden-wind/', 1169),
    ('/shows/karakuri-circus/', 1188),
    ('/shows/kaze-ga-tsuyoku-fuiteiru/', 1159),
    ('/shows/ken-en-ken-aoki-kagayaki/', 1157),
    ('/shows/kishuku-gakkou-no-juliet/', 1170),
    ('/shows/kitsune-no-koe/', 1193),
    ('/shows/merc-storia-mukiryoku-shounen-to-bin-no-naka-no-shoujo/', 1187),
    ('/shows/one-piece/', 347),
    ('/shows/ore-ga-suki-nano-wa-imouto-dakedo-imouto-ja-nai/', 1185),
    ('/shows/radiant/', 1174),
    ('/shows/release-the-spyce/', 1178),
    ('/shows/rerided-tokigoe-no-derrida/', 1151),
    ('/shows/saint-seiya-saintia-shou/', 1195),
    ('/shows/seishun-buta-yarou-wa-bunny-girl-senpai-no-yume-wo-minai/', 1161),
    ('/shows/senran-kagura-shinovi-master-tokyo-youma-hen/', 1191),
    ('/shows/shounen-ashibe-go-go-goma-chan/', 667),
    ('/shows/sora-to-umi-no-aida/', 1160),
    ('/shows/souten-no-ken-re-genesis/', 1062),
    ('/shows/ssss-gridman/', 1177),
    ('/shows/sword-art-online-alicization/', 1176),
    ('/shows/tensei-shitara-slime-datta-ken/', 1152),
    ('/shows/the-idolmster-side-m-wake-atte-mini/', 1184),
    ('/shows/thunderbolt-fantasy-s2/', 1154),
    ('/shows/toaru-majutsu-no-index-iii/', 1189),
    ('/shows/tokyo-ghoul-re/', 1068),
    ('/shows/tonari-no-kyuuketsuki-san/', 1164),
    ('/shows/tsurune/', 1192),
    ('/shows/uchi-no-maid-ga-uzasugiru/', 1167),
    ('/shows/uchuu-senkan-tiramisu-s2/', 1156),
    ('/shows/ulysses-jeanne-darc-to-renkin-no-kishi/', 1180),
    ('/shows/yagate-kimi-ni-naru/', 1165),
    ('/shows/yu-gi-oh-vrains/', 901),
    ('/shows/zombieland-saga/', 1162),
]
| StarcoderdataPython |
3200482 | from collections import defaultdict
import mock
from searx.engines import soundcloud
from searx.testing import SearxTestCase
from searx.url_utils import quote_plus
class TestSoundcloudEngine(SearxTestCase):
    """Unit tests for the searx soundcloud engine (request/response parsing).

    The original file duplicated a ~60-line JSON fixture twice, differing only
    in the entry ``"kind"`` field; it is now a single parameterized template.
    """

    # One-item "collection" payload as returned by the soundcloud API.
    # The entry kind is a %s placeholder because only "track" entries
    # must produce results.
    RESULT_JSON_TEMPLATE = """
    {
    "collection": [
      {
        "kind": "%s",
        "id": 159723640,
        "created_at": "2014/07/22 00:51:21 +0000",
        "user_id": 2976616,
        "duration": 303780,
        "commentable": true,
        "state": "finished",
        "original_content_size": 13236349,
        "last_modified": "2015/01/31 15:14:50 +0000",
        "sharing": "public",
        "tag_list": "seekae flume",
        "permalink": "seekae-test-recognise-flume-re-work",
        "streamable": true,
        "embeddable_by": "all",
        "downloadable": true,
        "purchase_url": "http://www.facebook.com/seekaemusic",
        "label_id": null,
        "purchase_title": "Seekae",
        "genre": "freedownload",
        "title": "This is the title",
        "description": "This is the content",
        "label_name": "Future Classic",
        "release": "",
        "track_type": "remix",
        "key_signature": "",
        "isrc": "",
        "video_url": null,
        "bpm": null,
        "release_year": 2014,
        "release_month": 7,
        "release_day": 22,
        "original_format": "mp3",
        "license": "all-rights-reserved",
        "uri": "https://api.soundcloud.com/tracks/159723640",
        "user": {
            "id": 2976616,
            "kind": "user",
            "permalink": "flume",
            "username": "Flume",
            "last_modified": "2014/11/24 19:21:29 +0000",
            "uri": "https://api.soundcloud.com/users/2976616",
            "permalink_url": "http://soundcloud.com/flume",
            "avatar_url": "https://i1.sndcdn.com/avatars-000044475439-4zi7ii-large.jpg"
        },
        "permalink_url": "http://soundcloud.com/this.is.the.url",
        "artwork_url": "https://i1.sndcdn.com/artworks-000085857162-xdxy5c-large.jpg",
        "waveform_url": "https://w1.sndcdn.com/DWrL1lAN8BkP_m.png",
        "stream_url": "https://api.soundcloud.com/tracks/159723640/stream",
        "download_url": "https://api.soundcloud.com/tracks/159723640/download",
        "playback_count": 2190687,
        "download_count": 54856,
        "favoritings_count": 49061,
        "comment_count": 826,
        "likes_count": 49061,
        "reposts_count": 15910,
        "attachments_uri": "https://api.soundcloud.com/tracks/159723640/attachments",
        "policy": "ALLOW"
      }
    ],
    "total_results": 375750,
    "next_href": "https://api.soundcloud.com/search?&q=test",
    "tx_id": ""
    }
    """

    def _make_response_json(self, kind):
        """Return the fixture payload whose single entry has the given kind."""
        return self.RESULT_JSON_TEMPLATE % kind

    def test_request(self):
        """request() must build a soundcloud.com search URL containing the query."""
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 1
        params = soundcloud.request(query, dicto)
        self.assertIn('url', params)
        self.assertIn(query, params['url'])
        self.assertIn('soundcloud.com', params['url'])

    def test_response(self):
        """response() must parse "track" entries and ignore everything else."""
        # Invalid inputs propagate AttributeError (no .text attribute).
        self.assertRaises(AttributeError, soundcloud.response, None)
        self.assertRaises(AttributeError, soundcloud.response, [])
        self.assertRaises(AttributeError, soundcloud.response, '')
        self.assertRaises(AttributeError, soundcloud.response, '[]')

        # Payloads without a usable collection yield no results.
        response = mock.Mock(text='{}')
        self.assertEqual(soundcloud.response(response), [])
        response = mock.Mock(text='{"data": []}')
        self.assertEqual(soundcloud.response(response), [])

        # A "track" entry is turned into exactly one result.
        response = mock.Mock(text=self._make_response_json('track'))
        results = soundcloud.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['title'], 'This is the title')
        self.assertEqual(results[0]['url'], 'http://soundcloud.com/this.is.the.url')
        self.assertEqual(results[0]['content'], 'This is the content')
        self.assertIn(quote_plus('https://api.soundcloud.com/tracks/159723640'),
                      results[0]['embedded'])

        # The identical payload with a non-track kind must be skipped.
        response = mock.Mock(text=self._make_response_json('user'))
        results = soundcloud.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 0)

        # An empty collection yields no results.
        json = """
        {
        "collection": [],
        "total_results": 375750,
        "next_href": "https://api.soundcloud.com/search?&q=test",
        "tx_id": ""
        }
        """
        response = mock.Mock(text=json)
        results = soundcloud.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 0)
| StarcoderdataPython |
5167570 | <gh_stars>0
"""
setup_wizard template tags
==========================
Tags for including setup_wizard app javascript assets ina template. To use:
.. code-block:: html
{% load setup_wizard_tags %}
<!-- Render inclusion tag for frontend JS elements -->
{% setup_wizard_assets %}
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django import template
from .. import hooks
from kolibri.core.webpack.utils import webpack_asset_render
register = template.Library()
@register.simple_tag()
def setup_wizard_assets():
    """
    Using in a template will inject script tags that include the javascript assets defined
    by any concrete hook that subclasses SetupWizardSyncHook.
    :return: HTML of script tags to insert into setup_wizard/setup_wizard.html
    """
    return webpack_asset_render(hooks.SetupWizardSyncHook, is_async=False)
@register.simple_tag()
def setup_wizard_async_assets():
    """
    Using in a template will inject script tags that include the javascript assets defined
    by any concrete hook that subclasses SetupWizardAsyncHook (loaded asynchronously).
    :return: HTML of script tags to insert into setup_wizard/setup_wizard.html
    """
    return webpack_asset_render(hooks.SetupWizardAsyncHook, is_async=True)
| StarcoderdataPython |
3484692 | <reponame>etomteknoloji/charm-trove
# Copyright 2016 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import json
import subprocess
import time
import troveclient.client as trove_client
import charmhelpers.contrib.openstack.amulet.deployment as amulet_deployment
import charmhelpers.contrib.openstack.amulet.utils as os_amulet_utils
# Use DEBUG to turn on debug logging
u = os_amulet_utils.OpenStackAmuletUtils(os_amulet_utils.DEBUG)
class TroveBasicDeployment(amulet_deployment.OpenStackAmuletDeployment):
    """Amulet tests on a basic trove deployment."""

    def __init__(self, series, openstack=None, source=None, stable=False):
        """Deploy the entire test environment."""
        super(TroveBasicDeployment, self).__init__(series, openstack,
                                                   source, stable)
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()
        u.log.info('Waiting on extended status checks...')
        exclude_services = ['mysql', 'mongodb']
        self._auto_wait_for_status(exclude_services=exclude_services)
        self._initialize_tests()

    def _add_services(self):
        """Add services

           Add the services that we're testing, where trove is local,
           and the rest of the service are from lp branches that are
           compatible with the local charm (e.g. stable or next).
           """
        this_service = {'name': 'trove'}
        other_services = [{'name': 'mysql'},
                          {'name': 'rabbitmq-server'},
                          {'name': 'keystone'},]
        super(TroveBasicDeployment, self)._add_services(this_service,
                                                        other_services)

    def _add_relations(self):
        """Add all of the relations for the services."""
        relations = {
            'trove:shared-db': 'mysql:shared-db',
            'trove:amqp': 'rabbitmq-server:amqp',
            'trove:identity-service': 'keystone:identity-service',
            'keystone:shared-db': 'mysql:shared-db',
        }
        super(TroveBasicDeployment, self)._add_relations(relations)

    def _configure_services(self):
        """Configure all of the services."""
        keystone_config = {'admin-password': '<PASSWORD>',
                           'admin-token': '<PASSWORD>'}
        configs = {'keystone': keystone_config}
        super(TroveBasicDeployment, self)._configure_services(configs)

    def _get_token(self):
        """Return the current keystone auth token from the service catalog."""
        return self.keystone.service_catalog.catalog['token']['id']

    def _initialize_tests(self):
        """Perform final initialization before tests get run."""
        # Access the sentries for inspecting service units
        self.trove_sentry = self.d.sentry['trove'][0]
        self.mysql_sentry = self.d.sentry['mysql'][0]
        self.keystone_sentry = self.d.sentry['keystone'][0]
        self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
        u.log.debug('openstack release val: {}'.format(
            self._get_openstack_release()))
        u.log.debug('openstack release str: {}'.format(
            self._get_openstack_release_string()))
        self.trove_svcs = ['trove-api', 'trove-taskmanager', 'trove-conductor',]
        # Authenticate admin with keystone endpoint
        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
                                                      user='admin',
                                                      password='<PASSWORD>',
                                                      tenant='admin')
        # Authenticate admin with trove endpoint
        trove_ep = self.keystone.service_catalog.url_for(
            service_type='database',
            interface='publicURL')
        keystone_ep = self.keystone.service_catalog.url_for(
            service_type='identity',
            interface='publicURL')
        self.trove = trove_client.Client(
            version='1.0',
            auth_url=keystone_ep,
            username="admin",
            password="<PASSWORD>",
            tenant_name="admin",
            endpoint=trove_ep)

    def check_and_wait(self, check_command, interval=2, max_wait=200,
                       desc=None):
        """Poll ``check_command`` until it succeeds or ``max_wait`` elapses.

        Fix over the original: the loop now exits as soon as the check
        passes, and only raises once the timeout is exceeded while the
        check is still failing (the old ``or`` condition kept looping and
        raised even after a late success).
        """
        waited = 0
        while not check_command():
            if waited > max_wait:
                raise Exception('cmd failed {}'.format(check_command))
            if desc:
                u.log.debug(desc)
            time.sleep(interval)
            waited = waited + interval

    def _run_action(self, unit_id, action, *args):
        """Queue a juju action on a unit and return the queued action id."""
        command = ["juju", "action", "do", "--format=json", unit_id, action]
        command.extend(args)
        print("Running command: %s\n" % " ".join(command))
        output = subprocess.check_output(command)
        output_json = output.decode(encoding="UTF-8")
        data = json.loads(output_json)
        action_id = data[u'Action queued with id']
        return action_id

    def _wait_on_action(self, action_id):
        """Block until the given juju action completes; True on success."""
        command = ["juju", "action", "fetch", "--format=json", action_id]
        while True:
            try:
                output = subprocess.check_output(command)
            except Exception as e:
                print(e)
                return False
            output_json = output.decode(encoding="UTF-8")
            data = json.loads(output_json)
            if data[u"status"] == "completed":
                return True
            elif data[u"status"] == "failed":
                return False
            time.sleep(2)

    def test_services(self):
        """Verify the expected services are running on the corresponding
           service units."""
        u.log.debug('Checking system services on units...')
        service_names = {
            self.trove_sentry: self.trove_svcs,
        }
        ret = u.validate_services_by_name(service_names)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
        u.log.debug('OK')

    def test_service_catalog(self):
        """Verify that the service catalog endpoint data is valid."""
        u.log.debug('Checking keystone service catalog data...')
        endpoint_check = {
            'adminURL': u.valid_url,
            'id': u.not_null,
            'region': 'RegionOne',
            'publicURL': u.valid_url,
            'internalURL': u.valid_url
        }
        expected = {
            'database': [endpoint_check],
        }
        actual = self.keystone.service_catalog.get_endpoints()
        ret = u.validate_svc_catalog_endpoint_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
        u.log.debug('OK')

    def test_trove_api_endpoint(self):
        """Verify the trove api endpoint data."""
        u.log.debug('Checking trove api endpoint data...')
        endpoints = self.keystone.endpoints.list()
        u.log.debug(endpoints)
        admin_port = internal_port = public_port = '8779'
        expected = {'id': u.not_null,
                    'region': 'RegionOne',
                    'adminurl': u.valid_url,
                    'internalurl': u.valid_url,
                    'publicurl': u.valid_url,
                    'service_id': u.not_null}
        ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
                                       public_port, expected)
        if ret:
            message = 'Trove endpoint: {}'.format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
        u.log.debug('OK')

    def test_trove_identity_relation(self):
        """Verify the trove to keystone identity-service relation data"""
        u.log.debug('Checking trove to keystone identity-service '
                    'relation data...')
        unit = self.trove_sentry
        relation = ['identity-service', 'keystone:identity-service']
        trove_ip = unit.relation(
            'identity-service',
            'keystone:identity-service')['private-address']
        trove_endpoint = "http://{}:8779/v1.0/%(tenant_id)s".format(trove_ip)
        expected = {
            'admin_url': trove_endpoint,
            'internal_url': trove_endpoint,
            'private-address': trove_ip,
            'public_url': trove_endpoint,
            'region': 'RegionOne',
            'service': 'trove',
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('trove identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
        u.log.debug('OK')

    def test_keystone_trove_identity_relation(self):
        """Verify the keystone to trove identity-service relation data"""
        u.log.debug('Checking keystone:trove identity relation data...')
        unit = self.keystone_sentry
        relation = ['identity-service', 'trove:identity-service']
        id_relation = unit.relation('identity-service',
                                    'trove:identity-service')
        id_ip = id_relation['private-address']
        expected = {
            'admin_token': '<PASSWORD>',
            'auth_host': id_ip,
            'auth_port': "35357",
            'auth_protocol': 'http',
            'private-address': id_ip,
            'service_host': id_ip,
            'service_password': u.not_null,
            'service_port': "5000",
            'service_protocol': 'http',
            'service_tenant': 'services',
            'service_tenant_id': u.not_null,
            'service_username': 'trove',
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('keystone identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
        u.log.debug('OK')

    def test_trove_amqp_relation(self):
        """Verify the trove to rabbitmq-server amqp relation data"""
        u.log.debug('Checking trove:rabbitmq amqp relation data...')
        unit = self.trove_sentry
        relation = ['amqp', 'rabbitmq-server:amqp']
        expected = {
            'username': 'trove',
            'private-address': u.valid_ip,
            'vhost': 'openstack'
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('trove amqp', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
        u.log.debug('OK')

    def test_amqp_trove_relation(self):
        """Verify the rabbitmq-server to trove amqp relation data"""
        u.log.debug('Checking rabbitmq:trove amqp relation data...')
        unit = self.rabbitmq_sentry
        relation = ['amqp', 'trove:amqp']
        expected = {
            'hostname': u.valid_ip,
            'private-address': u.valid_ip,
            # Reconstructed: the redacted placeholder here was not valid
            # Python; generated passwords are validated as non-null, like
            # 'service_password' in the keystone relation test above.
            'password': u.not_null,
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('rabbitmq amqp', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
        u.log.debug('OK')

    def test_restart_on_config_change(self):
        """Verify that the specified services are restarted when the config
           is changed.
           """
        sentry = self.trove_sentry
        juju_service = 'trove'
        # Expected default and alternate values
        set_default = {'debug': 'False'}
        set_alternate = {'debug': 'True'}
        # Services which are expected to restart upon config change,
        # and corresponding config files affected by the change
        conf_file = '/etc/trove/trove.conf'
        services = {svc: conf_file for svc in self.trove_svcs}
        # Make config change, check for service restarts
        u.log.debug('Making config change on {}...'.format(juju_service))
        mtime = u.get_sentry_time(sentry)
        self.d.configure(juju_service, set_alternate)
        sleep_time = 50
        # .items() instead of the Python-2-only .iteritems() so the test
        # also runs under Python 3 (behaviour is identical under Python 2).
        for s, conf_file in services.items():
            u.log.debug("Checking that service restarted: {}".format(s))
            if not u.validate_service_config_changed(sentry, mtime, s,
                                                     conf_file,
                                                     retry_count=4,
                                                     retry_sleep_time=20,
                                                     sleep_time=sleep_time):
                self.d.configure(juju_service, set_default)
                msg = "service {} didn't restart after config change".format(s)
                amulet.raise_status(amulet.FAIL, msg=msg)
            sleep_time = 0
        self.d.configure(juju_service, set_default)
        u.log.debug('OK')
376700 | '''
pi433.util
(c) <NAME>, 2017 [MIT License, see LICENSE]
Utility / helper functions
'''
import uuid
from gevent import socket
def byteify(str_input):
    '''Recursively encode every `unicode` string in `str_input` to utf-8.

    Dicts and lists are rebuilt with converted contents; any other value
    is returned unchanged.  (Python 2 only: relies on `unicode` and
    `dict.iteritems`.)
    '''
    if isinstance(str_input, dict):
        converted = {}
        for key, value in str_input.iteritems():
            converted[byteify(key)] = byteify(value)
        return converted
    if isinstance(str_input, list):
        return [byteify(item) for item in str_input]
    if isinstance(str_input, unicode):
        return str_input.encode('utf-8')
    return str_input
def makeSerial(dev_name):
    '''Return a deterministic UUID string for `dev_name`.

    The same device name always yields the same serial (uuid3 over the
    X.500 namespace).

    Args:
        dev_name (str): Unique device name

    Derived from https://github.com/n8henrie/fauxmo
    '''
    serial = uuid.uuid3(uuid.NAMESPACE_X500, dev_name)
    return str(serial)
def getLocalIP():
    '''Return this machine's local IP address as a string.

    Derived from https://github.com/n8henrie/fauxmo
    '''
    ip_address = socket.gethostbyname(socket.gethostname())
    # Some Linux setups resolve the hostname to loopback; in that case,
    # discover the outbound interface address via a throwaway UDP socket.
    # (See SO question #166506 by @UnkwnTech.)
    if ip_address in ('127.0.1.1', '127.0.0.1', 'localhost'):
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        probe.connect(('8.8.8.8', 0))
        ip_address = probe.getsockname()[0]
        probe.close()
    return ip_address
3294646 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ModelCheckpoint callback defination."""
import logging
import vega
from vega.common import Config
from vega.common import ClassFactory, ClassType
from vega.networks.model_config import ModelConfig
from vega.model_zoo import ModelZoo
from .callback import Callback
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class ModelBuilder(Callback):
    """Trainer callback that builds the model and moves it to the device.

    Runs at trainer initialization with priority 200.
    """

    def __init__(self):
        """Initialize ModelBuilder callback."""
        super(ModelBuilder, self).__init__()
        self.priority = 200

    def init_trainer(self, logs=None):
        """Attach a model to the trainer.

        If the trainer has no model yet, one is built from the model
        description; the description is recorded back on the trainer and
        the model is placed on the configured device.
        """
        model = self.trainer.model
        if not model:
            model = self._init_model()
        if hasattr(model, "desc"):
            self.trainer.model_desc = model.desc
        self.trainer.model = self._set_device(model)

    def _init_model(self):
        """Load model desc from save path and parse to model."""
        config = Config(ModelConfig().to_dict())
        if self.trainer.model_desc:
            # A description set directly on the trainer overrides the
            # globally configured one.
            config.model_desc = self.trainer.model_desc
        if not config.model_desc:
            raise Exception("Failed to Init model, can not get model description.")
        if self.trainer.load_weights_file:
            # Trainer kwargs take precedence over the configured
            # pretrained model file.
            config.pretrained_model_file = self.trainer.config.kwargs.get(
                "pretrained_model_file") or config.pretrained_model_file
        return ModelZoo.get_model(**config)

    def _set_device(self, model):
        """Move the model to GPU/NPU when running on the torch backend."""
        if vega.is_torch_backend():
            if vega.is_gpu_device():
                model = model.cuda()
            elif vega.is_npu_device():
                model = model.to(vega.get_devices())
        return model
| StarcoderdataPython |
9744709 | <filename>src/bgraph/utils.py<gh_stars>1-10
import functools
import logging
import pathlib
from bgraph.types import Dict, List, Any, Generator, Tuple, Callable, Union
def recurse(mapping: Dict[Any, Any]) -> Generator[Tuple[Any, Any], None, None]:
    """Depth-first walk of a nested mapping, yielding every leaf (key, value) pair.

    Keys whose value is itself a dict are not yielded; only their nested
    leaves are.

    :param mapping: A mapping to unroll
    """
    for key, value in mapping.items():
        if isinstance(value, dict):
            # isinstance (rather than the old `type(value) is dict`) also
            # recurses into dict subclasses such as OrderedDict.
            yield from recurse(value)
        else:
            yield key, value
def create_logger(logger_name: str) -> logging.Logger:
    """Return the logger named `logger_name`, attaching a console handler once.

    Repeated calls with the same name reuse the already-attached handler,
    so messages are not duplicated.
    """
    logger = logging.getLogger(logger_name)
    if logger.handlers:
        # Already configured by an earlier call -- nothing to do.
        return logger
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    ))
    logger.addHandler(console_handler)
    return logger
def no_except(f: Callable) -> Callable:
    """Decorator: run `f`, logging (instead of propagating) any Exception.

    On failure the exception is logged with its traceback and the wrapper
    returns None.
    """
    logger: logging.Logger = create_logger(__name__)

    @functools.wraps(f)
    def safe_call(*args, **kwargs):
        try:
            result = f(*args, **kwargs)
        except Exception as exc:
            logger.exception(exc)
            return None
        return result

    return safe_call
def clean_mirror_path(mirror: str) -> Union[str, pathlib.Path]:
    """Normalize a user-supplied mirror location for the app.

    URLs (anything containing "http") stay strings, minus at most one
    trailing '/'; everything else becomes a pathlib.Path.

    :param mirror: User input
    :return: Either a Path object or a string
    """
    if "http" in mirror:
        # URL form: drop a single trailing slash if present.
        return mirror[:-1] if mirror.endswith("/") else mirror
    return pathlib.Path(mirror)
| StarcoderdataPython |
1972109 | <reponame>sparkslabs/pyxie-bob
#
# Copyright 2016 British Broadcasting Corporation and Contributors(1)
#
# (1) Contributors are listed in the AUTHORS file (please extend AUTHORS,
# not this header)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pyxiebob.settings as settings
from django.contrib.auth.models import User
# Read in the word list once at import time.  Using a context manager
# fixes the original's leaked file handle (the file was opened inside the
# comprehension and never closed).  The length filter is kept byte-for-byte
# on the raw line -- because the newline counts toward len(line), this
# effectively keeps words of <= 9 characters on all but the last line;
# TODO confirm that was the intent before "fixing" it.
with open(settings.WORD_LIST_FILE, 'r') as _word_file:
    WORD_LIST = [
        line.strip().lower()
        for line in _word_file
        if len(line) < 11
    ]
# Return a single random word from the list
def random_word():
    """Return one uniformly random word from WORD_LIST."""
    return random.choice(WORD_LIST)
# Return a number of random words from the list, all joined with _s.
def random_phrase(word_count):
    """Return `word_count` random words joined by underscores.

    Same draws in the same order as the original append loop, expressed
    as a single join over a generator.
    """
    return '_'.join(random_word() for _ in range(word_count))
# Returns a random unused username
def random_username():
    """Generate a random username (< 16 chars) that no User already has.

    Loops until a candidate passes both the length limit and a database
    uniqueness check.
    """
    while True:
        username = random_phrase(settings.WORDS_IN_USERNAMES)
        if len(username) >= 16:
            continue
        # QuerySet.exists() avoids counting every matching row just to
        # compare against zero.
        if not User.objects.filter(username=username).exists():
            return username
# Returns a random unused password
def random_password():
    """Return a random passphrase of WORDS_IN_PASSWORDS words."""
    return random_phrase(settings.WORDS_IN_PASSWORDS)
# Returns a random edit phrase
def random_edit_phrase():
    """Return a random phrase of WORDS_IN_EDIT_PHRASES words."""
    return random_phrase(settings.WORDS_IN_EDIT_PHRASES)
| StarcoderdataPython |
9652238 | <reponame>roadt/raspberryjammod-minetest<gh_stars>10-100
#
# Code under the MIT license by <NAME>
#
from mc import *
import text
import datetime
import time
import sys
import fonts
import ast
foreground = SEA_LANTERN # this needs Minecraft 1.8
background = AIR
def parseBlock(s):
    """Parse a block specification from the command line.

    Accepts either a Python literal (e.g. "35" or "(35, 2)") or the name
    of a block constant imported from the mc module (case-insensitive).

    Raises KeyError if `s` is neither a literal nor a known block name.
    """
    try:
        return ast.literal_eval(s)
    except (ValueError, SyntaxError):
        # Narrowed from a bare `except:` so interrupts and lookup errors
        # are no longer swallowed; these two are what literal_eval raises
        # for non-literal input.
        return globals()[s.upper()]
# Optional command-line overrides for the clock's draw colors; bare
# excepts are deliberate best-effort parsing: missing or bad argv values
# fall back to the module defaults.
try:
    foreground = parseBlock(sys.argv[1])
except:
    pass
try:
    background = parseBlock(sys.argv[2])
except:
    pass
mc = Minecraft()
# Anchor the clock at the player's current tile, oriented to face the
# direction the player is looking.
pos = mc.player.getTilePos()
forward = text.angleToTextDirection(mc.player.getRotation())
prevTime = ""
while True:
    # 12-hour clock string, with the leading zero shown as a space.
    curTime = datetime.datetime.now().strftime("%I:%M:%S %p")
    if curTime[0]=='0':
        curTime = ' ' + curTime[1:]
    if prevTime != curTime:
        # Only redraw from the first character that changed (each glyph
        # is 8 blocks wide), to minimize block updates.
        for i in range(len(curTime)):
            if i >= len(prevTime) or prevTime[i] != curTime[i]:
                text.drawText(mc, fonts.FONTS['8x8'], pos + forward * (8*i), forward, Vec3(0,1,0), curTime[i:], foreground, background)
                break
        prevTime = curTime
    time.sleep(0.1)
| StarcoderdataPython |
5087564 | __docformat__ = "restructuredtext en"
import numpy
from theano.tests import unittest_tools as utt
from theano.tensor.raw_random import *
from theano.tensor import (raw_random, ivector, dvector, iscalar, dcol,
dtensor3)
from theano import tensor
from theano import compile, config, gof
class T_random_function(utt.InferShapeTester):
    def setUp(self):
        """Seed the shared test RNG so every test is deterministic."""
        utt.seed_rng()
    def test_basic_usage(self):
        """A non-inplace RandomFunction gives identical draws from equal states."""
        rf = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)
        assert not rf.inplace
        assert getattr(rf, 'destroy_map', {}) == {}
        rng_R = random_state_type()
        # If calling RandomFunction directly, all args have to be specified,
        # because shape will have to be moved to the end
        post_r, out = rf(rng_R, (4,), 0., 1.)
        assert out.type == tensor.dvector
        f = compile.function([rng_R], out)
        rng_state0 = numpy.random.RandomState(utt.fetch_seed())
        # Same input state twice (not updated in place) -> same samples.
        f_0 = f(rng_state0)
        f_1 = f(rng_state0)
        assert numpy.all(f_0 == f_1)
    def test_inplace_norun(self):
        """inplace=True must set the destroy map (no graph execution needed)."""
        rf = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector,
                            inplace=True)
        assert rf.inplace
        assert getattr(rf, 'destroy_map', {}) != {}
    def test_args(self):
        """Test that arguments to RandomFunction are honored"""
        rf2 = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)
        rf4 = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector,
                             inplace=True)
        rng_R = random_state_type()
        # use make_node to override some of the self.args
        post_r2, out2 = rf2(rng_R, (4,), -2, 2)  # NOT INPLACE
        post_r4, out4 = rf4(rng_R, (4,), -4, 4)  # INPLACE
        post_r2_4, out2_4 = rf2(rng_R, (4, ), -4.0, 2)  # NOT INPLACE
        post_r2_4_4, out2_4_4 = rf2(rng_R, (4, ), -4.0, 4.0)  # NOT INPLACE
        # configure out4 to be computed inplace
        # The update expression means that the random state rng_R will
        # be maintained by post_r4
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r4,
                        mutable=True)],
            [out2, out4, out2_4, out2_4_4],
            accept_inplace=True)
        f2, f4, f2_4, f2_4_4 = f()
        f2b, f4b, f2_4b, f2_4_4b = f()
        #print f2
        #print f4
        #print f2_4
        #print f2_4_4
        #print f2b
        #print f4b
        #print f2_4b
        #print f2_4_4b
        # setting bounds is same as multiplying by 2
        assert numpy.allclose(f2 * 2, f4), (f2, f4)
        # retrieving from non-inplace generator
        # is same as inplace one for first call
        assert numpy.allclose(f2_4_4, f4), (f2_4_4, f4)
        # f4 changes from call to call, that the update has worked
        assert not numpy.allclose(f4, f4b), (f4, f4b)
    def test_inplace_optimization(self):
        """Test that FAST_RUN includes the random_make_inplace optimization"""
        #inplace = False
        rf2 = RandomFunction(numpy.random.RandomState.uniform, tensor.dvector)
        rng_R = random_state_type()
        # If calling RandomFunction directly, all args have to be specified,
        # because shape will have to be moved to the end
        post_r2, out2 = rf2(rng_R, (4,), 0., 1.)
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r2,
                        mutable=True)],
            out2,
            mode='FAST_RUN')  # DEBUG_MODE can't pass the id-based
                              # test below
        # test that the RandomState object stays the same from function call to
        # function call, but that the values returned change from call to call.
        id0 = id(f[rng_R])
        val0 = f()
        assert id0 == id(f[rng_R])
        val1 = f()
        assert id0 == id(f[rng_R])
        assert not numpy.allclose(val0, val1)
    def test_no_inplace(self):
        """Test that when not running inplace, the RandomState is
        not updated"""
        rf = RandomFunction('uniform', tensor.dvector)
        rng_R = random_state_type()
        post_r, out = rf(rng_R, (3,), 0., 1.)
        f = compile.function([rng_R], [post_r, out])
        rng = numpy.random.RandomState(utt.fetch_seed())
        rng0, val0 = f(rng)
        rng_ = numpy.random.RandomState(utt.fetch_seed())
        # rng should still be in a fresh state
        self.assertTrue(rng_R.type.values_eq(rng, rng_))
        # rng0 should be in an updated state
        self.assertFalse(rng_R.type.values_eq(rng, rng0))
        f2 = compile.function(
            [compile.In(rng_R,
                        value=rng,
                        update=post_r,
                        mutable=False)],
            [post_r, out])
        rng2, val2 = f2()
        # rng should be in a fresh state
        self.assertTrue(rng_R.type.values_eq(rng, rng_))
        # rng2 should be in an updated state
        self.assertFalse(rng_R.type.values_eq(rng, rng2))
        # The updated state should be the same for both functions
        self.assertTrue(rng_R.type.values_eq(rng2, rng0))
        rng3, val3 = f2()
        # rng2 should not have changed
        self.assertTrue(rng_R.type.values_eq(rng2, rng0))
        # rng3 should be an updated again version of rng2
        self.assertFalse(rng_R.type.values_eq(rng3, rng2))
        self.assertFalse(rng_R.type.values_eq(rng3, rng))
    def test_random_function_ndim(self):
        """Test that random_function helper function accepts argument ndim"""
        rng_R = random_state_type()
        # ndim is an optional argument indicating the length of the 'shape'
        # ndim not specified, OK
        post_out4, out4 = uniform(rng_R, (4,))
        # ndim specified, consistent with shape, OK
        post_out1_4, out1_4 = uniform(rng_R, (4, ), ndim=1)
        post_out2_4_4, out2_4_4 = uniform(rng_R, (4, 4), ndim=2)
        # ndim specified, but not compatible with shape
        self.assertRaises(ValueError, uniform, rng_R, (4,), ndim=2)
        # All three outputs share the single rng_R input, updated through
        # the last draw, so they are generated from one state sequence.
        f_ok = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_out2_4_4,
                        mutable=True)],
            [out4, out1_4, out2_4_4],
            accept_inplace=True)
        # The correct cases should execute properly
        o4, o1_4, o2_4_4 = f_ok()
        # Check the sanity of the answers: draws from the same state must
        # coincide regardless of how ndim was specified.
        self.assertTrue(numpy.allclose(o4, o1_4))
        self.assertTrue(numpy.allclose(o4, o2_4_4[0]))
    def test_random_function_noshape_args(self):
        '''Test if random_function helper works with args but without shape'''
        rng_R = random_state_type()
        # No shape, default args -> OK
        post_out, out = uniform(rng_R, size=None, ndim=2)
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_out,
                        mutable=True)],
            [out],
            accept_inplace=True)
        o, = f()
        # No shape, args that have to be broadcasted -> OK
        # low is 3d (col-broadcastable in last two dims), high is 4d: the
        # output shape comes from broadcasting the two parameter shapes.
        low = tensor.TensorType(dtype='float64',
                                broadcastable=(False, True, True))()
        high = tensor.TensorType(dtype='float64',
                                 broadcastable=(True, True, True, False))()
        post_out2, out2 = uniform(rng_R, size=None, ndim=2, low=low, high=high)
        self.assertEqual(out2.ndim, 4)
        self.assertEqual(out2.broadcastable, (True, False, True, False))
        g = compile.function(
            [low,
             high,
             compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_out2,
                        mutable=True)],
            [out2],
            accept_inplace=True)
        # (3, 1, 1) low broadcast against (1, 1, 1, 2) high -> (1, 3, 1, 2)
        low_v = [[[3]], [[4]], [[-5]]]
        high_v = [[[[5, 8]]]]
        o2, = g(low_v, high_v)
        self.assertEqual(o2.shape, (1, 3, 1, 2))
def test_random_function_noshape_noargs(self):
'''Test if random_function helper works without args or shape'''
rng_R = random_state_type()
# No shape, no args -> TypeError
self.assertRaises(TypeError, permutation, rng_R, size=None, ndim=2)
    def test_random_function_ndim_added(self):
        """Test that random_function helper function accepts ndim_added as
        keyword argument"""
        # If using numpy's uniform distribution, ndim_added should be 0,
        # because the shape provided as argument is the output shape.
        # Specifying a different ndim_added will change the Op's output ndim,
        # so numpy.uniform will produce a result of incorrect shape,
        # and a ValueError should be raised.
        def ndim_added_deco(ndim_added):
            # Build a uniform-sampling callable whose declared output ndim
            # is offset by `ndim_added` relative to the requested size.
            def randomfunction(random_state, size=(), low=0.0, high=0.0,
                               ndim=None):
                ndim, size, bcast = raw_random._infer_ndim_bcast(ndim, size)
                if ndim_added < 0:
                    # Negative offset: drop trailing broadcast dims.
                    bcast = bcast[:ndim_added]
                else:
                    # Positive offset: append non-broadcastable dims.
                    bcast = bcast + ((False,) * ndim_added)
                assert len(bcast) == ndim + ndim_added
                op = RandomFunction('uniform',
                                    tensor.TensorType(dtype='float64',
                                                      broadcastable=bcast),
                                    ndim_added=ndim_added)
                return op(random_state, size, low, high)
            return randomfunction
        uni_1 = ndim_added_deco(1)
        uni_0 = ndim_added_deco(0)
        uni_m1 = ndim_added_deco(-1)
        rng_R = random_state_type()
        p_uni11, uni11 = uni_1(rng_R, size=(4,))
        p_uni12, uni12 = uni_1(rng_R, size=(3, 4))
        p_uni01, uni01 = uni_0(rng_R, size=(4,))
        p_uni02, uni02 = uni_0(rng_R, size=(3, 4))
        p_unim11, unim11 = uni_m1(rng_R, size=(4,))
        p_unim12, unim12 = uni_m1(rng_R, size=(3, 4))
        # Declared output ndim = len(size) + ndim_added in every case.
        self.assertEqual(uni11.ndim, 2)
        self.assertEqual(uni12.ndim, 3)
        self.assertEqual(uni01.ndim, 1)
        self.assertEqual(uni02.ndim, 2)
        self.assertEqual(unim11.ndim, 0)
        self.assertEqual(unim12.ndim, 1)
        f11 = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=p_uni11, mutable=True)],
            [uni11], accept_inplace=True)
        f12 = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=p_uni12, mutable=True)],
            [uni12], accept_inplace=True)
        fm11 = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=p_unim11, mutable=True)],
            [unim11], accept_inplace=True)
        fm12 = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=p_unim12, mutable=True)],
            [unim12], accept_inplace=True)
        f0 = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=p_uni02, mutable=True)],
            [uni01, uni02], accept_inplace=True)
        # Non-zero ndim_added with numpy.uniform: runtime shape mismatch.
        self.assertRaises(ValueError, f11)
        self.assertRaises(ValueError, f12)
        self.assertRaises(ValueError, fm11)
        self.assertRaises(ValueError, fm12)
        # ndim_added=0 is the correct setting and must execute.
        u01, u02 = f0()
        print u01
        print u02
        self.assertTrue(numpy.allclose(u01, u02[0]))
    def test_uniform(self):
        """Test that raw_random.uniform generates the same results as numpy."""
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        # Use non-default parameters
        post_r, out = uniform(rng_R, (4,), -2.0, 2.0)
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r, mutable=True)],
            [out], accept_inplace=True)
        # Reference generator seeded identically to the shared input above.
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        numpy_val0 = numpy_rng.uniform(-2.0, 2.0, size=(4,))
        numpy_val1 = numpy_rng.uniform(-2.0, 2.0, size=(4,))
        print val0
        print numpy_val0
        print val1
        print numpy_val1
        self.assertTrue(numpy.allclose(val0, numpy_val0))
        self.assertTrue(numpy.allclose(val1, numpy_val1))
    def test_binomial(self):
        """Test that raw_random.binomial generates the same results
        as numpy."""
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        # Use non-default parameters, and larger dimensions because of
        # the integer nature of the result
        post_r, bin = binomial(rng_R, (7, 12), 5, 0.8)
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r, mutable=True)],
            [bin], accept_inplace=True)
        # Reference generator seeded identically to the shared input above.
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        numpy_val0 = numpy_rng.binomial(5, 0.8, size=(7, 12))
        numpy_val1 = numpy_rng.binomial(5, 0.8, size=(7, 12))
        print val0
        print numpy_val0
        print val1
        print numpy_val1
        # Integer draws must match numpy exactly.
        self.assertTrue(numpy.all(val0 == numpy_val0))
        self.assertTrue(numpy.all(val1 == numpy_val1))
    def test_normal(self):
        """Test that raw_random.normal generates the same results as numpy."""
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        # Use non-default parameters
        post_r, out = normal(rng_R, (2, 3), 4.0, 2.0)
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r, mutable=True)],
            [out], accept_inplace=True)
        # Reference generator seeded identically to the shared input above.
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        numpy_val0 = numpy_rng.normal(4.0, 2.0, size=(2, 3))
        numpy_val1 = numpy_rng.normal(4.0, 2.0, size=(2, 3))
        print val0
        print numpy_val0
        print val1
        print numpy_val1
        self.assertTrue(numpy.allclose(val0, numpy_val0))
        self.assertTrue(numpy.allclose(val1, numpy_val1))
    def test_random_integers(self):
        """Test that raw_random.random_integers generates the same
        results as numpy."""
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        # Use non-default parameters, and larger dimensions because of
        # the integer nature of the result
        post_r, out = random_integers(rng_R, (11, 8), -3, 16)
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r, mutable=True)],
            [out], accept_inplace=True)
        # Reference generator seeded identically to the shared input above.
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        numpy_val0 = numpy_rng.random_integers(-3, 16, size=(11, 8))
        numpy_val1 = numpy_rng.random_integers(-3, 16, size=(11, 8))
        print val0
        print numpy_val0
        print val1
        print numpy_val1
        self.assertTrue(numpy.allclose(val0, numpy_val0))
        self.assertTrue(numpy.allclose(val1, numpy_val1))
    def test_permutation_helper(self):
        """Test that raw_random.permutation_helper generates the same
        results as numpy,
        and that the 'ndim_added' keyword behaves correctly."""
        # permutation_helper needs "ndim_added=1", because its output
        # is one dimension more than its "shape" argument (and there's
        # no way to determine that automatically).
        # Check the working case, over two calls to see if the random
        # state is correctly updated.
        rf = RandomFunction(permutation_helper, tensor.imatrix, 8,
                            ndim_added=1)
        rng_R = random_state_type()
        # size (7,) with ndim_added=1 -> output is a (7, 8) int matrix
        # of row-wise permutations of range(8).
        post_r, out = rf(rng_R, (7,), 8)
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r, mutable=True)],
            [out], accept_inplace=True)
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        # numpy_rng.permutation outputs one vector at a time,
        # so we call it iteratively to generate all the samples.
        numpy_val0 = numpy.asarray([numpy_rng.permutation(8)
                                    for i in range(7)])
        numpy_val1 = numpy.asarray([numpy_rng.permutation(8)
                                    for i in range(7)])
        print val0
        print numpy_val0
        print val1
        print numpy_val1
        self.assertTrue(numpy.all(val0 == numpy_val0))
        self.assertTrue(numpy.all(val1 == numpy_val1))
        # This call lacks "ndim_added=1", so ndim_added defaults to 0.
        # A ValueError should be raised.
        rf0 = RandomFunction(permutation_helper, tensor.imatrix, 8)
        post_r0, out0 = rf0(rng_R, (7,), 8)
        f0 = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r0, mutable=True)],
            [out0], accept_inplace=True)
        self.assertRaises(ValueError, f0)
        # Here, ndim_added is 2 instead of 1. A ValueError should be raised.
        rf2 = RandomFunction(permutation_helper, tensor.imatrix, 8,
                             ndim_added=2)
        post_r2, out2 = rf2(rng_R, (7,), 8)
        f2 = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r2, mutable=True)],
            [out2], accept_inplace=True)
        self.assertRaises(ValueError, f2)
    def test_choice(self):
        """Test that raw_random.choice generates the same
        results as numpy."""
        # numpy.random.choice is only available for numpy versions >= 1.7
        major, minor, _ = numpy.version.short_version.split('.')
        if (int(major), int(minor)) < (1, 7):
            raise utt.SkipTest('choice requires at NumPy version >= 1.7 '
                               '(%s)' % numpy.__version__)
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        # Use non-default parameters, and larger dimensions because of
        # the integer nature of the result
        # Positional args: a=10, replace=1 (True), p=0 (None).
        post_r, out = choice(rng_R, (11, 8), 10, 1, 0)
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r, mutable=True)],
            [out], accept_inplace=True)
        # Reference generator seeded identically to the shared input above.
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        val0 = f()
        val1 = f()
        numpy_val0 = numpy_rng.choice(10, (11, 8), True, None)
        numpy_val1 = numpy_rng.choice(10, (11, 8), True, None)
        print val0
        print numpy_val0
        print val1
        print numpy_val1
        self.assertTrue(numpy.allclose(val0, numpy_val0))
        self.assertTrue(numpy.allclose(val1, numpy_val1))
    def test_permutation(self):
        """Test that raw_random.permutation generates the same
        results as numpy."""
        rng_R = random_state_type()
        # size=(9,) with n=6 -> nine independent permutations of range(6).
        post_r, out = permutation(rng_R, size=(9,), n=6)
        print 'OUT NDIM', out.ndim
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r, mutable=True)],
            [out], accept_inplace=True)
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        # Check over two calls to see if the random state is correctly updated.
        # numpy_rng.permutation outputs one vector at a time,
        # so we call it iteratively to generate all the samples.
        val0 = f()
        val1 = f()
        numpy_val0 = numpy.asarray([numpy_rng.permutation(6)
                                    for i in range(9)])
        numpy_val1 = numpy.asarray([numpy_rng.permutation(6)
                                    for i in range(9)])
        print val0
        print numpy_val0
        print val1
        print numpy_val1
        self.assertTrue(numpy.all(val0 == numpy_val0))
        self.assertTrue(numpy.all(val1 == numpy_val1))
    def test_multinomial(self):
        """Test that raw_random.multinomial generates the same
        results as numpy."""
        # Check over two calls to see if the random state is correctly updated.
        rng_R = random_state_type()
        # size (7, 3) with 5 outcome probabilities -> (7, 3, 5) output.
        post_r, out = multinomial(rng_R, (7, 3), 6, [0.2] * 5)
        f = compile.function(
            [compile.In(rng_R,
                        value=numpy.random.RandomState(utt.fetch_seed()),
                        update=post_r, mutable=True)],
            [out], accept_inplace=True)
        # Reference generator seeded identically to the shared input above.
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        val0, = f()
        val1, = f()
        numpy_val0 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))
        numpy_val1 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))
        print val0
        print numpy_val0
        print val1
        print numpy_val1
        self.assertTrue(numpy.all(val0 == numpy_val0))
        self.assertTrue(numpy.all(val1 == numpy_val1))
        self.assertTrue(val0.shape == (7, 3, 5))
        self.assertTrue(val1.shape == (7, 3, 5))
def test_symbolic_shape(self):
rng_R = random_state_type()
shape = tensor.lvector()
post_r, out = uniform(rng_R, shape, ndim=2)
f = compile.function([rng_R, shape], out)
rng_state0 = numpy.random.RandomState(utt.fetch_seed())
assert f(rng_state0, [2, 3]).shape == (2, 3)
assert f(rng_state0, [4, 8]).shape == (4, 8)
self.assertRaises(ValueError, f, rng_state0, [4])
self.assertRaises(ValueError, f, rng_state0, [4, 3, 4, 5])
def test_mixed_shape(self):
# Test when the provided shape is a tuple of ints and scalar vars
rng_R = random_state_type()
shape0 = tensor.lscalar()
shape = (shape0, 3)
post_r, u = uniform(rng_R, size=shape, ndim=2)
f = compile.function([rng_R, shape0], u)
rng_state0 = numpy.random.RandomState(utt.fetch_seed())
assert f(rng_state0, 2).shape == (2, 3)
assert f(rng_state0, 8).shape == (8, 3)
post_r, v = uniform(rng_R, size=shape)
g = compile.function([rng_R, shape0], v)
assert g(rng_state0, 2).shape == (2, 3)
assert g(rng_state0, 8).shape == (8, 3)
def test_mixed_shape_bcastable(self):
# Test when the provided shape is a tuple of ints and scalar vars
rng_R = random_state_type()
shape0 = tensor.lscalar()
shape = (shape0, 1)
post_r, u = uniform(rng_R, size=shape, ndim=2)
assert u.broadcastable == (False, True)
f = compile.function([rng_R, shape0], u)
rng_state0 = numpy.random.RandomState(utt.fetch_seed())
assert f(rng_state0, 2).shape == (2, 1)
assert f(rng_state0, 8).shape == (8, 1)
post_r, v = uniform(rng_R, size=shape)
assert v.broadcastable == (False, True)
g = compile.function([rng_R, shape0], v)
assert g(rng_state0, 2).shape == (2, 1)
assert g(rng_state0, 8).shape == (8, 1)
    def test_default_shape(self):
        # With no size argument, uniform draws a scalar; the updated
        # RandomState is returned and threaded into the next call by hand.
        rng_R = random_state_type()
        post_r, out = uniform(rng_R)
        f = compile.function([rng_R], [post_r, out], accept_inplace=True)
        rng_state0 = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        post0, val0 = f(rng_state0)
        # Feed the updated state back in for the second draw.
        post1, val1 = f(post0)
        numpy_val0 = numpy.asarray(numpy_rng.uniform(),
                                   dtype=theano.config.floatX)
        numpy_val1 = numpy.asarray(numpy_rng.uniform(),
                                   dtype=theano.config.floatX)
        assert numpy.all(val0 == numpy_val0)
        assert numpy.all(val1 == numpy_val1)
        # multinomial's default is n=1, pvals=[.5, .5]; continue from the
        # state left by the two uniform draws.
        post_r, out = multinomial(rng_R)
        g = compile.function([rng_R], [post_r, out], accept_inplace=True)
        post2, val2 = g(post1)
        numpy_val2 = numpy.asarray(numpy_rng.multinomial(n=1, pvals=[.5, .5]),
                                   dtype=theano.config.floatX)
        assert numpy.all(val2 == numpy_val2)
    def test_vector_arguments(self):
        # Vector-valued distribution parameters determine the output shape
        # when no size is given; the RNG state is threaded by hand between
        # calls so draws stay aligned with the reference numpy generator.
        rng_R = random_state_type()
        low = tensor.vector()
        post_r, out = uniform(rng_R, low=low, high=1)
        assert out.ndim == 1
        f = compile.function([rng_R, low], [post_r, out], accept_inplace=True)
        def as_floatX(thing):
            # Cast helper so expected values share Theano's configured dtype.
            return numpy.asarray(thing, dtype=theano.config.floatX)
        rng_state0 = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        post0, val0 = f(rng_state0, [-5, .5, 0, 1])
        post1, val1 = f(post0, as_floatX([.9]))
        numpy_val0 = as_floatX(numpy_rng.uniform(low=[-5, .5, 0, 1], high=1))
        numpy_val1 = as_floatX(numpy_rng.uniform(low=as_floatX([.9]), high=1))
        assert numpy.all(val0 == numpy_val0)
        assert numpy.all(val1 == numpy_val1)
        # Both low and high as vectors: lengths must agree.
        high = tensor.vector()
        post_rb, outb = uniform(rng_R, low=low, high=high)
        assert outb.ndim == 1
        fb = compile.function([rng_R, low, high], [post_rb, outb],
                              accept_inplace=True)
        post0b, val0b = fb(post1, [-4., -2], [-1, 0])
        post1b, val1b = fb(post0b, [-4.], [-1])
        numpy_val0b = as_floatX(numpy_rng.uniform(low=[-4., -2], high=[-1, 0]))
        numpy_val1b = as_floatX(numpy_rng.uniform(low=[-4.], high=[-1]))
        assert numpy.all(val0b == numpy_val0b)
        assert numpy.all(val1b == numpy_val1b)
        # Mismatched parameter lengths must raise.
        self.assertRaises(ValueError, fb, post1b, [-4., -2], [-1, 0, 1])
        #TODO: do we want that?
        #self.assertRaises(ValueError, fb, post1b, [-4., -2], [-1])
        # Explicit symbolic size must be consistent with parameter lengths.
        size = tensor.lvector()
        post_rc, outc = uniform(rng_R, low=low, high=high, size=size, ndim=1)
        fc = compile.function([rng_R, low, high, size], [post_rc, outc],
                              accept_inplace=True)
        post0c, val0c = fc(post1b, [-4., -2], [-1, 0], [2])
        post1c, val1c = fc(post0c, [-4.], [-1], [1])
        numpy_val0c = as_floatX(numpy_rng.uniform(low=[-4., -2], high=[-1, 0]))
        numpy_val1c = as_floatX(numpy_rng.uniform(low=[-4.], high=[-1]))
        assert numpy.all(val0c == numpy_val0c)
        assert numpy.all(val1c == numpy_val1c)
        self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [1])
        self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [1, 2])
        self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1, 0], [2, 1])
        self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1], [1])
        #TODO: do we want that?
        #self.assertRaises(ValueError, fc, post1c, [-4., -2], [-1], [2])
    def test_broadcast_arguments(self):
        # A vector low and a column (N, 1) high broadcast against each
        # other, so the output is a matrix.
        rng_R = random_state_type()
        low = tensor.dvector()
        high = tensor.dcol()
        post_r, out = uniform(rng_R, low=low, high=high)
        assert out.ndim == 2
        f = compile.function([rng_R, low, high], [post_r, out],
                             accept_inplace=True)
        rng_state0 = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        # (4,) x (1, 1) -> (1, 4); (1,) x (3, 1) -> (3, 1); (4,) x (3, 1)
        # -> (3, 4). State is threaded by hand between calls.
        post0, val0 = f(rng_state0, [-5, .5, 0, 1], [[1.]])
        post1, val1 = f(post0, [.9], [[1.], [1.1], [1.5]])
        post2, val2 = f(post1, [-5, .5, 0, 1], [[1.], [1.1], [1.5]])
        numpy_val0 = numpy_rng.uniform(low=[-5, .5, 0, 1], high=[1.])
        numpy_val1 = numpy_rng.uniform(low=[.9], high=[[1.], [1.1], [1.5]])
        numpy_val2 = numpy_rng.uniform(low=[-5, .5, 0, 1],
                                       high=[[1.], [1.1], [1.5]])
        assert numpy.all(val0 == numpy_val0), (val0, numpy_val0)
        assert numpy.all(val1 == numpy_val1)
        assert numpy.all(val2 == numpy_val2)
    def test_uniform_vector(self):
        # Vector low/high parameters: the output length follows the
        # parameters when size is omitted, and must match when given.
        rng_R = random_state_type()
        low = tensor.vector()
        high = tensor.vector()
        post_r, out = uniform(rng_R, low=low, high=high)
        assert out.ndim == 1
        f = compile.function([rng_R, low, high], [post_r, out],
                             accept_inplace=True)
        def as_floatX(thing):
            # Cast helper so expected values share Theano's configured dtype.
            return numpy.asarray(thing, dtype=theano.config.floatX)
        low_val = as_floatX([.1, .2, .3])
        high_val = as_floatX([1.1, 2.2, 3.3])
        rng = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        # Arguments of size (3,)
        rng0, val0 = f(rng, low_val, high_val)
        numpy_val0 = as_floatX(numpy_rng.uniform(low=low_val, high=high_val))
        assert numpy.all(val0 == numpy_val0)
        # arguments of size (2,)
        rng1, val1 = f(rng0, low_val[:-1], high_val[:-1])
        numpy_val1 = as_floatX(numpy_rng.uniform(low=low_val[:-1],
                                                 high=high_val[:-1]))
        assert numpy.all(val1 == numpy_val1)
        # Specifying the size explicitly
        g = compile.function([rng_R, low, high],
                             uniform(rng_R, low=low, high=high, size=(3,)),
                             accept_inplace=True)
        rng2, val2 = g(rng1, low_val, high_val)
        numpy_val2 = as_floatX(numpy_rng.uniform(low=low_val, high=high_val,
                                                 size=(3,)))
        assert numpy.all(val2 == numpy_val2)
        # Explicit size (3,) with length-2 parameters must raise.
        self.assertRaises(ValueError, g, rng2, low_val[:-1], high_val[:-1])
    def test_binomial_vector(self):
        # Vector n/p parameters: the output length follows the parameters
        # when size is omitted, and must match when given.
        rng_R = random_state_type()
        n = tensor.lvector()
        prob = tensor.vector()
        post_r, out = binomial(rng_R, n=n, p=prob)
        assert out.ndim == 1
        f = compile.function([rng_R, n, prob], [post_r, out],
                             accept_inplace=True)
        n_val = [1, 2, 3]
        prob_val = numpy.asarray([.1, .2, .3], dtype=config.floatX)
        rng = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        # Arguments of size (3,)
        rng0, val0 = f(rng, n_val, prob_val)
        numpy_val0 = numpy_rng.binomial(n=n_val, p=prob_val)
        assert numpy.all(val0 == numpy_val0)
        # arguments of size (2,)
        rng1, val1 = f(rng0, n_val[:-1], prob_val[:-1])
        numpy_val1 = numpy_rng.binomial(n=n_val[:-1], p=prob_val[:-1])
        assert numpy.all(val1 == numpy_val1)
        # Specifying the size explicitly
        g = compile.function([rng_R, n, prob],
                             binomial(rng_R, n=n, p=prob, size=(3,)),
                             accept_inplace=True)
        rng2, val2 = g(rng1, n_val, prob_val)
        numpy_val2 = numpy_rng.binomial(n=n_val, p=prob_val, size=(3,))
        assert numpy.all(val2 == numpy_val2)
        # Explicit size (3,) with length-2 parameters must raise.
        self.assertRaises(ValueError, g, rng2, n_val[:-1], prob_val[:-1])
    def test_normal_vector(self):
        # Vector avg/std parameters: the output length follows the
        # parameters when size is omitted, and must match when given.
        rng_R = random_state_type()
        avg = tensor.vector()
        std = tensor.vector()
        post_r, out = normal(rng_R, avg=avg, std=std)
        assert out.ndim == 1
        f = compile.function([rng_R, avg, std], [post_r, out],
                             accept_inplace=True)
        def as_floatX(thing):
            # Cast helper so expected values share Theano's configured dtype.
            return numpy.asarray(thing, dtype=theano.config.floatX)
        avg_val = [1, 2, 3]
        std_val = as_floatX([.1, .2, .3])
        rng = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        # Arguments of size (3,)
        rng0, val0 = f(rng, avg_val, std_val)
        numpy_val0 = as_floatX(numpy_rng.normal(loc=as_floatX(avg_val),
                                                scale=as_floatX(std_val)))
        assert numpy.all(val0 == numpy_val0)
        # arguments of size (2,)
        rng1, val1 = f(rng0, avg_val[:-1], std_val[:-1])
        numpy_val1 = numpy.asarray(numpy_rng.normal(loc=avg_val[:-1],
                                                    scale=std_val[:-1]),
                                   dtype=theano.config.floatX)
        assert numpy.all(val1 == numpy_val1)
        # Specifying the size explicitly
        g = compile.function([rng_R, avg, std],
                             normal(rng_R, avg=avg, std=std, size=(3,)),
                             accept_inplace=True)
        rng2, val2 = g(rng1, avg_val, std_val)
        numpy_val2 = numpy.asarray(numpy_rng.normal(loc=avg_val, scale=std_val,
                                                    size=(3,)),
                                   dtype=theano.config.floatX)
        assert numpy.all(val2 == numpy_val2)
        # Explicit size (3,) with length-2 parameters must raise.
        self.assertRaises(ValueError, g, rng2, avg_val[:-1], std_val[:-1])
    def test_random_integers_vector(self):
        # Vector low/high bounds: the output length follows the parameters
        # when size is omitted, and must match when given. numpy's
        # random_integers is scalar-only, so the reference values are built
        # element by element.
        rng_R = random_state_type()
        low = tensor.lvector()
        high = tensor.lvector()
        post_r, out = random_integers(rng_R, low=low, high=high)
        assert out.ndim == 1
        f = compile.function([rng_R, low, high], [post_r, out],
                             accept_inplace=True)
        low_val = [100, 200, 300]
        high_val = [110, 220, 330]
        rng = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        # Arguments of size (3,)
        rng0, val0 = f(rng, low_val, high_val)
        numpy_val0 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)
                                    for lv, hv in zip(low_val, high_val)])
        assert numpy.all(val0 == numpy_val0)
        # arguments of size (2,)
        rng1, val1 = f(rng0, low_val[:-1], high_val[:-1])
        numpy_val1 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)
                                    for lv, hv in zip(low_val[:-1], high_val[:-1])])
        assert numpy.all(val1 == numpy_val1)
        # Specifying the size explicitly
        g = compile.function([rng_R, low, high],
                             random_integers(rng_R, low=low, high=high, size=(3,)),
                             accept_inplace=True)
        rng2, val2 = g(rng1, low_val, high_val)
        numpy_val2 = numpy.asarray([numpy_rng.random_integers(low=lv, high=hv)
                                    for lv, hv in zip(low_val, high_val)])
        assert numpy.all(val2 == numpy_val2)
        # Explicit size (3,) with length-2 parameters must raise.
        self.assertRaises(ValueError, g, rng2, low_val[:-1], high_val[:-1])
    # A vectorized permutation test doesn't make sense: the only parameter,
    # n, controls one dimension of the returned tensor.
    def test_multinomial_vector(self):
        # Vector n and matrix pvals: one multinomial draw per row. numpy's
        # multinomial takes scalar n and vector pvals, so the reference
        # values are built row by row.
        rng_R = random_state_type()
        n = tensor.lvector()
        pvals = tensor.matrix()
        post_r, out = multinomial(rng_R, n=n, pvals=pvals)
        assert out.ndim == 2
        f = compile.function([rng_R, n, pvals], [post_r, out],
                             accept_inplace=True)
        n_val = [1, 2, 3]
        pvals_val = [[.1, .9], [.2, .8], [.3, .7]]
        pvals_val = numpy.asarray(pvals_val, dtype=config.floatX)
        rng = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(utt.fetch_seed())
        # Arguments of size (3,)
        rng0, val0 = f(rng, n_val, pvals_val)
        numpy_val0 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
                                    for nv, pv in zip(n_val, pvals_val)])
        assert numpy.all(val0 == numpy_val0)
        # arguments of size (2,)
        rng1, val1 = f(rng0, n_val[:-1], pvals_val[:-1])
        numpy_val1 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
                                    for nv, pv in zip(n_val[:-1], pvals_val[:-1])])
        assert numpy.all(val1 == numpy_val1)
        # Specifying the size explicitly
        g = compile.function([rng_R, n, pvals],
                             multinomial(rng_R, n=n, pvals=pvals, size=(3,)),
                             accept_inplace=True)
        rng2, val2 = g(rng1, n_val, pvals_val)
        numpy_val2 = numpy.asarray([numpy_rng.multinomial(n=nv, pvals=pv)
                                    for nv, pv in zip(n_val, pvals_val)])
        assert numpy.all(val2 == numpy_val2)
        # Explicit size (3,) with length-2 parameters must raise.
        self.assertRaises(ValueError, g, rng2, n_val[:-1], pvals_val[:-1])
def test_multinomial_tensor3_a(self):
# Test the examples given in the multinomial documentation regarding
# tensor3 objects
rng_R = random_state_type()
n = 9
pvals = tensor.dtensor3()
post_r, out = multinomial(rng_R, n=n, pvals=pvals, size=(1, -1))
assert out.ndim == 3
assert out.broadcastable == (True, False, False)
f = compile.function([rng_R, pvals], [post_r, out],
accept_inplace=True)
rng = numpy.random.RandomState(utt.fetch_seed())
numpy_rng = numpy.random.RandomState(utt.fetch_seed())
pvals_val = numpy.asarray([[[.1, .9], [.2, .8], [.3, .7]]])
assert pvals_val.shape == (1, 3, 2)
new_rng, draw = f(rng, pvals_val)
assert draw.shape == (1, 3, 2)
assert numpy.allclose(draw.sum(axis=2), 9)
def test_multinomial_tensor3_b(self):
# Test the examples given in the multinomial documentation regarding
# tensor3 objects
rng_R = random_state_type()
n = 9
pvals = tensor.dtensor3()
post_r, out = multinomial(rng_R, n=n, pvals=pvals, size=(10, 1, -1))
assert out.ndim == 4
assert out.broadcastable == (False, True, False, False)
f = compile.function([rng_R, pvals], [post_r, out],
accept_inplace=True)
rng = numpy.random.RandomState(utt.fetch_seed())
numpy_rng = numpy.random.RandomState(utt.fetch_seed())
pvals_val = numpy.asarray([[[.1, .9], [.2, .8], [.3, .7]]])
assert pvals_val.shape == (1, 3, 2)
out_rng, draw = f(rng, pvals_val)
assert draw.shape == (10, 1, 3, 2)
assert numpy.allclose(draw.sum(axis=3), 9)
def test_dtype(self):
rng_R = random_state_type()
low = tensor.lscalar()
high = tensor.lscalar()
post_r, out = random_integers(rng_R, low=low, high=high, size=(20, ),
dtype='int8')
assert out.dtype == 'int8'
f = compile.function([rng_R, low, high], [post_r, out])
rng = numpy.random.RandomState(utt.fetch_seed())
rng0, val0 = f(rng, 0, 9)
assert val0.dtype == 'int8'
rng1, val1 = f(rng0, 255, 257)
assert val1.dtype == 'int8'
assert numpy.all(abs(val1) <= 1)
def test_dtype_normal_uniform_687(self):
# Regression test for #687.
rng_R = random_state_type()
assert uniform(rng_R, low=tensor.constant(0, dtype='float64'),
dtype='float32')[1].dtype == 'float32'
assert normal(rng_R, avg=tensor.constant(0, dtype='float64'),
dtype='float32')[1].dtype == 'float32'
def setUp(self):
super(T_random_function, self).setUp()
    def test_infer_shape(self):
        # Exercise RandomFunction's infer_shape across the parameter/size
        # combinations used elsewhere in this class. _compile_and_check
        # (from the infer_shape test mixin) compiles the graph and verifies
        # that the inferred shape matches the computed output's shape.
        rng_R = random_state_type()
        rng_R_val = numpy.random.RandomState(utt.fetch_seed())
        # no shape specified, default args
        post_r, out = uniform(rng_R)
        self._compile_and_check([rng_R], [out], [rng_R_val],
                                RandomFunction)
        post_r, out = uniform(rng_R, size=None, ndim=2)
        self._compile_and_check([rng_R], [out], [rng_R_val],
                                RandomFunction)
        """
        #infer_shape don't work for multinomial.
        #The parameter ndim_added is set to 1 and in this case, the infer_shape
        #inplementation don't know how to infer the shape
        post_r, out = multinomial(rng_R)
        self._compile_and_check([rng_R], [out], [rng_R_val],
                                RandomFunction)
        """
        # no shape specified, args have to be broadcasted
        low = tensor.TensorType(dtype='float64',
                                broadcastable=(False, True, True))()
        high = tensor.TensorType(dtype='float64',
                                 broadcastable=(True, True, True, False))()
        post_r, out = uniform(rng_R, size=None, ndim=2, low=low, high=high)
        low_val = [[[3]], [[4]], [[-5]]]
        high_val = [[[[5, 8]]]]
        self._compile_and_check([rng_R, low, high], [out],
                                [rng_R_val, low_val, high_val],
                                RandomFunction)
        # multinomial, specified shape
        """
        #infer_shape don't work for multinomial
        n = iscalar()
        pvals = dvector()
        size_val = (7, 3)
        n_val = 6
        pvals_val = [0.2] * 5
        post_r, out = multinomial(rng_R, size=size_val, n=n, pvals=pvals,
                                  ndim=2)
        self._compile_and_check([rng_R, n, pvals], [out],
                                [rng_R_val, n_val, pvals_val],
                                RandomFunction)
        """
        # uniform vector low and high
        low = dvector()
        high = dvector()
        post_r, out = uniform(rng_R, low=low, high=1)
        low_val = [-5, .5, 0, 1]
        self._compile_and_check([rng_R, low], [out], [rng_R_val, low_val],
                                RandomFunction)
        low_val = [.9]
        self._compile_and_check([rng_R, low], [out], [rng_R_val, low_val],
                                RandomFunction)
        post_r, out = uniform(rng_R, low=low, high=high)
        low_val = [-4., -2]
        high_val = [-1, 0]
        self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,
                                high_val], RandomFunction)
        low_val = [-4.]
        high_val = [-1]
        self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,
                                high_val], RandomFunction)
        # uniform broadcasting low and high
        low = dvector()
        high = dcol()
        post_r, out = uniform(rng_R, low=low, high=high)
        low_val = [-5, .5, 0, 1]
        high_val = [[1.]]
        self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,
                                high_val], RandomFunction)
        low_val = [.9]
        high_val = [[1.], [1.1], [1.5]]
        self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,
                                high_val], RandomFunction)
        low_val = [-5, .5, 0, 1]
        high_val = [[1.], [1.1], [1.5]]
        self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,
                                high_val], RandomFunction)
        # uniform with vector slice
        low = dvector()
        high = dvector()
        post_r, out = uniform(rng_R, low=low, high=high)
        low_val = [.1, .2, .3]
        high_val = [1.1, 2.2, 3.3]
        size_val = (3, )
        self._compile_and_check([rng_R, low, high], [out],
                                [rng_R_val, low_val[:-1],
                                high_val[:-1]], RandomFunction)
        # uniform with explicit size and size implicit in parameters
        # NOTE 1: Would it be desirable that size could also be supplied
        # as a Theano variable?
        post_r, out = uniform(rng_R, size=size_val, low=low, high=high)
        self._compile_and_check([rng_R, low, high], [out], [rng_R_val, low_val,
                                high_val], RandomFunction)
        # binomial with vector slice
        n = ivector()
        prob = dvector()
        post_r, out = binomial(rng_R, n=n, p=prob)
        n_val = [1, 2, 3]
        prob_val = [.1, .2, .3]
        size_val = (3, )
        self._compile_and_check([rng_R, n, prob], [out],
                                [rng_R_val, n_val[:-1],
                                prob_val[:-1]], RandomFunction)
        # binomial with explicit size and size implicit in parameters
        # cf. NOTE 1
        post_r, out = binomial(rng_R, n=n, p=prob, size=size_val)
        self._compile_and_check([rng_R, n, prob], [out], [rng_R_val, n_val,
                                prob_val], RandomFunction)
        # normal with vector slice
        avg = dvector()
        std = dvector()
        post_r, out = normal(rng_R, avg=avg, std=std)
        avg_val = [1, 2, 3]
        std_val = [.1, .2, .3]
        size_val = (3, )
        self._compile_and_check([rng_R, avg, std], [out],
                                [rng_R_val, avg_val[:-1],
                                std_val[:-1]], RandomFunction)
        # normal with explicit size and size implicit in parameters
        # cf. NOTE 1
        post_r, out = normal(rng_R, avg=avg, std=std, size=size_val)
        self._compile_and_check([rng_R, avg, std], [out], [rng_R_val, avg_val,
                                std_val], RandomFunction)
        # multinomial with tensor-3 probabilities
        """
        #multinomial infer_shape don't work.
        pvals = dtensor3()
        n = iscalar()
        post_r, out = multinomial(rng_R, n=n, pvals=pvals, size=(1, -1))
        pvals_val = [[[.1, .9], [.2, .8], [.3, .7]]]
        n_val = 9
        self._compile_and_check([rng_R, n, pvals], [out],
                                [rng_R_val, n_val,
                                pvals_val], RandomFunction)
        post_r, out = multinomial(rng_R, n=n, pvals=pvals, size=(10, 1, -1))
        self._compile_and_check([rng_R, n, pvals], [out],
                                [rng_R_val, n_val,
                                pvals_val], RandomFunction)
        """
# Allow running this test module directly via Theano's test runner.
if __name__ == '__main__':
    from theano.tests import main
    main("test_raw_random")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.