hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d8d68a0d3d4bfb90a5a607834eb5cc46b7afd8b | 9,277 | py | Python | States/playstate.py | Rishikesh-kumar-7258/Hot_Air_Balloon | 6c18162c9775723b78588a6d641bc7b919d4da07 | [
"MIT"
] | 2 | 2021-10-02T09:03:58.000Z | 2021-10-03T18:55:01.000Z | States/playstate.py | Rishikesh-kumar-7258/Hot_Air_Balloon | 6c18162c9775723b78588a6d641bc7b919d4da07 | [
"MIT"
] | null | null | null | States/playstate.py | Rishikesh-kumar-7258/Hot_Air_Balloon | 6c18162c9775723b78588a6d641bc7b919d4da07 | [
"MIT"
] | 1 | 2021-10-06T09:17:02.000Z | 2021-10-06T09:17:02.000Z | from Classes.bonuspoints import Bonus
from Classes.hurdles import Hurdle
from pygame.constants import K_LEFT, K_RIGHT, KEYDOWN, KEYUP, K_p
from pygame.color import THECOLORS
from Classes.balloon import Balloon
from Utils.functions import Write, resource_path
import pygame
from States.basestate import Base
from random import *
class Play(Base):
    """
    The play state: the actual playground of the game.

    Owns the balloon sprite, the scrolling background, the hurdle and bonus
    sprite groups, and the score/lives bookkeeping for a single run.
    """
    def __init__(self) -> None:
        # Initialise the shared state machinery from the Base state class.
        super().__init__()
        # Balloon speed: constant upward drift and current horizontal velocity.
        self.speedY = 2
        self.speedX = 0
        # Hurdle bookkeeping: fall speed, hurdles removed so far, total score.
        self.hurdle_speed = 5
        self.countDeleted = 0
        self.score = 0
        # Bonus bookkeeping: points per pickup, last spawned bonus, points earned.
        self.bonusScore = 5
        self.current_bonus = None
        self.bonus = 0
        # Sprite groups: bonuses only, everything drawable, hurdles only.
        self.bonusgroup = pygame.sprite.Group()
        self.all_sprites = pygame.sprite.Group()
        self.hurdles = pygame.sprite.Group()
        # Countdown (in update() ticks) until the next bonus is spawned.
        self.create_bonus = 180
        # Two copies of the background image are scrolled back-to-back to
        # create an endless vertical scroll.
        self.background1 = pygame.image.load(resource_path("Utils/Images/background.png"))
        self.background1 = pygame.transform.scale(self.background1, (500, 750))
        self.brect1 = self.background1.get_rect()
        self.background2 = self.background1
        self.brect2 = self.background2.get_rect()
        self.brect2.y = -self.brect2.height
        self.background_speed = 3
        # Number of hurdles spawned per wave (raised once the score is high enough).
        self.hurdle_count = 1
        # Lives handling: total lives, lives lost so far, and the two heart images.
        self.lives = 3
        self.lost_lives = 0
        self.lives_image = pygame.image.load(resource_path("Utils/Images/lives.png"))
        self.lost_lives_image = pygame.image.load(resource_path("Utils/Images/lost_lives.png"))
        # Whether the game is currently paused (toggled with the "p" key).
        self.paused = False
def render(self) -> None :
#displaying the background image
self.screen.blit(self.background1, self.brect1)
self.screen.blit(self.background2, self.brect2)
# Display score on the screen
Write(text=f"Score : {self.score}", fontsize=25, screen=self.screen, color=THECOLORS['goldenrod'])
# displaying the the lives
for i in range(self.lives):
lives = self.lives_image
if self.lives - self.lost_lives < i+1: lives = self.lost_lives_image
rect = lives.get_rect()
rect.center = (self.wwidth - 15*(i+1) - 25*i, 15)
self.screen.blit(lives, rect)
# Display all the sprite on the screen
self.all_sprites.draw(self.screen)
    def update(self, params) -> None:
        """
        Advance the game by one frame.

        :param params: iterable of pygame events forwarded from main.py
        """
        # Redraw the whole frame first.
        self.render()
        # Handle the pygame events forwarded from main.py.
        for event in params:
            # A key was pressed down.
            if event.type == KEYDOWN:
                # "p" toggles pause on/off.
                if event.key == K_p:
                    self.paused = not self.paused
                # Left/right arrows steer the balloon and switch its sprite image.
                if event.key == K_LEFT:
                    self.balloon.change("left")
                    self.speedX = -5
                if event.key == K_RIGHT:
                    self.balloon.change("right")
                    self.speedX = 5
            # Releasing any key stops the horizontal movement.
            if event.type == KEYUP:
                self.balloon.change()
                self.speedX = 0
        # While paused only the overlay text is drawn; nothing else moves.
        if self.paused :
            Write(text="Paused", fontsize=72, color=THECOLORS["darkred"], screen=self.screen, x=self.wwidth//2, y=self.wheight//2, center=True)
            Write(text="Press p to continue", color=THECOLORS["goldenrod"], screen=self.screen, x=self.wwidth//2, y=self.wheight//2 + 100, center=True)
            return
        # Scroll both background copies; once a copy passes the bottom edge it
        # is wrapped back above the screen.
        self.brect1.y = self.brect1.y + self.background_speed if self.brect1.y < self.wheight else - self.brect1.height + self.background_speed
        self.brect2.y = self.brect2.y + self.background_speed if self.brect2.y < self.wheight else - self.brect2.height + self.background_speed
        # Tick the bonus-spawn countdown.
        self.create_bonus -= 1
        # Spawn a bonus when the countdown expires, then restart it.
        if self.create_bonus == 0:
            self.add_bonus()
            self.create_bonus = 180
        # Move every bonus and award points on pickup.
        for bonus in self.bonusgroup:
            # Bonuses fall at the same speed as the hurdles.
            bonus.rect.y += self.hurdle_speed
            # Pixel-perfect pickup check against the balloon.
            if pygame.sprite.collide_mask(self.balloon, bonus):
                self.bonus += self.bonusScore
                bonus.kill()
        # Once the newest hurdle has fallen 200px from the top, spawn the next wave.
        if self.current_hurdle.rect.y > 200 :
            for i in range(self.hurdle_count):
                self.add_hurlde()
        # Number of hurdles currently below the balloon in this frame.
        passedCount = 0
        # Move, collide and score every hurdle.
        for hurdle in self.hurdles:
            # Pixel-perfect hit check against the balloon.
            if pygame.sprite.collide_mask(self.balloon, hurdle):
                self.lost_lives += 1
                # Out of lives: freeze movement and switch to the game-over state.
                if (self.lost_lives == self.lives) :
                    self.speedY = 0
                    self.hurdle_speed = 0
                    self.gstatemachine.change("over", screen=self.screen, width=self.wwidth, height=self.wheight, score=self.score, gstatemachine=self.gstatemachine)
                self.respawn()
                # Write(text=f"{self.lives - self.lost_lives} lives remaining", color=THECOLORS["goldenrod"], screen=self.screen, x=self.wwidth//2, y=self.wheight//2, center=True)
                pygame.time.wait(1000)
            # Advance the hurdle downwards.
            hurdle.rect.y += self.hurdle_speed
            # Hurdles below the balloon count as "passed" and slightly raise the speed.
            if hurdle.rect.y > self.balloon.rect.y + self.balloon.rect.height:
                passedCount += 1
                self.hurdle_speed += 0.001
            # Remove hurdles that scrolled past the bottom edge.
            if hurdle.rect.y >= self.wheight :
                self.countDeleted += 1
                hurdle.kill()
        # Score = hurdles removed + hurdles currently passed + bonus points.
        self.score = self.countDeleted + passedCount + self.bonus
        # From 100 points on, two hurdles are spawned per wave.
        if self.score >= 100 : self.hurdle_count = 2
        # Keep the balloon inside the screen.
        # NOTE(review): the next line compares rect.y against self.wwidth;
        # self.wheight looks intended — confirm before changing.
        if self.balloon.rect.y > self.wwidth // 2 + 100: self.balloon.rect.y -= self.speedY
        if self.balloon.rect.left >= 0 and self.balloon.rect.right <= self.wwidth : self.balloon.rect.x += self.speedX
        if self.balloon.rect.left <= 0 : self.balloon.rect.left = 2
        if self.balloon.rect.right >= self.wwidth : self.balloon.rect.right = self.wwidth - 2
        # Let the balloon sprite update its own animation.
        self.balloon.update()
    def enter(self, **params):
        """
        Called by the state machine when this state becomes active.

        Expected keyword arguments: ``screen`` (display surface), ``width``
        and ``height`` (window size), and ``statemachine`` (global state machine).
        """
        # Re-run __init__ so every game variable is reset on (re-)entry.
        self.__init__()
        # Unpack the context passed by the state machine during the transition.
        self.screen = params['screen']
        self.wwidth = params['width']
        self.wheight = params['height']
        self.gstatemachine = params['statemachine']
        # Create the player's balloon near the bottom centre of the screen.
        self.balloon = Balloon()
        self.balloon.rect.center = (self.wwidth // 2, self.wheight - 50)
        self.all_sprites.add(self.balloon)
        # Spawn the first hurdle so the game starts immediately.
        self.add_hurlde()
def add_hurlde(self) -> None:
"""
This function will add the hurdles on screen. It reduces some code and makes game code more readable.
usage:
self.add_hurlde()
"""
self.current_hurdle = Hurdle()
self.current_hurdle.rect.center = (randrange(self.current_hurdle.rect.width, self.wwidth - self.current_hurdle.rect.width), 0)
self.all_sprites.add(self.current_hurdle)
self.hurdles.add(self.current_hurdle)
def add_bonus(self) -> None:
"""
This function will add the Bonuses on screen. It reduces some code and makes game code more readable.
usage:
self.add_bonus()
"""
self.current_bonus = Bonus()
self.current_bonus.rect.center = (randrange(self.current_bonus.rect.width, self.wwidth - self.current_bonus.rect.width), -50)
self.all_sprites.add(self.current_bonus)
self.bonusgroup.add(self.current_bonus)
self.create_bonus = False
def respawn(self) -> None:
"""
This function respawns the balloon after getting hit by any hurdle.
"""
self.balloon.rect.x = randrange(0, self.wwidth - self.balloon.rect.width - 10)
for hurdle in self.hurdles:
while pygame.sprite.collide_mask(self.balloon, hurdle):
self.balloon.rect.x = randrange(0, self.wwidth - self.balloon.rect.width - 10) | 37.865306 | 179 | 0.611512 |
b22e67f7138ada3a73632802ac5130f48d0b9788 | 455 | py | Python | env/lib/python3.8/site-packages/plotly/validators/sankey/_customdatasrc.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/sankey/_customdatasrc.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/sankey/_customdatasrc.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``sankey.customdatasrc`` source attribute."""

    def __init__(self, plotly_name="customdatasrc", parent_name="sankey", **kwargs):
        # Pull the defaulted options out of kwargs before delegating, so
        # explicit caller overrides still win.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| 35 | 84 | 0.663736 |
b00e79a01cc33cb42d1a822568722ccf0b8176bc | 5,120 | py | Python | deepctr/models/multitask/mmoe.py | dzzxjl/DeepCTR | ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e | [
"Apache-2.0"
] | 6,192 | 2017-12-05T03:02:35.000Z | 2022-03-31T20:59:30.000Z | deepctr/models/multitask/mmoe.py | dzzxjl/DeepCTR | ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e | [
"Apache-2.0"
] | 362 | 2018-04-15T06:53:20.000Z | 2022-03-21T15:03:02.000Z | deepctr/models/multitask/mmoe.py | dzzxjl/DeepCTR | ec3fa832865c14aa2cc843be2b1eab1bfa7b3e4e | [
"Apache-2.0"
] | 1,960 | 2017-12-05T03:16:04.000Z | 2022-03-31T06:37:00.000Z | """
Author:
Mincai Lai, laimc@shanghaitech.edu.cn
Weichen Shen, weichenswc@163.com
Reference:
[1] Ma J, Zhao Z, Yi X, et al. Modeling task relationships in multi-task learning with multi-gate mixture-of-experts[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 2018.(https://dl.acm.org/doi/abs/10.1145/3219819.3220007)
"""
import tensorflow as tf
from ...feature_column import build_input_features, input_from_feature_columns
from ...layers.core import PredictionLayer, DNN
from ...layers.utils import combined_dnn_input, reduce_sum
def MMOE(dnn_feature_columns, num_experts=3, expert_dnn_hidden_units=(256, 128), tower_dnn_hidden_units=(64,),
         gate_dnn_hidden_units=(), l2_reg_embedding=0.00001, l2_reg_dnn=0, seed=1024, dnn_dropout=0,
         dnn_activation='relu',
         dnn_use_bn=False, task_types=('binary', 'binary'), task_names=('ctr', 'ctcvr')):
    """Instantiates the Multi-gate Mixture-of-Experts multi-task learning architecture.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param num_experts: integer, number of experts.
    :param expert_dnn_hidden_units: list, list of positive integers or empty list, the layer number and units in each layer of the expert DNN.
    :param tower_dnn_hidden_units: list, list of positive integers or empty list, the layer number and units in each layer of the task-specific DNN.
    :param gate_dnn_hidden_units: list, list of positive integers or empty list, the layer number and units in each layer of the gate DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
    :param task_types: list of str, indicating the loss of each task, ``"binary"`` for binary logloss, ``"regression"`` for regression loss. e.g. ['binary', 'regression']
    :param task_names: list of str, indicating the prediction target of each task
    :return: a Keras model instance
    """
    # --- argument validation ---
    num_tasks = len(task_names)
    if num_tasks <= 1:
        raise ValueError("num_tasks must be greater than 1")
    if num_experts <= 1:
        raise ValueError("num_experts must be greater than 1")
    if len(task_types) != num_tasks:
        raise ValueError("num_tasks must be equal to the length of task_types")
    for task_type in task_types:
        if task_type not in ['binary', 'regression']:
            raise ValueError("task must be binary or regression, {} is illegal".format(task_type))
    # --- shared input / embedding layers ---
    features = build_input_features(dnn_feature_columns)
    inputs_list = list(features.values())
    sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
                                                                         l2_reg_embedding, seed)
    dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
    # build expert layers: every expert sees the same shared dnn_input
    expert_outs = []
    for i in range(num_experts):
        expert_network = DNN(expert_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed,
                             name='expert_' + str(i))(dnn_input)
        expert_outs.append(expert_network)
    expert_concat = tf.keras.layers.Lambda(lambda x: tf.stack(x, axis=1))(expert_outs) # None,num_experts,dim
    mmoe_outs = []
    for i in range(num_tasks): # one mmoe layer: nums_tasks = num_gates
        # build gate layers: per-task softmax over the experts
        gate_input = DNN(gate_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed,
                         name='gate_' + task_names[i])(dnn_input)
        gate_out = tf.keras.layers.Dense(num_experts, use_bias=False, activation='softmax',
                                         name='gate_softmax_' + task_names[i])(gate_input)
        gate_out = tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1))(gate_out)
        # gate-weighted sum of the expert outputs -> (None, dim)
        gate_mul_expert = tf.keras.layers.Lambda(lambda x: reduce_sum(x[0] * x[1], axis=1, keep_dims=False),
                                                 name='gate_mul_expert_' + task_names[i])([expert_concat, gate_out])
        mmoe_outs.append(gate_mul_expert)
    task_outs = []
    for task_type, task_name, mmoe_out in zip(task_types, task_names, mmoe_outs):
        # build tower layer: task-specific head on top of its mixed expert output
        tower_output = DNN(tower_dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed,
                           name='tower_' + task_name)(mmoe_out)
        logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(tower_output)
        output = PredictionLayer(task_type, name=task_name)(logit)
        task_outs.append(output)
    model = tf.keras.models.Model(inputs=inputs_list, outputs=task_outs)
    return model
| 53.333333 | 280 | 0.698047 |
5b61abd55cdf63bf7fc00c18d9aa4400e616fc3c | 16,447 | py | Python | models/shufflenetv2plus.py | ZAKAUDD/LightNet | 58353b28d33e69cc877db878c4a888aabc2118ce | [
"MIT"
] | 737 | 2018-03-21T12:28:04.000Z | 2021-07-07T16:03:09.000Z | models/shufflenetv2plus.py | ZAKAUDD/LightNet | 58353b28d33e69cc877db878c4a888aabc2118ce | [
"MIT"
] | 23 | 2018-03-22T01:19:53.000Z | 2021-03-26T15:08:26.000Z | models/shufflenetv2plus.py | ZAKAUDD/LightNet | 58353b28d33e69cc877db878c4a888aabc2118ce | [
"MIT"
] | 189 | 2018-03-22T08:55:43.000Z | 2021-07-01T12:14:08.000Z | import torch.nn.functional as F
import torch.nn as nn
import torch
from modules import SCSEBlock, InPlaceABN, ASPPInPlaceABNBlock, InPlaceABNWrapper
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
def conv3x3(in_channels, out_channels, stride=1, bias=True, groups=1, dilate=1):
    """Build a 3x3 convolution whose padding equals its dilation, so the
    spatial size is preserved when ``stride == 1``."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=dilate,
        dilation=dilate,
        groups=groups,
        bias=bias,
    )
def conv1x1(in_channels, out_channels, groups=1):
    """Build a 1x1 (pointwise) convolution.

    - plain pointwise convolution when ``groups == 1``
    - grouped pointwise convolution when ``groups > 1``
    """
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=1,
        stride=1,
        groups=groups,
    )
def channel_shuffle(x, groups):
    """Interleave the channels of ``x`` across ``groups`` groups (ShuffleNet).

    (N, C, H, W) -> split C into (groups, C//groups) -> swap the two group
    axes -> flatten back to (N, C, H, W).
    """
    batch, channels, height, width = x.size()
    per_group = channels // groups
    shuffled = x.reshape(batch, groups, per_group, height, width)
    # contiguous() is required because transpose() returns a non-contiguous view.
    shuffled = shuffled.transpose(1, 2).contiguous()
    return shuffled.reshape(batch, channels, height, width)
class ShuffleUnit(nn.Module):
    """One ShuffleNet unit: grouped 1x1 compress -> channel shuffle ->
    depthwise 3x3 -> grouped 1x1 expand, combined with the input either by
    residual addition ('add') or channel concatenation ('concat')."""
    def __init__(self, in_channels, out_channels, groups=3,
                 dilate=1, grouped_conv=True, combine='add', up=False):
        super(ShuffleUnit, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.grouped_conv = grouped_conv
        self.combine = combine
        self.groups = groups
        # Bottleneck width is a quarter of the output width (ResNet-style).
        self.bottleneck_channels = self.out_channels // 4
        # define the type of ShuffleUnit
        if self.combine == 'add':
            # ShuffleUnit Figure 2b: stride-1 residual unit.
            self.depthwise_stride = 1
            self.dilate = dilate
            self.up = False
            self._combine_func = self._add
        elif self.combine == 'concat':
            # ShuffleUnit Figure 2c: downsampling (stride 2) unit, unless
            # `up` is set, in which case the unit keeps stride 1 + dilation.
            self.depthwise_stride = 1 if up is True else 2
            self.dilate = dilate if up is True else 1
            self.up = up
            self._combine_func = self._concat
            # ensure output of concat has the same channels as
            # original output channels.
            self.out_channels -= self.in_channels
        else:
            # NOTE(review): the concatenated literals below produce a message
            # with no spaces between the fragments — cosmetic only.
            raise ValueError("Cannot combine tensors with \"{}\"" \
                             "Only \"add\" and \"concat\" are" \
                             "supported".format(self.combine))
        # Use a 1x1 grouped or non-grouped convolution to reduce input channels
        # to bottleneck channels, as in a ResNet bottleneck module.
        # NOTE: Do not use group convolution for the first conv1x1 in Stage 2.
        self.first_1x1_groups = self.groups if grouped_conv else 1
        self.g_conv_1x1_compress = self._make_grouped_conv1x1(self.in_channels,
                                                              self.bottleneck_channels,
                                                              self.first_1x1_groups,
                                                              batch_norm=True,
                                                              relu=True)
        # 3x3 depthwise convolution followed by batch normalization
        self.depthwise_conv3x3 = conv3x3(self.bottleneck_channels,
                                         self.bottleneck_channels,
                                         stride=self.depthwise_stride,
                                         groups=self.bottleneck_channels,
                                         dilate=self.dilate)
        self.bn_after_depthwise = nn.BatchNorm2d(self.bottleneck_channels)
        # Use 1x1 grouped convolution to expand from
        # bottleneck_channels to out_channels
        self.g_conv_1x1_expand = self._make_grouped_conv1x1(self.bottleneck_channels,
                                                            self.out_channels,
                                                            self.groups,
                                                            batch_norm=True,
                                                            relu=False)
    @staticmethod
    def _add(x, out):
        # residual connection
        return x + out
    @staticmethod
    def _concat(x, out):
        # concatenate along channel axis
        return torch.cat((x, out), dim=1)
    def _make_grouped_conv1x1(self, in_channels, out_channels, groups,
                              batch_norm=True, relu=False):
        """Build a 1x1 grouped conv, optionally followed by BN and ReLU.
        Returns the bare conv when neither BN nor ReLU was requested."""
        modules = OrderedDict()
        conv = conv1x1(in_channels, out_channels, groups=groups)
        modules['conv1x1'] = conv
        if batch_norm:
            modules['batch_norm'] = nn.BatchNorm2d(out_channels)
        if relu:
            modules['relu'] = nn.ReLU()
        if len(modules) > 1:
            return nn.Sequential(modules)
        else:
            return conv
    def forward(self, x):
        # save for combining later with output
        residual = x
        if self.combine == 'concat':
            # Shortcut branch of the concat unit: 3x3 average pool (stride 2),
            # undone by bilinear upsampling when the unit keeps full resolution.
            residual = F.avg_pool2d(residual, kernel_size=3, stride=2, padding=1)
            if self.up is True:
                # NOTE(review): F.upsample is deprecated in newer PyTorch in
                # favour of F.interpolate — behaviour-identical here.
                residual = F.upsample(residual, scale_factor=2, mode="bilinear")
        out = self.g_conv_1x1_compress(x)
        out = channel_shuffle(out, self.groups)
        out = self.depthwise_conv3x3(out)
        out = self.bn_after_depthwise(out)
        out = self.g_conv_1x1_expand(out)
        out = self._combine_func(residual, out)
        return F.relu(out)
class ShuffleNetV2Plus(nn.Module):
    """ShuffleNet-based semantic segmentation backbone.

    Encoder: stem conv + maxpool, then three dilated ShuffleUnit stages kept
    at 1/8 resolution. Decoder (only when ``n_class != 0``): SCSE-weighted
    multi-scale concat -> ASPP -> score head upsampled to the input size.
    """
    def __init__(self, n_class=19, groups=3, in_channels=3, in_size=(448, 896),
                 out_sec=256, aspp_sec=(12, 24, 36), norm_act=InPlaceABN):
        """ShuffleNet constructor.
        Arguments:
            groups (int, optional): number of groups to be used in grouped
                1x1 convolutions in each ShuffleUnit. Default is 3 for best
                performance according to original paper.
            in_channels (int, optional): number of channels in the input tensor.
                Default is 3 for RGB image inputs.
            n_class (int, optional): number of classes to predict. Default is
                19 (Cityscapes); 0 disables the segmentation head entirely.
        """
        super(ShuffleNetV2Plus, self).__init__()
        self.groups = groups
        self.stage_repeats = [3, 7, 3]
        self.in_channels = in_channels
        self.n_class = n_class
        # index 0 is invalid and should never be called.
        # only used for indexing convenience.
        if groups == 1:
            # NOTE(review): 567 breaks the doubling pattern (144, 288, ...);
            # the original ShuffleNet table uses 576 — confirm before changing.
            self.stage_out_channels = [-1, 24, 144, 288, 567]
        elif groups == 2:
            self.stage_out_channels = [-1, 24, 200, 400, 800]
        elif groups == 3:
            self.stage_out_channels = [-1, 24, 240, 480, 960]
        elif groups == 4:
            self.stage_out_channels = [-1, 24, 272, 544, 1088]
        elif groups == 8:
            self.stage_out_channels = [-1, 24, 384, 768, 1536]
        else:
            raise ValueError(
                """{} groups is not supported for
                1x1 Grouped Convolutions""".format(groups))
        # Stage 1 always has 24 output channels
        self.conv1 = conv3x3(self.in_channels,
                             self.stage_out_channels[1], # stage 1
                             stride=2)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Stage 2 (stride 2 inside the first unit, then dilation 2)
        self.stage2 = self._make_stage(2, dilate=2)
        # Stage 3 (resolution kept via up=True, dilation 4)
        self.stage3 = self._make_stage(3, dilate=4)
        # Stage 4 (resolution kept via up=True, dilation 8)
        self.stage4 = self._make_stage(4, dilate=8)
        # Channel count of the multi-scale concat fed to the SE block:
        # two 24-channel stem skips + stages 2-4.
        self.last_channel = (2 * self.stage_out_channels[1] +
                             self.stage_out_channels[2] +
                             self.stage_out_channels[3] +
                             self.stage_out_channels[4])
        self.out_se = nn.Sequential(SCSEBlock(channel=self.last_channel, reduction=16))
        if self.n_class != 0:
            # Segmentation head: ASPP over the fused features, SE reweighting
            # of the decoder concat, then a dilated score block + upsample.
            self.aspp = nn.Sequential(ASPPInPlaceABNBlock(self.last_channel, out_sec,
                                                          feat_res=(int(in_size[0] / 8), int(in_size[1] / 8)),
                                                          aspp_sec=aspp_sec, norm_act=norm_act))
            in_stag2_up_chs = 2 * self.stage_out_channels[1]
            self.score_se = nn.Sequential(SCSEBlock(channel=out_sec + in_stag2_up_chs, reduction=16))
            self.score = nn.Sequential(OrderedDict([("norm.1", norm_act(out_sec + in_stag2_up_chs)),
                                                    ("conv.1", nn.Conv2d(out_sec + in_stag2_up_chs,
                                                                         out_sec + in_stag2_up_chs,
                                                                         kernel_size=3, stride=1, padding=2,
                                                                         dilation=2, bias=False)),
                                                    ("norm.2", norm_act(out_sec + in_stag2_up_chs)),
                                                    ("conv.2", nn.Conv2d(out_sec + in_stag2_up_chs, self.n_class,
                                                                         kernel_size=1, stride=1, padding=0,
                                                                         bias=True)),
                                                    ("up1", nn.Upsample(size=in_size, mode='bilinear'))]))
        self._initialize_weights()
    def _initialize_weights(self):
        """Kaiming init for convs, constant init for BN, small normal for Linear.
        NOTE(review): init.kaiming_normal/constant/normal are the deprecated
        (non-underscore) spellings kept for the original PyTorch version."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)
    def _make_stage(self, stage, dilate=1):
        """Assemble one stage: a 'concat' ShuffleUnit followed by
        stage_repeats[stage-2] residual ('add') ShuffleUnits."""
        modules = OrderedDict()
        stage_name = "ShuffleUnit_Stage{}".format(stage)
        # First ShuffleUnit in the stage
        # 1. non-grouped 1x1 convolution (i.e. pointwise convolution)
        # is used in Stage 2. Group convolutions used everywhere else.
        grouped_conv = stage > 2
        # 2. concatenation unit is always used.
        if stage >= 3:
            # up=True keeps the 1/8 resolution and applies the stage dilation.
            first_module = ShuffleUnit(self.stage_out_channels[stage - 1],
                                       self.stage_out_channels[stage],
                                       groups=self.groups,
                                       dilate=dilate,
                                       grouped_conv=grouped_conv,
                                       combine='concat',
                                       up=True)
        else:
            first_module = ShuffleUnit(self.stage_out_channels[stage - 1],
                                       self.stage_out_channels[stage],
                                       groups=self.groups,
                                       dilate=1,
                                       grouped_conv=grouped_conv,
                                       combine='concat',
                                       up=False)
        modules[stage_name + "_0"] = first_module
        # add more ShuffleUnits depending on pre-defined number of repeats
        for i in range(self.stage_repeats[stage - 2]):
            name = stage_name + "_{}".format(i + 1)
            module = ShuffleUnit(self.stage_out_channels[stage],
                                 self.stage_out_channels[stage],
                                 groups=self.groups,
                                 dilate=dilate,
                                 grouped_conv=True,
                                 combine='add', up=False)
            modules[name] = module
        return nn.Sequential(modules)
    # +++++++++++++++++++++++++++++++++++++++++++++++++++ #
    # channel_shuffle: shuffle channels in groups
    # +++++++++++++++++++++++++++++++++++++++++++++++++++ #
    @staticmethod
    def _channel_shuffle(x, groups):
        """
        Channel shuffle operation (unused duplicate of the module-level
        channel_shuffle helper, kept for compatibility)
        :param x: input tensor
        :param groups: split channels into groups
        :return: channel shuffled tensor
        """
        batch_size, num_channels, height, width = x.data.size()
        channels_per_group = num_channels // groups
        # reshape
        x = x.view(batch_size, groups, channels_per_group, height, width)
        # transpose
        # - contiguous() required if transpose() is used before view().
        # See https://github.com/pytorch/pytorch/issues/764
        x = torch.transpose(x, 1, 2).contiguous().view(batch_size, -1, height, width)
        return x
    def forward(self, x):
        """Encode, fuse multi-scale features, and (when n_class != 0) decode
        to a per-pixel score map at the configured input size."""
        stg0 = self.conv1(x) # [24, H/2, W/2]
        stg1 = self.maxpool(stg0) # [24, H/4, W/4]
        stg2 = self.stage2(stg1) # [240, H/8, W/8]
        stg3 = self.stage3(stg2) # [480, H/8, W/8]
        stg4 = self.stage4(stg3) # [960, H/8, W/8]
        # Pooled copies of the stem features used as skip connections.
        stg1_1 = F.avg_pool2d(input=stg0, kernel_size=3, stride=2, padding=1) # 1/4
        stg1_2 = F.avg_pool2d(input=stg1_1, kernel_size=3, stride=2, padding=1) # 1/8
        stg1_3 = F.max_pool2d(input=stg1, kernel_size=3, stride=2, padding=1) # 1/8
        # global average pooling layer
        # (N, 24+24+240+480+960, 56, 112) 1/8
        stg5 = self.out_se(torch.cat([stg2, stg3, stg4, stg1_2, stg1_3], dim=1))
        # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
        # 2. Decoder: multi-scale feature fusion
        # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
        if self.n_class != 0:
            # (N, 24+240+480+960, H/8, W/8) -> (N, 256, H/4, W/4)
            de_stg1 = self.aspp(stg5)[1]
            # (N, 256+24+24, H/4, W/4)
            de_stg1 = self.score_se(torch.cat([de_stg1, stg1, stg1_1], dim=1))
            # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
            # 3. Classifier: pixel-wise classification-segmentation
            # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
            net_out = self.score(de_stg1)
            return net_out
        else:
            return stg5
if __name__ == "__main__":
    # Smoke-test / weight-conversion script: builds the model on GPU, splices
    # pretrained ShuffleNet weights into it, then runs a dummy training loop.
    import os
    import time
    from scripts.loss import *
    from functools import partial
    os.environ["CUDA_VISIBLE_DEVICES"] = "1,0"
    net_h, net_w = 384, 768
    model = ShuffleNetV2Plus(n_class=19, groups=3, in_channels=3, in_size=(net_h, net_w),
                             out_sec=256, aspp_sec=(12, 24, 36),
                             norm_act=partial(InPlaceABNWrapper, activation="leaky_relu", slope=0.1)).cuda()
    model = torch.nn.DataParallel(model, device_ids=[0]).cuda()
    model_dict = model.state_dict()
    # Copy every pretrained tensor whose (DataParallel-prefixed) name matches
    # a parameter of this model, then persist the merged state dict.
    pre_weight = torch.load("/zfs/zhang/TrainLog/weights/shufflenet.pth.tar")
    pre_weight = pre_weight["state_dict"]
    pretrained_dict = {"module." + k: v for k, v in pre_weight.items() if "module." + k in model_dict}
    model_dict.update(pretrained_dict)
    state = {'model_state': model_dict}
    torch.save(state, "/zfs/zhang/TrainLog/weights/shufflenetv2plus_model.pkl")
    model.load_state_dict(model_dict)
    # Free the temporary dicts before entering the training loop.
    del pre_weight
    del model_dict
    del pretrained_dict
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.90, weight_decay=5e-4)
    loss_fn = bootstrapped_cross_entropy2d
    i = 0
    # Endless dummy-training loop on random input/target tensors.
    while True:
        i += 1
        print("iter :", i)
        model.train()
        dummy_input = Variable(torch.rand(1, 3, net_h, net_w).cuda(), requires_grad=True)
        dummy_target = Variable(torch.rand(1, net_h, net_w).cuda(), requires_grad=False).long()
        start_time = time.time()
        dummy_out = model(dummy_input)
        print("> Inference Time: {}".format(time.time() - start_time))
        optimizer.zero_grad()
        topk = 512 * 256
        loss = loss_fn(dummy_out, dummy_target, K=topk)
        # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom
        # (loss.item() in modern versions) — kept for the original version.
        print("> Loss: {}".format(loss.data[0]))
        loss.backward()
        optimizer.step()
| 40.311275 | 113 | 0.528485 |
b5306347c70bcded531715388b8a675da6ba6a67 | 195 | py | Python | qtools/__init__.py | imagect/qtools | debc3f3870fb61a240e1961aba01c08e69522bdc | [
"MIT"
] | null | null | null | qtools/__init__.py | imagect/qtools | debc3f3870fb61a240e1961aba01c08e69522bdc | [
"MIT"
] | null | null | null | qtools/__init__.py | imagect/qtools | debc3f3870fb61a240e1961aba01c08e69522bdc | [
"MIT"
] | null | null | null | from .app import qtApp
from .event import CallbackEvent
from .event import EventSpy
from qtpy.QtCore import QEvent
from .rxtool import QtUiScheduler
QEvent.registerEventType(CallbackEvent.typeid) | 32.5 | 46 | 0.851282 |
a21095303977244786bf47e1796edbeb12fca87d | 858 | py | Python | desktop/src/controllers/getProject.py | gabrielkiller13/Game-Project-Manager | ed78ec2252257837cf3c45d7aeb290c145e17782 | [
"MIT"
] | null | null | null | desktop/src/controllers/getProject.py | gabrielkiller13/Game-Project-Manager | ed78ec2252257837cf3c45d7aeb290c145e17782 | [
"MIT"
] | null | null | null | desktop/src/controllers/getProject.py | gabrielkiller13/Game-Project-Manager | ed78ec2252257837cf3c45d7aeb290c145e17782 | [
"MIT"
] | null | null | null | ###########################
# DOCUMENTS VISUALIZATION #
###########################
from src.database.tableCreations import *
from src.database.connection import *
# List all registers from database
def getGameProjects():
    """
    Fetch from the ``game_projects`` table.

    Returns ``['Busca feita com sucesso.', row]`` for the first row found,
    the error string 'O arquivo não pôde ser lido.' when the query fails, or
    ``None`` when the table is empty (same contract as before, which
    returned from inside the loop on the first row).
    """
    try:
        cursor.execute("SELECT * FROM game_projects")
    except Exception:  # narrowed from a bare except: (don't swallow SystemExit/KeyboardInterrupt)
        return 'O arquivo não pôde ser lido.'
    # The old fetchall() loop returned on its first iteration, discarding the
    # rest of the result set — fetchone() expresses that intent directly.
    row = cursor.fetchone()
    if row is not None:
        return ['Busca feita com sucesso.', row]
# Select and show an unique register
def getGameProject(game_cod):
    """
    Fetch the single ``game_projects`` row whose ``game_cod`` matches.

    Returns ``{'message': ..., 'register': row}`` on success (``register`` is
    ``None`` when no row matches) or ``0`` when the query fails.
    """
    try:
        # Cast to int before interpolating into the SQL text: game_cod is the
        # only user-controlled value, so this blocks SQL injection without
        # assuming a specific DB-API placeholder style.
        sql_select = """
            SELECT * FROM game_projects
            WHERE game_cod = {}
        """.format(int(game_cod))
        result = cursor.execute(sql_select)
    except Exception:  # narrowed from a bare except: (also catches a non-numeric game_cod)
        return 0
    return {'message': 'Busca feita com sucesso.', 'register': result.fetchone()}
0db69e71ad685d1264189ae720925c9d6f04cffe | 575 | py | Python | apps/core/utils.py | felix781/market-access-public-frontend | 26e7594a86976df941ba97b7d0084364837405db | [
"MIT"
] | null | null | null | apps/core/utils.py | felix781/market-access-public-frontend | 26e7594a86976df941ba97b7d0084364837405db | [
"MIT"
] | null | null | null | apps/core/utils.py | felix781/market-access-public-frontend | 26e7594a86976df941ba97b7d0084364837405db | [
"MIT"
] | null | null | null | import datetime
def get_future_date(days):
    """Return the date *days* days from now, at midnight, formatted like an
    HTTP cookie expiry string (e.g. ``Mon, 01-Jan-2024 00:00:00 GMT``).

    NOTE(review): the timestamp comes from the local clock but is labelled
    "GMT" — confirm callers expect local midnight here.
    """
    target = datetime.datetime.now() + datetime.timedelta(days=days)
    target = target.replace(hour=0, minute=0, second=0)
    return target.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
def convert_to_snake_case(value):
    """Lowercase *value*, turn spaces and hyphens into underscores, then drop
    every remaining character that is not a letter or an underscore (digits
    are removed too, matching the previous behaviour)."""
    lowered = value.lower().replace(" ", "_").replace("-", "_")
    return "".join(ch for ch in lowered if ch.isalpha() or ch == "_")
def chain(*iterables):
    """Yield every item of every iterable in *iterables*, in order — a
    minimal stand-in for ``itertools.chain``."""
    for current in iterables:
        for item in current:
            yield item
| 30.263158 | 72 | 0.65913 |
1f8c9e908462359aba6d58a890da1c7e16c2aa98 | 3,560 | py | Python | pyalgotrade/cn/tushare/test_get_trading_days.py | 01FinTech/pyalgotrade-cn | e917f69fb1f48619cf29295a1469b4659e0e1a46 | [
"Apache-2.0"
] | 1,000 | 2016-01-26T12:10:11.000Z | 2022-03-01T23:59:50.000Z | pyalgotrade/cn/tushare/test_get_trading_days.py | collinswei/pyalgotrade-cn | d8ba00e9a2bb609e3e925db17a9a97929c57f672 | [
"Apache-2.0"
] | 22 | 2016-01-26T15:14:09.000Z | 2019-01-30T02:36:38.000Z | pyalgotrade/cn/tushare/test_get_trading_days.py | collinswei/pyalgotrade-cn | d8ba00e9a2bb609e3e925db17a9a97929c57f672 | [
"Apache-2.0"
] | 613 | 2016-01-27T01:02:30.000Z | 2022-03-21T01:38:58.000Z | # Copyright 2011-2015 ZackZK
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: ZackZK <silajoin@sina.com>
"""
from unittest import TestCase
import mock
import pandas as pd
import datetime
from pyalgotrade.tushare.barfeed import get_trading_days
class TestGet_trading_days(TestCase):
    """Unit tests for barfeed.get_trading_days().

    The tushare module used by the bar feed is replaced by a mock whose
    get_hist_data() returns a hand-built pandas DataFrame indexed by
    'YYYY-MM-DD' date strings, so no network access happens.
    """
    @mock.patch('pyalgotrade.tushare.barfeed.ts')
    def test_get_trading_days(self, mock_tushare):
        """The two trading days immediately before the start day are returned."""
        start_day = datetime.datetime(2015, 8, 8)
        data = [['2015-08-06', 10.0, 11.0, 10.5, 10.0], ['2015-08-07', 10.0, 11.0, 10.5, 10.0]]
        COLUMNS = ['date', 'open', 'high', 'close', 'low']
        df = pd.DataFrame(data, columns=COLUMNS)
        df = df.set_index('date')
        mock_tushare.get_hist_data.return_value = df
        trading_days = get_trading_days(start_day, 2)
        self.assertEqual(2015, trading_days[0].year)
        self.assertEqual(8, trading_days[0].month)
        self.assertEqual(6, trading_days[0].day)
        self.assertEqual(2015, trading_days[1].year)
        self.assertEqual(8, trading_days[1].month)
        self.assertEqual(7, trading_days[1].day)
    @mock.patch('pyalgotrade.tushare.barfeed.ts')
    def test_get_trading_days_with_one_holiday(self, mock_tushare):
        """A weekend gap (Aug 8-9) between start day and data is skipped."""
        start_day = datetime.datetime(2015, 8, 10)
        data = [['2015-08-06', 10.0, 11.0, 10.5, 10.0], ['2015-08-07', 10.0, 11.0, 10.5, 10.0]]
        COLUMNS = ['date', 'open', 'high', 'close', 'low']
        df = pd.DataFrame(data, columns=COLUMNS)
        df = df.set_index('date')
        mock_tushare.get_hist_data.return_value = df
        trading_days = get_trading_days(start_day, 2)
        self.assertEqual(2015, trading_days[0].year)
        self.assertEqual(8, trading_days[0].month)
        self.assertEqual(6, trading_days[0].day)
        self.assertEqual(2015, trading_days[1].year)
        self.assertEqual(8, trading_days[1].month)
        self.assertEqual(7, trading_days[1].day)
    @mock.patch('pyalgotrade.tushare.barfeed.ts')
    def test_get_trading_days_with_two_holidays(self, mock_tushare):
        """Seven trading days spanning two weekend gaps (Aug 8-9 and 15-16)."""
        start_day = datetime.datetime(2015, 8, 18)
        data = [['2015-08-07', 10.0, 11.0, 10.5, 10.0], ['2015-08-10', 10.0, 11.0, 10.5, 10.0],
                ['2015-08-11', 10.0, 11.0, 10.5, 10.0], ['2015-08-12', 10.0, 11.0, 10.5, 10.0],
                ['2015-08-13', 10.0, 11.0, 10.5, 10.0], ['2015-08-14', 10.0, 11.0, 10.5, 10.0],
                ['2015-08-17', 10.0, 11.0, 10.5, 10.0]]
        COLUMNS = ['date', 'open', 'high', 'close', 'low']
        df = pd.DataFrame(data, columns=COLUMNS)
        df = df.set_index('date')
        mock_tushare.get_hist_data.return_value = df
        trading_days = get_trading_days(start_day, 7)
        self.assertEqual(7, len(trading_days))
        self.assertEqual(2015, trading_days[0].year)
        self.assertEqual(8, trading_days[0].month)
        self.assertEqual(7, trading_days[0].day)
        self.assertEqual(2015, trading_days[6].year)
        self.assertEqual(8, trading_days[6].month)
        self.assertEqual(17, trading_days[6].day)
| 37.87234 | 95 | 0.649438 |
e4fb796e8735c34723ffdafcae3beee2c6257e23 | 4,079 | py | Python | tianshou/policy/modelfree/pg.py | edieson/tianshou | 679a0ce9ad2f7090b5a642c71dd7d9babf318fb8 | [
"MIT"
] | null | null | null | tianshou/policy/modelfree/pg.py | edieson/tianshou | 679a0ce9ad2f7090b5a642c71dd7d9babf318fb8 | [
"MIT"
] | null | null | null | tianshou/policy/modelfree/pg.py | edieson/tianshou | 679a0ce9ad2f7090b5a642c71dd7d9babf318fb8 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from tianshou.data import Batch
from tianshou.policy import BasePolicy
class PGPolicy(BasePolicy):
    """Implementation of Vanilla Policy Gradient.
    :param torch.nn.Module model: a model following the rules in
        :class:`~tianshou.policy.BasePolicy`. (s -> logits)
    :param torch.optim.Optimizer optim: a torch.optim for optimizing the model.
    :param torch.distributions.Distribution dist_fn: for computing the action.
    :param float discount_factor: in [0, 1].
    .. seealso::
        Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
        explanation.
    """
    def __init__(self, model, optim, dist_fn=torch.distributions.Categorical,
                 discount_factor=0.99, reward_normalization=False, **kwargs):
        super().__init__(**kwargs)
        self.model = model
        self.optim = optim
        self.dist_fn = dist_fn
        assert 0 <= discount_factor <= 1, 'discount factor should in [0, 1]'
        self._gamma = discount_factor
        self._rew_norm = reward_normalization
        # float32 machine epsilon; guards the std() > 0 check in learn().
        self.__eps = np.finfo(np.float32).eps.item()
    def process_fn(self, batch, buffer, indice):
        r"""Compute the discounted returns for each frame:
        .. math::
            G_t = \sum_{i=t}^T \gamma^{i-t}r_i
        , where :math:`T` is the terminal time step, :math:`\gamma` is the
        discount factor, :math:`\gamma \in [0, 1]`.
        """
        # batch.returns = self._vanilla_returns(batch)
        # batch.returns = self._vectorized_returns(batch)
        # return batch
        # gae_lambda=1. reduces GAE to plain discounted Monte-Carlo returns.
        return self.compute_episodic_return(
            batch, gamma=self._gamma, gae_lambda=1.)
    def forward(self, batch, state=None, **kwargs):
        """Compute action over the given batch data.
        :return: A :class:`~tianshou.data.Batch` which has 4 keys:
            * ``act`` the action.
            * ``logits`` the network's raw output.
            * ``dist`` the action distribution.
            * ``state`` the hidden state.
        .. seealso::
            Please refer to :meth:`~tianshou.policy.BasePolicy.forward` for
            more detailed explanation.
        """
        logits, h = self.model(batch.obs, state=state)
        # A tuple of logits means the dist_fn takes several parameters
        # (e.g. mean and std for a Gaussian); a single tensor is passed as-is.
        if isinstance(logits, tuple):
            dist = self.dist_fn(*logits)
        else:
            dist = self.dist_fn(logits)
        act = dist.sample()
        return Batch(logits=logits, act=act, state=h, dist=dist)
    def learn(self, batch, batch_size=None, repeat=1, **kwargs):
        losses = []
        r = batch.returns
        # Optionally standardize returns; skipped when std is ~0 to avoid
        # dividing by (near) zero.
        if self._rew_norm and r.std() > self.__eps:
            batch.returns = (r - r.mean()) / r.std()
        for _ in range(repeat):
            for b in batch.split(batch_size):
                self.optim.zero_grad()
                dist = self(b).dist
                # NOTE(review): dist.logits assumes the distribution exposes
                # a ``logits`` tensor (true for Categorical) -- confirm for
                # other dist_fn choices.
                a = torch.tensor(b.act, device=dist.logits.device)
                r = torch.tensor(b.returns, device=dist.logits.device)
                # REINFORCE loss: maximize log-prob weighted by return.
                loss = -(dist.log_prob(a) * r).sum()
                loss.backward()
                self.optim.step()
                losses.append(loss.item())
        return {'loss': losses}
    # def _vanilla_returns(self, batch):
    #     returns = batch.rew[:]
    #     last = 0
    #     for i in range(len(returns) - 1, -1, -1):
    #         if not batch.done[i]:
    #             returns[i] += self._gamma * last
    #         last = returns[i]
    #     return returns
    # def _vectorized_returns(self, batch):
    #     # according to my tests, it is slower than _vanilla_returns
    #     # import scipy.signal
    #     convolve = np.convolve
    #     # convolve = scipy.signal.convolve
    #     rew = batch.rew[::-1]
    #     batch_size = len(rew)
    #     gammas = self._gamma ** np.arange(batch_size)
    #     c = convolve(rew, gammas)[:batch_size]
    #     T = np.where(batch.done[::-1])[0]
    #     d = np.zeros_like(rew)
    #     d[T] += c[T] - rew[T]
    #     d[T[1:]] -= d[T[:-1]] * self._gamma ** np.diff(T)
    #     return (c - convolve(d, gammas)[:batch_size])[::-1]
| 36.419643 | 79 | 0.573915 |
fb666c8f98052b9cbf07be54b96cf335dcc2fc9a | 1,015 | py | Python | test/test_del_contact.py | nkoshkina/Python_Training2 | 1c7d8e51af8d35c058e8040a8d904667c14f1698 | [
"Apache-2.0"
] | null | null | null | test/test_del_contact.py | nkoshkina/Python_Training2 | 1c7d8e51af8d35c058e8040a8d904667c14f1698 | [
"Apache-2.0"
] | null | null | null | test/test_del_contact.py | nkoshkina/Python_Training2 | 1c7d8e51af8d35c058e8040a8d904667c14f1698 | [
"Apache-2.0"
] | null | null | null | from model.contact import Contact
import random
def test_delete_some_contact(app, db, check_ui):
    """Delete a randomly chosen contact and verify the DB list shrinks
    accordingly (and matches the UI when check_ui is enabled)."""
    # Make sure there is at least one contact to delete.
    if not db.get_contact_list():
        app.contact.add_new(Contact("1n", "2n", "3n", "", "Title", "Comp", "address",
                                    "", "", "+7900", "+723456789",
                                    "test@test.com", "t@t2.com", "t@t3.com", "localhost",
                                    "3", "May", "1998", "13", "April", "2020",
                                    "sec address", "//test", "here are notes"))
    contacts_before = db.get_contact_list()
    victim = random.choice(contacts_before)
    app.contact.delete_contact_by_id(victim.id)
    expected = [c for c in contacts_before if c is not victim]
    contacts_after = db.get_contact_list()
    assert contacts_after == expected
    if check_ui:
        ui_contacts = app.contact.get_contacts_list()
        assert sorted(contacts_after, key=Contact.id_or_max) == \
               sorted(ui_contacts, key=Contact.id_or_max)
| 46.136364 | 89 | 0.571429 |
37ae01217dea63714868df8b3df20a3c95c5be5c | 4,902 | py | Python | toontown/fishing/FishPanel.py | cmarshall108/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | 1 | 2021-02-25T06:02:04.000Z | 2021-02-25T06:02:04.000Z | toontown/fishing/FishPanel.py | AnythingTechPro/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | null | null | null | toontown/fishing/FishPanel.py | AnythingTechPro/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | 2 | 2021-02-25T06:02:05.000Z | 2021-06-19T03:11:22.000Z | from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from toontown.pgui.DirectGui import *
from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
from direct.interval.IntervalGlobal import *
from toontown.fishing import FishGlobals
from toontown.fishing import FishPhoto
class FishPanel(DirectFrame):
    """Dialog that displays a caught fish: its photo, species name, weight,
    value, and an optional "new entry" / "new record" banner.

    Sends *doneEvent* (if given) when the player closes the panel.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('FishPanel')

    def __init__(self, fish = None, parent = aspect2d, doneEvent = None, **kw):
        optiondefs = (('relief', None, None),
         ('state', DGG.DISABLED, None),
         ('image', DGG.getDefaultDialogGeom(), None),
         ('image_color', ToontownGlobals.GlobalDialogColor, None),
         ('image_scale', (0.65, 1, 0.85), None),
         ('text', '', None),
         ('text_scale', 0.06, None),
         ('text_fg', (0, 0, 0, 1), None),
         ('text_pos', (0, 0.35, 0), None),
         ('text_font', ToontownGlobals.getInterfaceFont(), None),
         ('text_wordwrap', 13.5, None))
        self.defineoptions(kw, optiondefs)
        DirectFrame.__init__(self, parent)
        self.initialiseoptions(FishPanel)
        self.doneEvent = doneEvent
        self.fish = fish
        self.photo = None

    def destroy(self):
        """Tear down the panel; release the photo (it owns render nodes)."""
        if self.photo:
            self.photo.destroy()
            self.photo = None
        self.fish = None
        DirectFrame.destroy(self)

    def load(self):
        """Build all child widgets (labels, close button, photo) and fill
        them from the current fish via update()."""
        self.weight = DirectLabel(parent=self, pos=(0, 0, -0.28), relief=None, state=DGG.NORMAL, text='', text_scale=0.05, text_fg=(0, 0, 0, 1), text_pos=(0, 0.0, 0), text_font=ToontownGlobals.getInterfaceFont(), text_wordwrap=10.5)
        self.value = DirectLabel(parent=self, pos=(0, 0, -0.35), relief=None, state=DGG.NORMAL, text='', text_scale=0.05, text_fg=(0, 0, 0, 1), text_pos=(0, 0, 0), text_font=ToontownGlobals.getInterfaceFont(), text_wordwrap=10.5)
        self.mystery = DirectLabel(parent=self, pos=(-0.025, 0, -0.055), relief=None, state=DGG.NORMAL, text='?', text_scale=0.25, text_fg=(0, 0, 0, 1), text_pos=(0, 0, 0), text_font=ToontownGlobals.getInterfaceFont(), text_wordwrap=10.5)
        self.extraLabel = DirectLabel(parent=self, relief=None, state=DGG.NORMAL, text='', text_fg=(0.2, 0.8, 0.4, 1), text_font=ToontownGlobals.getSignFont(), text_scale=0.08, pos=(0, 0, 0.26))
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        self.cancel = DirectButton(parent=self, pos=(0.275, 0, -0.375), relief=None, state=DGG.NORMAL, image=(buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')), image_scale=(0.6, 1, 0.6), command=self.handleCancel)
        buttons.removeNode()
        self.photo = FishPhoto.FishPhoto(parent=self)
        self.update(self.fish)

    def update(self, fish):
        """Refresh every label and the photo from *fish* (may be None)."""
        self.fish = fish
        # Was "== None"; identity comparison is the correct idiom here.
        if self.fish is None:
            return
        self['text'] = self.fish.getSpeciesName()
        weight = self.fish.getWeight()
        conv = TTLocalizer.FishPageWeightConversion
        # Split the raw weight into "large" and "small" units, choosing
        # singular/plural localized strings for each part.
        large = weight / conv
        if large == 1:
            largeStr = TTLocalizer.FishPageWeightLargeS % large
        else:
            largeStr = TTLocalizer.FishPageWeightLargeP % large
        small = weight % conv
        if small == 1:
            smallStr = TTLocalizer.FishPageWeightSmallS % small
        else:
            smallStr = TTLocalizer.FishPageWeightSmallP % small
        self.weight['text'] = TTLocalizer.FishPageWeightStr + largeStr + smallStr
        value = self.fish.getValue()
        if value == 1:
            self.value['text'] = TTLocalizer.FishPageValueS % value
        else:
            self.value['text'] = TTLocalizer.FishPageValueP % value
        self.photo.update(fish)

    def setSwimBounds(self, *bounds):
        self.swimBounds = bounds

    def setSwimColor(self, *colors):
        self.swimColor = colors

    def handleCancel(self):
        """Close button callback: hide and notify the owner via doneEvent."""
        self.hide()
        if self.doneEvent:
            messenger.send(self.doneEvent)

    def show(self, code = FishGlobals.FishItem):
        """Display the panel; *code* selects the banner (plain catch,
        new species entry, or new weight record)."""
        messenger.send('wakeup')
        # The apply() builtin was removed in Python 3; plain argument
        # unpacking is equivalent and works on Python 2 as well.
        self.photo.setSwimBounds(*self.swimBounds)
        self.photo.setSwimColor(*self.swimColor)
        if code == FishGlobals.FishItem:
            self.extraLabel.hide()
        elif code == FishGlobals.FishItemNewEntry:
            self.extraLabel.show()
            self.extraLabel['text'] = TTLocalizer.FishingNewEntry
            self.extraLabel['text_scale'] = TTLocalizer.FPnewEntry
            self.extraLabel.setPos(0, 0, 0.26)
        elif code == FishGlobals.FishItemNewRecord:
            self.extraLabel.show()
            self.extraLabel['text'] = TTLocalizer.FishingNewRecord
            self.extraLabel['text_scale'] = TTLocalizer.FPnewRecord
        self.photo.show()
        DirectFrame.show(self)

    def hide(self):
        self.photo.hide()
        DirectFrame.hide(self)
b94c5cb695eb1be1211ca22a7b3330b7b69a2813 | 6,126 | py | Python | main.py | tpcstld/youtube | 7979683e60135563ae86becd41b6a7d1df84e0b6 | [
"MIT"
] | null | null | null | main.py | tpcstld/youtube | 7979683e60135563ae86becd41b6a7d1df84e0b6 | [
"MIT"
] | 1 | 2016-12-07T14:09:51.000Z | 2016-12-07T14:09:51.000Z | main.py | tpcstld/youtube | 7979683e60135563ae86becd41b6a7d1df84e0b6 | [
"MIT"
] | null | null | null | import os
import json
import threading
import unicodedata
import urllib.parse
from flask import Flask
from flask import render_template
from flask import jsonify
from flask import send_from_directory
from flask import request
from werkzeug.urls import url_quote
app = Flask(__name__)
from backend import list_files
from backend import status_holder
import download_request
from youtube import handler
from youtube import validator
from youtube.exceptions import YoutubeError
# Pages
@app.route('/')
def index():
    """Serve the main download form."""
    page = render_template('index.html')
    return page
@app.route('/quick')
def quick():
    """Chrome-autocomplete entry point.

    Serves the same page as '/', but the path lets client-side Javascript
    detect it and queue the download automatically.
    """
    page = render_template('index.html')
    return page
def _download_video(download):
    """Download and convert the video described by *download*.

    Runs in a worker thread, so every outcome -- success or failure -- is
    reported through status_holder instead of being raised; an unhandled
    exception here would just kill the thread silently.

    Args:
        download: A DownloadRequest object.
    """
    try:
        output = handler.initate_download(download)
        # Occasionally the reported filename does not exist on disk;
        # retry once while forcing an mp4 container.
        if not os.path.isfile(os.path.join(str(os.getcwd()), 'temp',
                str(output['filename']))):
            print("Downloaded file missing. Retrying and forcing mp4 filetype.")
            download.set_force_mp4_filetype(True)
            output = handler.initate_download(download)
        status_holder.set_finished(download, output)
    except YoutubeError as e:
        # Expected, user-facing failures (bad URL, unavailable video, ...).
        status_holder.set_error(download, str(e), 400)
    except Exception as e:
        # Unexpected bug. The original code re-raised here, which made the
        # two lines below unreachable and left the client polling forever;
        # record the error so the client gets a 500 instead.
        print("Exception in download:", e)
        status_holder.set_error(download, 'Internal Error', 500)
# APIs
@app.route('/api/download', methods=['POST'])
def download():
    """Accepts the download request.
    Doesn't directly download the video, but instead queues up a thread to
    download the video. This is so that we can return some result immediately,
    to satisfy Heroku's policy of returning requests within 30 seconds.
    Args: (Passed in through the form)
        url: A string containing the URL of the video, in any format.
        filetype: A string containing the filetype of the output, as either
            'audio' or 'video'
    Returns:
        JSON containing the progress of the download.
    """
    data = request.form
    try:
        # Note: this local shadows the view function's own name 'download'.
        # NOTE(review): e.message below relies on YoutubeError defining a
        # 'message' attribute (plain Python 3 exceptions do not) -- confirm.
        download = download_request.DownloadRequest(
            data.get('url'), data.get('filetype'))
    except YoutubeError as e:
        print(e)
        return jsonify(status='ERROR', message=e.message), 400
    if data.get('enable_trim') == "on":
        try:
            download.set_time_trimming(data.get("start_time"), data.get("end_time"))
        except YoutubeError as e:
            print(e)
            return jsonify(status='ERROR', message=e.message), 400
    cached_data = status_holder.get_entry(download)
    # Download not yet started
    if cached_data is None:
        # Download the video in another thread.
        # We use another thread so that we can return some result immediately,
        # in order to satisfy Heroku's "must return something" policy.
        thread = threading.Thread(
            target=_download_video,
            args=(download,),
        )
        # Daemon thread: don't block interpreter shutdown on an in-flight download.
        thread.daemon = True
        thread.start()
        # Long timeout, if download exceeds this timeout, I don't care anymore.
        status_holder.set_downloading(download)
        return jsonify(status='STARTING')
    elif cached_data['status'] == status_holder.DOWNLOADING_STATUS:
        # TODO: Change to "starting". Or change the other thing to "started".
        return jsonify(status='STARTED')
    elif cached_data['status'] == status_holder.FINISHED_STATUS:
        # We need to URL encode the key so it can be passed as a query parameter
        encoded_key = urllib.parse.quote(status_holder.get_cache_key(download))
        return jsonify(status='FINISHED', key=encoded_key,
                       **cached_data['data'])
    elif cached_data['status'] == status_holder.ERROR_STATUS:
        result = jsonify(status='ERROR', message=cached_data['message'])
        return result, cached_data['code']
    # Every cached status should have been handled above; fail loudly if not.
    assert False
@app.route('/api/file')
def get_file():
    """Gets the downloaded and processed file.
    We use the cache_key in order to keep track of the file's information.
    Args: (Passed in through request.args)
        key: The cache key of the specified video.
    Returns:
        The specified file over HTTP.
    """
    cache_key = request.args.get('key', None)
    print("Getting information for cache key:", cache_key)
    if not cache_key:
        return '', 400
    cached_data = status_holder.get_entry_from_key(cache_key)
    if not cached_data:
        return 'File not found. Please try again.', 400
    filename = cached_data['data']['filename']
    name = cached_data['data']['title']
    path = os.path.join(os.getcwd(), 'temp')
    # Logging
    print("Retrieving file:", path, filename, "of name:", name.encode('utf-8'))
    rv = send_from_directory(path, filename)
    # Need to support UTF-8
    # HTTP headers are latin-1; if the title fits, send it as a plain
    # 'filename'. Otherwise fall back to an ASCII-stripped 'filename' plus
    # a percent-encoded "UTF-8''..." 'filename*' parameter.
    try:
        name = name.encode('latin-1')
    except UnicodeEncodeError:
        names = {
            'filename': unicodedata.normalize('NFKD', name).encode('latin-1', 'ignore'),
            'filename*': "UTF-8''{}".format(url_quote(name)),
        }
    else:
        names = {'filename': name}
    rv.headers.set('Content-Disposition', 'attachment', **names)
    return rv
@app.route('/api/all_files')
def list_files_route():
    """Debug endpoint: list every downloaded file, one per line."""
    all_names = list_files.list_files()
    if all_names:
        return '\n'.join(all_names)
    # An empty response body makes Heroku report a 500, so always
    # return at least some text.
    return 'There are no files.'
@app.route('/api/clear')
def clear_files_and_cache():
    """Debug endpoint: delete every downloaded file and reset the
    status cache."""
    list_files.clear_files()
    status_holder.clear()
    return 'Cleared.'
if __name__ == '__main__':
app.run(debug=True)
| 30.939394 | 88 | 0.659811 |
88c7894332b22bbbec5c96e0e9f18dfd5021450d | 767 | py | Python | google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py | zoercai/python-spanner | ed7152adc37290c63e59865265f36c593d9b8da3 | [
"Apache-2.0"
] | null | null | null | google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py | zoercai/python-spanner | ed7152adc37290c63e59865265f36c593d9b8da3 | [
"Apache-2.0"
] | null | null | null | google/cloud/spanner_admin_instance_v1/services/instance_admin/__init__.py | zoercai/python-spanner | ed7152adc37290c63e59865265f36c593d9b8da3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import InstanceAdminClient
from .async_client import InstanceAdminAsyncClient
__all__ = (
"InstanceAdminClient",
"InstanceAdminAsyncClient",
)
| 30.68 | 74 | 0.754889 |
4c070ffe81b5eb1a2e73fc63d1905715d9a470ce | 9,274 | py | Python | test/python/opflow/test_state_op_meas_evals.py | Feigenbaum4669/qiskit-terra | 9e770b21991b2c030dc7ca0405aff71d29c9ea39 | [
"Apache-2.0"
] | null | null | null | test/python/opflow/test_state_op_meas_evals.py | Feigenbaum4669/qiskit-terra | 9e770b21991b2c030dc7ca0405aff71d29c9ea39 | [
"Apache-2.0"
] | 1 | 2019-10-03T12:22:41.000Z | 2019-10-03T12:22:41.000Z | test/python/opflow/test_state_op_meas_evals.py | nonhermitian/qiskit-terra | 6a2602a9ecf9b1a3345de1516b873ac7b3da587f | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-name-in-module,import-error
""" Test Operator construction, including OpPrimitives and singletons. """
import unittest
from test.python.opflow import QiskitOpflowTestCase
from ddt import ddt, data
import numpy
from qiskit.circuit import QuantumCircuit, Parameter
from qiskit.utils import QuantumInstance
from qiskit.opflow import StateFn, Zero, One, H, X, I, Z, Plus, Minus, CircuitSampler, ListOp
from qiskit.opflow.exceptions import OpflowError
@ddt
class TestStateOpMeasEvals(QiskitOpflowTestCase):
    """Tests of evals of Meas-Operator-StateFn combos.
    Tests that need a simulator import qiskit-aer locally and skip
    themselves when the import fails.
    """
    def test_statefn_overlaps(self):
        """state functions overlaps test"""
        wf = (4 * StateFn({"101010": 0.5, "111111": 0.3})) + ((3 + 0.1j) * (Zero ^ 6))
        wf_vec = StateFn(wf.to_matrix())
        # Dict-based and vector-based state functions must agree in every
        # pairing of the overlap.
        self.assertAlmostEqual(wf.adjoint().eval(wf), 14.45)
        self.assertAlmostEqual(wf_vec.adjoint().eval(wf_vec), 14.45)
        self.assertAlmostEqual(wf_vec.adjoint().eval(wf), 14.45)
        self.assertAlmostEqual(wf.adjoint().eval(wf_vec), 14.45)
    def test_wf_evals_x(self):
        """wf evals x test"""
        qbits = 4
        wf = ((Zero ^ qbits) + (One ^ qbits)) * (1 / 2 ** 0.5)
        # Note: wf = Plus^qbits fails because TensoredOp can't handle it.
        wf_vec = StateFn(wf.to_matrix())
        op = X ^ qbits
        # op = I^6
        self.assertAlmostEqual(wf.adjoint().eval(op.eval(wf)), 1)
        self.assertAlmostEqual(wf_vec.adjoint().eval(op.eval(wf)), 1)
        self.assertAlmostEqual(wf.adjoint().eval(op.eval(wf_vec)), 1)
        self.assertAlmostEqual(wf_vec.adjoint().eval(op.eval(wf_vec)), 1)
        # op = (H^X^Y)^2
        op = H ^ 6
        wf = ((Zero ^ 6) + (One ^ 6)) * (1 / 2 ** 0.5)
        wf_vec = StateFn(wf.to_matrix())
        # print(wf.adjoint().to_matrix() @ op.to_matrix() @ wf.to_matrix())
        self.assertAlmostEqual(wf.adjoint().eval(op.eval(wf)), 0.25)
        self.assertAlmostEqual(wf_vec.adjoint().eval(op.eval(wf)), 0.25)
        self.assertAlmostEqual(wf.adjoint().eval(op.eval(wf_vec)), 0.25)
        self.assertAlmostEqual(wf_vec.adjoint().eval(op.eval(wf_vec)), 0.25)
    def test_coefficients_correctly_propagated(self):
        """Test that the coefficients in SummedOp and states are correctly used."""
        try:
            from qiskit.providers.aer import Aer
        except Exception as ex:  # pylint: disable=broad-except
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        with self.subTest("zero coeff in SummedOp"):
            op = 0 * (I + Z)
            state = Plus
            self.assertEqual((~StateFn(op) @ state).eval(), 0j)
        # Fixed seeds so the shot-based readout below is reproducible.
        backend = Aer.get_backend("qasm_simulator")
        q_instance = QuantumInstance(backend, seed_simulator=97, seed_transpiler=97)
        op = I
        with self.subTest("zero coeff in summed StateFn and CircuitSampler"):
            state = 0 * (Plus + Minus)
            sampler = CircuitSampler(q_instance).convert(~StateFn(op) @ state)
            self.assertEqual(sampler.eval(), 0j)
        with self.subTest("coeff gets squared in CircuitSampler shot-based readout"):
            state = (Plus + Minus) / numpy.sqrt(2)
            sampler = CircuitSampler(q_instance).convert(~StateFn(op) @ state)
            self.assertAlmostEqual(sampler.eval(), 1 + 0j)
    def test_is_measurement_correctly_propagated(self):
        """Test if is_measurement property of StateFn is propagated to converted StateFn."""
        try:
            from qiskit.providers.aer import Aer
        except Exception as ex:  # pylint: disable=broad-except
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        backend = Aer.get_backend("qasm_simulator")
        q_instance = QuantumInstance(backend)  # no seeds needed since no values are compared
        state = Plus
        sampler = CircuitSampler(q_instance).convert(~state @ state)
        self.assertTrue(sampler.oplist[0].is_measurement)
    def test_parameter_binding_on_listop(self):
        """Test passing a ListOp with differing parameters works with the circuit sampler."""
        try:
            from qiskit.providers.aer import Aer
        except Exception as ex:  # pylint: disable=broad-except
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        x, y = Parameter("x"), Parameter("y")
        circuit1 = QuantumCircuit(1)
        circuit1.p(0.2, 0)
        circuit2 = QuantumCircuit(1)
        circuit2.p(x, 0)
        circuit3 = QuantumCircuit(1)
        circuit3.p(y, 0)
        bindings = {x: -0.4, y: 0.4}
        listop = ListOp([StateFn(circuit) for circuit in [circuit1, circuit2, circuit3]])
        sampler = CircuitSampler(Aer.get_backend("qasm_simulator"))
        sampled = sampler.convert(listop, params=bindings)
        # After conversion no free parameters may remain in any element.
        self.assertTrue(all(len(op.parameters) == 0 for op in sampled.oplist))
    def test_list_op_eval_coeff_with_nonlinear_combofn(self):
        """Test evaluating a ListOp with non-linear combo function works with coefficients."""
        state = One
        op = ListOp(5 * [I], coeff=2, combo_fn=numpy.prod)
        expr1 = ~StateFn(op) @ state
        expr2 = ListOp(5 * [~state @ I @ state], coeff=2, combo_fn=numpy.prod)
        self.assertEqual(expr1.eval(), 2)  # if the coeff is propagated too far the result is 4
        self.assertEqual(expr2.eval(), 2)
    def test_single_parameter_binds(self):
        """Test passing parameter binds as a dictionary to the circuit sampler."""
        try:
            from qiskit.providers.aer import Aer
        except Exception as ex:  # pylint: disable=broad-except
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        x = Parameter("x")
        circuit = QuantumCircuit(1)
        circuit.ry(x, 0)
        expr = ~StateFn(H) @ StateFn(circuit)
        sampler = CircuitSampler(Aer.get_backend("statevector_simulator"))
        res = sampler.convert(expr, params={x: 0}).eval()
        self.assertIsInstance(res, complex)
    @data("all", "last")
    def test_circuit_sampler_caching(self, caching):
        """Test caching all operators works."""
        try:
            from qiskit.providers.aer import Aer
        except Exception as ex:  # pylint: disable=broad-except
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
            return
        x = Parameter("x")
        circuit = QuantumCircuit(1)
        circuit.ry(x, 0)
        expr1 = ~StateFn(H) @ StateFn(circuit)
        expr2 = ~StateFn(X) @ StateFn(circuit)
        sampler = CircuitSampler(Aer.get_backend("statevector_simulator"), caching=caching)
        res1 = sampler.convert(expr1, params={x: 0}).eval()
        res2 = sampler.convert(expr2, params={x: 0}).eval()
        res3 = sampler.convert(expr1, params={x: 0}).eval()
        res4 = sampler.convert(expr2, params={x: 0}).eval()
        self.assertEqual(res1, res3)
        self.assertEqual(res2, res4)
        # "last" keeps only the most recent operator; "all" keeps both.
        if caching == "last":
            self.assertEqual(len(sampler._cached_ops.keys()), 1)
        else:
            self.assertEqual(len(sampler._cached_ops.keys()), 2)
    def test_adjoint_nonunitary_circuit_raises(self):
        """Test adjoint on a non-unitary circuit raises a OpflowError instead of CircuitError."""
        circuit = QuantumCircuit(1)
        circuit.reset(0)
        with self.assertRaises(OpflowError):
            _ = StateFn(circuit).adjoint()
    def test_evaluating_nonunitary_circuit_state(self):
        """Test evaluating a circuit works even if it contains non-unitary instruction (resets).
        TODO: allow this for (~StateFn(circuit) @ op @ StateFn(circuit)), but this requires
        refactoring how the AerPauliExpectation works, since that currently relies on
        composing with CircuitMeasurements
        """
        circuit = QuantumCircuit(1)
        circuit.initialize([0, 1], [0])
        op = Z
        res = (~StateFn(op) @ StateFn(circuit)).eval()
        self.assertAlmostEqual(-1 + 0j, res)
    def test_quantum_instance_with_backend_shots(self):
        """Test sampling a circuit where the backend has shots attached."""
        try:
            from qiskit.providers.aer import QasmSimulator
        except Exception as ex:  # pylint: disable=broad-except
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
        backend = QasmSimulator(shots=10)
        sampler = CircuitSampler(backend)
        res = sampler.convert(~Plus @ Plus).eval()
        self.assertAlmostEqual(res, 1 + 0j, places=2)
if __name__ == "__main__":
unittest.main()
| 41.587444 | 97 | 0.639099 |
da0aff2dd97792130f1f4fedba5a670c48aa2022 | 4,959 | py | Python | unrolr/utils/utils.py | jeeberhardt/unrolr | 76d432643525a1999a6b14d6af500b9ffb296b82 | [
"MIT"
] | 2 | 2019-06-05T19:44:25.000Z | 2021-07-25T04:42:25.000Z | unrolr/utils/utils.py | jeeberhardt/unrolr | 76d432643525a1999a6b14d6af500b9ffb296b82 | [
"MIT"
] | null | null | null | unrolr/utils/utils.py | jeeberhardt/unrolr | 76d432643525a1999a6b14d6af500b9ffb296b82 | [
"MIT"
] | 1 | 2019-06-21T16:57:52.000Z | 2019-06-21T16:57:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Jérôme Eberhardt 2016-2018
# Unrolr
#
# Utils functions
# Author: Jérôme Eberhardt <qksoneo@gmail.com>
#
# License: MIT
from __future__ import print_function
import os
import sys
if sys.version_info >= (3, ):
import importlib
else:
import imp
import h5py
import pyopencl as cl
import numpy as np
__author__ = "Jérôme Eberhardt"
__copyright__ = "Copyright 2018, Jérôme Eberhardt"
__lience__ = "MIT"
__maintainer__ = "Jérôme Eberhardt"
__email__ = "qksoneo@gmail.com"
def read_dataset(fname, dname, start=0, stop=-1, skip=1):
    """Read dataset *dname* from the HDF5 file *fname*.

    Frames are sliced as [start:stop:skip]; stop == -1 means "until the
    end".  Prints a message and returns None when the file cannot be
    opened.
    """
    try:
        with h5py.File(fname, 'r') as hdf5_file:
            dataset = hdf5_file[dname]
            if stop == -1:
                return dataset[start::skip,]
            return dataset[start:stop:skip,]
    except IOError:
        print("Error: cannot find file %s." % fname)
        return None
def save_dataset(fname, dname, data):
    """Write the 2D array *data* to HDF5 file *fname* under the name *dname*.

    Best-effort: a failure to create or fill the dataset is reported but
    does not raise, matching the original silent-swallow behaviour.
    """
    with h5py.File(fname, 'w') as w:
        try:
            dset = w.create_dataset(dname, (data.shape[0], data.shape[1]))
            dset[:] = data
        except Exception as e:
            # The previous bare "except: pass" hid every failure, including
            # KeyboardInterrupt; keep the best-effort contract but narrow
            # the catch and report what went wrong.
            print("Error: cannot save dataset %s to file %s (%s)." % (dname, fname, e))
        w.flush()
def transform_dihedral_to_metric(dihedral_timeseries):
    """Convert angles in radians to sine/cosine transformed coordinates.
    The output will be used as the PCA input for dihedral PCA (dPCA)

    Args:
        dihedral_timeseries (ndarray): dihedral angles in radians,
            shape (n_samples, n_features)

    Returns:
        ndarray: float32 array of shape (2 * n_samples, n_features) where
        row 2*i holds cos of row i and row 2*i + 1 holds sin of row i.
    """
    n_rows, n_cols = dihedral_timeseries.shape
    data = np.zeros(shape=(n_rows * 2, n_cols), dtype=np.float32)
    for row_id, angles in enumerate(dihedral_timeseries):
        data[2 * row_id] = np.cos(angles)
        data[2 * row_id + 1] = np.sin(angles)
    return data
def transform_dihedral_to_circular_mean(dihedral_timeseries):
    """Convert angles in radians to circular mean transformed angles.
    The output will be used as the PCA input for dihedral PCA+ (dPCA+)
    Args:
        dhedral_timeseries (ndarray): array containing dihedral angles, shape (n_samples, n_features)
    Returns:
        ndarray: circular mean transformed angles
    """
    # Element-wise this reproduces atan2(sin(a), cos(a)), i.e. each angle
    # mapped back into (-pi, pi], computed branch by branch below.
    cm = np.zeros(shape=dihedral_timeseries.shape, dtype=np.float32)
    # Create a flat view of the numpy arrays.
    cmf = cm.ravel()
    dtf = dihedral_timeseries.ravel()
    x = np.cos(dtf)
    y = np.sin(dtf)
    # In order to avoid undefined mean angles
    # NOTE(review): adding 1E6 where sin == 0 maps angles 0 and pi to
    # roughly +/- pi/2 via the arctan below; a tiny epsilon (e.g. 1E-6)
    # looks like the intent here -- confirm before relying on these values.
    zero_y = np.where(y == 0.)[0]
    if zero_y.size > 0:
        y[zero_y] += 1E6
    # Cases x > 0 and x < 0 are combined together
    nonzero_x = np.where(x != 0.)
    neg_x = np.where(x < 0.)
    sign_y = np.sign(y)
    # 1. x > 0
    cmf[nonzero_x] = np.arctan(y[nonzero_x] / x[nonzero_x])
    # 2. x < 0
    cmf[neg_x] += sign_y[neg_x] * np.pi
    # Case when x equal to 0
    zero_x = np.where(x == 0.)[0]
    if zero_x.size > 0:
        cmf[zero_x] = sign_y[zero_x] * (np.pi / 2.)
    return cm
def is_opencl_env_defined():
    """Return True when the PYOPENCL_CTX environment variable is set to a
    non-empty value, False otherwise."""
    return bool(os.environ.get("PYOPENCL_CTX"))
def path_module(module_name):
    """Return the directory of package *module_name*, or None.

    Only packages (modules with submodule_search_locations) resolve to a
    path on Python 3; plain modules and missing modules yield None.  The
    legacy imp-based lookup is kept for Python 2.
    """
    if sys.version_info >= (3, ):
        import importlib.machinery
        try:
            spec = importlib.machinery.PathFinder().find_spec(module_name)
        except (ImportError, AttributeError):
            spec = None
        # Plain (non-package) modules have submodule_search_locations set
        # to None; the old code fell through to the Python 2 'imp' branch
        # here and crashed with a NameError on Python 3.
        if spec is not None and spec.submodule_search_locations:
            return spec.submodule_search_locations[0]
        return None
    try:
        _, path, _ = imp.find_module(module_name)
        return os.path.abspath(path)
    except ImportError:
        return None
def max_conformations_from_dataset(fname, dname):
    """Get maximum number of conformations that can fit
    into the memory of the selected OpenCL device and
    also the step/interval.

    Args:
        fname (str): path of the HDF5 file containing the dataset
        dname (str): name of the dataset inside the HDF5 file

    Returns:
        tuple: (number of conformations, interval), or (None, None) when
        no interval produces a dataset small enough for the device
    """
    if not is_opencl_env_defined():
        print("Error: The environnment variable PYOPENCL_CTX is not defined.")
        print("Tip: python -c \"import pyopencl as cl; cl.create_some_context()\"")
        sys.exit(1)
    ctx = cl.create_some_context()
    max_size = int(ctx.devices[0].max_mem_alloc_size)
    try:
        with h5py.File(fname, 'r') as f:
            bytes_size = f[dname].dtype.itemsize
            n_conf, n_dim = f[dname].shape
            data_size = bytes_size * n_conf * n_dim
    except IOError:
        print("Error: cannot find file %s." % fname)
        # BUGFIX: without the dataset, data_size/n_conf are undefined and the
        # code below would raise NameError; abort instead of falling through.
        sys.exit(1)
    if data_size > max_size:
        """ Return the first interval that produces a dataset
        with a size inferior than max_size """
        for i in range(1, n_conf):
            if n_conf % i == 0:
                # BUGFIX: use integer division so a conformation count
                # (not a float) is computed and returned.
                tmp_size = (n_conf // i) * n_dim * bytes_size
                if tmp_size <= max_size:
                    return (n_conf // i, i)
        # Return None if we didn't find anything
        return (None, None)
    else:
        # BUGFIX: 'data_shape' was never defined (NameError); the whole
        # dataset fits, so keep every conformation with interval 1.
        return (n_conf, 1)
| 26.37766 | 101 | 0.620488 |
608faf8f25fe0a1179a843256b50185f9f141bd6 | 33,953 | py | Python | mypy/meet.py | dongweiming/mypy | e2668a236f9cceda2bab9314d20b5585ee63bec9 | [
"PSF-2.0"
] | 1 | 2021-06-12T22:24:40.000Z | 2021-06-12T22:24:40.000Z | Thonny/Lib/site-packages/mypy/meet.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | [
"MIT"
] | 30 | 2019-01-04T10:14:56.000Z | 2020-10-12T14:00:31.000Z | Thonny/Lib/site-packages/mypy/meet.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | [
"MIT"
] | null | null | null | from collections import OrderedDict
from typing import List, Optional, Tuple, Callable
from mypy.join import (
is_similar_callables, combine_similar_callables, join_type_list, unpack_callback_protocol
)
from mypy.types import (
Type, AnyType, TypeVisitor, UnboundType, NoneType, TypeVarType, Instance, CallableType,
TupleType, TypedDictType, ErasedType, UnionType, PartialType, DeletedType,
UninhabitedType, TypeType, TypeOfAny, Overloaded, FunctionLike, LiteralType,
ProperType, get_proper_type, get_proper_types, TypeAliasType
)
from mypy.subtypes import is_equivalent, is_subtype, is_callable_compatible, is_proper_subtype
from mypy.erasetype import erase_type
from mypy.maptype import map_instance_to_supertype
from mypy.typeops import tuple_fallback, make_simplified_union, is_recursive_pair
from mypy import state
# TODO Describe this module.
def trivial_meet(s: Type, t: Type) -> ProperType:
    """Return whichever operand (expanded) is a subtype of the other, else the bottom type."""
    if is_subtype(s, t):
        return get_proper_type(s)
    if is_subtype(t, s):
        return get_proper_type(t)
    # Neither side is a subtype of the other: fall back to the bottom type,
    # which is represented by NoneType when strict-optional is disabled.
    return UninhabitedType() if state.strict_optional else NoneType()
def meet_types(s: Type, t: Type) -> ProperType:
    """Return the greatest lower bound of two types."""
    if is_recursive_pair(s, t):
        # This case can trigger an infinite recursion, general support for this will be
        # tricky so we use a trivial meet (like for protocols).
        return trivial_meet(s, t)
    s, t = get_proper_type(s), get_proper_type(t)
    if isinstance(s, ErasedType):
        # An erased operand wins unconditionally.
        return s
    if isinstance(s, AnyType):
        # 'Any' is absorbed by the other operand.
        return t
    if isinstance(s, UnionType) and not isinstance(t, UnionType):
        # Make sure a union ends up on the visited side so its items are expanded.
        s, t = t, s
    return t.accept(TypeMeetVisitor(s))
def narrow_declared_type(declared: Type, narrowed: Type) -> Type:
    """Return the declared type narrowed down to another type."""
    # TODO: check infinite recursion for aliases here.
    declared = get_proper_type(declared)
    narrowed = get_proper_type(narrowed)
    if declared == narrowed:
        return declared
    if isinstance(declared, UnionType):
        # Narrow each union item independently and re-simplify the result.
        return make_simplified_union([narrow_declared_type(x, narrowed)
                                      for x in declared.relevant_items()])
    if not is_overlapping_types(declared, narrowed,
                                prohibit_none_typevar_overlap=True):
        # Disjoint types narrow down to the bottom type.
        return UninhabitedType() if state.strict_optional else NoneType()
    if isinstance(narrowed, UnionType):
        return make_simplified_union([narrow_declared_type(declared, x)
                                      for x in narrowed.relevant_items()])
    if isinstance(narrowed, AnyType):
        return narrowed
    if isinstance(declared, TypeType) and isinstance(narrowed, TypeType):
        return TypeType.make_normalized(narrow_declared_type(declared.item, narrowed.item))
    if isinstance(declared, (Instance, TupleType, TypeType, LiteralType)):
        return meet_types(declared, narrowed)
    if isinstance(declared, TypedDictType) and isinstance(narrowed, Instance):
        # Special case useful for selecting TypedDicts from unions using isinstance(x, dict).
        if (narrowed.type.fullname == 'builtins.dict' and
                all(isinstance(t, AnyType) for t in get_proper_types(narrowed.args))):
            return declared
        return meet_types(declared, narrowed)
    return narrowed
def get_possible_variants(typ: Type) -> List[Type]:
    """Take any "Union-like" type and return a list of its available "options".

    Exactly three kinds of types currently have "variants" / are "union-like":
    Unions, TypeVars with value restrictions, and Overloads. For those, return
    one entry per option. Any other type is returned as a singleton list --
    except a regular (unrestricted) TypeVar, for which the list contains its
    upper bound.

    Normalizing unions and overloads into the same shape lets the overlap
    checks reuse a single algorithm for both.
    """
    typ = get_proper_type(typ)
    if isinstance(typ, TypeVarType):
        # A value-restricted TypeVar behaves like a union of its values;
        # an unrestricted one is represented by its upper bound.
        return typ.values if typ.values else [typ.upper_bound]
    if isinstance(typ, UnionType):
        return list(typ.items)
    if isinstance(typ, Overloaded):
        # Note: doing 'return typ.items()' makes mypy
        # infer a too-specific return type of List[CallableType]
        return list(typ.items())
    return [typ]
def is_overlapping_types(left: Type,
                         right: Type,
                         ignore_promotions: bool = False,
                         prohibit_none_typevar_overlap: bool = False) -> bool:
    """Can a value of type 'left' also be of type 'right' or vice-versa?
    If 'ignore_promotions' is True, we ignore promotions while checking for overlaps.
    If 'prohibit_none_typevar_overlap' is True, we disallow None from overlapping with
    TypeVars (in both strict-optional and non-strict-optional mode).
    Returns True if the two types are considered at least partially overlapping.
    """
    left, right = get_proper_types((left, right))
    def _is_overlapping_types(left: Type, right: Type) -> bool:
        '''Encode the kind of overlapping check to perform.
        This function mostly exists so we don't have to repeat keyword arguments everywhere.'''
        return is_overlapping_types(
            left, right,
            ignore_promotions=ignore_promotions,
            prohibit_none_typevar_overlap=prohibit_none_typevar_overlap)
    # We should never encounter this type.
    if isinstance(left, PartialType) or isinstance(right, PartialType):
        assert False, "Unexpectedly encountered partial type"
    # We should also never encounter these types, but it's possible a few
    # have snuck through due to unrelated bugs. For now, we handle these
    # in the same way we handle 'Any'.
    #
    # TODO: Replace these with an 'assert False' once we are more confident.
    illegal_types = (UnboundType, ErasedType, DeletedType)
    if isinstance(left, illegal_types) or isinstance(right, illegal_types):
        return True
    # 'Any' may or may not be overlapping with the other type
    if isinstance(left, AnyType) or isinstance(right, AnyType):
        return True
    # When running under non-strict optional mode, simplify away types of
    # the form 'Union[A, B, C, None]' into just 'Union[A, B, C]'.
    if not state.strict_optional:
        if isinstance(left, UnionType):
            left = UnionType.make_union(left.relevant_items())
        if isinstance(right, UnionType):
            right = UnionType.make_union(right.relevant_items())
        left, right = get_proper_types((left, right))
    # We check for complete overlaps next as a general-purpose failsafe.
    # If this check fails, we start checking to see if there exists a
    # *partial* overlap between types.
    #
    # These checks will also handle the NoneType and UninhabitedType cases for us.
    if (is_proper_subtype(left, right, ignore_promotions=ignore_promotions)
            or is_proper_subtype(right, left, ignore_promotions=ignore_promotions)):
        return True
    # See the docstring for 'get_possible_variants' for more info on what the
    # following lines are doing.
    left_possible = get_possible_variants(left)
    right_possible = get_possible_variants(right)
    # We start by checking multi-variant types like Unions first. We also perform
    # the same logic if either type happens to be a TypeVar.
    #
    # Handling the TypeVars now lets us simulate having them bind to the corresponding
    # type -- if we deferred these checks, the "return-early" logic of the other
    # checks will prevent us from detecting certain overlaps.
    #
    # If both types are singleton variants (and are not TypeVars), we've hit the base case:
    # we skip these checks to avoid infinitely recursing.
    # Helper: detect the (None, TypeVar) combination the flag prohibits.
    def is_none_typevar_overlap(t1: Type, t2: Type) -> bool:
        t1, t2 = get_proper_types((t1, t2))
        return isinstance(t1, NoneType) and isinstance(t2, TypeVarType)
    if prohibit_none_typevar_overlap:
        if is_none_typevar_overlap(left, right) or is_none_typevar_overlap(right, left):
            return False
    if (len(left_possible) > 1 or len(right_possible) > 1
            or isinstance(left, TypeVarType) or isinstance(right, TypeVarType)):
        for l in left_possible:
            for r in right_possible:
                if _is_overlapping_types(l, r):
                    return True
        return False
    # Now that we've finished handling TypeVars, we're free to end early
    # if one one of the types is None and we're running in strict-optional mode.
    # (None only overlaps with None in strict-optional mode).
    #
    # We must perform this check after the TypeVar checks because
    # a TypeVar could be bound to None, for example.
    if state.strict_optional and isinstance(left, NoneType) != isinstance(right, NoneType):
        return False
    # Next, we handle single-variant types that may be inherently partially overlapping:
    #
    # - TypedDicts
    # - Tuples
    #
    # If we cannot identify a partial overlap and end early, we degrade these two types
    # into their 'Instance' fallbacks.
    if isinstance(left, TypedDictType) and isinstance(right, TypedDictType):
        return are_typed_dicts_overlapping(left, right, ignore_promotions=ignore_promotions)
    elif typed_dict_mapping_pair(left, right):
        # Overlaps between TypedDicts and Mappings require dedicated logic.
        return typed_dict_mapping_overlap(left, right,
                                          overlapping=_is_overlapping_types)
    elif isinstance(left, TypedDictType):
        left = left.fallback
    elif isinstance(right, TypedDictType):
        right = right.fallback
    if is_tuple(left) and is_tuple(right):
        return are_tuples_overlapping(left, right, ignore_promotions=ignore_promotions)
    elif isinstance(left, TupleType):
        left = tuple_fallback(left)
    elif isinstance(right, TupleType):
        right = tuple_fallback(right)
    # Next, we handle single-variant types that cannot be inherently partially overlapping,
    # but do require custom logic to inspect.
    #
    # As before, we degrade into 'Instance' whenever possible.
    if isinstance(left, TypeType) and isinstance(right, TypeType):
        return _is_overlapping_types(left.item, right.item)
    def _type_object_overlap(left: Type, right: Type) -> bool:
        """Special cases for type object types overlaps."""
        # TODO: these checks are a bit in gray area, adjust if they cause problems.
        left, right = get_proper_types((left, right))
        # 1. Type[C] vs Callable[..., C], where the latter is class object.
        if isinstance(left, TypeType) and isinstance(right, CallableType) and right.is_type_obj():
            return _is_overlapping_types(left.item, right.ret_type)
        # 2. Type[C] vs Meta, where Meta is a metaclass for C.
        if isinstance(left, TypeType) and isinstance(right, Instance):
            if isinstance(left.item, Instance):
                left_meta = left.item.type.metaclass_type
                if left_meta is not None:
                    return _is_overlapping_types(left_meta, right)
                # builtins.type (default metaclass) overlaps with all metaclasses
                return right.type.has_base('builtins.type')
            elif isinstance(left.item, AnyType):
                return right.type.has_base('builtins.type')
        # 3. Callable[..., C] vs Meta is considered below, when we switch to fallbacks.
        return False
    if isinstance(left, TypeType) or isinstance(right, TypeType):
        return _type_object_overlap(left, right) or _type_object_overlap(right, left)
    if isinstance(left, CallableType) and isinstance(right, CallableType):
        return is_callable_compatible(left, right,
                                      is_compat=_is_overlapping_types,
                                      ignore_pos_arg_names=True,
                                      allow_partial_overlap=True)
    elif isinstance(left, CallableType):
        left = left.fallback
    elif isinstance(right, CallableType):
        right = right.fallback
    if isinstance(left, LiteralType) and isinstance(right, LiteralType):
        if left.value == right.value:
            # If values are the same, we still need to check if fallbacks are overlapping,
            # this is done below.
            left = left.fallback
            right = right.fallback
        else:
            return False
    elif isinstance(left, LiteralType):
        left = left.fallback
    elif isinstance(right, LiteralType):
        right = right.fallback
    # Finally, we handle the case where left and right are instances.
    if isinstance(left, Instance) and isinstance(right, Instance):
        # First we need to handle promotions and structural compatibility for instances
        # that came as fallbacks, so simply call is_subtype() to avoid code duplication.
        if (is_subtype(left, right, ignore_promotions=ignore_promotions)
                or is_subtype(right, left, ignore_promotions=ignore_promotions)):
            return True
        # Two unrelated types cannot be partially overlapping: they're disjoint.
        if left.type.has_base(right.type.fullname):
            left = map_instance_to_supertype(left, right.type)
        elif right.type.has_base(left.type.fullname):
            right = map_instance_to_supertype(right, left.type)
        else:
            return False
        if len(left.args) == len(right.args):
            # Note: we don't really care about variance here, since the overlapping check
            # is symmetric and since we want to return 'True' even for partial overlaps.
            #
            # For example, suppose we have two types Wrapper[Parent] and Wrapper[Child].
            # It doesn't matter whether Wrapper is covariant or contravariant since
            # either way, one of the two types will overlap with the other.
            #
            # Similarly, if Wrapper was invariant, the two types could still be partially
            # overlapping -- what if Wrapper[Parent] happened to contain only instances of
            # specifically Child?
            #
            # Or, to use a more concrete example, List[Union[A, B]] and List[Union[B, C]]
            # would be considered partially overlapping since it's possible for both lists
            # to contain only instances of B at runtime.
            for left_arg, right_arg in zip(left.args, right.args):
                if _is_overlapping_types(left_arg, right_arg):
                    return True
        return False
    # We ought to have handled every case by now: we conclude the
    # two types are not overlapping, either completely or partially.
    #
    # Note: it's unclear however, whether returning False is the right thing
    # to do when inferring reachability -- see https://github.com/python/mypy/issues/5529
    assert type(left) != type(right)
    return False
def is_overlapping_erased_types(left: Type, right: Type, *,
                                ignore_promotions: bool = False) -> bool:
    """The same as 'is_overlapping_types', except the types are erased first."""
    return is_overlapping_types(erase_type(left), erase_type(right),
                                ignore_promotions=ignore_promotions,
                                prohibit_none_typevar_overlap=True)
def are_typed_dicts_overlapping(left: TypedDictType, right: TypedDictType, *,
                                ignore_promotions: bool = False,
                                prohibit_none_typevar_overlap: bool = False) -> bool:
    """Returns 'true' if left and right are overlapping TypeDictTypes."""
    # All required keys in left are present and overlapping with something in right
    for key in left.required_keys:
        if key not in right.items:
            return False
        if not is_overlapping_types(left.items[key], right.items[key],
                                    ignore_promotions=ignore_promotions,
                                    prohibit_none_typevar_overlap=prohibit_none_typevar_overlap):
            return False
    # Repeat check in the other direction
    for key in right.required_keys:
        if key not in left.items:
            return False
        # CONSISTENCY FIX: forward prohibit_none_typevar_overlap here as well,
        # so both directions are checked under the same overlap rules.
        if not is_overlapping_types(left.items[key], right.items[key],
                                    ignore_promotions=ignore_promotions,
                                    prohibit_none_typevar_overlap=prohibit_none_typevar_overlap):
            return False
    # The presence of any additional optional keys does not affect whether the two
    # TypedDicts are partially overlapping: the dicts would be overlapping if the
    # keys happened to be missing.
    return True
def are_tuples_overlapping(left: Type, right: Type, *,
                           ignore_promotions: bool = False,
                           prohibit_none_typevar_overlap: bool = False) -> bool:
    """Returns true if left and right are overlapping tuples."""
    left, right = get_proper_types((left, right))
    # Normalize a homogeneous Tuple[X, ...] to a fixed-length tuple
    # matching the other operand's arity.
    left = adjust_tuple(left, right) or left
    right = adjust_tuple(right, left) or right
    assert isinstance(left, TupleType), 'Type {} is not a tuple'.format(left)
    assert isinstance(right, TupleType), 'Type {} is not a tuple'.format(right)
    if len(left.items) != len(right.items):
        return False
    # Tuples overlap only when every positional pair overlaps.
    for left_item, right_item in zip(left.items, right.items):
        if not is_overlapping_types(left_item, right_item,
                                    ignore_promotions=ignore_promotions,
                                    prohibit_none_typevar_overlap=prohibit_none_typevar_overlap):
            return False
    return True
def adjust_tuple(left: ProperType, r: ProperType) -> Optional[TupleType]:
    """If `left` is a homogeneous Tuple[A, ...], expand it to a fixed-length
    TupleType matching `r`'s arity (or length 1); otherwise return None."""
    if not (isinstance(left, Instance) and left.type.fullname == 'builtins.tuple'):
        return None
    length = r.length() if isinstance(r, TupleType) else 1
    return TupleType([left.args[0]] * length, left)
def is_tuple(typ: Type) -> bool:
    """Check whether a type is a fixed-length or homogeneous tuple type."""
    typ = get_proper_type(typ)
    if isinstance(typ, TupleType):
        return True
    return isinstance(typ, Instance) and typ.type.fullname == 'builtins.tuple'
class TypeMeetVisitor(TypeVisitor[ProperType]):
    """Computes the meet of a fixed operand 's' with the visited type 't'.

    Dispatch happens on the right operand 't' (the visited type); the left
    operand 's' is fixed at construction time. See meet_types() above, which
    is the entry point that creates this visitor.
    """
    def __init__(self, s: ProperType) -> None:
        # The fixed operand that every visit_* method meets against.
        self.s = s
    def visit_unbound_type(self, t: UnboundType) -> ProperType:
        if isinstance(self.s, NoneType):
            if state.strict_optional:
                return AnyType(TypeOfAny.special_form)
            else:
                return self.s
        elif isinstance(self.s, UninhabitedType):
            return self.s
        else:
            return AnyType(TypeOfAny.special_form)
    def visit_any(self, t: AnyType) -> ProperType:
        # 'Any' is absorbed by the other operand.
        return self.s
    def visit_union_type(self, t: UnionType) -> ProperType:
        # The meet of unions is the union of all pairwise meets.
        if isinstance(self.s, UnionType):
            meets = []  # type: List[Type]
            for x in t.items:
                for y in self.s.items:
                    meets.append(meet_types(x, y))
        else:
            meets = [meet_types(x, self.s)
                     for x in t.items]
        return make_simplified_union(meets)
    def visit_none_type(self, t: NoneType) -> ProperType:
        if state.strict_optional:
            if isinstance(self.s, NoneType) or (isinstance(self.s, Instance) and
                                                self.s.type.fullname == 'builtins.object'):
                return t
            else:
                return UninhabitedType()
        else:
            return t
    def visit_uninhabited_type(self, t: UninhabitedType) -> ProperType:
        return t
    def visit_deleted_type(self, t: DeletedType) -> ProperType:
        if isinstance(self.s, NoneType):
            if state.strict_optional:
                return t
            else:
                return self.s
        elif isinstance(self.s, UninhabitedType):
            return self.s
        else:
            return t
    def visit_erased_type(self, t: ErasedType) -> ProperType:
        return self.s
    def visit_type_var(self, t: TypeVarType) -> ProperType:
        if isinstance(self.s, TypeVarType) and self.s.id == t.id:
            return self.s
        else:
            return self.default(self.s)
    def visit_instance(self, t: Instance) -> ProperType:
        if isinstance(self.s, Instance):
            si = self.s
            if t.type == si.type:
                if is_subtype(t, self.s) or is_subtype(self.s, t):
                    # Combine type arguments. We could have used join below
                    # equivalently.
                    args = []  # type: List[Type]
                    for i in range(len(t.args)):
                        args.append(self.meet(t.args[i], si.args[i]))
                    return Instance(t.type, args)
                else:
                    if state.strict_optional:
                        return UninhabitedType()
                    else:
                        return NoneType()
            else:
                if is_subtype(t, self.s):
                    return t
                elif is_subtype(self.s, t):
                    # See also above comment.
                    return self.s
                else:
                    if state.strict_optional:
                        return UninhabitedType()
                    else:
                        return NoneType()
        elif isinstance(self.s, FunctionLike) and t.type.is_protocol:
            call = unpack_callback_protocol(t)
            if call:
                return meet_types(call, self.s)
        elif isinstance(self.s, FunctionLike) and self.s.is_type_obj() and t.type.is_metaclass():
            if is_subtype(self.s.fallback, t):
                return self.s
            return self.default(self.s)
        elif isinstance(self.s, TypeType):
            return meet_types(t, self.s)
        elif isinstance(self.s, TupleType):
            return meet_types(t, self.s)
        elif isinstance(self.s, LiteralType):
            return meet_types(t, self.s)
        elif isinstance(self.s, TypedDictType):
            return meet_types(t, self.s)
        return self.default(self.s)
    def visit_callable_type(self, t: CallableType) -> ProperType:
        if isinstance(self.s, CallableType) and is_similar_callables(t, self.s):
            if is_equivalent(t, self.s):
                return combine_similar_callables(t, self.s)
            result = meet_similar_callables(t, self.s)
            # We set the from_type_type flag to suppress error when a collection of
            # concrete class objects gets inferred as their common abstract superclass.
            if not ((t.is_type_obj() and t.type_object().is_abstract) or
                    (self.s.is_type_obj() and self.s.type_object().is_abstract)):
                result.from_type_type = True
            if isinstance(get_proper_type(result.ret_type), UninhabitedType):
                # Return a plain None or <uninhabited> instead of a weird function.
                return self.default(self.s)
            return result
        elif isinstance(self.s, TypeType) and t.is_type_obj() and not t.is_generic():
            # In this case we are able to potentially produce a better meet.
            res = meet_types(self.s.item, t.ret_type)
            if not isinstance(res, (NoneType, UninhabitedType)):
                return TypeType.make_normalized(res)
            return self.default(self.s)
        elif isinstance(self.s, Instance) and self.s.type.is_protocol:
            call = unpack_callback_protocol(self.s)
            if call:
                return meet_types(t, call)
        return self.default(self.s)
    def visit_overloaded(self, t: Overloaded) -> ProperType:
        # TODO: Implement a better algorithm that covers at least the same cases
        # as TypeJoinVisitor.visit_overloaded().
        s = self.s
        if isinstance(s, FunctionLike):
            if s.items() == t.items():
                return Overloaded(t.items())
            elif is_subtype(s, t):
                return s
            elif is_subtype(t, s):
                return t
            else:
                return meet_types(t.fallback, s.fallback)
        elif isinstance(self.s, Instance) and self.s.type.is_protocol:
            call = unpack_callback_protocol(self.s)
            if call:
                return meet_types(t, call)
        return meet_types(t.fallback, s)
    def visit_tuple_type(self, t: TupleType) -> ProperType:
        if isinstance(self.s, TupleType) and self.s.length() == t.length():
            items = []  # type: List[Type]
            for i in range(t.length()):
                items.append(self.meet(t.items[i], self.s.items[i]))
            # TODO: What if the fallbacks are different?
            return TupleType(items, tuple_fallback(t))
        elif isinstance(self.s, Instance):
            # meet(Tuple[t1, t2, <...>], Tuple[s, ...]) == Tuple[meet(t1, s), meet(t2, s), <...>].
            if self.s.type.fullname == 'builtins.tuple' and self.s.args:
                return t.copy_modified(items=[meet_types(it, self.s.args[0]) for it in t.items])
            elif is_proper_subtype(t, self.s):
                # A named tuple that inherits from a normal class
                return t
        return self.default(self.s)
    def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
        if isinstance(self.s, TypedDictType):
            for (name, l, r) in self.s.zip(t):
                if (not is_equivalent(l, r) or
                        (name in t.required_keys) != (name in self.s.required_keys)):
                    return self.default(self.s)
            item_list = []  # type: List[Tuple[str, Type]]
            for (item_name, s_item_type, t_item_type) in self.s.zipall(t):
                if s_item_type is not None:
                    item_list.append((item_name, s_item_type))
                else:
                    # at least one of s_item_type and t_item_type is not None
                    assert t_item_type is not None
                    item_list.append((item_name, t_item_type))
            items = OrderedDict(item_list)
            mapping_value_type = join_type_list(list(items.values()))
            fallback = self.s.create_anonymous_fallback(value_type=mapping_value_type)
            required_keys = t.required_keys | self.s.required_keys
            return TypedDictType(items, required_keys, fallback)
        elif isinstance(self.s, Instance) and is_subtype(t, self.s):
            return t
        else:
            return self.default(self.s)
    def visit_literal_type(self, t: LiteralType) -> ProperType:
        if isinstance(self.s, LiteralType) and self.s == t:
            return t
        elif isinstance(self.s, Instance) and is_subtype(t.fallback, self.s):
            return t
        else:
            return self.default(self.s)
    def visit_partial_type(self, t: PartialType) -> ProperType:
        # We can't determine the meet of partial types. We should never get here.
        assert False, 'Internal error'
    def visit_type_type(self, t: TypeType) -> ProperType:
        if isinstance(self.s, TypeType):
            typ = self.meet(t.item, self.s.item)
            if not isinstance(typ, NoneType):
                typ = TypeType.make_normalized(typ, line=t.line)
            return typ
        elif isinstance(self.s, Instance) and self.s.type.fullname == 'builtins.type':
            return t
        elif isinstance(self.s, CallableType):
            return self.meet(t, self.s)
        else:
            return self.default(self.s)
    def visit_type_alias_type(self, t: TypeAliasType) -> ProperType:
        assert False, "This should be never called, got {}".format(t)
    def meet(self, s: Type, t: Type) -> ProperType:
        return meet_types(s, t)
    def default(self, typ: Type) -> ProperType:
        # Fallback result when no specific case applies: the bottom type
        # (NoneType when strict-optional is off), or 'Any' for unbound types.
        if isinstance(typ, UnboundType):
            return AnyType(TypeOfAny.special_form)
        else:
            if state.strict_optional:
                return UninhabitedType()
            else:
                return NoneType()
def meet_similar_callables(t: CallableType, s: CallableType) -> CallableType:
    """Meet two callables: argument types are *joined* while return types are met."""
    from mypy.join import join_types
    arg_types = []  # type: List[Type]
    for arg_index in range(len(t.arg_types)):
        arg_types.append(join_types(t.arg_types[arg_index], s.arg_types[arg_index]))
    # TODO in combine_similar_callables also applies here (names and kinds)
    # The fallback type can be either 'function' or 'type'. The result should have 'function' as
    # fallback only if both operands have it as 'function'.
    fallback = t.fallback if t.fallback.type.fullname != 'builtins.function' else s.fallback
    return t.copy_modified(arg_types=arg_types,
                           ret_type=meet_types(t.ret_type, s.ret_type),
                           fallback=fallback,
                           name=None)
def meet_type_list(types: List[Type]) -> Type:
    """Fold meet_types over a list of types, left to right."""
    if not types:
        # This should probably be builtins.object but that is hard to get and
        # it doesn't matter for any current users.
        return AnyType(TypeOfAny.implementation_artifact)
    remaining = iter(types)
    result = next(remaining)
    for typ in remaining:
        result = meet_types(result, typ)
    return result
def typed_dict_mapping_pair(left: Type, right: Type) -> bool:
    """Is this a pair where one type is a TypedDict and the other an instance of Mapping?

    This case requires a precise/principled consideration because there are two use cases
    that push the boundary the opposite ways: we need to avoid spurious overlaps to avoid
    false positives for overloads, but we also need to avoid spuriously non-overlapping types
    to avoid false positives with --strict-equality.
    """
    left, right = get_proper_types((left, right))
    assert not isinstance(left, TypedDictType) or not isinstance(right, TypedDictType)
    if isinstance(left, TypedDictType):
        other = right
    elif isinstance(right, TypedDictType):
        other = left
    else:
        # Neither operand is a TypedDict.
        return False
    return isinstance(other, Instance) and other.type.has_base('typing.Mapping')
def typed_dict_mapping_overlap(left: Type, right: Type,
                               overlapping: Callable[[Type, Type], bool]) -> bool:
    """Check if a TypedDict type is overlapping with a Mapping.

    The basic logic here consists of two rules:
    * A TypedDict with some required keys is overlapping with Mapping[str, <some type>]
      if and only if every key type is overlapping with <some type>. For example:
      - TypedDict(x=int, y=str) overlaps with Dict[str, Union[str, int]]
      - TypedDict(x=int, y=str) doesn't overlap with Dict[str, int]
      Note that any additional non-required keys can't change the above result.
    * A TypedDict with no required keys overlaps with Mapping[str, <some type>] if and
      only if at least one of key types overlaps with <some type>. For example:
      - TypedDict(x=str, y=str, total=False) overlaps with Dict[str, str]
      - TypedDict(x=str, y=str, total=False) doesn't overlap with Dict[str, int]
      - TypedDict(x=int, y=str, total=False) overlaps with Dict[str, str]

    As usual empty, dictionaries lie in a gray area. In general, List[str] and List[str]
    are considered non-overlapping despite empty list belongs to both. However, List[int]
    and List[<nothing>] are considered overlapping.

    So here we follow the same logic: a TypedDict with no required keys is considered
    non-overlapping with Mapping[str, <some type>], but is considered overlapping with
    Mapping[<nothing>, <nothing>]. This way we avoid false positives for overloads, and also
    avoid false positives for comparisons like SomeTypedDict == {} under --strict-equality.
    """
    left, right = get_proper_types((left, right))
    assert not isinstance(left, TypedDictType) or not isinstance(right, TypedDictType)
    if isinstance(left, TypedDictType):
        assert isinstance(right, Instance)
        typed, other = left, right
    else:
        assert isinstance(left, Instance)
        assert isinstance(right, TypedDictType)
        typed, other = right, left
    mapping = next(base for base in other.type.mro if base.fullname == 'typing.Mapping')
    other = map_instance_to_supertype(other, mapping)
    key_type, value_type = get_proper_types(other.args)
    # TODO: is there a cleaner way to get str_type here?
    fallback = typed.as_anonymous().fallback
    str_type = fallback.type.bases[0].args[0]  # typing._TypedDict inherits Mapping[str, object]
    # Special case: a TypedDict with no required keys overlaps with an empty dict.
    if isinstance(key_type, UninhabitedType) and isinstance(value_type, UninhabitedType):
        return not typed.required_keys
    # In every remaining case the Mapping key type must at least overlap with 'str'.
    if not overlapping(key_type, str_type):
        return False
    if typed.required_keys:
        # Every required value type must overlap with the Mapping value type.
        return all(overlapping(typed.items[k], value_type) for k in typed.required_keys)
    # With no required keys, a single overlapping optional value suffices.
    optional_keys = set(typed.items.keys()) - typed.required_keys
    return any(overlapping(typed.items[k], value_type) for k in optional_keys)
| 44.209635 | 98 | 0.640827 |
d0246c0aea7d8ca10fa9ae06924c41e996c5e7ab | 27,036 | py | Python | Src/StdLib/Lib/telnetlib.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | Src/StdLib/Lib/telnetlib.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | Src/StdLib/Lib/telnetlib.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | r"""TELNET client class.
Based on RFC 854: TELNET Protocol Specification, by J. Postel and
J. Reynolds
Example:
>>> from telnetlib import Telnet
>>> tn = Telnet('www.python.org', 79) # connect to finger port
>>> tn.write('guido\r\n')
>>> print tn.read_all()
Login Name TTY Idle When Where
guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
>>>
Note that read_all() won't read until eof -- it just reads some data
-- but it guarantees to read at least one byte unless EOF is hit.
It is possible to pass a Telnet object to select.select() in order to
wait until more data is available. Note that in this case,
read_eager() may return '' even if there was data on the socket,
because the protocol negotiation may have eaten the data. This is why
EOFError is needed in some cases to distinguish between "no data" and
"connection closed" (since the socket also appears ready for reading
when it is closed).
To do:
- option negotiation
- timeout should be intrinsic to the connection object instead of an
option on one of the read calls only
"""
# Imported modules
import errno
import sys
import socket
import select
__all__ = ["Telnet"]
# Tunable parameters
DEBUGLEVEL = 0

# Telnet protocol defaults
TELNET_PORT = 23

# Telnet protocol characters (don't change).
# NOTE: this is Python 2 code -- each constant is a 1-byte str, so they can be
# concatenated directly onto the raw socket stream (e.g. IAC + WONT + opt).
IAC = chr(255) # "Interpret As Command"
DONT = chr(254)
DO = chr(253)
WONT = chr(252)
WILL = chr(251)
theNULL = chr(0)

SE = chr(240) # Subnegotiation End
NOP = chr(241) # No Operation
DM = chr(242) # Data Mark
BRK = chr(243) # Break
IP = chr(244) # Interrupt process
AO = chr(245) # Abort output
AYT = chr(246) # Are You There
EC = chr(247) # Erase Character
EL = chr(248) # Erase Line
GA = chr(249) # Go Ahead
SB = chr(250) # Subnegotiation Begin

# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = chr(0) # 8-bit data path
ECHO = chr(1) # echo
RCP = chr(2) # prepare to reconnect
SGA = chr(3) # suppress go ahead
NAMS = chr(4) # approximate message size
STATUS = chr(5) # give status
TM = chr(6) # timing mark
RCTE = chr(7) # remote controlled transmission and echo
NAOL = chr(8) # negotiate about output line width
NAOP = chr(9) # negotiate about output page size
NAOCRD = chr(10) # negotiate about CR disposition
NAOHTS = chr(11) # negotiate about horizontal tabstops
NAOHTD = chr(12) # negotiate about horizontal tab disposition
NAOFFD = chr(13) # negotiate about formfeed disposition
NAOVTS = chr(14) # negotiate about vertical tab stops
NAOVTD = chr(15) # negotiate about vertical tab disposition
NAOLFD = chr(16) # negotiate about output LF disposition
XASCII = chr(17) # extended ascii character set
LOGOUT = chr(18) # force logout
BM = chr(19) # byte macro
DET = chr(20) # data entry terminal
SUPDUP = chr(21) # supdup protocol
SUPDUPOUTPUT = chr(22) # supdup output
SNDLOC = chr(23) # send location
TTYPE = chr(24) # terminal type
EOR = chr(25) # end or record
TUID = chr(26) # TACACS user identification
OUTMRK = chr(27) # output marking
TTYLOC = chr(28) # terminal location number
VT3270REGIME = chr(29) # 3270 regime
X3PAD = chr(30) # X.3 PAD
NAWS = chr(31) # window size
TSPEED = chr(32) # terminal speed
LFLOW = chr(33) # remote flow control
LINEMODE = chr(34) # Linemode option
XDISPLOC = chr(35) # X Display Location
OLD_ENVIRON = chr(36) # Old - Environment variables
AUTHENTICATION = chr(37) # Authenticate
ENCRYPT = chr(38) # Encryption option
NEW_ENVIRON = chr(39) # New - Environment variables

# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = chr(40) # TN3270E
XAUTH = chr(41) # XAUTH
CHARSET = chr(42) # CHARSET
RSP = chr(43) # Telnet Remote Serial Port
COM_PORT_OPTION = chr(44) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
TLS = chr(46) # Telnet Start TLS
KERMIT = chr(47) # KERMIT
SEND_URL = chr(48) # SEND-URL
FORWARD_X = chr(49) # FORWARD_X
PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
EXOPL = chr(255) # Extended-Options-List
NOOPT = chr(0)
class Telnet:

    """Telnet interface class.

    An instance of this class represents a connection to a telnet
    server. The instance is initially not connected; the open()
    method must be used to establish a connection. Alternatively, the
    host name and optional port number can be passed to the
    constructor, too.

    Don't try to reopen an already connected instance.

    This class has many read_*() methods. Note that some of them
    raise EOFError when the end of the connection is read, because
    they can return an empty string for other reasons. See the
    individual doc strings.

    read_until(expected, [timeout])
        Read until the expected string has been seen, or a timeout is
        hit (default is no timeout); may block.

    read_all()
        Read all data until EOF; may block.

    read_some()
        Read at least one byte or EOF; may block.

    read_very_eager()
        Read all data available already queued or on the socket,
        without blocking.

    read_eager()
        Read either data already queued or some data available on the
        socket, without blocking.

    read_lazy()
        Read all data in the raw queue (processing it first), without
        doing any socket I/O.

    read_very_lazy()
        Reads all data in the cooked queue, without doing any socket
        I/O.

    read_sb_data()
        Reads available data between SB ... SE sequence. Don't block.

    set_option_negotiation_callback(callback)
        Each time a telnet option is read on the input flow, this callback
        (if set) is called with the following parameters :
        callback(telnet socket, command, option)
            option will be chr(0) when there is no option.
        No other action is done afterwards by telnetlib.

    """
    # NOTE: Python 2 implementation -- all queues are byte strings (str),
    # and debug output uses print statements.

    def __init__(self, host=None, port=0,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """Constructor.

        When called without arguments, create an unconnected instance.
        With a hostname argument, it connects the instance; port number
        and timeout are optional.
        """
        self.debuglevel = DEBUGLEVEL
        self.host = host
        self.port = port
        self.timeout = timeout
        self.sock = None
        # rawq holds bytes exactly as received; irawq is the read cursor
        # into it (see rawq_getchar).
        self.rawq = ''
        self.irawq = 0
        # cookedq holds data after telnet protocol processing (IAC sequences
        # stripped/handled) -- this is what the read_*() methods return.
        self.cookedq = ''
        self.eof = 0
        self.iacseq = '' # Buffer for IAC sequence.
        self.sb = 0 # flag for SB and SE sequence.
        self.sbdataq = ''
        self.option_callback = None
        # Prefer poll() over select() where available (no FD_SETSIZE limit).
        self._has_poll = hasattr(select, 'poll')
        if host is not None:
            self.open(host, port, timeout)

    def open(self, host, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """Connect to a host.

        The optional second argument is the port number, which
        defaults to the standard telnet port (23).

        Don't try to reopen an already connected instance.
        """
        self.eof = 0
        if not port:
            port = TELNET_PORT
        self.host = host
        self.port = port
        self.timeout = timeout
        self.sock = socket.create_connection((host, port), timeout)

    def __del__(self):
        """Destructor -- close the connection."""
        self.close()

    def msg(self, msg, *args):
        """Print a debug message, when the debug level is > 0.

        If extra arguments are present, they are substituted in the
        message using the standard string formatting operator.

        """
        if self.debuglevel > 0:
            print 'Telnet(%s,%s):' % (self.host, self.port),
            if args:
                print msg % args
            else:
                print msg

    def set_debuglevel(self, debuglevel):
        """Set the debug level.

        The higher it is, the more debug output you get (on sys.stdout).

        """
        self.debuglevel = debuglevel

    def close(self):
        """Close the connection."""
        sock = self.sock
        # Historical quirk: sock is reset to 0 (not None); both are falsy,
        # and only truthiness of self.sock is relied on elsewhere.
        self.sock = 0
        self.eof = 1
        self.iacseq = ''
        self.sb = 0
        if sock:
            sock.close()

    def get_socket(self):
        """Return the socket object used internally."""
        return self.sock

    def fileno(self):
        """Return the fileno() of the socket object used internally."""
        return self.sock.fileno()

    def write(self, buffer):
        """Write a string to the socket, doubling any IAC characters.

        Can block if the connection is blocked.  May raise
        socket.error if the connection is closed.

        """
        # An IAC byte in the data must be escaped as IAC IAC per RFC 854.
        if IAC in buffer:
            buffer = buffer.replace(IAC, IAC+IAC)
        self.msg("send %r", buffer)
        self.sock.sendall(buffer)

    def read_until(self, match, timeout=None):
        """Read until a given string is encountered or until timeout.

        When no match is found, return whatever is available instead,
        possibly the empty string.  Raise EOFError if the connection
        is closed and no cooked data is available.

        """
        if self._has_poll:
            return self._read_until_with_poll(match, timeout)
        else:
            return self._read_until_with_select(match, timeout)

    def _read_until_with_poll(self, match, timeout):
        """Read until a given string is encountered or until timeout.

        This method uses select.poll() to implement the timeout.
        """
        n = len(match)
        call_timeout = timeout
        if timeout is not None:
            from time import time
            time_start = time()
        self.process_rawq()
        i = self.cookedq.find(match)
        if i < 0:
            poller = select.poll()
            poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
            poller.register(self, poll_in_or_priority_flags)
            while i < 0 and not self.eof:
                try:
                    # Poll takes its timeout in milliseconds.
                    ready = poller.poll(None if timeout is None
                                        else 1000 * call_timeout)
                except select.error as e:
                    # EINTR: retry with the remaining time budget.
                    if e[0] == errno.EINTR:
                        if timeout is not None:
                            elapsed = time() - time_start
                            call_timeout = timeout-elapsed
                        continue
                    raise
                for fd, mode in ready:
                    if mode & poll_in_or_priority_flags:
                        # Resume the search just before the tail already
                        # scanned, in case the match straddles a read.
                        i = max(0, len(self.cookedq)-n)
                        self.fill_rawq()
                        self.process_rawq()
                        i = self.cookedq.find(match, i)
                if timeout is not None:
                    elapsed = time() - time_start
                    if elapsed >= timeout:
                        break
                    call_timeout = timeout-elapsed
            poller.unregister(self)
        if i >= 0:
            i = i + n
            buf = self.cookedq[:i]
            self.cookedq = self.cookedq[i:]
            return buf
        return self.read_very_lazy()

    def _read_until_with_select(self, match, timeout=None):
        """Read until a given string is encountered or until timeout.

        The timeout is implemented using select.select().
        """
        n = len(match)
        self.process_rawq()
        i = self.cookedq.find(match)
        if i >= 0:
            i = i+n
            buf = self.cookedq[:i]
            self.cookedq = self.cookedq[i:]
            return buf
        s_reply = ([self], [], [])
        s_args = s_reply
        if timeout is not None:
            s_args = s_args + (timeout,)
            from time import time
            time_start = time()
        # select() returning s_reply means our socket is readable.
        while not self.eof and select.select(*s_args) == s_reply:
            i = max(0, len(self.cookedq)-n)
            self.fill_rawq()
            self.process_rawq()
            i = self.cookedq.find(match, i)
            if i >= 0:
                i = i+n
                buf = self.cookedq[:i]
                self.cookedq = self.cookedq[i:]
                return buf
            if timeout is not None:
                elapsed = time() - time_start
                if elapsed >= timeout:
                    break
                s_args = s_reply + (timeout-elapsed,)
        return self.read_very_lazy()

    def read_all(self):
        """Read all data until EOF; block until connection closed."""
        self.process_rawq()
        while not self.eof:
            self.fill_rawq()
            self.process_rawq()
        buf = self.cookedq
        self.cookedq = ''
        return buf

    def read_some(self):
        """Read at least one byte of cooked data unless EOF is hit.

        Return '' if EOF is hit.  Block if no data is immediately
        available.

        """
        self.process_rawq()
        while not self.cookedq and not self.eof:
            self.fill_rawq()
            self.process_rawq()
        buf = self.cookedq
        self.cookedq = ''
        return buf

    def read_very_eager(self):
        """Read everything that's possible without blocking in I/O (eager).

        Raise EOFError if connection closed and no cooked data
        available.  Return '' if no cooked data available otherwise.
        Don't block unless in the midst of an IAC sequence.

        """
        self.process_rawq()
        while not self.eof and self.sock_avail():
            self.fill_rawq()
            self.process_rawq()
        return self.read_very_lazy()

    def read_eager(self):
        """Read readily available data.

        Raise EOFError if connection closed and no cooked data
        available.  Return '' if no cooked data available otherwise.
        Don't block unless in the midst of an IAC sequence.

        """
        self.process_rawq()
        while not self.cookedq and not self.eof and self.sock_avail():
            self.fill_rawq()
            self.process_rawq()
        return self.read_very_lazy()

    def read_lazy(self):
        """Process and return data that's already in the queues (lazy).

        Raise EOFError if connection closed and no data available.
        Return '' if no cooked data available otherwise.  Don't block
        unless in the midst of an IAC sequence.

        """
        self.process_rawq()
        return self.read_very_lazy()

    def read_very_lazy(self):
        """Return any data available in the cooked queue (very lazy).

        Raise EOFError if connection closed and no data available.
        Return '' if no cooked data available otherwise.  Don't block.

        """
        buf = self.cookedq
        self.cookedq = ''
        if not buf and self.eof and not self.rawq:
            raise EOFError, 'telnet connection closed'
        return buf

    def read_sb_data(self):
        """Return any data available in the SB ... SE queue.

        Return '' if no SB ... SE available. Should only be called
        after seeing a SB or SE command. When a new SB command is
        found, old unread SB data will be discarded. Don't block.

        """
        buf = self.sbdataq
        self.sbdataq = ''
        return buf

    def set_option_negotiation_callback(self, callback):
        """Provide a callback function called after each receipt of a telnet option."""
        self.option_callback = callback

    def process_rawq(self):
        """Transfer from raw queue to cooked queue.

        Set self.eof when connection is closed.  Don't block unless in
        the midst of an IAC sequence.

        """
        # buf[0] collects normal data, buf[1] collects subnegotiation data;
        # self.sb (0 or 1) selects the destination, which is why data is
        # appended to buf[self.sb].
        buf = ['', '']
        try:
            while self.rawq:
                c = self.rawq_getchar()
                # State machine on self.iacseq: '' = normal data,
                # IAC = saw command introducer, IAC+cmd = awaiting option.
                if not self.iacseq:
                    if c == theNULL:
                        continue
                    if c == "\021":
                        # Discard DC1/XON flow-control characters.
                        continue
                    if c != IAC:
                        buf[self.sb] = buf[self.sb] + c
                        continue
                    else:
                        self.iacseq += c
                elif len(self.iacseq) == 1:
                    # 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
                    if c in (DO, DONT, WILL, WONT):
                        self.iacseq += c
                        continue
                    self.iacseq = ''
                    if c == IAC:
                        # Escaped IAC IAC means a literal 0xFF data byte.
                        buf[self.sb] = buf[self.sb] + c
                    else:
                        if c == SB: # SB ... SE start.
                            self.sb = 1
                            self.sbdataq = ''
                        elif c == SE:
                            self.sb = 0
                            self.sbdataq = self.sbdataq + buf[1]
                            buf[1] = ''
                        if self.option_callback:
                            # Callback is supposed to look into
                            # the sbdataq
                            self.option_callback(self.sock, c, NOOPT)
                        else:
                            # We can't offer automatic processing of
                            # suboptions. Alas, we should not get any
                            # unless we did a WILL/DO before.
                            self.msg('IAC %d not recognized' % ord(c))
                elif len(self.iacseq) == 2:
                    cmd = self.iacseq[1]
                    self.iacseq = ''
                    opt = c
                    if cmd in (DO, DONT):
                        self.msg('IAC %s %d',
                                 cmd == DO and 'DO' or 'DONT', ord(opt))
                        if self.option_callback:
                            self.option_callback(self.sock, cmd, opt)
                        else:
                            # Default: refuse every option request.
                            self.sock.sendall(IAC + WONT + opt)
                    elif cmd in (WILL, WONT):
                        self.msg('IAC %s %d',
                                 cmd == WILL and 'WILL' or 'WONT', ord(opt))
                        if self.option_callback:
                            self.option_callback(self.sock, cmd, opt)
                        else:
                            self.sock.sendall(IAC + DONT + opt)
        except EOFError: # raised by self.rawq_getchar()
            self.iacseq = '' # Reset on EOF
            self.sb = 0
            pass
        self.cookedq = self.cookedq + buf[0]
        self.sbdataq = self.sbdataq + buf[1]

    def rawq_getchar(self):
        """Get next char from raw queue.

        Block if no data is immediately available.  Raise EOFError
        when connection is closed.

        """
        if not self.rawq:
            self.fill_rawq()
            if self.eof:
                raise EOFError
        c = self.rawq[self.irawq]
        self.irawq = self.irawq + 1
        if self.irawq >= len(self.rawq):
            self.rawq = ''
            self.irawq = 0
        return c

    def fill_rawq(self):
        """Fill raw queue from exactly one recv() system call.

        Block if no data is immediately available.  Set self.eof when
        connection is closed.

        """
        if self.irawq >= len(self.rawq):
            self.rawq = ''
            self.irawq = 0
        # The buffer size should be fairly small so as to avoid quadratic
        # behavior in process_rawq() above
        buf = self.sock.recv(50)
        self.msg("recv %r", buf)
        # recv() returning '' means the peer closed the connection.
        self.eof = (not buf)
        self.rawq = self.rawq + buf

    def sock_avail(self):
        """Test whether data is available on the socket."""
        return select.select([self], [], [], 0) == ([self], [], [])

    def interact(self):
        """Interaction function, emulates a very dumb telnet client."""
        if sys.platform == "win32":
            # select() on Windows can't mix sockets with sys.stdin.
            self.mt_interact()
            return
        while 1:
            rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
            if self in rfd:
                try:
                    text = self.read_eager()
                except EOFError:
                    print '*** Connection closed by remote host ***'
                    break
                if text:
                    sys.stdout.write(text)
                    sys.stdout.flush()
            if sys.stdin in rfd:
                line = sys.stdin.readline()
                if not line:
                    break
                self.write(line)

    def mt_interact(self):
        """Multithreaded version of interact()."""
        import thread
        thread.start_new_thread(self.listener, ())
        while 1:
            line = sys.stdin.readline()
            if not line:
                break
            self.write(line)

    def listener(self):
        """Helper for mt_interact() -- this executes in the other thread."""
        while 1:
            try:
                data = self.read_eager()
            except EOFError:
                print '*** Connection closed by remote host ***'
                return
            if data:
                sys.stdout.write(data)
            else:
                sys.stdout.flush()

    def expect(self, list, timeout=None):
        """Read until one from a list of a regular expressions matches.

        The first argument is a list of regular expressions, either
        compiled (re.RegexObject instances) or uncompiled (strings).
        The optional second argument is a timeout, in seconds; default
        is no timeout.

        Return a tuple of three items: the index in the list of the
        first regular expression that matches; the match object
        returned; and the text read up till and including the match.

        If EOF is read and no text was read, raise EOFError.
        Otherwise, when nothing matches, return (-1, None, text) where
        text is the text received so far (may be the empty string if a
        timeout happened).

        If a regular expression ends with a greedy match (e.g. '.*')
        or if more than one expression can match the same input, the
        results are undeterministic, and may depend on the I/O timing.

        """
        if self._has_poll:
            return self._expect_with_poll(list, timeout)
        else:
            return self._expect_with_select(list, timeout)

    def _expect_with_poll(self, expect_list, timeout=None):
        """Read until one from a list of a regular expressions matches.

        This method uses select.poll() to implement the timeout.
        """
        # re is only imported if strings (not compiled patterns) are passed.
        re = None
        expect_list = expect_list[:]
        indices = range(len(expect_list))
        for i in indices:
            if not hasattr(expect_list[i], "search"):
                if not re: import re
                expect_list[i] = re.compile(expect_list[i])
        call_timeout = timeout
        if timeout is not None:
            from time import time
            time_start = time()
        self.process_rawq()
        m = None
        for i in indices:
            m = expect_list[i].search(self.cookedq)
            if m:
                e = m.end()
                text = self.cookedq[:e]
                self.cookedq = self.cookedq[e:]
                break
        if not m:
            poller = select.poll()
            poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
            poller.register(self, poll_in_or_priority_flags)
            while not m and not self.eof:
                try:
                    # Poll timeout is in milliseconds.
                    ready = poller.poll(None if timeout is None
                                        else 1000 * call_timeout)
                except select.error as e:
                    if e[0] == errno.EINTR:
                        if timeout is not None:
                            elapsed = time() - time_start
                            call_timeout = timeout-elapsed
                        continue
                    raise
                for fd, mode in ready:
                    if mode & poll_in_or_priority_flags:
                        self.fill_rawq()
                        self.process_rawq()
                        for i in indices:
                            m = expect_list[i].search(self.cookedq)
                            if m:
                                e = m.end()
                                text = self.cookedq[:e]
                                self.cookedq = self.cookedq[e:]
                                break
                if timeout is not None:
                    elapsed = time() - time_start
                    if elapsed >= timeout:
                        break
                    call_timeout = timeout-elapsed
            poller.unregister(self)
        if m:
            return (i, m, text)
        text = self.read_very_lazy()
        if not text and self.eof:
            raise EOFError
        return (-1, None, text)

    def _expect_with_select(self, list, timeout=None):
        """Read until one from a list of a regular expressions matches.

        The timeout is implemented using select.select().
        """
        re = None
        list = list[:]
        indices = range(len(list))
        for i in indices:
            if not hasattr(list[i], "search"):
                if not re: import re
                list[i] = re.compile(list[i])
        if timeout is not None:
            from time import time
            time_start = time()
        while 1:
            self.process_rawq()
            for i in indices:
                m = list[i].search(self.cookedq)
                if m:
                    e = m.end()
                    text = self.cookedq[:e]
                    self.cookedq = self.cookedq[e:]
                    return (i, m, text)
            if self.eof:
                break
            if timeout is not None:
                elapsed = time() - time_start
                if elapsed >= timeout:
                    break
                s_args = ([self.fileno()], [], [], timeout-elapsed)
                r, w, x = select.select(*s_args)
                if not r:
                    break
            self.fill_rawq()
        text = self.read_very_lazy()
        if not text and self.eof:
            raise EOFError
        return (-1, None, text)
def test():
    """Small interactive smoke test for telnetlib.

    Usage: python telnetlib.py [-d] ... [host [port]]

    Default host is localhost; default port is 23.  Each -d raises the
    debug level by one.
    """
    verbosity = 0
    # Consume any number of leading -d flags.
    while len(sys.argv) > 1 and sys.argv[1] == '-d':
        verbosity += 1
        del sys.argv[1]
    host = sys.argv[1] if len(sys.argv) > 1 else 'localhost'
    port = 0
    if len(sys.argv) > 2:
        # Accept either a numeric port or a service name (e.g. "telnet").
        try:
            port = int(sys.argv[2])
        except ValueError:
            port = socket.getservbyname(sys.argv[2], 'tcp')
    tn = Telnet()
    tn.set_debuglevel(verbosity)
    tn.open(host, port, timeout=0.5)
    tn.interact()
    tn.close()
if __name__ == '__main__':
test()
| 34.136364 | 87 | 0.548861 |
b556c3aa8bf2b8611e4f90ad73a3dafffd1e7790 | 3,811 | py | Python | zeroaccess.py | mischw/botnet_dissectors | a67dbdac740a4f9f97d0507b4fa7ed83f68e99c2 | [
"MIT"
] | 1 | 2021-09-16T11:20:45.000Z | 2021-09-16T11:20:45.000Z | zeroaccess.py | mischw/botnet_dissectors | a67dbdac740a4f9f97d0507b4fa7ed83f68e99c2 | [
"MIT"
] | null | null | null | zeroaccess.py | mischw/botnet_dissectors | a67dbdac740a4f9f97d0507b4fa7ed83f68e99c2 | [
"MIT"
] | null | null | null | import binascii
from dataclasses import dataclass
from base import BaseMessage
from exceptions import DissectError
@dataclass(frozen=True)
class ZeroAccessMessage(BaseMessage):
    """One decrypted message of the ZeroAccess P2P protocol.

    ZeroAccess Message Structure:

    [offset]   [length]    [field]         [encrypted]     [note]
    -----------|-----------|---------------|---------------|------------
    0           4 byte      checksum        encrypted       header (checksum starts here; for calculating, the checksum field is set to 0)
    4           4 byte      command         encrypted       header (stored byte-reversed on the wire, e.g. 'getL' -> 'Lteg')
    8           4 byte      flag            encrypted       header
    12          ?  byte     data            encrypted       payload

    All multi-byte integers are little-endian.
    """

    data: bytearray  # decrypted message bytes: 12-byte header + payload
    plain: bool      # True once `data` holds plaintext (set by parse())

    def __str__(self) -> str:
        fstr = "Checksum: %s, Command: %s, Flag: %s, Payload: %s"
        return fstr % (hex(self.get_checksum()), self.get_command_string(), hex(self.get_flag()), '0x' + self.get_payload().hex())

    def __repr__(self) -> str:
        return ' '.join(["0x{:02x}".format(b) for b in self.data])

    def get_command_string(self) -> str:
        """Map the (byte-reversed) command field to its wire name.

        Raises KeyError for commands other than getL/retL.
        """
        # hex of b'Lteg' / b'Lter' -- the command is stored reversed.
        return {
            '4c746567': "getL",
            '4c746572': "retL"
        }[self.get_command().hex()]

    def get_length_raw(self) -> int:
        """Total message length in bytes (header + payload)."""
        return len(self.data)

    @staticmethod
    def parse(data_in: bytearray) -> 'ZeroAccessMessage':
        """Decrypt *data_in* and validate its CRC32; return the message.

        Raises DissectError if the input is shorter than a header plus a
        minimal payload (16 bytes) or if the checksum does not match.
        """
        if len(data_in) < 16:
            raise DissectError(
                "Message too small to contain at least a healthy header")

        payload_decrypted = ZeroAccessMessage.decrypt_xor(data_in, "ftp2")

        # The checksum is computed over the message with its checksum field
        # zeroed out, stored little-endian at offset 0.
        crc32 = binascii.crc32(
            b"\x00\x00\x00\x00" + payload_decrypted[4:]).to_bytes(4, byteorder='little')
        if bytearray(payload_decrypted[0:4]) != bytearray(crc32):
            raise DissectError("Checksum error")

        return ZeroAccessMessage(payload_decrypted, True)

    """ ------------------------------------------------------------------ """
    """ additional implementations                                          """
    """ ------------------------------------------------------------------ """

    def get_checksum(self) -> int:
        """CRC32 field (little-endian dword at offset 0)."""
        return int.from_bytes(self.data[0:4], "little")

    def get_command(self) -> bytearray:
        """Raw (byte-reversed) command field at offset 4."""
        return self.data[4:8]

    def get_flag(self) -> int:
        """Flag field (little-endian dword at offset 8)."""
        return int.from_bytes(self.data[8:12], "little")

    def get_payload(self) -> bytearray:
        """Everything after the 12-byte header."""
        return self.data[12:]

    def get_peer_list(self) -> list[str]:
        """Extract the advertised peer IPs from a retL payload.

        Payload layout: a little-endian dword peer count, then that many
        8-byte entries (4-byte IP followed by a 4-byte timestamp, which is
        ignored here).  There is more data after the peer list that this
        dissector does not need and does not parse.

        Raises DissectError if this message is not a retL response.
        """
        if self.get_command_string() != "retL":
            raise DissectError("Not a retL message")

        peers = []
        payload = self.get_payload()
        # The peer count comes from the payload itself (commonly 16).
        n_peers = int.from_bytes(payload[0:4], byteorder='little')
        entry_len = 8
        offset = 4
        for i in range(0 + offset, entry_len * n_peers + offset, entry_len):
            ip_bytes = payload[i:i + 4]
            ip_str = '.'.join("%d" % b for b in ip_bytes)
            peers.append(ip_str)
        return peers

    @staticmethod
    def decrypt_xor(payload_bytes: bytearray, key: str) -> bytearray:
        """Decrypt *payload_bytes* with ZeroAccess' rolling-XOR scheme.

        Each little-endian dword of the payload is XORed with the current
        32-bit key; after every dword the key is rotated left by one bit.
        The default key is "ftp2" (0x66747032 big-endian).

        Note: if the input length is not a multiple of 4, the last short
        chunk is zero-extended, so the output is padded up to the next
        dword boundary (this mirrors the original in-the-wild behaviour).
        """
        order = 'little'
        # b'ftp2' interpreted big-endian -- ZeroAccess' default key.
        key_int = int.from_bytes(key.encode(), "big")
        dec = bytearray()
        for i in range(0, len(payload_bytes), 4):
            payload_int = int.from_bytes(payload_bytes[i:i + 4], byteorder=order)
            xor = payload_int ^ key_int
            dec.extend(xor.to_bytes(4, byteorder=order))
            # Rotate the 32-bit key left by one bit.  (Replaces the previous
            # bin()-string slicing implementation -- arithmetically identical
            # since the key always fits in 32 bits, and far cheaper.)
            key_int = ((key_int << 1) | (key_int >> 31)) & 0xFFFFFFFF
        return dec
| 32.02521 | 124 | 0.649436 |
e738feca61b5d5561390a379afd29682409a645d | 8,694 | py | Python | cc_licenses/settings/base.py | brylie/cc-licenses | 9367a268ef5d136fab194b09685f69e72564ddef | [
"MIT"
] | null | null | null | cc_licenses/settings/base.py | brylie/cc-licenses | 9367a268ef5d136fab194b09685f69e72564ddef | [
"MIT"
] | null | null | null | cc_licenses/settings/base.py | brylie/cc-licenses | 9367a268ef5d136fab194b09685f69e72564ddef | [
"MIT"
] | null | null | null | """
Django settings for cc_licenses project.
"""
# Standard library
import os
# Third-party
from babel import Locale
from django.conf.locale import LANG_INFO
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# SETTINGS_DIR is where this settings file is
SETTINGS_DIR = os.path.dirname(os.path.abspath(__file__))
# PROJECT_DIR is the directory under root that contains the settings directory,
# urls.py, and other global stuff.
PROJECT_DIR = os.path.dirname(SETTINGS_DIR)
# ROOT_DIR is the top directory under source control
ROOT_DIR = os.path.dirname(PROJECT_DIR)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"licenses",
"i18n",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "cc_licenses.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PROJECT_DIR, "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"dealer.contrib.django.context_processor",
],
},
},
]
WSGI_APPLICATION = "cc_licenses.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "cc_licenses",
}
}
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(ROOT_DIR, "public", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/media/"
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}
},
"formatters": {
"basic": {
"format": "%(asctime)s %(name)-20s %(levelname)-8s %(message)s",
},
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "basic",
},
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
},
"root": {
"handlers": [
"console",
],
"level": "INFO",
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en" # "en" matches our default language code in Transifex
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# Teach Django about a few more languages
mi = Locale.parse("mi")
LANG_INFO["mi"] = { # Maori
"bidi": False,
"code": "mi",
"name": mi.get_display_name("en"), # in english
"name_local": mi.get_display_name("mi"), # in their own language
}
LANG_INFO["ms"] = { # Malay
"bidi": False,
"code": "ms",
"name": "Malay",
"name_local": "Bahasa Melayu", # ??
}
LANG_INFO["zh-Hans"] = {
"fallback": ["zh-hans"],
}
LANG_INFO["zh-Hant"] = {
"fallback": ["zh-hant"],
}
LANG_INFO["oci"] = { # Occitan? https://iso639-3.sil.org/code/oci
"bidi": False,
"code": "oci",
"name": "Occitan",
"name_local": "Occitan",
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "America/New_York"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(ROOT_DIR, "public", "static")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Additional locations of static files
STATICFILES_DIRS = (os.path.join(PROJECT_DIR, "static"),)
# If using Celery, tell it to obey our logging configuration.
CELERYD_HIJACK_ROOT_LOGGER = False
# https://docs.djangoproject.com/en/1.9/topics/auth/passwords/#password-validation
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": (
"django.contrib.auth."
"password_validation.UserAttributeSimilarityValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation.MinimumLengthValidator"
),
},
{
"NAME": (
"django.contrib.auth."
"password_validation.CommonPasswordValidator"
),
},
{
"NAME": (
"django.contrib.auth."
"password_validation.NumericPasswordValidator"
),
},
]
# Make things more secure by default. Run "python manage.py check --deploy"
# for even more suggestions that you might want to add to the settings,
# depending on how the site uses SSL.
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = "DENY"
# template_fragments
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
},
"branchstatuscache": {
# Use memory caching so template fragments get cached whether we have
# memcached running or not.
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
},
}
# This will use memcached if we have it, and otherwise just not cache.
if "CACHE_HOST" in os.environ:
CACHES["default"] = {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "%(CACHE_HOST)s" % os.environ,
}
# Percent translated that languages should be at or above
TRANSLATION_THRESHOLD = 80
# Location of the translation data's repo. Look in env for
# TRANSLATION_REPOSITORY_DIRECTORY.
# Default is next to this one.
TRANSLATION_REPOSITORY_DIRECTORY = os.getenv(
"TRANSLATION_REPOSITORY_DIRECTORY",
os.path.join(ROOT_DIR, "..", "cc-licenses-data"),
)
# django-distill settings
DISTILL_DIR = f"{TRANSLATION_REPOSITORY_DIRECTORY}/docs/"
# Django translations are in the translation repo directory, under "locale".
# License translations are in the translation repo directory, under
# "translations".
LOCALE_PATHS = (
os.path.join(TRANSLATION_REPOSITORY_DIRECTORY, "locale"),
os.path.join(TRANSLATION_REPOSITORY_DIRECTORY, "legalcode"),
)
TRANSIFEX = {
"ORGANIZATION_SLUG": "creativecommons",
"PROJECT_SLUG": "CC",
"API_TOKEN": os.getenv("TRANSIFEX_API_TOKEN", "missing"),
}
# The git branch where the official, approved, used in production translations
# are.
OFFICIAL_GIT_BRANCH = "main"
# Path to private keyfile to use when pushing up to data repo
TRANSLATION_REPOSITORY_DEPLOY_KEY = os.getenv(
"TRANSLATION_REPOSITORY_DEPLOY_KEY", ""
)
| 29.571429 | 82 | 0.660686 |
80f29f871035a990861dd44517ab25ac7dadf459 | 1,418 | py | Python | test/functional/feature_reindex.py | ScooterCoinWallet/wallet-pos | 8685f2c36110cc847b73208eb8919f5497daad78 | [
"MIT"
] | null | null | null | test/functional/feature_reindex.py | ScooterCoinWallet/wallet-pos | 8685f2c36110cc847b73208eb8919f5497daad78 | [
"MIT"
] | null | null | null | test/functional/feature_reindex.py | ScooterCoinWallet/wallet-pos | 8685f2c36110cc847b73208eb8919f5497daad78 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running scootercoind with -reindex and -reindex-chainstate options.
- Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until
class ReindexTest(BitcoinTestFramework):
    # Single-node test: mine a few blocks, restart with a reindex flag, and
    # verify the node recovers the same chain height.

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def reindex(self, justchainstate=False):
        # Mine 3 blocks so there is something to reindex, remember the height,
        # then restart with either -reindex-chainstate or the full -reindex.
        self.nodes[0].generatetoaddress(3, self.nodes[0].get_deterministic_priv_key().address)
        blockcount = self.nodes[0].getblockcount()
        self.stop_nodes()
        extra_args = [["-reindex-chainstate" if justchainstate else "-reindex"]]
        self.start_nodes(extra_args)
        # Reindexing is asynchronous; wait until the height is restored.
        wait_until(lambda: self.nodes[0].getblockcount() == blockcount)
        self.log.info("Success")

    def run_test(self):
        # Exercise both flags twice; each call mines 3 more blocks, so the
        # chain keeps growing between restarts.
        self.reindex(False)
        self.reindex(True)
        self.reindex(False)
        self.reindex(True)

if __name__ == '__main__':
    ReindexTest().main()
| 37.315789 | 106 | 0.717913 |
b0ca523c06199dd802379fba816aaaf6205bb806 | 11,233 | py | Python | dev/buildtool/image_commands.py | kevinawoo/spinnaker | 22cfbc9b640cc9536b16333ff2f5b412347e7223 | [
"Apache-2.0"
] | null | null | null | dev/buildtool/image_commands.py | kevinawoo/spinnaker | 22cfbc9b640cc9536b16333ff2f5b412347e7223 | [
"Apache-2.0"
] | null | null | null | dev/buildtool/image_commands.py | kevinawoo/spinnaker | 22cfbc9b640cc9536b16333ff2f5b412347e7223 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements imaging commands for buildtool.
This uses existing external scripts so the configuration and artifact
handling/generation is not consistent with the rest of the tool.
"""
import logging
import os
import re
from buildtool import (
SPINNAKER_RUNNABLE_REPOSITORY_NAMES,
BomSourceCodeManager,
RepositoryCommandFactory,
RepositoryCommandProcessor,
GitRepositorySpec,
GitRunner,
HalRunner,
add_parser_argument,
check_subprocess,
check_subprocesses_to_logfile,
check_options_set,
raise_and_log_error,
ConfigError,
UnexpectedError)
# TODO(ewiseblatt): 20180203
# Really these should come from "bom_dependencies_path" file
# so that it is extendable
# Non-Spinnaker runtime dependencies that also get VM images built for them;
# these have no github repository of their own.
EXTRA_REPO_NAMES = ['consul', 'redis', 'vault']
class BuildGceComponentImages(RepositoryCommandProcessor):
  """Builds GCE VM images for each of the runtime components.

  Although we are calling this a RepositoryCommandProcessor, it
  isnt really processing repositories. Some of the images we build
  are not associated with github or repos at all (e.g. redis). However
  the RepositoryCommandProcessor is a useful abstraction for its ability
  to parallelize across the different subsystems so we're going to overload
  it and pretend we have repositories even though we'll never do anything
  with their urls.
  """

  def _do_determine_source_repositories(self):
    """Implements RepositoryCommandProcessor interface.

    Returns the runnable-service repositories plus placeholder specs for the
    non-github dependencies (consul/redis/vault) -- only the names are used.
    """
    # These arent actually used, just the names.
    repositories = [self.source_code_manager.make_repository_spec(name)
                    for name in SPINNAKER_RUNNABLE_REPOSITORY_NAMES]
    repositories.extend([
        GitRepositorySpec(name) for name in EXTRA_REPO_NAMES])
    return repositories

  def __init__(self, factory, options, **kwargs):
    # Fail fast if the GCE options the build scripts need are missing.
    check_options_set(
        options,
        ['build_gce_service_account',
         'build_gce_project',
         'publish_gce_image_project'])
    # Image building never needs to push source changes upstream.
    options.github_disable_upstream_push = True
    super(BuildGceComponentImages, self).__init__(factory, options, **kwargs)

  def __determine_repo_install_args(self, repository):
    """Determine --spinnaker_dev-github_[owner|user] args for install script."""
    options = self.options
    branch = options.git_branch
    owner = ('spinnaker'
             if options.github_owner in ('default', 'upstream')
             else options.github_owner)
    git_dir = os.path.dirname(__file__)
    if not branch:
      # Fall back to the branch this script itself is running from.
      branch = GitRunner(options).query_local_repository_branch(git_dir)
    if not owner:
      # Derive the owner from the repository origin url.
      url = repository.origin
      match = re.search('github.com/([^/]+)/', url)
      if not match:
        raise_and_log_error(
            UnexpectedError('Cannot determine owner from url=%s' % url,
                            cause='BadUrl'))
      owner = match.group(1)
    return [
        '--spinnaker_dev_github_owner', owner,
        '--spinnaker_dev_github_branch', branch
    ]

  def have_image(self, repository):
    """Determine if we already have an image for the repository or not.

    Returns True only when the image exists and --skip_existing is set.
    Otherwise either raises (image exists, no --delete_existing), deletes the
    stale image, or simply reports that no image exists.
    """
    bom = self.source_code_manager.bom
    dependencies = bom['dependencies']
    services = bom['services']
    service_name = self.scm.repository_name_to_service_name(repository.name)
    # Non-service dependencies (e.g. redis) are versioned in "dependencies".
    if service_name in dependencies:
      build_version = dependencies[service_name]['version']
    else:
      build_version = services[service_name]['version']

    options = self.options
    # GCE image names cannot contain '.' or ':'.
    image_name = 'spinnaker-{repo}-{version}'.format(
        repo=repository.name,
        version=build_version.replace('.', '-').replace(':', '-'))
    lookup_command = ['gcloud', '--account', options.build_gce_service_account,
                      'compute', 'images', 'list', '--filter', image_name,
                      '--project', options.build_gce_project,
                      '--quiet', '--format=json']
    logging.debug('Checking for existing image for "%s"', repository.name)
    got = check_subprocess(' '.join(lookup_command))
    if got.strip() == '[]':
      return False
    labels = {'repository': repository.name, 'artifact': 'gce-image'}
    if self.options.skip_existing:
      logging.info('Already have %s -- skipping build', image_name)
      self.metrics.inc_counter('ReuseArtifact', labels)
      return True
    if not self.options.delete_existing:
      raise_and_log_error(
          ConfigError('Already have image "{name}"'.format(name=image_name)))

    # FIX: use build_gce_service_account (as the lookup above does); the
    # previous options.gcb_service_account referenced a GCB-specific option
    # that this command never declares.
    delete_command = ['gcloud', '--account', options.build_gce_service_account,
                      'compute', 'images', 'delete', image_name,
                      '--project', options.build_gce_project,
                      '--quiet']
    logging.debug('Deleting existing image %s', image_name)
    self.metrics.count_call(
        'DeleteArtifact', labels,
        'Attempts to delete existing GCE images.',
        check_subprocess, ' '.join(delete_command))
    return False

  def ensure_local_repository(self, repository):
    """Local repositories are used to get version information."""
    # consul/redis/vault have no source repository to clone.
    if repository.name in EXTRA_REPO_NAMES:
      return None
    return super(BuildGceComponentImages, self).ensure_local_repository(
        repository)

  def _do_can_skip_repository(self, repository):
    # Skip anything that is not a runnable service, or whose image we
    # already have (per --skip_existing / --delete_existing policy).
    if not repository.name in SPINNAKER_RUNNABLE_REPOSITORY_NAMES:
      logging.debug('%s does not build a GCE component image -- skip',
                    repository.name)
      return True
    return self.have_image(repository)

  def _do_repository(self, repository):
    """Implements RepositoryCommandProcessor interface.

    Shells out to build_google_component_image.sh to build and publish the
    VM image for one component; output is captured to a per-repo logfile.
    """
    name = repository.name
    build_component_image_sh = os.path.join(
        os.path.dirname(__file__), '..', 'build_google_component_image.sh')

    options = self.options
    bom_version = self.source_code_manager.determine_bom_version()
    command_line = [
        build_component_image_sh,
        '--artifact', name,  # FIX: dropped stray trailing space in flag name
        '--account', options.build_gce_service_account,
        '--hal_daemon_endpoint', 'http://' + options.halyard_daemon,
        '--build_project', options.build_gce_project,
        '--install_script', options.install_image_script,
        '--publish_project', options.publish_gce_image_project,
        '--publish_script', options.publish_gce_image_script,
        '--version', bom_version,
        '--zone', options.build_gce_zone]
    command_line.extend(self.__determine_repo_install_args(repository))

    extra_install_args = []
    if options.halyard_bom_bucket:
      extra_install_args.extend(
          ['--halyard_config_bucket', options.halyard_bom_bucket])

    if options.bintray_debian_repository:
      bintray_url = 'https://dl.bintray.com/{org}/{repo}'.format(
          org=options.bintray_org,
          repo=options.bintray_debian_repository)
      extra_install_args.extend([
          '--release_track', options.halyard_release_track,
          '--halyard_repository', bintray_url,
          '--spinnaker_repository', bintray_url])

    if extra_install_args:
      # Forward the nested args to the install script as one quoted token.
      command_line.extend(['--extra_install_script_args',
                           '"{0}"'.format(' '.join(extra_install_args))])

    command = ' '.join(command_line)
    logfile = self.get_logfile_path(name + '-gce-image')

    what = '{name} component image'.format(name=name)
    check_subprocesses_to_logfile(what, logfile, [command])
    return what
class BuildGceComponentImagesFactory(RepositoryCommandFactory):
  """Builds GCE VM images for each of the runtime components.

  Registers the "build_gce_component_images" command and declares all of
  its command-line arguments.
  """

  def __init__(self):
    super(BuildGceComponentImagesFactory, self).__init__(
        'build_gce_component_images', BuildGceComponentImages,
        'Build Google Compute Engine VM Images For Each Service.',
        BomSourceCodeManager)

  @staticmethod
  def add_bom_parser_args(parser, defaults):
    """Adds arguments shared with creating boms."""
    # Guard so the shared argument is only added once even if multiple
    # commands call this on the same parser.
    if hasattr(parser, 'added_gce_image_project'):
      return
    parser.added_gce_image_project = True

    add_parser_argument(
        parser, 'publish_gce_image_project', defaults, None,
        help='Project to publish images to.')

  def init_argparser(self, parser, defaults):
    """Declares all command-line options for this command."""
    super(BuildGceComponentImagesFactory, self).init_argparser(
        parser, defaults)
    HalRunner.add_parser_args(parser, defaults)
    self.add_bom_parser_args(parser, defaults)

    self.add_argument(
        parser, 'halyard_release_track', defaults, 'stable',
        choices=['nightly', 'stable'],
        help='Which halyard release track to use when installing images.')
    self.add_argument(
        parser, 'skip_existing', defaults, False, type=bool,
        help='Skip builds if the desired image already exists in GCE.')
    self.add_argument(
        parser, 'delete_existing', defaults, None, type=bool,
        help='Delete pre-existing desired images from GCE.')
    self.add_argument(
        parser, 'build_gce_service_account', defaults, None,
        help='Service account for building images.')
    self.add_argument(
        parser, 'build_gce_project', defaults, None,
        help='Project to build image in.')
    self.add_argument(
        parser, 'build_gce_zone', defaults, 'us-central1-f',
        help='Zone to build image in.')

    halyard_install_sh = 'dev/halyard_install_component.sh'
    self.add_argument(
        parser, 'install_image_script', defaults, halyard_install_sh,
        help='Script for installing images.')

    publish_image_sh = os.path.join(
        os.path.dirname(__file__), '..', '..', 'google', 'dev',
        'publish_gce_release.sh')
    self.add_argument(
        parser, 'publish_gce_image_script', defaults, publish_image_sh,
        help='Script for publishing images to a project.')

    self.add_argument(
        parser, 'git_branch', defaults, None,
        help='Github branch to get install scripts from.'
             ' If none, then use the source repo branch that this script'
             ' is running from.')
    self.add_argument(
        parser, 'bintray_org', defaults, None,
        help='The bintray organization for the bintray_*_repositories.')
    self.add_argument(
        parser, 'bintray_debian_repository', defaults, None,
        help='Repository where built debians were placed.')
    self.add_argument(
        parser, 'halyard_bom_bucket', defaults, 'halconfig',
        # FIX: corrected typo "manaing" -> "managing" in user-facing help.
        help='The bucket managing halyard BOMs and config profiles.')
def add_bom_parser_args(parser, defaults):
  """Adds parser arguments pertaining to publishing boms."""
  # Module-level convenience wrapper delegating to the factory staticmethod.
  BuildGceComponentImagesFactory.add_bom_parser_args(parser, defaults)
def register_commands(registry, subparsers, defaults):
  # Entry point used by the buildtool command registry to install this
  # module's command(s).
  BuildGceComponentImagesFactory().register(registry, subparsers, defaults)
| 38.601375 | 80 | 0.694828 |
87f8e6aebe2affc23242c16b59b99e717779dad0 | 1,221 | py | Python | NLUtils.py | IlhamH13/VideoCaptioning-Inceptionv4 | 150606affc6f8a738ca148492446393971a15dc6 | [
"MIT"
] | null | null | null | NLUtils.py | IlhamH13/VideoCaptioning-Inceptionv4 | 150606affc6f8a738ca148492446393971a15dc6 | [
"MIT"
] | null | null | null | NLUtils.py | IlhamH13/VideoCaptioning-Inceptionv4 | 150606affc6f8a738ca148492446393971a15dc6 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
def decode_sequence(ix_to_word, seq):
    """Turn a batch of index sequences into space-joined word strings.

    ix_to_word: dict mapping *string* indices (e.g. '3') to words.
    seq: (N, D) tensor of vocabulary indices; an index <= 0 ends the row.
    Returns a list of N decoded sentences.
    """
    seq = seq.cpu()
    rows, cols = seq.size()
    decoded = []
    for r in range(rows):
        words = []
        for c in range(cols):
            token_ix = seq[r, c].item()
            if token_ix <= 0:
                # 0 acts as the end-of-sequence marker.
                break
            words.append(ix_to_word[str(token_ix)])
        decoded.append(' '.join(words))
    return decoded
class LanguageModelCriterion(nn.Module):
    """Masked sequence cross-entropy loss, summed over tokens and
    averaged over the batch dimension."""

    def __init__(self):
        super(LanguageModelCriterion, self).__init__()
        # FIX: `reduce=False` is the deprecated pre-0.4 spelling; the
        # supported equivalent is reduction='none' (identical per-element
        # losses, no deprecation warning).
        self.loss_fn = nn.CrossEntropyLoss(reduction='none')

    def forward(self, inputs, target, mask):
        """
        inputs: shape of (N, seq_len, vocab_size) -- unnormalized logits
        target: shape of (N, seq_len) -- gold token indices
        mask: shape of (N, seq_len) -- 1.0 for real tokens, 0.0 for padding
        Returns a scalar tensor: sum of masked token losses / N.
        """
        batch_size = inputs.shape[0]
        # Truncate target/mask to the decoded sequence length.
        target = target[:, :inputs.shape[1]]
        mask = mask[:, :inputs.shape[1]]

        # Flatten to (N*seq_len, vocab_size) / (N*seq_len,) for the loss.
        inputs = inputs.contiguous().view(-1, inputs.shape[2])
        target = target.contiguous().view(-1)
        mask = mask.contiguous().view(-1)
        loss = self.loss_fn(inputs, target)
        # Zero out padding positions, then average over the batch.
        output = torch.sum(loss * mask) / batch_size
        return output
e3e96b4806a689caf0038a5250b3532e11e86a19 | 893 | py | Python | train/3c-check_caption-en_data.py | kota7/kgschart | a08a1ac37d351a9c999c0221ba3a35c28492f148 | [
"MIT"
] | 1 | 2020-10-26T16:23:58.000Z | 2020-10-26T16:23:58.000Z | train/3c-check_caption-en_data.py | kota7/kgschart | a08a1ac37d351a9c999c0221ba3a35c28492f148 | [
"MIT"
] | 3 | 2017-05-11T17:07:43.000Z | 2017-05-12T18:41:17.000Z | train/3c-check_caption-en_data.py | kota7/kgschart | a08a1ac37d351a9c999c0221ba3a35c28492f148 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
import numpy as np
import os
# Repository root, resolved relative to this script's location.
proj_root = os.path.abspath(os.path.join(os.path.dirname( \
    os.path.realpath(__file__)), '../'))

def check(xfile, yfile):
    """Visually spot-check caption data: show images from xfile in random
    order, 9 per page, each annotated with its label from yfile."""
    X = np.load(xfile)
    Y = np.load(yfile)
    print('size of X =', len(X), '; size of Y =', len(Y))
    if len(X) != len(Y):
        # Images and labels must pair up one-to-one.
        print('length mismatch!')
        return
    # Random order so repeated runs sample different examples first.
    index = np.random.permutation(len(X))
    i = 0
    while i < len(X):
        # Fill a 3x3 grid, then block on plt.show() until the window closes.
        for j in range(9):
            if i >= len(X): break
            a = plt.subplot(3, 3, j+1)
            a.imshow(X[index[i]], cmap='gray')
            a.text(0, 0, Y[index[i]], bbox={'facecolor':"wheat"})
            i += 1
        plt.show()

check(os.path.join(proj_root, 'data/caption/X-en.npy'), \
      os.path.join(proj_root, 'data/caption/Y-en.npy'))
| 24.805556 | 65 | 0.525196 |
1395466ab47033d6856c1a39e3d2d2ed45fdc80a | 595 | py | Python | addons/test_website/tests/test_multi_company.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/test_website/tests/test_multi_company.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/test_website/tests/test_multi_company.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import HttpCase, tagged
@tagged('post_install', '-at_install')
class TestMultiCompany(HttpCase):
    # HTTP-level test: the /multi_company_website route (from test_website)
    # reports the company active in the request context.

    def test_company_in_context(self):
        """ Test website company is set in context """
        website = self.env.ref('website.default_website')
        company = self.env['res.company'].create({'name': "Adaa"})
        website.company_id = company
        # The route is expected to return a JSON payload whose first element
        # is the id of the website's company.
        response = self.url_open('/multi_company_website')
        self.assertEqual(response.json()[0], company.id)
0c07d2d61a8b68b54514334601b81fd32b29bcc0 | 1,040 | py | Python | Monte_Carlo/pi_Monte_Carlo.py | Farhad-UPC/Reinforcement_Learning | 6a48f6347adcf8b07267eab33290a32164c5d165 | [
"MIT"
] | null | null | null | Monte_Carlo/pi_Monte_Carlo.py | Farhad-UPC/Reinforcement_Learning | 6a48f6347adcf8b07267eab33290a32164c5d165 | [
"MIT"
] | null | null | null | Monte_Carlo/pi_Monte_Carlo.py | Farhad-UPC/Reinforcement_Learning | 6a48f6347adcf8b07267eab33290a32164c5d165 | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
from bokeh.plotting import figure, show
from bokeh.plotting import *
import math
import random
# Monte Carlo estimate of pi: sample points in the unit square and count
# how many fall inside the quarter circle x**2 + y**2 <= size**2.
points_in = 0 # number of points inside the quarter circle
points_out = 0 # total number of sampled points (incremented for EVERY draw)
square_size = 1
sample = 1000
# Quarter-circle arc (angles 0..pi/2) used only for plotting the boundary.
arc = np.linspace (0, np.pi/2, 100)
def generate(size):
    """Draw a uniform random point in the [0, size) x [0, size) square."""
    coords = [random.random() * size for _ in range(2)]
    return tuple(coords)
def in_circle(point, size):
    """Return True when `point` lies inside (or on) the circle of radius
    `size` centered at the origin."""
    x_coord, y_coord = point
    distance = math.sqrt(x_coord ** 2 + y_coord ** 2)
    return distance <= size
def pi(points_in, points_out):
    """Estimate pi as 4 * (hits inside the quarter circle / total samples)."""
    hit_ratio = points_in / points_out
    return hit_ratio * 4
# Bokeh figure for visualizing the sampled points and the circle arc.
t = figure (title ="Approximate value of pi", plot_width = 600, plot_height = 700)
for i in range (sample):
    point = generate (square_size)
    t.circle(point[0], point[1], color ="orange")
    points_out +=1  # despite the name, this counts every sample (the total)
    if in_circle(point, square_size):
        points_in +=1
print ("Approximate value of pi {}".format (pi(points_in, points_out)))
# Draw the quarter-circle boundary for reference.
t.line(1*np.cos(arc), 1*np.sin(arc) , color = "black")
show (t)
65f88213d169294c26f84dc1ab7ef5e689f1eaea | 9,289 | py | Python | mdso/spectral_embedding_.py | artk2019/AISTAT_2019_107 | 31475dc9b54404a01e519111250377ed9bd85820 | [
"BSD-3-Clause"
] | null | null | null | mdso/spectral_embedding_.py | artk2019/AISTAT_2019_107 | 31475dc9b54404a01e519111250377ed9bd85820 | [
"BSD-3-Clause"
] | null | null | null | mdso/spectral_embedding_.py | artk2019/AISTAT_2019_107 | 31475dc9b54404a01e519111250377ed9bd85820 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Given a similarity matrix on sparse or numpy format, it creates a
Laplacian Embedding, for various type of graph Laplacian as well as
normalization.
So far the similarity is assumed to represent a fully connected graph.
'''
# try:
# np
# except:
import numpy as np
# graphical utilities
import matplotlib.pyplot as plt
import time
# import sys
from scipy import sparse as sp
# from sinkhorn_knopp import sinkhorn_knopp as skp
"""
if 'sinkhorn' not in sys.modules:
'''
TODO: may not be true for every-one
'''
sys.path.append("/usr/local/lib/python3.5/dist-packages")
"""
def check_matrix(matrix):
    '''
    check that the matrix is square and symmetric and return its
    type ('sparse' for scipy.sparse inputs, 'dense' for numpy arrays).
    Raises ValueError on non-square, negative, or asymmetric input.
    '''
    # check for squareness
    (n, m) = matrix.shape
    if n != m:
        raise ValueError('the matrix is not square')
    # check for symmetry
    if sp.issparse(matrix):
        # all stored values must be nonnegative
        if not (sp.find(matrix)[2] >= 0).all():
            raise ValueError('the matrix is not nonnegative')
        # symmetric up to an absolute tolerance of 1e-10
        if not (abs(matrix-matrix.T) > 1e-10).nnz == 0:
            raise ValueError('specified similarity matrix is not\
            symmetric')
        return('sparse')
    else:
        if not np.all(matrix >= 0):
            raise ValueError('matrix is not nonnegative')
        # dense path uses a looser tolerance (atol=1e-6) than the sparse path
        if not np.allclose(matrix, matrix.T, atol=1e-6):
            raise ValueError('specified similarity matrix is not\
            symmetric.')
        return('dense')
def normalize(matrix, type_normalization):
    '''
    normalize a similarity matrix with coifman
    or sinkhorn normalization.
    type_normalization: None (pass-through), 'coifman', or 'sinkhorn'.
    With None, sparse input is still copied to a float64 CSR matrix.
    '''
    type_matrix = check_matrix(matrix)
    n = matrix.shape[0]
    if type_normalization is None:
        if type_matrix == 'sparse':
            return(sp.csr_matrix(matrix, copy=True, dtype='float64'))
        else:
            return(matrix)
    if type_normalization not in ['coifman', 'sinkhorn']:
        raise ValueError('the normalisation is not admissible')
    if type_normalization == 'coifman' and type_matrix == 'dense':
        # Coifman: W <- D^-1 W D^-1 with D = diag(row sums)
        W = matrix
        D = np.diag(1./matrix.dot(np.ones(n)))
        W = D.dot(matrix.dot(D))
        return(W)
    if type_normalization == 'coifman' and type_matrix == 'sparse':
        W = sp.csr_matrix(matrix, copy=True, dtype='float64')
        d = W.sum(axis=1).getA1()
        # d = np.array(sim_mat.sum(axis=0, dtype=sim_mat.dtype))[0]
        Dinv = sp.diags(1./d)
        W = Dinv.dot(W.dot(Dinv))
        return(W)
    if type_normalization == 'sinkhorn' and type_matrix == 'dense':
        # NOTE(review): `skp` is never defined -- its import is commented out
        # at the top of this module, so this branch raises NameError at
        # runtime unless sinkhorn_knopp is installed and the import restored.
        sk = skp.SinkhornKnopp()
        normalized_matrix = sk.fit(matrix)
        return(normalized_matrix)
    else:
        # NOTE(review): 'sinkhorn' with a *sparse* matrix also lands here,
        # so the error message is misleading for that case.
        raise ValueError('There is a problem in the choice of \
                         type_normalization')
def make_laplacian_emb_dense(matrix,
                             dim,
                             type_laplacian='unnormalized',
                             type_normalization=None,
                             scaled=False, verbose=0):
    '''
    Create a vector of dimension dim for every node whose pair-wise
    distances are stored in similarity_matrix.
    compute first k eigenvectors of matrix of the similarity_matrix
    L = diag(W 1) - W
    INPUT:
    matrix should be a numpy matrix here.
    scaled: False for raw eigenvectors; 'CTD' scales by 1/sqrt(eigenvalue);
    any other truthy value scales column k by 1/k.
    Returns an (n, dim) array (columns 1..dim, skipping the trivial
    constant eigenvector).
    '''
    admissible_type_laplacian = ['symmetric',
                                 'random_walk',
                                 'unnormalized']
    if type_laplacian not in admissible_type_laplacian:
        raise ValueError('the parameter type_laplacian is\
        not well specified!')
    n = matrix.shape[0]
    n_vec = dim+1
    # normalize similarity_matrix
    W = normalize(matrix, type_normalization)
    # compute a laplacian
    if type_laplacian == 'random_walk':
        # L_rw = I - W D^-1 (not symmetric; eigenvalues may come back complex)
        Laplacian = np.eye(n) - W.dot(np.diag(1/W.dot(np.ones(n))))
    elif type_laplacian == 'symmetric':
        # L_sym = I - D^-1/2 W D^-1/2
        Laplacian = np.eye(n) - np.diag(1/W.dot(np.ones(n))**(0.5)).\
            dot(W.dot(np.diag(1/W.dot(np.ones(n))**(0.5))))
    elif type_laplacian == 'unnormalized':
        # L = D - W
        Laplacian = np.diag(W.dot(np.ones(n))) - W
    # compute embedding: eigenvectors sorted by ascending eigenvalue
    [d, Vec] = np.linalg.eig(Laplacian)
    idx = d.argsort()
    d = d[idx]
    Vec = Vec[:, idx]
    if scaled:
        if scaled == 'CTD':
            # commute-time-distance scaling
            dsqrt = np.sqrt(d[1:n_vec])
        else:
            dsqrt = np.arange(1, n_vec)
        V = Vec[:, 1:n_vec].dot(np.diag(1./dsqrt))
    else:
        V = Vec[:, 1:n_vec]
    '''
    TODO: check if it not supposed to be Vec[:,2:k].dot(np.diag(d[2:k]))
    '''
    return(V)
def make_laplacian_emb_sparse(matrix,
                              dim,
                              type_laplacian='unnormalized',
                              type_normalization=None,
                              scaled=False, verbose=0):
    '''
    Create a vector of dimension dim for every node whose pair-wise
    distances are stored in similarity_matrix.
    compute first k eigenvectors of matrix of the similarity_matrix
    L = diag(W 1) - W
    Sparse counterpart of make_laplacian_emb_dense; uses shift-and-invert
    style trickery (largest eigenvalues of maxval*I - L) with eigsh because
    computing largest eigenvalues is faster than smallest.
    '''
    admissible_type_laplacian = ['symmetric', 'random_walk', 'unnormalized']
    if type_laplacian not in admissible_type_laplacian:
        raise ValueError('the parameter type_laplacian is not well specified!'
                         '(choose between symmetric, random_walk,'
                         ' or unnormalized)')
    n = matrix.shape[0]
    n_vec = dim + 1
    W = normalize(matrix, type_normalization)
    W.dtype = 'float64'
    # compute a laplacian (unnormalized by default)
    lap = sp.csgraph.laplacian(W, normed=False, return_diag=False)
    if type_laplacian == 'random_walk':
        # L_rw = D^-1 L
        d = W.sum(axis=1).getA1()
        Dinv = sp.diags(1./d)
        lap = Dinv.dot(lap)
    elif type_laplacian == 'symmetric':
        lap = sp.csgraph.laplacian(W, normed=True, return_diag=False)
    # lap = sp.csr_matrix(lap, copy=True, dtype='float64')
    lap.dtype = 'float64'
    # Compute embedding
    t0 = time.time()
    # Largest eigenvalue first
    (evals_max, _) = sp.linalg.eigsh(lap, 1, which='LA', tol=1e-15, ncv=20)
    t1 = time.time()
    if verbose > 1:
        print('Computed largest eigenvalue of A in {}s.\n'.format(t1-t0))
    maxval = float(evals_max)
    # Then largest eigenvalue of minus laplacian
    # (it is faster to compute largest eigenvalues)
    # NOTE: in-place update; `lap` itself is mutated from here on.
    m_lap = lap
    m_lap *= -1
    m_lap += sp.diags(np.tile(maxval, (n)))
    # m_lap = maxval * sp.identity(n) - lap
    # evec0 = np.ones(n) # , v0=evec0)
    evals_small, evecs_small = sp.linalg.eigsh(m_lap, n_vec, which='LA', tol=1e-15)
    # eval_s, evec_s = eigsh(lap, n_vec, which='SM', v0=evec0)
    t2 = time.time()
    if verbose > 0:
        print('Computed Laplacian embedding of dim. {} in {}s.\n'.format(
            dim, t2-t1))
    # Map back to the original (smallest) eigenvalues and sort ascending.
    evals_small = maxval - evals_small
    idx = np.array(evals_small).argsort()
    d = evals_small[idx]
    Vec = evecs_small[:, idx]
    # Drop the trivial first eigenvector.
    V = Vec[:, 1:n_vec]
    if scaled:
        if scaled == 'CTD':
            # commute-time-distance scaling
            dsqrt = np.sqrt(d[1:n_vec])
        else:
            dsqrt = np.arange(1, n_vec)
        V = V.dot(np.diag(1./dsqrt))
    return(V)
def make_laplacian_emb(matrix,
                       dim,
                       type_laplacian='unnormalized',
                       type_normalization=None,
                       scaled=False,
                       verb=0):
    """Dispatch to the dense or sparse Laplacian-embedding implementation
    depending on the input matrix type (also validates the matrix)."""
    backend = (make_laplacian_emb_dense
               if check_matrix(matrix) == 'dense'
               else make_laplacian_emb_sparse)
    return backend(matrix,
                   dim,
                   type_laplacian,
                   type_normalization,
                   scaled,
                   verbose=verb)
def vizualize_embedding(embedding, title=None, perm=None):
    """Scatter-plot the first two embedding coordinates.

    Points are colored by `perm` when given, otherwise by index order.
    Complex parts (possible with the random-walk Laplacian) are discarded.
    """
    embedding = np.real(embedding)
    n = embedding.shape[0]
    # FIX: time.clock() was removed in Python 3.8; perf_counter() serves the
    # same purpose here -- a monotonically growing figure id so successive
    # calls open distinct figures.
    plt.figure(int(time.perf_counter()*100))
    if perm is not None:
        plt.scatter(embedding[:, 0], embedding[:, 1], c=perm)
    else:
        plt.scatter(embedding[:, 0], embedding[:, 1], c=np.arange(n))
    if title is not None:
        plt.title(title)
if __name__ == '__main__':
    # Smoke test: embed a random symmetric nonnegative 5x5 matrix, once as a
    # dense array and once as a sparse CSR matrix, and plot both embeddings.
    # Embedding_graph()
    D = np.abs(np.random.normal(0, 1, (5, 5)))
    D = np.transpose(D) + D
    S = sp.csr_matrix(D)
    embeddi = make_laplacian_emb(D,
                                 3,
                                 type_laplacian='unnormalized',
                                 type_normalization='coifman',
                                 scaled=False)
    print(embeddi.shape)
    vizualize_embedding(embeddi)
    embeddi = make_laplacian_emb(S,
                                 3,
                                 type_laplacian='unnormalized',
                                 type_normalization='coifman',
                                 scaled=False)
    print(embeddi.shape)
    vizualize_embedding(embeddi)
| 34.276753 | 83 | 0.549898 |
88b86f6a2d7bf17710b6199b01667aa119f0dc5e | 681 | py | Python | app/core/migrations/0002_tag.py | tatjapan/python-recipe-backend-api | d1e07086d3f2f5ba01b47fa69928272a106e55c2 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | tatjapan/python-recipe-backend-api | d1e07086d3f2f5ba01b47fa69928272a106e55c2 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | tatjapan/python-recipe-backend-api | d1e07086d3f2f5ba01b47fa69928272a106e55c2 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-01-21 01:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Creates the Tag model: a short name owned by a user; deleting the
    # user cascades to their tags.

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=90)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.375 | 118 | 0.615272 |
e14d57a6cbf68c433deda3d5c63a340507c4dc9e | 1,498 | py | Python | statistical_learning_method/debug.py | StudyExchange/CourseCode | 82d25da36a7c40b3c36151575793e2901d23927b | [
"MIT"
] | 1 | 2019-06-28T07:22:51.000Z | 2019-06-28T07:22:51.000Z | statistical_learning_method/debug.py | StudyExchange/CourseCode | 82d25da36a7c40b3c36151575793e2901d23927b | [
"MIT"
] | null | null | null | statistical_learning_method/debug.py | StudyExchange/CourseCode | 82d25da36a7c40b3c36151575793e2901d23927b | [
"MIT"
] | null | null | null | import math
class MarkovForward(object):
    """HMM forward algorithm: evaluates P(O | lambda) for an observation
    sequence O under model lambda = (a, b, pi)."""

    def probability(self, a, b, pi, output):
        """Return the probability of observing `output`.

        a: state-transition matrix; b: emission matrix;
        pi: initial state distribution; output: observed symbol indices.
        Prints each recursion step for inspection.
        """
        num_states = len(a[0])  # number of hidden states (== len(pi))
        seq_len = len(output)   # length of the observation sequence
        # Initialization: alpha_1(i) = pi_i * b_i(o_1)
        current = [pi[s] * b[s][output[0]] for s in range(num_states)]
        print(current)
        history = [current]
        # Recursion: alpha_{t+1}(j) = [sum_i alpha_t(i) * a_ij] * b_j(o_{t+1})
        for t in range(1, seq_len):
            nxt = [0] * num_states
            for j in range(num_states):
                total = 0
                for k in range(num_states):
                    total += history[-1][k] * a[k][j]
                print('%.3f x %.3f' % (total, b[j][output[t]]), end=' = ')
                nxt[j] = total * b[j][output[t]]
                print('%.4f' % nxt[j])
            history.append(nxt)
        # Termination: P(O | lambda) = sum_i alpha_T(i)
        return sum(history[-1])
# Test data 1: p.177, Example 10.2 (the primary test case).
a = [
    [0.5, 0.2, 0.3],
    [0.3, 0.5, 0.2],
    [0.2, 0.3, 0.5]
]
b = [
    [0.5, 0.5],
    [0.4, 0.6],
    [0.7, 0.3]
]
pi = [0.2, 0.4, 0.4]
output = [0, 1, 0]
mf = MarkovForward()
print(mf.probability(a, b, pi, output))
# Test data 2: p.173, Example 10.1 (box-and-ball model). Useful to
# cross-check the forward and backward algorithms against each other.
a = [
    [0, 1, 0, 0],
    [0.4, 0, 0.6, 0],
    [0, 0.4, 0, 0.6],
    [0, 0, 0.5, 0.5]
]
b = [
    [0.5, 0.5],
    [0.3, 0.7],
    [0.6, 0.4],
    [0.8, 0.2]
]
pi = (0.25, 0.25, 0.25, 0.25)
output = [0, 0, 1, 1, 0]
mf = MarkovForward()
print(mf.probability(a, b, pi, output))
| 21.710145 | 77 | 0.435247 |
2f65a2bc03f881865e40083fcfb80131e545bec3 | 9,417 | py | Python | beginner_source/nlp/pytorch_tutorial.py | b21527616/tutorials | 07918d2401a0d8ca7af5ad38b876f5cb6b9aa646 | [
"BSD-3-Clause"
] | 2 | 2018-03-22T06:13:44.000Z | 2018-07-14T07:34:56.000Z | beginner_source/nlp/pytorch_tutorial.py | b21527616/tutorials | 07918d2401a0d8ca7af5ad38b876f5cb6b9aa646 | [
"BSD-3-Clause"
] | null | null | null | beginner_source/nlp/pytorch_tutorial.py | b21527616/tutorials | 07918d2401a0d8ca7af5ad38b876f5cb6b9aa646 | [
"BSD-3-Clause"
] | 5 | 2017-04-05T22:08:27.000Z | 2021-07-31T12:29:35.000Z | # -*- coding: utf-8 -*-
r"""
Introduction to PyTorch
***********************
Introduction to Torch's tensor library
======================================
All of deep learning is computations on tensors, which are
generalizations of a matrix that can be indexed in more than 2
dimensions. We will see exactly what this means in-depth later. First,
lets look what we can do with tensors.
"""
# Author: Robert Guthrie
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
######################################################################
# Creating Tensors
# ~~~~~~~~~~~~~~~~
#
# Tensors can be created from Python lists with the torch.Tensor()
# function.
#
# Create a torch.Tensor object with the given data. It is a 1D vector
V_data = [1., 2., 3.]
# NOTE(review): torch.Tensor(...) is the legacy constructor; modern PyTorch
# tutorials use torch.tensor(data) instead — confirm the targeted version.
V = torch.Tensor(V_data)
print(V)

# Creates a 2x3 matrix.
M_data = [[1., 2., 3.], [4., 5., 6]]
M = torch.Tensor(M_data)
print(M)

# Create a 3D tensor of size 2x2x2.
T_data = [[[1., 2.], [3., 4.]],
          [[5., 6.], [7., 8.]]]
T = torch.Tensor(T_data)
print(T)
######################################################################
# What is a 3D tensor anyway? Think about it like this. If you have a
# vector, indexing into the vector gives you a scalar. If you have a
# matrix, indexing into the matrix gives you a vector. If you have a 3D
# tensor, then indexing into the tensor gives you a matrix!
#
# A note on terminology:
# when I say "tensor" in this tutorial, it refers
# to any torch.Tensor object. Matrices and vectors are special cases of
# torch.Tensors, where their dimension is 1 and 2 respectively. When I am
# talking about 3D tensors, I will explicitly use the term "3D tensor".
#
# Index into V and get a scalar
print(V[0])
# Index into M and get a vector
print(M[0])
# Index into T and get a matrix
print(T[0])
######################################################################
# You can also create tensors of other datatypes. The default, as you can
# see, is Float. To create a tensor of integer types, try
# torch.LongTensor(). Check the documentation for more data types, but
# Float and Long will be the most common.
#
######################################################################
# You can create a tensor with random data and the supplied dimensionality
# with torch.randn()
#
x = torch.randn((3, 4, 5))
print(x)
######################################################################
# Operations with Tensors
# ~~~~~~~~~~~~~~~~~~~~~~~
#
# You can operate on tensors in the ways you would expect.
x = torch.Tensor([1., 2., 3.])
y = torch.Tensor([4., 5., 6.])
z = x + y
print(z)
######################################################################
# See `the documentation <http://pytorch.org/docs/torch.html>`__ for a
# complete list of the massive number of operations available to you. They
# expand beyond just mathematical operations.
#
# One helpful operation that we will make use of later is concatenation.
#
# By default, it concatenates along the first axis (concatenates rows)
x_1 = torch.randn(2, 5)
y_1 = torch.randn(3, 5)
z_1 = torch.cat([x_1, y_1])
print(z_1)
# Concatenate columns:
x_2 = torch.randn(2, 3)
y_2 = torch.randn(2, 5)
# second arg specifies which axis to concat along
z_2 = torch.cat([x_2, y_2], 1)
print(z_2)
# If your tensors are not compatible, torch will complain. Uncomment to see the error
# torch.cat([x_1, x_2])
######################################################################
# Reshaping Tensors
# ~~~~~~~~~~~~~~~~~
#
# Use the .view() method to reshape a tensor. This method receives heavy
# use, because many neural network components expect their inputs to have
# a certain shape. Often you will need to reshape before passing your data
# to the component.
#
x = torch.randn(2, 3, 4)
print(x)
print(x.view(2, 12)) # Reshape to 2 rows, 12 columns
# Same as above. If one of the dimensions is -1, its size can be inferred
print(x.view(2, -1))
######################################################################
# Computation Graphs and Automatic Differentiation
# ================================================
#
# The concept of a computation graph is essential to efficient deep
# learning programming, because it allows you to not have to write the
# back propagation gradients yourself. A computation graph is simply a
# specification of how your data is combined to give you the output. Since
# the graph totally specifies what parameters were involved with which
# operations, it contains enough information to compute derivatives. This
# probably sounds vague, so lets see what is going on using the
# fundamental class of Pytorch: autograd.Variable.
#
# First, think from a programmers perspective. What is stored in the
# torch.Tensor objects we were creating above? Obviously the data and the
# shape, and maybe a few other things. But when we added two tensors
# together, we got an output tensor. All this output tensor knows is its
# data and shape. It has no idea that it was the sum of two other tensors
# (it could have been read in from a file, it could be the result of some
# other operation, etc.)
#
# The Variable class keeps track of how it was created. Lets see it in
# action.
#
# Variables wrap tensor objects.
# NOTE(review): autograd.Variable was merged into Tensor in PyTorch 0.4; on
# modern versions a plain tensor created with requires_grad=True plays this
# role — confirm which PyTorch version this tutorial targets.
x = autograd.Variable(torch.Tensor([1., 2., 3]), requires_grad=True)
# You can access the data with the .data attribute
print(x.data)
# You can also do all the same operations you did with tensors with Variables.
y = autograd.Variable(torch.Tensor([4., 5., 6]), requires_grad=True)
z = x + y
print(z.data)
# BUT z knows something extra: its grad_fn records the operation that
# produced it, which is what makes backpropagation possible.
print(z.grad_fn)
######################################################################
# So Variables know what created them. z knows that it wasn't read in from
# a file, it wasn't the result of a multiplication or exponential or
# whatever. And if you keep following z.grad_fn, you will find yourself at
# x and y.
#
# But how does that help us compute a gradient?
#
# Lets sum up all the entries in z
s = z.sum()
print(s)
print(s.grad_fn)
######################################################################
# So now, what is the derivative of this sum with respect to the first
# component of x? In math, we want
#
# .. math::
#
# \frac{\partial s}{\partial x_0}
#
#
#
# Well, s knows that it was created as a sum of the tensor z. z knows
# that it was the sum x + y. So
#
# .. math:: s = \overbrace{x_0 + y_0}^\text{$z_0$} + \overbrace{x_1 + y_1}^\text{$z_1$} + \overbrace{x_2 + y_2}^\text{$z_2$}
#
# And so s contains enough information to determine that the derivative
# we want is 1!
#
# Of course this glosses over the challenge of how to actually compute
# that derivative. The point here is that s is carrying along enough
# information that it is possible to compute it. In reality, the
# developers of Pytorch program the sum() and + operations to know how to
# compute their gradients, and run the back propagation algorithm. An
# in-depth discussion of that algorithm is beyond the scope of this
# tutorial.
#
######################################################################
# Lets have Pytorch compute the gradient, and see that we were right:
# (note if you run this block multiple times, the gradient will increment.
# That is because Pytorch *accumulates* the gradient into the .grad
# property, since for many models this is very convenient.)
#
# calling .backward() on any variable will run backprop, starting from it.
s.backward()
print(x.grad)
######################################################################
# Understanding what is going on in the block below is crucial for being a
# successful programmer in deep learning.
#
x = torch.randn((2, 2))
y = torch.randn((2, 2))
z = x + y # These are Tensor types, and backprop would not be possible
var_x = autograd.Variable(x)
var_y = autograd.Variable(y)
# var_z contains enough information to compute gradients, as we saw above
var_z = var_x + var_y
print(var_z.grad_fn)
var_z_data = var_z.data # Get the wrapped Tensor object out of var_z...
# Re-wrap the tensor in a new variable
new_var_z = autograd.Variable(var_z_data)
# ... does new_var_z have information to backprop to x and y?
# NO!
print(new_var_z.grad_fn)
# And how could it? We yanked the tensor out of var_z (that is
# what var_z.data is). This tensor doesn't know anything about
# how it was computed. We pass it into new_var_z, and this is all the
# information new_var_z gets. If var_z_data doesn't know how it was
# computed, theres no way new_var_z will.
# In essence, we have broken the variable away from its past history
######################################################################
# Here is the basic, extremely important rule for computing with
# autograd.Variables (note this is more general than Pytorch. There is an
# equivalent object in every major deep learning toolkit):
#
# **If you want the error from your loss function to backpropagate to a
# component of your network, you MUST NOT break the Variable chain from
# that component to your loss Variable. If you do, the loss will have no
# idea your component exists, and its parameters can't be updated.**
#
# I say this in bold, because this error can creep up on you in very
# subtle ways (I will show some such ways below), and it will not cause
# your code to crash or complain, so you must be careful.
#
| 33.393617 | 125 | 0.644791 |
4c4cccd78a14d9742d6ee8a6ba3ed2d281aab12e | 672 | py | Python | machine_vision/draw_ellipse.py | cyrus07424/M5stickV-playground | 9c1447078bebb279684bf9fc4b485c1aae1c8b12 | [
"MIT"
] | 3 | 2020-03-17T16:20:14.000Z | 2021-03-21T09:12:20.000Z | machine_vision/draw_ellipse.py | cyrus07424/M5stickV-playground | 9c1447078bebb279684bf9fc4b485c1aae1c8b12 | [
"MIT"
] | null | null | null | machine_vision/draw_ellipse.py | cyrus07424/M5stickV-playground | 9c1447078bebb279684bf9fc4b485c1aae1c8b12 | [
"MIT"
] | 2 | 2020-04-17T01:35:36.000Z | 2020-10-31T00:54:45.000Z | #
#
# Draw an ellipse on the LCD.
# draw_ellipse appears to be an undocumented (hidden?) method of the
# MaixPy image module.
#
##################################################
# import
##################################################
import lcd
import image
##################################################
# initialize
##################################################
# Initialize the LCD.
lcd.init()
# Set the LCD orientation.
lcd.direction(lcd.YX_LRUD)
##################################################
# main
##################################################
# Create an empty image buffer.
img = image.Image()
# Draw an ellipse centered on the image: (center x, center y,
# x-axis radius, y-axis radius).
img.draw_ellipse(img.width() // 2, img.height() // 2, img.width() // 4, img.height() // 4)
# Show the image on the LCD.
lcd.display(img)
3f5932743d4c3184e99d51acad2adbf87c1c66ee | 23,406 | py | Python | scripts/release_scripts/deploy.py | queencai/oppia | c9a36db9c258588b04be9bc26f3d2efef7d21abc | [
"Apache-2.0"
] | 1 | 2021-06-26T00:31:08.000Z | 2021-06-26T00:31:08.000Z | scripts/release_scripts/deploy.py | queencai/oppia | c9a36db9c258588b04be9bc26f3d2efef7d21abc | [
"Apache-2.0"
] | null | null | null | scripts/release_scripts/deploy.py | queencai/oppia | c9a36db9c258588b04be9bc26f3d2efef7d21abc | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is a deployment script for Oppia that should only be used by release
coordinators.
The script creates a build with unnecessary files removed, and saves a copy of
the uploaded files to a deployment folder in the parent directory of the oppia/
folder. It then pushes this build to the production server.
IMPORTANT NOTES:
1. Before running this script, you must install third-party dependencies by
running
python -m scripts.install_third_party_libs
at least once.
2. This script should be run from the oppia root folder:
python -m scripts.release_scripts.deploy --app_name=[app_name]
where [app_name] is the name of your app. Note that the root folder MUST be
named 'oppia'.
3. If you want to start the production server in the maintenance mode (the site
will only work for super admins) add a --maintenance_mode flag.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import datetime
import os
import shutil
import subprocess
import sys
import python_utils
import release_constants
from scripts import common
from scripts import install_third_party_libs
from scripts.release_scripts import gcloud_adapter
from scripts.release_scripts import update_configs
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PY_GITHUB_PATH = os.path.join(
_PARENT_DIR, 'oppia_tools', 'PyGithub-%s' % common.PYGITHUB_VERSION)
sys.path.insert(0, _PY_GITHUB_PATH)
import github # isort:skip pylint: disable=wrong-import-position
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--app_name', help='name of the app to deploy to', type=str)
_PARSER.add_argument(
'--version', help='version to deploy', type=str)
_PARSER.add_argument(
'--maintenance_mode', action='store_true', default=False)
APP_NAME_OPPIASERVER = 'oppiaserver'
APP_NAME_OPPIATESTSERVER = 'oppiatestserver'
BUCKET_NAME_SUFFIX = '-resources'
CURRENT_DATETIME = datetime.datetime.utcnow()
LOG_FILE_PATH = os.path.join('..', 'deploy.log')
INDEX_YAML_PATH = os.path.join('.', 'index.yaml')
THIRD_PARTY_DIR = os.path.join('.', 'third_party')
FILES_AT_ROOT = ['favicon.ico', 'robots.txt']
IMAGE_DIRS = ['avatar', 'general', 'sidebar', 'logo']
# Denotes length for cache slug used in production mode. It consists of
# lowercase alphanumeric characters.
CACHE_SLUG_PROD_LENGTH = 6
DOT_CHAR = '.'
HYPHEN_CHAR = '-'
def preprocess_release(app_name, deploy_data_path):
    """Pre-processes release files.

    This function should be called from within release_dir_name defined
    in execute_deployment function. Currently it does the following:

    (1) Substitutes files from the per-app deployment data.
    (2) Change GCS_RESOURCE_BUCKET in assets/constants.ts.

    Args:
        app_name: str. Name of the app to deploy.
        deploy_data_path: str. Path for deploy data directory.

    Raises:
        Exception. Could not find deploy data directory.
        Exception. Could not find source path.
        Exception. Could not find destination path.
    """
    if not os.path.exists(deploy_data_path):
        raise Exception(
            'Could not find deploy_data directory at %s' % deploy_data_path)

    # Copies files in root folder to assets/.
    for filename in FILES_AT_ROOT:
        src = os.path.join(deploy_data_path, filename)
        dst = os.path.join(os.getcwd(), 'assets', filename)
        if not os.path.exists(src):
            raise Exception(
                'Could not find source path %s. Please check your deploy_data '
                'folder.' % src)
        if not os.path.exists(dst):
            raise Exception(
                'Could not find destination path %s. Has the code been '
                'updated in the meantime?' % dst)
        shutil.copyfile(src, dst)

    # Copies files in images to /assets/images.
    for dir_name in IMAGE_DIRS:
        src_dir = os.path.join(deploy_data_path, 'images', dir_name)
        dst_dir = os.path.join(os.getcwd(), 'assets', 'images', dir_name)
        if not os.path.exists(src_dir):
            raise Exception(
                'Could not find source dir %s. Please check your deploy_data '
                'folder.' % src_dir)
        common.ensure_directory_exists(dst_dir)
        for filename in os.listdir(src_dir):
            src = os.path.join(src_dir, filename)
            dst = os.path.join(dst_dir, filename)
            shutil.copyfile(src, dst)

    with python_utils.open_file(
        os.path.join(common.CONSTANTS_FILE_PATH), 'r') as assets_file:
        content = assets_file.read()
    # Sanity-check that the constants file is still in its development state
    # before the bucket name is rewritten in place below.
    assert '"DEV_MODE": true' in content, 'Invalid DEV_MODE'
    assert '"GCS_RESOURCE_BUCKET_NAME": "None-resources",' in content, (
        'Invalid value for GCS_RESOURCE_BUCKET_NAME in %s' % (
            common.CONSTANTS_FILE_PATH))
    bucket_name = app_name + BUCKET_NAME_SUFFIX
    common.inplace_replace_file(
        common.CONSTANTS_FILE_PATH,
        r'"GCS_RESOURCE_BUCKET_NAME": "None-resources",',
        '"GCS_RESOURCE_BUCKET_NAME": "%s",' % bucket_name)
def check_errors_in_a_page(url_to_check, msg_to_confirm):
    """Opens a page in the browser and asks the release coordinator to
    inspect it manually.

    Args:
        url_to_check: str. The url of the page to be tested.
        msg_to_confirm: str. The message displayed asking user for confirmation.

    Returns:
        bool. Whether the coordinator answered the confirmation prompt
        affirmatively.
    """
    common.open_new_tab_in_browser_if_possible(url_to_check)
    # Keep re-prompting until a non-empty answer is entered.
    while True:
        python_utils.PRINT(
            '******************************************************')
        python_utils.PRINT(
            'PLEASE CONFIRM: %s See %s '
            '(y/n)' % (msg_to_confirm, url_to_check))
        answer = python_utils.INPUT().lower()
        if not answer:
            continue
        return answer in release_constants.AFFIRMATIVE_CONFIRMATIONS
def update_and_check_indexes(app_name):
    """Updates indexes and checks if all indexes are serving.

    Args:
        app_name: str. The name of the app to deploy.

    Raises:
        Exception. All indexes are not serving on the indexes page.
    """
    # Update indexes, then prompt for a check that they are all serving
    # before continuing with the deployment.

    # NOTE: This assumes that the build process does not modify the
    # index.yaml file or create a different version of it to use in
    # production.
    indexes_page_url = (
        'https://console.cloud.google.com/datastore/indexes'
        '?project=%s') % app_name
    gcloud_adapter.update_indexes(INDEX_YAML_PATH, app_name)
    if not gcloud_adapter.check_all_indexes_are_serving(app_name):
        # Serving indexes can take a while, so the deployment is aborted
        # (rather than blocked) and must be re-run once they are ready.
        common.open_new_tab_in_browser_if_possible(indexes_page_url)
        raise Exception(
            'Please wait for all indexes to serve, then run this '
            'script again to complete the deployment. For details, '
            'visit the indexes page. Exiting.')
def build_scripts(maintenance_mode):
    """Builds and minifies all the scripts.

    Args:
        maintenance_mode: bool. Whether to enable the maintenance mode.

    Raises:
        Exception. The build process fails.
    """
    # Do a build, while outputting to the terminal.
    python_utils.PRINT('Building and minifying scripts...')
    build_command = [
        'python', '-m', 'scripts.build', '--prod_env', '--deploy_mode']
    if maintenance_mode:
        build_command.append('--maintenance_mode')
    build_process = subprocess.Popen(build_command, stdout=subprocess.PIPE)
    # Echo the build output until EOF. readline() returns an empty value only
    # at EOF; the previous code broke on the *stripped* line, so the first
    # blank line of build output silently stopped the echoing.
    while True:
        line = build_process.stdout.readline()
        if not line:
            break
        python_utils.PRINT(line.strip())
    # Wait for process to terminate, then check return code.
    build_process.communicate()
    if build_process.returncode > 0:
        raise Exception('Build failed.')
def deploy_application_and_write_log_entry(
        app_name, version_to_deploy_to, current_git_revision):
    """Deploys the app and writes the log entry.

    Args:
        app_name: str. The name of the app to deploy.
        version_to_deploy_to: str. The version to deploy to.
        current_git_revision: str. The current git revision.
    """
    # Deploy export service to GAE.
    gcloud_adapter.deploy_application('export/app.yaml', app_name)
    # Deploy app to GAE.
    gcloud_adapter.deploy_application(
        './app.yaml', app_name, version=version_to_deploy_to)

    # Writing log entry. The log is opened in append mode so that it
    # accumulates one line per successful deployment.
    common.ensure_directory_exists(os.path.dirname(LOG_FILE_PATH))
    with python_utils.open_file(LOG_FILE_PATH, 'a') as log_file:
        log_file.write(
            'Successfully deployed to %s at %s (version %s)\n' % (
                app_name, CURRENT_DATETIME.strftime('%Y-%m-%d %H:%M:%S'),
                current_git_revision))
def flush_memcache(app_name):
    """Opens the App Engine memcache console for the given app and waits
    for the release coordinator to confirm that the memcache was flushed.

    Args:
        app_name: str. The name of the app to deploy.
    """
    console_url = (
        'https://console.cloud.google.com/appengine/memcache?'
        'src=ac&project=%s') % app_name
    common.open_new_tab_in_browser_if_possible(console_url)
    common.ask_user_to_confirm('Please flush the memcache.')
def switch_version(app_name, current_release_version):
    """Switches version if library page loads correctly.

    Args:
        app_name: str. The name of the app to deploy.
        current_release_version: str. The version of the current release.

    Raises:
        Exception. The library page does not load correctly.
    """
    release_version_library_url = (
        'https://%s-dot-%s.appspot.com/community-library' % (
            current_release_version, app_name))
    library_page_loads_correctly = check_errors_in_a_page(
        release_version_library_url, 'Library page is loading correctly?')
    if not library_page_loads_correctly:
        raise Exception(
            'Aborting version switch due to issues in library page '
            'loading.')
    # For hotfix branches the coordinator is asked explicitly whether to
    # migrate traffic; for regular releases the switch happens by default.
    version_switch = release_constants.AFFIRMATIVE_CONFIRMATIONS[0]
    if common.is_current_branch_a_hotfix_branch():
        python_utils.PRINT('Do you want to switch version?')
        version_switch = python_utils.INPUT()
    if version_switch in release_constants.AFFIRMATIVE_CONFIRMATIONS:
        gcloud_adapter.switch_version(
            app_name, current_release_version)
        python_utils.PRINT(
            'Successfully migrated traffic to release version!')
def check_breakage(app_name, current_release_version):
    """Checks if there is any major breakage for test server deployment
    and asks the user to file an issue if that is the case.

    Args:
        app_name: str. The name of the app to deploy.
        current_release_version: str. The version of the current release.

    Raises:
        Exception. There is major breakage found through test server logs.
    """
    # If this is a test server deployment and the current release version is
    # already serving, open the GAE error logs.

    # The logs URL is pre-filtered to error-level entries (minLogLevel=500).
    test_server_error_logs_url = (
        'https://console.cloud.google.com/logs/viewer?'
        'project=%s&key1=default&minLogLevel=500') % app_name

    currently_served_version = (
        gcloud_adapter.get_currently_served_version(app_name))
    if (app_name == APP_NAME_OPPIATESTSERVER or 'migration' in app_name) and (
            currently_served_version == current_release_version):
        major_breakage = check_errors_in_a_page(
            test_server_error_logs_url, 'Is anything major broken?')
        if major_breakage:
            common.open_new_tab_in_browser_if_possible(
                release_constants.RELEASE_DRIVE_URL)
            common.open_new_tab_in_browser_if_possible(
                release_constants.ISSUE_FILING_URL)
            raise Exception(
                'Please note the issue in the release journal for this month, '
                'file a blocking bug and switch to the last known good '
                'version.')
def check_travis_and_circleci_tests(current_branch_name):
    """Checks if all travis and circleci tests are passing on release/test
    branch.

    Args:
        current_branch_name: str. The name of current branch.

    Raises:
        Exception. The latest commit on release/test branch locally does not
            match the latest commit on local fork or upstream.
        Exception. The travis or circleci tests are failing on release/test
            branch.
    """
    local_sha = subprocess.check_output([
        'git', 'rev-parse', current_branch_name])
    origin_sha = subprocess.check_output([
        'git', 'rev-parse', 'origin/%s' % current_branch_name])
    upstream_sha = subprocess.check_output([
        'git', 'rev-parse', '%s/%s' % (
            common.get_remote_alias(release_constants.REMOTE_URL),
            current_branch_name)])
    # The local branch, the fork (origin) and the upstream Oppia repo must
    # all point at the same commit before CI results can be trusted.
    if local_sha != origin_sha:
        raise Exception(
            'The latest commit on release branch locally does '
            'not match the latest commit on your local fork.')
    if local_sha != upstream_sha:
        raise Exception(
            'The latest commit on release branch locally does '
            'not match the latest commit on Oppia repo.')

    python_utils.PRINT('\nEnter your GitHub username.\n')
    github_username = python_utils.INPUT().lower().strip()

    travis_url = 'https://travis-ci.org/%s/oppia/branches' % github_username
    circleci_url = 'https://circleci.com/gh/%s/workflows/oppia' % (
        github_username)

    # Fall back to the main oppia/oppia CI pages if the per-user URLs
    # cannot be opened (e.g. the fork does not have CI enabled).
    try:
        python_utils.url_open(travis_url)
    except Exception:
        travis_url = 'https://travis-ci.com/oppia/oppia/branches'

    try:
        python_utils.url_open(circleci_url)
    except Exception:
        circleci_url = 'https://circleci.com/gh/oppia/workflows/oppia'

    common.open_new_tab_in_browser_if_possible(travis_url)
    python_utils.PRINT(
        'Are all travis tests passing on branch %s?\n' % current_branch_name)
    travis_tests_passing = python_utils.INPUT().lower()
    if travis_tests_passing not in release_constants.AFFIRMATIVE_CONFIRMATIONS:
        raise Exception(
            'Please fix the travis tests before deploying.')

    common.open_new_tab_in_browser_if_possible(circleci_url)
    python_utils.PRINT(
        'Are all circleci tests passing on branch %s?\n' % current_branch_name)
    circleci_tests_passing = python_utils.INPUT().lower()
    if circleci_tests_passing not in (
            release_constants.AFFIRMATIVE_CONFIRMATIONS):
        raise Exception(
            'Please fix the circleci tests before deploying.')
def check_release_doc():
    """Prompts the release coordinator to create a dedicated section in the
    release tracking document (or, on a hotfix branch, to record the hotfix
    notes there) and opens the relevant reference tabs.
    """
    if common.is_current_branch_a_hotfix_branch():
        message = (
            'Please ensure you note down the notes for the hotfix in the '
            'release tracking document created by the QA Lead for the release '
            'corresponding to the hotfix.\n'
            'The three tabs in your browser point to: '
            'Release drive url, template for the release notes, example of '
            'release notes from previous release.')
    else:
        message = (
            'Please create a dedicated section for this release in the '
            'release tracking document created by the QA Lead.\n'
            'The three tabs in your browser point to: '
            'Release drive url, template for the release notes, example of '
            'release notes from previous release.')
    # Open the three reference tabs in the order the message describes.
    for url in (
            release_constants.RELEASE_DRIVE_URL,
            release_constants.RELEASE_NOTES_TEMPLATE_URL,
            release_constants.RELEASE_NOTES_EXAMPLE_URL):
        common.open_new_tab_in_browser_if_possible(url)
    common.ask_user_to_confirm(message)
def execute_deployment():
    """Executes the deployment process after doing the prerequisite checks.

    Raises:
        Exception. App name is invalid.
        Exception. Custom version is used with production app.
        Exception. App name is not specified.
        Exception. The deployment script is not run from a release or test
            branch.
        Exception. The deployment script is run for prod server from a test
            branch.
        Exception. Current release version has '.' character.
        Exception. Last commit message is invalid.
        Exception. The mailgun API key is not added before deployment.
        Exception. Could not find third party directory.
        Exception. Invalid directory accessed during deployment.
    """
    parsed_args = _PARSER.parse_args()
    custom_version = None
    if parsed_args.app_name:
        app_name = parsed_args.app_name
        if app_name not in [
                APP_NAME_OPPIASERVER, APP_NAME_OPPIATESTSERVER] and (
                    'migration' not in app_name):
            raise Exception('Invalid app name: %s' % app_name)
        if parsed_args.version and app_name == APP_NAME_OPPIASERVER:
            raise Exception('Cannot use custom version with production app.')
        # Note that custom_version may be None.
        custom_version = parsed_args.version
    else:
        raise Exception('No app name specified.')

    current_branch_name = common.get_current_branch_name()
    # Dots and colons in the app name are replaced with hyphens so the
    # directory name stays filesystem-friendly.
    release_dir_name = 'deploy-%s-%s-%s' % (
        '-'.join('-'.join(app_name.split('.')).split(':')),
        current_branch_name,
        CURRENT_DATETIME.strftime('%Y%m%d-%H%M%S'))
    release_dir_path = os.path.join(os.getcwd(), '..', release_dir_name)

    deploy_data_path = os.path.join(
        os.getcwd(), os.pardir, 'release-scripts', 'deploy_data', app_name)

    install_third_party_libs.main()

    if not (common.is_current_branch_a_release_branch() or (
            common.is_current_branch_a_test_branch())):
        raise Exception(
            'The deployment script must be run from a release or test branch.')
    if common.is_current_branch_a_test_branch() and (
            app_name in [APP_NAME_OPPIASERVER, APP_NAME_OPPIATESTSERVER]):
        raise Exception('Test branch can only be deployed to backup server.')
    if custom_version is not None:
        current_release_version = custom_version.replace(
            DOT_CHAR, HYPHEN_CHAR)
    else:
        current_release_version = current_branch_name[
            len(common.RELEASE_BRANCH_NAME_PREFIX):].replace(
                DOT_CHAR, HYPHEN_CHAR)

    # This is required to compose the release_version_library_url
    # (defined in switch_version function) correctly.
    if '.' in current_release_version:
        raise Exception('Current release version has \'.\' character.')

    assert len(current_release_version) <= 25, (
        'The length of the "version" arg should be less than or '
        'equal to 25 characters.')

    # Do prerequisite checks.
    common.require_cwd_to_be_oppia()
    common.ensure_release_scripts_folder_exists_and_is_up_to_date()
    gcloud_adapter.require_gcloud_to_be_available()
    try:
        if app_name == APP_NAME_OPPIASERVER:
            check_release_doc()
            release_version_number = common.get_current_release_version_number(
                current_branch_name)
            last_commit_message = subprocess.check_output(
                'git log -1 --pretty=%B'.split())
            personal_access_token = common.get_personal_access_token()
            if not common.is_current_branch_a_hotfix_branch():
                if not last_commit_message.startswith(
                        'Update authors and changelog for v%s' % (
                            release_version_number)):
                    raise Exception(
                        'Invalid last commit message: %s.' % (
                            last_commit_message))
                g = github.Github(personal_access_token)
                repo = g.get_organization('oppia').get_repo('oppia')
                common.check_blocking_bug_issue_count(repo)
                common.check_prs_for_current_release_are_released(repo)
            check_travis_and_circleci_tests(current_branch_name)
            update_configs.main(personal_access_token)
            with python_utils.open_file(common.FECONF_PATH, 'r') as f:
                feconf_contents = f.read()
            if ('MAILGUN_API_KEY' not in feconf_contents or
                    'MAILGUN_API_KEY = None' in feconf_contents):
                raise Exception(
                    'The mailgun API key must be added before deployment.')

        if not os.path.exists(THIRD_PARTY_DIR):
            raise Exception(
                'Could not find third_party directory at %s. Please run '
                'install_third_party_libs.py prior to running this script.'
                % THIRD_PARTY_DIR)

        current_git_revision = subprocess.check_output(
            ['git', 'rev-parse', 'HEAD']).strip()

        # Create a folder in which to save the release candidate.
        python_utils.PRINT('Ensuring that the release directory parent exists')
        common.ensure_directory_exists(os.path.dirname(release_dir_path))

        # Copy files to the release directory. Omits the .git subfolder.
        python_utils.PRINT('Copying files to the release directory')
        shutil.copytree(
            os.getcwd(), release_dir_path,
            ignore=shutil.ignore_patterns('.git'))

        # Change the current directory to the release candidate folder.
        with common.CD(release_dir_path):
            if not os.getcwd().endswith(release_dir_name):
                raise Exception(
                    'Invalid directory accessed during deployment: %s'
                    % os.getcwd())
            python_utils.PRINT('Changing directory to %s' % os.getcwd())

            python_utils.PRINT('Preprocessing release...')
            preprocess_release(app_name, deploy_data_path)

            update_and_check_indexes(app_name)
            build_scripts(parsed_args.maintenance_mode)
            deploy_application_and_write_log_entry(
                app_name, current_release_version,
                current_git_revision)

            python_utils.PRINT('Returning to oppia/ root directory.')

        switch_version(app_name, current_release_version)
        flush_memcache(app_name)
        check_breakage(app_name, current_release_version)

        python_utils.PRINT('Done!')
    finally:
        # Restore the config files that update_configs.main() modified,
        # regardless of whether the deployment succeeded.
        common.run_cmd([
            'git', 'checkout', '--',
            update_configs.LOCAL_FECONF_PATH,
            update_configs.LOCAL_CONSTANTS_PATH])
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when deploy.py is used as a script.
if __name__ == '__main__': # pragma: no cover
execute_deployment()
| 39.604061 | 80 | 0.675211 |
482b21ff62606a4d93a41cb1e5fae13fbfc71ad1 | 5,740 | py | Python | examples/process_module/functional.py | muliarska/Twitter-sentiment-analysis | 2e5e92771085ac488e965ed33695a5452bf40ddf | [
"MIT"
] | null | null | null | examples/process_module/functional.py | muliarska/Twitter-sentiment-analysis | 2e5e92771085ac488e965ed33695a5452bf40ddf | [
"MIT"
] | 2 | 2020-04-04T16:27:17.000Z | 2020-05-19T06:46:37.000Z | examples/process_module/functional.py | muliarska/Twitter-sentiment-analysis | 2e5e92771085ac488e965ed33695a5452bf40ddf | [
"MIT"
] | null | null | null | """Module for main calculations and processing of the program"""
from examples.adt_realization.tweets_linked_list import TweetsLinkedList
from examples.adt_realization.emotions_list import EmotionsList
class Functional:
"""Manages a process of the program_process"""
def __init__(self):
    """
    () ->
    Initialize an EMOTIONS_LIST, a TWEETS_LIST and a nickname
    to facilitate the management of program processes
    """
    # The emotion lexicon is loaded eagerly from the NRC intensity file.
    self.emotions_list = EmotionsList('NRC-Emotion-Intensity-Lexicon-v1.txt')
    # Both are populated later by set_nickname().
    self.tweets_list = None
    self.nickname = None
def set_nickname(self, nickname):
    """
    (str) ->
    Sets nickname of the user with which the program works
    and builds the linked list of that user's tweets.
    """
    self.nickname = nickname
    # Rebuild the tweet list for the newly selected user.
    self.tweets_list = TweetsLinkedList(nickname)
    self.tweets_list.create_linked()
@staticmethod
def _two_emotions(emotions):
"""
(list) -> list
Returns a list divided into positive and negative
emotions with their probabilities
"""
prob1 = (emotions[4][1] + emotions[6][1] + emotions[7][1]) / 3
prob2 = (emotions[0][1] + emotions[2][1] + emotions[3][1] + emotions[5][1]) / 4
probability1 = prob1 / (prob1 + prob2)
probability2 = 1 - probability1
return [('positive', probability1), ('negative', probability2)]
def available_days(self):
"""
() -> list
Returns a list with the dates on which user wrote tweets
"""
dates = []
for date in self.tweets_list.available_dates:
if date[1] not in dates and len(dates) < 10:
dates.append(date[1])
return dates
def many_emotions_last_time(self):
"""
() -> list
Returns a list with emotions and their probabilities
for last time
"""
result = []
for i in range(8):
cur_node = self.tweets_list.data
name = None
probability = 0
counter = 0
while cur_node is not None and counter < 20:
if len(cur_node.data) == 1:
text = cur_node.data[0]
else:
text = cur_node.data[1]
self.emotions_list.set_tweet(text)
emotions_tuple = self.emotions_list.get_emotions_probability()[i]
name = emotions_tuple[0]
probability += emotions_tuple[1]
cur_node = cur_node.next
counter += 1
result.append((name, probability / counter))
return result
def two_emotions_last_time(self):
"""
() -> list
Returns a list divided into positive and negative
emotions with their probabilities for last time
"""
emotions = self.many_emotions_last_time()
return self._two_emotions(emotions)
def many_emotions_specific_day(self, day):
"""
(str) -> list
Returns a list with emotions and their probabilities
for a specific DAY
"""
result = []
tweets = self.tweets_list.get_tweet_by_day(day)
for i in range(8):
name = None
probability = 0
counter = 0
for text in tweets:
while counter < 20:
self.emotions_list.set_tweet(text)
emotions_tuple = self.emotions_list.get_emotions_probability()[i]
name = emotions_tuple[0]
probability += emotions_tuple[1]
counter += 1
result.append((name, probability / counter))
return result
def two_emotions_specific_day(self, day):
"""
(str) -> list
Returns a list divided into positive and negative
emotions with their probabilities for a specific DAY
"""
emotions = self.many_emotions_specific_day(day)
return self._two_emotions(emotions)
@staticmethod
def _one_emotion(emotions_lst, emotion):
"""
(list, str) -> tuple
Returns the EMOTION and its probability
"""
for cur_el in emotions_lst:
if cur_el[0] == emotion:
return cur_el
return None
def one_emotion_last_time(self, emotion):
"""
(str) -> tuple
Returns the EMOTION and its probability for last time
"""
return self._one_emotion(self.many_emotions_last_time(), emotion)
def one_emotion_specific_day(self, emotion, day):
"""
(str) -> tuple
Returns the EMOTION and its probability for a specific DAY
"""
return self._one_emotion(self.many_emotions_specific_day(day), emotion)
if __name__ == '__main__':
    # Demo / smoke-test run: exercise every public method of Functional
    # against a live Twitter account and print the results.
    MANAGER = Functional()
    MANAGER.set_nickname('elonmusk')
    print("Available dates:")
    print(MANAGER.available_days())
    print("\nStatistics of many emotions for last time")
    print(MANAGER.many_emotions_last_time())
    print("\nStatistics of two emotions for last time")
    print(MANAGER.two_emotions_last_time())
    # Reuse the first available date for the per-day statistics below.
    DAY = MANAGER.available_days()[0]
    print("\nDay:")
    print(DAY)
    print("\nStatistics of many emotions for {}".format(DAY))
    print(MANAGER.many_emotions_specific_day(DAY))
    print("\nStatistics of two emotions for {}".format(DAY))
    print(MANAGER.two_emotions_specific_day(DAY))
    print("\nEmotion:")
    EMOTION = "joy"
    print(EMOTION)
    print("\nStatistic of {} for last time".format(EMOTION))
    print(MANAGER.one_emotion_last_time(EMOTION))
    print("\nStatistic of {} for {}".format(EMOTION, DAY))
    print(MANAGER.one_emotion_specific_day(EMOTION, DAY))
| 33.179191 | 87 | 0.595645 |
04115d01d63888710f2d9bffdd3ececd072af5c4 | 5,745 | py | Python | api/rf_temps/rf_collect.py | barretobrock/server-tools | 2f2b899994df90817686b2232d3d65e53defd8c2 | [
"MIT"
] | 1 | 2021-03-16T19:57:49.000Z | 2021-03-16T19:57:49.000Z | api/rf_temps/rf_collect.py | barretobrock/server-tools | 2f2b899994df90817686b2232d3d65e53defd8c2 | [
"MIT"
] | 3 | 2021-06-08T21:37:55.000Z | 2021-06-13T01:24:59.000Z | api/rf_temps/rf_collect.py | barretobrock/server-tools | 2f2b899994df90817686b2232d3d65e53defd8c2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
ETL for RTL_433 json objects via syslog -> processed Dataframe -> influx
Note: depends on `rf_stream` already being running and feeding data to port 1433
via `rtl_433 -F syslog::1433`
"""
import json
from json import JSONDecodeError
import socket
from datetime import datetime
import pandas as pd
from kavalkilu import InfluxDBLocal, InfluxDBHomeAuto, LogWithInflux, \
GracefulKiller, Hosts, HOME_SERVER_HOSTNAME, HAHelper
from servertools import SlackComm
# Shared logger and Slack client used throughout the collector.
logg = LogWithInflux('rf_temp', log_dir='rf')
sc = SlackComm(parent_log=logg)

# Listen address for the rtl_433 syslog feed (see module docstring:
# `rtl_433 -F syslog::1433` forwards packets to this port).
UDP_IP = Hosts().get_ip_from_host(HOME_SERVER_HOSTNAME)
UDP_PORT = 1433

# device id to device-specific data mapping (sensor id -> room name)
mappings = {
    3092: {'name': 'magamistuba'},
    5252: {'name': 'elutuba'},
    6853: {'name': 'kontor-wc'},
    8416: {'name': 'r6du-l22ne'},
    9459: {'name': 'freezer'},
    9533: {'name': 'kontor'},
    10246: {'name': 'v2lisuks'},
    12476: {'name': 'suur-wc'},
    14539: {'name': 'fridge'},
    15227: {'name': 'r6du-ida'}
}
# Other items that aren't sensors (currently empty; see the commented-out
# handling in the main loop below).
other_mappings = {
}
# Devices seen on air but not listed above; collected for a daily report.
unknown_devices = {}
# Map the names of the variables from the various sensors to what's acceptable in the db
possible_measurements = {
    'temperature_C': 'temp',
    'humidity': 'humidity'
}
def parse_syslog(ln: bytes) -> str:
    """Try to extract the payload from a syslog line.

    The payload is the 8th whitespace-separated field of a line that starts
    with the ``<PRI>`` marker; a line without the marker is returned whole.

    BUG FIX: the original decoded with ``"ascii"`` (despite its comment
    claiming UTF-8 support), which raised UnicodeDecodeError on any
    non-ASCII byte in a payload. Decode as UTF-8 and replace undecodable
    bytes so one bad packet cannot crash the collector.
    """
    ln = ln.decode("utf-8", errors="replace")
    if ln.startswith("<"):
        # fields should be "<PRI>VER", timestamp, hostname, command, pid, mid, sdata, payload
        fields = ln.split(None, 7)
        ln = fields[-1]
    return ln
logg.debug('Establishing socket...')
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.bind((UDP_IP, UDP_PORT))
logg.debug('Connecting to Influx..')
influx = InfluxDBLocal(InfluxDBHomeAuto.TEMPS)
# presumably installs signal handlers that flip killer.kill_now so the
# loop below can exit cleanly -- see kavalkilu.GracefulKiller.
killer = GracefulKiller()

# Set up methods to periodically send processed data packets to Influx
interval = datetime.now()
# Adjust interval to be an even 10 mins
replace_mins = interval.minute - interval.minute % 10
interval = interval.replace(minute=replace_mins, second=0, microsecond=0)
split_s = 300  # Data packet to influx interval (seconds)
logg.debug(f'Data packets sent to Influx every {split_s / 60} mins.')
# Buffer of measurements accumulated between Influx flushes.
data_df = pd.DataFrame()
last_dt = datetime.now().date()  # For reporting daily unknown devices
logg.debug('Beginning loop!')
while not killer.kill_now:
    line, _addr = sock.recvfrom(1024)
    # Convert line from bytes to str, prep for conversion into dict
    line = parse_syslog(line)
    data = None
    try:
        data = json.loads(line)
        # logg.debug(f'Seeing: {data}')
    except JSONDecodeError as e:
        logg.error_from_class(e, 'Unable to parse this object. Skipping.')
        continue

    if "model" not in data:
        # Exclude anything that doesn't contain a device 'model' key
        continue

    # Begin processing the data
    # NOTE(review): data cannot be None here (the except path continues),
    # so this guard is redundant but harmless.
    if data is not None:
        # Begin extraction process
        dev_id = data.get('id')
        dev_model = data.get('model')
        logg.debug(f'Receiving from device: {dev_model} ({dev_id})')
        if dev_id in mappings.keys():
            loc = mappings[dev_id]['name']
            logg.debug(f'Device identified. Location: {loc}')
            # Device is known sensor... record data
            measurements = {}
            for k, v in possible_measurements.items():
                if k in data.keys():
                    measurements[v] = data[k]
            if len(measurements) > 0:
                # Write to dataframe
                measurements.update({
                    'location': loc,
                    'timestamp': data['time']
                })
                # NOTE(review): DataFrame.append is deprecated in newer
                # pandas -- pd.concat is the modern replacement.
                data_df = data_df.append(pd.DataFrame(measurements, index=[0]))
                logg.debug('Successfully recorded object to dataframe..')
        elif dev_id in other_mappings.keys():
            pass
            # Handle signal another way
            # item = other_mappings.get(data['id']).get('name')
            # if item == 'gdo':
            #     # Routines for notifying gdo was used
            #     sc.st.send_message(sc.kodu_kanal, 'Someone used the garage door remote!')
            # elif item == 'doorbell':
            #     # Routines for notifying doorbell was used
            #     HAHelper().call_webhook('doorbell_pressed')
        else:
            # Unrecognised device: remember it for the daily report below.
            logg.info(f'Unknown device found: {dev_model}: ({dev_id})\n'
                      f'{json.dumps(data, indent=2)}')
            unknown_devices[dev_id] = data

    if (datetime.now() - interval).total_seconds() > split_s:
        # Gone over the time limit. Try to log all the non-duplicate info to database
        data_df = data_df.drop_duplicates()
        # Enforce data types
        for col in ['temp', 'humidity']:
            if col in data_df.columns:
                data_df[col] = data_df[col].astype(float)
        logg.debug(f'Logging interval reached. Sending {data_df.shape[0]} data points to db.')
        influx.write_df_to_table(data_df, tags='location', value_cols=['temp', 'humidity'], time_col='timestamp')
        # Reset our info
        logg.debug('Resetting interval and dataframe.')
        interval = datetime.now()
        data_df = pd.DataFrame()

    if last_dt != datetime.now().date() and len(unknown_devices) > 0:
        # Report on found unknown devices (Slack reporting currently disabled)
        # report_text = "\n\n".join([f'*{k}*\n{json.dumps(v, indent=2)}' for k, v in unknown_devices.items()])
        # sc.st.send_message(sc.kodu_kanal, message=f'Unknown devices discovered:\n\n{report_text}')
        last_dt = datetime.now().date()
        unknown_devices = {}

logg.debug('Collection ended. Closing Influx connection')
influx.close()
logg.close()
| 36.592357 | 113 | 0.631506 |
4a9f881befb291a09606e3402e4386c72246e985 | 5,387 | py | Python | ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/get_nlu_evaluation_results_response.py | Signal-Kinetics/alexa-apis-for-python | abb8d3dce18a5510c48b215406ed36c024f01495 | [
"Apache-2.0"
] | null | null | null | ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/get_nlu_evaluation_results_response.py | Signal-Kinetics/alexa-apis-for-python | abb8d3dce18a5510c48b215406ed36c024f01495 | [
"Apache-2.0"
] | null | null | null | ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/get_nlu_evaluation_results_response.py | Signal-Kinetics/alexa-apis-for-python | abb8d3dce18a5510c48b215406ed36c024f01495 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_smapi_model.v1.skill.nlu.evaluations.paged_results_response import PagedResultsResponse
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.nlu.evaluations.paged_results_response_pagination_context import PagedResultsResponsePaginationContextV1
from ask_smapi_model.v1.skill.nlu.evaluations.test_case import TestCaseV1
from ask_smapi_model.v1.skill.nlu.evaluations.links import LinksV1
class GetNLUEvaluationResultsResponse(PagedResultsResponse):
    """
    :param pagination_context: 
    :type pagination_context: (optional) ask_smapi_model.v1.skill.nlu.evaluations.paged_results_response_pagination_context.PagedResultsResponsePaginationContext
    :param links: 
    :type links: (optional) ask_smapi_model.v1.skill.nlu.evaluations.links.Links
    :param total_failed: count of tests failed. A test fails when the expected intent and expected slots are not identical.
    :type total_failed: (optional) float
    :param test_cases: 
    :type test_cases: (optional) list[ask_smapi_model.v1.skill.nlu.evaluations.test_case.TestCase]

    """
    # Auto-generated SDK model: attribute name -> type string consumed by
    # the SDK's deserializer.
    deserialized_types = {
        'pagination_context': 'ask_smapi_model.v1.skill.nlu.evaluations.paged_results_response_pagination_context.PagedResultsResponsePaginationContext',
        'links': 'ask_smapi_model.v1.skill.nlu.evaluations.links.Links',
        'total_failed': 'float',
        'test_cases': 'list[ask_smapi_model.v1.skill.nlu.evaluations.test_case.TestCase]'
    }  # type: Dict

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'pagination_context': 'paginationContext',
        'links': '_links',
        'total_failed': 'totalFailed',
        'test_cases': 'testCases'
    }  # type: Dict
    supports_multiple_types = False

    def __init__(self, pagination_context=None, links=None, total_failed=None, test_cases=None):
        # type: (Optional[PagedResultsResponsePaginationContextV1], Optional[LinksV1], Optional[float], Optional[List[TestCaseV1]]) -> None
        """
        :param pagination_context: 
        :type pagination_context: (optional) ask_smapi_model.v1.skill.nlu.evaluations.paged_results_response_pagination_context.PagedResultsResponsePaginationContext
        :param links: 
        :type links: (optional) ask_smapi_model.v1.skill.nlu.evaluations.links.Links
        :param total_failed: count of tests failed. A test fails when the expected intent and expected slots are not identical.
        :type total_failed: (optional) float
        :param test_cases: 
        :type test_cases: (optional) list[ask_smapi_model.v1.skill.nlu.evaluations.test_case.TestCase]
        """
        self.__discriminator_value = None  # type: str

        # Pagination fields are handled by the parent class.
        super(GetNLUEvaluationResultsResponse, self).__init__(pagination_context=pagination_context, links=links)
        self.total_failed = total_failed
        self.test_cases = test_cases

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}  # type: Dict

        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize list elements: nested models via to_dict(),
                # enum members via their .value, everything else as-is.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values the same way as list elements.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, GetNLUEvaluationResultsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
ecebe959b63e253efc07b6aab9f99f055b56a8e9 | 1,112 | py | Python | riprova/backoff.py | EdwardBetts/riprova | 0242cc3b633f582ae4f65bdfc8cbf14d1549b20f | [
"MIT"
] | 114 | 2016-12-27T11:22:15.000Z | 2021-06-17T19:24:40.000Z | riprova/backoff.py | EdwardBetts/riprova | 0242cc3b633f582ae4f65bdfc8cbf14d1549b20f | [
"MIT"
] | 20 | 2016-12-27T20:27:09.000Z | 2021-03-03T17:39:29.000Z | riprova/backoff.py | EdwardBetts/riprova | 0242cc3b633f582ae4f65bdfc8cbf14d1549b20f | [
"MIT"
] | 15 | 2016-12-27T17:30:49.000Z | 2018-08-24T16:42:23.000Z | # -*- coding: utf-8 -*-
import abc
from six import with_metaclass
class Backoff(with_metaclass(abc.ABCMeta, object)):
    """
    Backoff representing the minimum implementable interface
    by backoff strategies.

    This class does not provide any logic, it's simply used for documentation
    purposes and type polymorphism.

    Backoff implementations are intended to be used in a single-thread context.
    """

    # six.with_metaclass keeps this ABC usable on both Python 2 and 3.
    # Flag used by backoff strategies to notify when the retry max attempts
    # were reached and they should stop. next() returns this sentinel
    # instead of a wait time.
    STOP = -1

    @abc.abstractmethod
    def reset(self):
        """
        Resets the current backoff state data.

        Backoff strategies must implement this method.
        """

    @abc.abstractmethod
    def next(self):
        """
        Returns the number of seconds to wait before the next try,
        otherwise returns `Backoff.STOP`, which indicates the max number
        of retry operations were reached.

        Backoff strategies must implement this method.

        Returns:
            int: time to wait in seconds before the next try.
        """
f5c468bc56f872d19df254a2b918dc10ac159c2e | 2,689 | py | Python | Linear Regression/linear_regression.py | SpencerOfwiti/machine-learning-algorithms | a5fec0fd6b8da8f431902aacb1d938fa7873047e | [
"MIT"
] | 2 | 2021-07-16T10:23:36.000Z | 2021-11-29T05:41:49.000Z | Linear Regression/linear_regression.py | SpencerOfwiti/machine-learning-algorithms | a5fec0fd6b8da8f431902aacb1d938fa7873047e | [
"MIT"
] | 5 | 2021-06-09T17:46:31.000Z | 2022-03-12T00:24:20.000Z | Linear Regression/linear_regression.py | SpencerOfwiti/machine-learning-algorithms | a5fec0fd6b8da8f431902aacb1d938fa7873047e | [
"MIT"
] | 1 | 2021-09-27T16:48:18.000Z | 2021-09-27T16:48:18.000Z | # import libraries
import pandas as pd
import numpy as np
import sklearn
import sklearn.model_selection  # explicit import; `import sklearn` alone does not guarantee the submodule
from sklearn import linear_model
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from matplotlib import style
import pickle

style.use('ggplot')

# since our data is separated by semicolons we need to do sep=';'
data = pd.read_csv('Linear Regression/student-mat.csv', sep=';')

# trimming our data to show only relevant attributes
data = data[['G1', 'G2', 'G3', 'studytime', 'failures', 'absences']]
data = shuffle(data)  # shuffle the data
print(data.head())

# label - attribute we are trying to predict
predict = 'G3'

# features - attributes that will determine our label
# (keyword `axis=` instead of the positional form removed in pandas 2.0)
x = np.array(data.drop([predict], axis=1))  # features
y = np.array(data[predict])  # labels

# train the model multiple times and keep the run with the best test score
best = 0
for _ in range(1000):
    # splitting our data into training and testing data
    # 90% training, 10% testing -- a fresh random split each run
    x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1)

    # implementing linear regression
    linear = linear_model.LinearRegression()
    linear.fit(x_train, y_train)
    acc = linear.score(x_test, y_test)  # acc stands for accuracy
    print('Accuracy: ', acc)

    # if the current model has a better score than the one we've already trained then save it
    if acc > best:
        best = acc
        # saving our model; `with` guarantees the file handle is closed
        with open('Linear Regression/studentgrades.pickle', 'wb') as f:
            pickle.dump(linear, f)

# loading our best saved model
# BUG FIX: the original called linear.fit() again after loading, which
# retrained the model on the last split and discarded the saved best model.
# The pickle file handle is now also closed via a context manager.
with open('Linear Regression/studentgrades.pickle', 'rb') as pickle_in:
    linear = pickle.load(pickle_in)

acc = linear.score(x_test, y_test)  # score the loaded model on the last test split
print('Best Accuracy: ', acc)

# viewing constants used to generate the line of best fit
print('------------------------------')
print('Coefficient: \n', linear.coef_)  # these are each slope value
print('Intercept: \n', linear.intercept_)  # this is the intercept
print('------------------------------')

# predicting specific students
predictions = linear.predict(x_test)  # gets a list of all predictions
# (BUG FIX: the original loop variable `x` shadowed the feature matrix)
for prediction, features, actual in zip(predictions, x_test, y_test):
    print(prediction, features, actual)

# drawing and plotting our model
plot = 'G2'  # change this to G1, G2, studytime, failures or absences to see other graphs
plt.scatter(data[plot], data['G3'])
plt.legend(loc=4)
plt.xlabel(plot)
plt.ylabel('Final Grade')
plt.show()
| 33.197531 | 100 | 0.705095 |
2e77799233f1ed7a8b0064e4a5ff6d621ddaa6af | 3,761 | py | Python | var/spack/repos/builtin/packages/ncurses/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/ncurses/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/ncurses/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
from glob import glob
from os.path import exists, join
from os import makedirs
class Ncurses(AutotoolsPackage):
"""The ncurses (new curses) library is a free software emulation of
curses in System V Release 4.0, and more. It uses terminfo format,
supports pads and color and multiple highlights and forms
characters and function-key mapping, and has all the other
SYSV-curses enhancements over BSD curses."""
homepage = "http://invisible-island.net/ncurses/ncurses.html"
# URL must remain http:// so Spack can bootstrap curl
url = "http://ftpmirror.gnu.org/ncurses/ncurses-6.1.tar.gz"
version('6.1', sha256='aa057eeeb4a14d470101eff4597d5833dcef5965331be3528c08d99cebaa0d17')
version('6.0', sha256='f551c24b30ce8bfb6e96d9f59b42fbea30fa3a6123384172f9e7284bcf647260')
version('5.9', sha256='9046298fb440324c9d4135ecea7879ffed8546dd1b58e59430ea07a4633f563b')
variant('symlinks', default=False,
description='Enables symlinks. Needed on AFS filesystem.')
variant('termlib', default=False,
description='Enables termlib needs for gnutls in emacs.')
depends_on('pkgconfig', type='build')
patch('patch_gcc_5.txt', when='@6.0%gcc@5.0:')
patch('sed_pgi.patch', when='@:6.0')
def setup_environment(self, spack_env, run_env):
spack_env.unset('TERMINFO')
def flag_handler(self, name, flags):
if name == 'cflags' or name == 'cxxflags':
flags.append(self.compiler.pic_flag)
return (flags, None, None)
def configure(self, spec, prefix):
opts = [
'--with-shared',
'--with-cxx-shared',
'--enable-overwrite',
'--without-ada',
'--enable-pc-files',
'--with-pkg-config-libdir={0}/lib/pkgconfig'.format(self.prefix)
]
nwide_opts = ['--disable-widec',
'--without-manpages',
'--without-tests']
wide_opts = ['--enable-widec',
'--without-manpages',
'--without-tests']
if '+symlinks' in self.spec:
opts.append('--enable-symlinks')
if '+termlib' in self.spec:
opts.extend(('--with-termlib',
'--enable-termcap',
'--enable-getcap',
'--enable-tcap-names'))
prefix = '--prefix={0}'.format(prefix)
configure = Executable('../configure')
with working_dir('build_ncurses', create=True):
configure(prefix, *(opts + nwide_opts))
with working_dir('build_ncursesw', create=True):
configure(prefix, *(opts + wide_opts))
def build(self, spec, prefix):
with working_dir('build_ncurses'):
make()
with working_dir('build_ncursesw'):
make()
def install(self, spec, prefix):
with working_dir('build_ncurses'):
make('install')
with working_dir('build_ncursesw'):
make('install')
# fix for packages like hstr that use "#include <ncurses/ncurses.h>"
headers = glob(join(prefix.include, '*'))
for p_dir in ['ncurses', 'ncursesw']:
path = join(prefix.include, p_dir)
if not exists(path):
makedirs(path)
for header in headers:
install(header, path)
@property
def libs(self):
return find_libraries(
['libncurses', 'libncursesw'], root=self.prefix, recursive=True)
| 34.824074 | 93 | 0.604095 |
1660a1b698bd2f3bf787ec5a8ef7917691932071 | 4,234 | py | Python | src/web/views.py | andrii1812/blood-test-ocr | 8094da4d3f2758d870f57e593e4368a51be4f1f2 | [
"MIT"
] | 1 | 2020-12-07T02:32:13.000Z | 2020-12-07T02:32:13.000Z | src/web/views.py | andrii1812/blood-test-ocr | 8094da4d3f2758d870f57e593e4368a51be4f1f2 | [
"MIT"
] | null | null | null | src/web/views.py | andrii1812/blood-test-ocr | 8094da4d3f2758d870f57e593e4368a51be4f1f2 | [
"MIT"
] | null | null | null | import os
from PIL import Image
from flask import request, jsonify, send_from_directory
from flask_restful import Resource, abort
import ocr
import web
import orm
@web.app.route('/')
def index():
    """Serve the bundled single-page application entry point."""
    return web.app.send_static_file('dist/index.html')
@web.app.route('/uploads/<path:filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file from the uploads directory."""
    uploads_dir = os.path.join('..', web.app.config['UPLOADS_DEFAULT_DEST'])
    return send_from_directory(uploads_dir, filename)
class ImageRes(Resource):
    """REST resource for stored images."""

    def get(self, image_id=None):
        """Return one image record by id, or all records when no id is given."""
        if not image_id:
            return jsonify(orm.get_all_images())
        return jsonify(orm.get_image(image_id))

    def post(self):
        """Persist an uploaded image file and register it in the database."""
        upload = request.files['image']
        saved_name = web.images.save(upload)
        public_url = web.images.url(saved_name)
        # Saving consumed the stream; rewind before reading the dimensions.
        upload.stream.seek(0)
        with Image.open(upload.stream) as img:
            new_id = orm.save_image(saved_name, public_url, img.width, img.height)
        return str(new_id)

    def delete(self, image_id):
        """Remove the image record and respond with 204 No Content."""
        orm.delete_image(image_id)
        return '', 204
@web.app.route('/image/<int:image_id>/parse')
def parse_existing(image_id):
    """Run OCR over an already-uploaded image and return the parsed test."""
    path = orm.get_image_path(image_id)
    known_names = orm.get_all_reference_names()
    with open(path, 'rb') as stream:
        parsed = ocr.parse_image(Image.open(stream), known_names)
    payload = parsed._asdict()
    payload['images'] = [orm.get_image(image_id)]
    payload['tag'] = orm.get_default_tag().name
    return jsonify(payload)
@web.app.route('/reference_names')
def reference_names():
    """Return every known reference name as JSON."""
    names = orm.get_all_reference_names()
    return jsonify(names)
@web.app.route('/ingest_image', methods=['POST'])
def ingest_image():
    """Parse an uploaded image with OCR, persist it, and return the result.

    NOTE(review): this largely duplicates ImageRes.post plus the OCR step
    of parse_existing -- consider reusing those code paths.
    """
    references = orm.get_all_reference_names()
    image_obj = request.files['image']
    image = Image.open(image_obj.stream)
    test = ocr.parse_image(image, references)
    # The OCR pass consumed the upload stream; rewind before saving to disk.
    image_obj.stream.seek(0)
    filename = web.images.save(image_obj)
    url = web.images.url(filename)
    image_id = orm.save_image(filename, url, image.width, image.height)
    result = test._asdict()
    result['images'] = [
        {
            'id': image_id,
            'path': url,
            'width': image.width,
            'height': image.height
        }
    ]
    result['tag'] = orm.get_default_tag().name
    return jsonify(result)
class Test(Resource):
    """REST resource providing CRUD access to test records."""

    def get(self, test_id=None):
        """Return a single test by id, or every test when no id is given."""
        return orm.get_test(test_id) if test_id else orm.get_all_tests()

    def post(self):
        """Create a new test from the JSON payload and return its id."""
        payload = request.json
        date = payload['date']
        values = payload['values']
        images = payload['images']
        tag = payload.get('tag')
        return orm.save_test(date, values, images, tag)

    def put(self, test_id):
        """Replace an existing test wholesale; respond with 204."""
        payload = request.json
        orm.replace_test(test_id, payload['date'], payload['values'], payload['tag'])
        return '', 204

    def patch(self, test_id):
        """Partially update a test; absent fields are left untouched."""
        payload = request.json
        images = payload.get('images')
        # Only the first attached image (if any) is linked to the test.
        image_id = images[0]['id'] if images else None
        orm.update_test(test_id, payload.get('values', []), payload.get('tag'), image_id)
        return '', 204

    def delete(self, test_id):
        """Remove the test record; respond with 204 No Content."""
        orm.delete_test(test_id)
        return '', 204
@web.app.route('/find_test_id')
def find_test():
    """Look up a test id by its date (query parameter ``date``)."""
    date = request.args.get('date')
    if not date:
        # Missing parameter -> 400 Bad Request.
        return abort(400)
    found = orm.find_test_id(date)
    if found:
        return jsonify(found)
    raise ValueError('test with date {0} not found'.format(date))
@web.app.route('/tag')
def get_tags():
    """Return every known tag as JSON."""
    tags = orm.get_tags()
    return jsonify(tags)
@web.app.route('/stat', methods=['POST'])
def get_statistics():
    """Build statistics for the selected value lines over a date range."""
    data = request.json
    from_ = data.get('from')
    to = data.get('to')
    tag = data.get('tag')
    # Comprehension instead of map/lambda; same result.
    lines = [line['name'] for line in data['lines']]
    if not lines:
        raise ValueError('there should be at least one line selected')
    return jsonify(orm.generate_statistics(from_, to, tag, lines))
| 26.4625 | 79 | 0.624941 |
69c0d4a2bb75e8b6336fe3ab90dd8bf68d55f8a7 | 281 | py | Python | Beriflapp/4 CTRAONTS/CTRAONTS.py | rootaniumcca/Beriflapp | 036126550942c2825281325f13e5f70642de3566 | [
"MIT"
] | null | null | null | Beriflapp/4 CTRAONTS/CTRAONTS.py | rootaniumcca/Beriflapp | 036126550942c2825281325f13e5f70642de3566 | [
"MIT"
] | null | null | null | Beriflapp/4 CTRAONTS/CTRAONTS.py | rootaniumcca/Beriflapp | 036126550942c2825281325f13e5f70642de3566 | [
"MIT"
] | null | null | null | # Convert the resulting array of numbers to strings.
# Demonstrate converting a number to its string representation.
Beriflapp = 7

# The starting value is an int ...
print(type(Beriflapp))

# ... str() produces its text form ...
converted_num = str(Beriflapp)

# ... and the converted value is a str.
print(type(converted_num))
| 20.071429 | 53 | 0.740214 |
eb9f315157040322b833961f21d983773eeb5938 | 2,637 | py | Python | tests/openfl/transport/grpc/test_director_server.py | msheller/openfl | dc6b7b55e66fa69379036e9a412bee41ed46bde5 | [
"Apache-2.0"
] | 1 | 2021-07-14T08:43:55.000Z | 2021-07-14T08:43:55.000Z | tests/openfl/transport/grpc/test_director_server.py | msheller/openfl | dc6b7b55e66fa69379036e9a412bee41ed46bde5 | [
"Apache-2.0"
] | null | null | null | tests/openfl/transport/grpc/test_director_server.py | msheller/openfl | dc6b7b55e66fa69379036e9a412bee41ed46bde5 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Director tests module."""
from pathlib import Path
from unittest import mock
import pytest
from openfl.component.director import Director
from openfl.transport import DirectorGRPCServer
@pytest.fixture
def insecure_director():
    """Provide a DirectorGRPCServer with TLS disabled."""
    return DirectorGRPCServer(director_cls=Director, tls=False)
@pytest.fixture
def secure_director():
    """Provide a DirectorGRPCServer configured with test certificate paths."""
    server = DirectorGRPCServer(
        director_cls=Director,
        root_certificate='./cert/root_ca.crt',
        private_key='./cert/localhost.key',
        certificate='./cert/localhost.crt',
    )
    return server
def test_fill_certs(insecure_director, secure_director):
    """Check certificate fields on both secure and insecure servers."""
    for attr in ('root_certificate', 'private_key', 'certificate'):
        # Without TLS no certificate material is stored ...
        assert getattr(insecure_director, attr) is None
        # ... while with TLS every path was converted to a Path object.
        assert isinstance(getattr(secure_director, attr), Path)
    # A partially specified certificate set must be rejected.
    for bad_combo in (('.', '.', None), ('.', None, '.'), (None, '.', '.')):
        with pytest.raises(Exception):
            secure_director._fill_certs(*bad_combo)
    # A fully specified set is accepted without raising.
    secure_director._fill_certs('.', '.', '.')
def test_get_caller_tls(insecure_director):
    """get_caller should read the client id from the x509 auth context."""
    insecure_director.tls = True
    expected = 'client_id'
    grpc_context = mock.Mock()
    grpc_context.auth_context = mock.Mock(
        return_value={'x509_common_name': [expected.encode('utf-8')]}
    )
    assert insecure_director.get_caller(grpc_context) == expected
def test_get_sender_no_tls(insecure_director):
    """Without TLS the caller id comes from the invocation metadata.

    NOTE(review): the test name says get_sender but the API under test is
    get_caller -- presumably renamed at some point.
    """
    expected = 'client_id'
    grpc_context = mock.Mock()
    grpc_context.invocation_metadata.return_value = (('client_id', expected),)
    assert insecure_director.get_caller(grpc_context) == expected
def test_get_sender_no_tls_no_client_id(insecure_director):
    """Without TLS and without a client_id entry, a default id is returned."""
    grpc_context = mock.Mock()
    grpc_context.invocation_metadata = mock.Mock()
    grpc_context.invocation_metadata.return_value = (('key', 'value'),)
    assert insecure_director.get_caller(grpc_context) == '__default__'
| 32.9625 | 74 | 0.7281 |
a84a7d3b2450e44e7d7a64d5224f283cbb6646a3 | 5,340 | py | Python | NNServer/src/user_info.py | yinkn/iam_plus | 9b3912147e3a66cf3726ce074d03372d873c3c79 | [
"Apache-2.0"
] | null | null | null | NNServer/src/user_info.py | yinkn/iam_plus | 9b3912147e3a66cf3726ce074d03372d873c3c79 | [
"Apache-2.0"
] | null | null | null | NNServer/src/user_info.py | yinkn/iam_plus | 9b3912147e3a66cf3726ce074d03372d873c3c79 | [
"Apache-2.0"
] | null | null | null |
"""
UserInfo:
| user_name | password | e_mail | register_time| key_value |
| demo1 | **** | demo1@demo.com | 2017-11-30 | key1=value1;key2=value2 |
UserRecord:
|user_name | record_time | typing_record | key_value |
|demo1 | 2017-11-30 07:11:00 | [200,27,102,80,-36,80,579,66,21,61,208,71,-110] | key1=value1;key2=value2 |
CREATE TABLE user_info (user_name text, password text, e_mail text, register_time text, key_value text)
CREATE TABLE user_record (user_name user_name, record_time text, typing_record text, key_value text)
"""
import os
import sqlite3
import logging
import ast
import util
class UserInfo:
    """class to handle user information"""

    # Class-level default; init_sqlite() assigns an instance-level
    # sqlite3.Connection over it and exit_sqlite() resets it to None.
    sqlite_conn = None
def is_connected(self):
if self.sqlite_conn is None:
return False
else:
return True
def init_sqlite(self, data_file):
if os.path.isfile(data_file):
logging.info("Connect sqlite {0}".format(data_file))
self.sqlite_conn = sqlite3.connect(data_file)
else:
#if data_file is not existing, the connect will create one
logging.info("Create sqlite {0}".format(data_file))
self.sqlite_conn = sqlite3.connect(data_file)
cur = self.sqlite_conn.cursor()
sql = "CREATE TABLE user_info (name_id integer primary key, user_name text, password text, e_mail text, register_time text, key_value text)"
cur.execute(sql)
sql = "CREATE TABLE user_record (record_id integer primary key, user_name user_name, record_time text, typing_record text, key_value text)"
cur.execute(sql)
#commit db change
self.sqlite_conn.commit()
def exit_sqlite(self):
if self.sqlite_conn != None:
self.sqlite_conn.close()
self.sqlite_conn = None
def user_register(self, user_name, password, e_mail, key_value={}):
result = False
description = "Unknow reason."
if str(user_name).strip() == "":
logging.info("username is empty.")
result = False
description = "username is empty."
else:
if self.user_is_existing(user_name.strip()) == True:
result = False
description = "User is existing."
logging.debug("[{0}] user is created before registration.")
else:
now_time = util.time_to_str()
kv_str = util.dict_to_str(key_value)
cur = self.sqlite_conn.cursor()
cur.execute("insert into user_info(user_name, password, e_mail, register_time, key_value) values (?, ?, ?, ?, ?)"
, (user_name.strip(), password.strip(), e_mail.strip(), now_time, kv_str))
#commit data change
self.sqlite_conn.commit()
result = True
description = "User is created."
logging.info("[{0}] user is created.".format(user_name))
return result, description
def user_login(self, user_name, password):
cur = self.sqlite_conn.cursor()
cur.execute('select user_name from user_info where user_name=? and password=?', (user_name, password))
user = cur.fetchone()
if user is None:
return False
else:
logging.info("[{0}] user login.".format(user_name))
return True
def user_typing_record(self, user_name, typing, key_value={}):
result = False
description = "Unknow reason."
if self.user_is_existing(user_name.strip()) == True:
now_time = util.time_to_str()
kv_str = util.dict_to_str(key_value)
cur = self.sqlite_conn.cursor()
cur.execute("insert into user_record(user_name, typing_record, record_time, key_value) values (?, ?, ?, ?)"
, (user_name.strip(), typing.strip(), now_time, kv_str))
#commit data change
self.sqlite_conn.commit()
result = True
description = "Data is record."
else:
result = False
description = "User is not existing."
return result, description
def get_typing_record_with_kv(self, user_name, key_value={}, max_record=10):
typing_record_list = []
cur = self.sqlite_conn.cursor()
kv_str = util.dict_to_str(key_value)
for row in cur.execute( 'select typing_record, key_value from user_record where user_name=? ' \
, (user_name, )):
typing_record_list.append(ast.literal_eval(row[0]))
if len(typing_record_list) >= max_record and row[1].find(kv_str)>=0:
return typing_record_list
return typing_record_list
def user_logout(self, user_name):
pass
def user_unregister(self, user_name):
pass
def user_is_existing(self, user_name):
cur = self.sqlite_conn.cursor()
cur.execute('select user_name from user_info where user_name=?', (user_name, ))
user = cur.fetchone()
#logging.debug("user_is_existing: {0}".format(user))
if user is None:
return False
else:
return True
| 37.87234 | 152 | 0.592509 |
cfb5549094d882420b1e369326e142de3864c5ba | 2,814 | py | Python | cookbook/core/flyte_basics/files.py | jinserk/flytesnacks | b4742d1dedecdf8d4d6fc20f854128bc24657e8f | [
"Apache-2.0"
] | 1 | 2021-08-20T17:28:42.000Z | 2021-08-20T17:28:42.000Z | cookbook/core/flyte_basics/files.py | jeevb/flytesnacks | f32f32482088d717b399864c5470ae546ebcba7d | [
"Apache-2.0"
] | null | null | null | cookbook/core/flyte_basics/files.py | jeevb/flytesnacks | f32f32482088d717b399864c5470ae546ebcba7d | [
"Apache-2.0"
] | null | null | null | """
Working With Files
-------------------
Files are one of the most fundamental things that users of Python work with, and they are fully supported by Flyte.
In the IDL, they are known as `Blob <https://github.com/lyft/flyteidl/blob/cee566b2e6e109120f1bb34c980b1cfaf006a473/protos/flyteidl/core/literals.proto#L33>`__ literals
and are backed by the `blob type <https://github.com/lyft/flyteidl/blob/cee566b2e6e109120f1bb34c980b1cfaf006a473/protos/flyteidl/core/types.proto#L47>`__
Note that the type message includes an optional ``format`` field which is a text-field used to denote the file extension.
"""
import os
import urllib.request
import cv2
import flytekit
from flytekit import task, workflow
from flytekit.types.file import FlyteFile
# %%
# Let's assume our mission here is pretty simple. We want to take each of these links, download the picture, rotate it
# and return the file.
# Publicly reachable sample images, used as the default workflow input below.
default_images = [
    "https://upload.wikimedia.org/wikipedia/commons/a/a8/Fractal_pyramid.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/a/ad/Julian_fractal.jpg/256px-Julian_fractal.jpg",
]
# %%
# Note the signature of the return type of this task is a ``FlyteFile``. Files do not have a native object in Python
# so we had to write one ourselves. There does exist the ``os.PathLike`` protocol, but nothing implements it.
#
# When this task finishes, the flytekit engine will detect the ``FlyteFile`` instance being returned, find a location
# in Flyte's object store (usually S3), upload the file to that location, and create a Blob literal pointing to it.
#
# Note that the ``FlyteFile`` literal is scoped with a string, which gets inserted into the format of the Blob type.
# The ``[]`` are entirely optional, and if you don't specify it, the format will just be an ``""``.
@task
def rotate(image_location: str) -> FlyteFile:
    """
    Download the given image, rotate it by 180 degrees
    """
    working_dir = flytekit.current_context().working_directory
    source_path = os.path.join(working_dir, "incoming.jpg")
    urllib.request.urlretrieve(image_location, source_path)
    image = cv2.imread(source_path, 0)
    if image is None:
        raise Exception("Failed to read image")
    height, width = image.shape[:2]
    # Rotate 180 degrees around the image center.
    rotation = cv2.getRotationMatrix2D((width / 2, height / 2), 180, 1)
    rotated = cv2.warpAffine(image, rotation, (width, height))
    result_path = os.path.join(working_dir, "rotated.jpg")
    cv2.imwrite(result_path, rotated)
    return FlyteFile["jpg"](path=result_path)
@workflow
def rotate_one_workflow(in_image: str) -> FlyteFile:
    """Single-task workflow: rotate the image at ``in_image`` by 180 degrees."""
    return rotate(image_location=in_image)
# %%
# Execute it
if __name__ == "__main__":
    print(f"Running {__file__} main...")
    # Run the workflow locally with the first sample image.
    print(
        f"Running rotate_one_workflow(in_image=default_images[0]) {rotate_one_workflow(in_image=default_images[0])}"
    )
| 40.2 | 168 | 0.729922 |
c3eff7a877fffa3ad806094faf12001a64c89ba7 | 32,888 | py | Python | io_scene_vrm/importer/py_model.py | saturday06/VRM_IMPORTER_for_Blender | 42562eead251c82cf25f63451de6598b1dfa6140 | [
"MIT"
] | 105 | 2020-08-31T13:03:27.000Z | 2021-02-05T12:33:53.000Z | io_scene_vrm/importer/py_model.py | saturday06/VRM_IMPORTER_for_Blender | 42562eead251c82cf25f63451de6598b1dfa6140 | [
"MIT"
] | 44 | 2020-09-05T20:38:57.000Z | 2021-02-02T13:00:26.000Z | io_scene_vrm/importer/py_model.py | saturday06/VRM_IMPORTER_for_Blender | 42562eead251c82cf25f63451de6598b1dfa6140 | [
"MIT"
] | 12 | 2020-10-02T14:10:31.000Z | 2021-02-01T10:51:52.000Z | """
Copyright (c) 2018 iCyP
Released under the MIT license
https://opensource.org/licenses/mit-license.php
"""
import contextlib
import json
import math
import os
import re
import sys
import tempfile
from collections import OrderedDict
from dataclasses import dataclass, field
from itertools import repeat
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import ParseResult, parse_qsl, urlparse
import bpy
from .. import deep, lang, vrm_types
from ..gl_constants import GlConstants
from .binary_reader import BinaryReader
@dataclass
class PyMesh:
    """Intermediate representation of one glTF mesh primitive.

    Attribute names in caps (POSITION, JOINTS_0, ...) mirror the glTF
    vertex-attribute names so they can be set generically via setattr.
    """

    object_id: int  # index of the owning glTF mesh
    name: str = ""
    face_indices: List[List[int]] = field(default_factory=list)  # triangles as index triples
    skin_id: Optional[int] = None
    material_index: Optional[int] = None
    POSITION_accessor: Optional[int] = None  # accessor index, kept for caching
    POSITION: Optional[List[List[float]]] = None
    JOINTS_0: Optional[List[List[int]]] = None
    WEIGHTS_0: Optional[List[List[float]]] = None
    NORMAL: Optional[List[List[float]]] = None
    vert_normal_normalized: Optional[bool] = None
    # morph name -> [relative positions, accessor index]
    morph_target_point_list_and_accessor_index_dict: Optional[
        Dict[str, List[Any]]
    ] = None
@dataclass
class PyNode:
    """Intermediate representation of a glTF node (bone)."""

    name: str
    position: Sequence[float]  # translation relative to the parent node
    rotation: Sequence[float]  # quaternion (x, y, z, w)
    scale: Sequence[float]
    children: Optional[List[int]] = None  # child node indices
    blend_bone: Optional[bpy.types.Bone] = None  # filled in during Blender import
    mesh_id: Optional[int] = None
    skin_id: Optional[int] = None
@dataclass
class PyMaterial:
    """Base class for parsed material data; subclassed per shader kind."""

    name: str = ""
    shader_name: str = ""
class PyMaterialGltf(PyMaterial):
    """Standard glTF PBR material (used when no known VRM shader applies)."""

    def __init__(self) -> None:
        super().__init__()

        self.base_color: List[float] = [1, 1, 1, 1]  # RGBA factor
        self.metallic_factor: float = 1
        self.roughness_factor: float = 1
        self.emissive_factor: List[float] = [0, 0, 0]

        # Texture slots: index into the glTF textures array + UV set index.
        self.color_texture_index: Optional[int] = None
        self.color_texcoord_index: Optional[int] = None
        self.metallic_roughness_texture_index: Optional[int] = None
        self.metallic_roughness_texture_texcoord: Optional[int] = None
        self.normal_texture_index: Optional[int] = None
        self.normal_texture_texcoord_index: Optional[int] = None
        self.emissive_texture_index: Optional[int] = None
        self.emissive_texture_texcoord_index: Optional[int] = None
        self.occlusion_texture_index: Optional[int] = None
        self.occlusion_texture_texcoord_index: Optional[int] = None
        self.alphaCutoff: Optional[float] = None  # only set for alphaMode MASK

        self.double_sided = False
        self.alpha_mode = "OPAQUE"
        self.shadeless = 0  # 0 is shaded, 1 is shadeless (KHR_materials_unlit)
        self.vrm_addon_for_blender_legacy_gltf_material = False
class PyMaterialTransparentZWrite(PyMaterial):
    """VRM/UnlitTransparentZWrite shader material.

    Property dictionaries are pre-seeded with every known key (value None)
    from vrm_types.MaterialTransparentZWrite.
    """

    def __init__(self) -> None:
        super().__init__()
        self.float_props_dic: Dict[str, Optional[float]] = {
            prop: None for prop in vrm_types.MaterialTransparentZWrite.float_props
        }
        self.vector_props_dic: Dict[str, Optional[List[float]]] = {
            prop: None for prop in vrm_types.MaterialTransparentZWrite.vector_props
        }
        self.texture_index_dic: Dict[str, Optional[int]] = {
            tex: None for tex in vrm_types.MaterialTransparentZWrite.texture_index_list
        }
class PyMaterialMtoon(PyMaterial):
    """VRM/MToon shader material.

    Property dictionaries are pre-seeded with every known MToon key
    (value None/False) from vrm_types.MaterialMtoon and later updated from
    the VRM extension data.
    """

    def __init__(self) -> None:
        super().__init__()
        self.float_props_dic: Dict[str, Optional[float]] = {
            prop: None for prop in vrm_types.MaterialMtoon.float_props_exchange_dic
        }
        self.vector_props_dic: Dict[str, Optional[Sequence[float]]] = {
            prop: None for prop in vrm_types.MaterialMtoon.vector_props_exchange_dic
        }
        self.texture_index_dic: Dict[str, Optional[int]] = {
            prop: None for prop in vrm_types.MaterialMtoon.texture_kind_exchange_dic
        }
        self.keyword_dic: Dict[str, bool] = {
            kw: False for kw in vrm_types.MaterialMtoon.keyword_list
        }
        self.tag_dic: Dict[str, Optional[str]] = {
            tag: None for tag in vrm_types.MaterialMtoon.tagmap_list
        }
@dataclass
class ImageProps:
    """Location and type of one texture image extracted from the glb."""

    name: str
    filepath: str  # absolute path of the written image file
    filetype: str  # extension derived from the mimeType, e.g. "png"
@dataclass
class PyModel:
    """Parsed .vrm file: raw JSON plus the intermediate structures built
    from it during __post_init__."""

    filepath: str
    extract_textures_into_folder: bool
    make_new_texture_folder: bool
    license_check: bool
    legacy_importer: bool
    decoded_binary: List[Any] = field(default_factory=list)
    image_properties: List[ImageProps] = field(default_factory=list)
    meshes: List[List[PyMesh]] = field(default_factory=list)
    materials: List[PyMaterial] = field(default_factory=list)
    nodes_dict: Dict[int, PyNode] = field(default_factory=dict)
    origin_nodes_dict: Dict[int, List[Any]] = field(default_factory=dict)
    skins_joints_list: List[List[int]] = field(default_factory=list)
    skins_root_node_list: List[int] = field(default_factory=list)

    def __post_init__(self) -> None:
        # A glb normally contains only one binary data chunk.
        with open(self.filepath, "rb") as f:
            json_dict, body_binary = parse_glb(f.read())
            self.json = json_dict

        # Per the glTF spec, a required KHR_DRACO_MESH_COMPRESSION extension
        # must abort loading when unsupported - the data would be broken anyway.
        if (
            "extensionsRequired" in self.json
            and "KHR_DRACO_MESH_COMPRESSION" in self.json["extensionsRequired"]
        ):
            raise Exception(
                "This VRM uses Draco compression. Unable to decompress. Draco圧縮されたVRMは未対応です"
            )

        if self.license_check:
            validate_license(self)

        if self.legacy_importer:
            texture_rip(
                self,
                body_binary,
                self.extract_textures_into_folder,
                self.make_new_texture_folder,
            )
            self.decoded_binary = decode_bin(self.json, body_binary)
            mesh_read(self)
            material_read(self)
            skin_read(self)
            node_read(self)
        else:
            # The non-legacy path only needs materials; Blender's own glTF
            # importer handles the rest.
            material_read(self)
class LicenseConfirmationRequiredProp:
    """One license issue that the user must explicitly confirm."""

    def __init__(
        self,
        url: Optional[str],
        json_key: Optional[str],
        message_en: str,
        message_ja: str,
    ) -> None:
        self.url = url
        self.json_key = json_key
        # Pick the message matching the current UI language.
        self.message = lang.support(message_en, message_ja)

    def description(self) -> str:
        """Render this confirmation as a multi-line text block."""
        lines = (
            "class=LicenseConfirmationRequired",
            f"url={self.url}",
            f"json_key={self.json_key}",
            f"message={self.message}",
        )
        return "\n".join(lines) + "\n"
class LicenseConfirmationRequired(Exception):
    """Raised when the model's license requires explicit user confirmation."""

    def __init__(self, props: List[LicenseConfirmationRequiredProp]) -> None:
        self.props = props
        super().__init__(self.description())

    def description(self) -> str:
        """Concatenate the descriptions of all pending confirmations."""
        return "\n".join(prop.description() for prop in self.props)

    def license_confirmations(self) -> List[Dict[str, str]]:
        """Serialize each confirmation into a flat dict for UI consumption."""
        confirmations = []
        for index, prop in enumerate(self.props):
            confirmations.append(
                {
                    "name": "LicenseConfirmation" + str(index),
                    "url": prop.url or "",
                    "json_key": prop.json_key or "",
                    "message": prop.message or "",
                }
            )
        return confirmations
def parse_glb(data: bytes) -> Tuple[Dict[str, Any], bytes]:
    """Split raw glb bytes into (JSON dict, binary chunk).

    Validates the "glTF" magic and version 2 header, then walks the chunk
    list. Exactly one JSON chunk and at most one BIN chunk are accepted.
    Raises Exception on any malformed or unsupported input.
    """
    reader = BinaryReader(data)
    magic = reader.read_str(4)
    if magic != "glTF":
        raise Exception("glTF header signature not found: #{}".format(magic))
    version = reader.read_as_data_type(GlConstants.UNSIGNED_INT)
    if version != 2:
        raise Exception(
            "version #{} found. This plugin only supports version 2".format(version)
        )
    size = reader.read_as_data_type(GlConstants.UNSIGNED_INT)
    size -= 12  # 12-byte header (magic + version + total length) already consumed
    json_str: Optional[str] = None
    body: Optional[bytes] = None
    while size > 0:
        # Having seen both chunk kinds already means a third chunk follows.
        if json_str is not None and body is not None:
            raise Exception(
                "This VRM has multiple chunks, this plugin reads one chunk only."
            )
        chunk_size = reader.read_unsigned_int()
        size -= 4
        chunk_type = reader.read_str(4)
        size -= 4
        chunk_data = reader.read_binary(chunk_size)
        size -= chunk_size
        if chunk_type == "BIN\x00":
            body = chunk_data
            continue
        if chunk_type == "JSON":
            # Decode manually (originally needed for old Blender Python versions).
            json_str = chunk_data.decode("utf-8")
            continue
        raise Exception("unknown chunk_type: {}".format(chunk_type))
    if not json_str:
        raise Exception("failed to read json chunk")
    # OrderedDict keeps the original key order for byte-faithful round trips.
    json_obj = json.loads(json_str, object_pairs_hook=OrderedDict)
    if not isinstance(json_obj, dict):
        raise Exception("VRM has invalid json: " + str(json_obj))
    return json_obj, body if body else bytes()
def create_py_bone(node: Dict[str, Any]) -> PyNode:
    """Build a PyNode from a raw glTF node dictionary, applying the glTF
    defaults for missing translation/rotation/scale."""
    py_node = PyNode(
        name=node.get("name", "tmp"),
        position=node.get("translation", [0, 0, 0]),
        rotation=node.get("rotation", (0, 0, 0, 1)),
        scale=node.get("scale", (1, 1, 1)),
    )
    children = node.get("children")
    if children is None:
        py_node.children = None
    elif isinstance(children, int):
        # A single child may be serialized as a bare integer.
        py_node.children = [children]
    else:
        py_node.children = children
    if "mesh" in node:
        py_node.mesh_id = node["mesh"]
    if "skin" in node:
        py_node.skin_id = node["skin"]
    return py_node
def create_py_material(
    mat: Dict[str, Any], ext_mat: Dict[str, Any]
) -> Optional[PyMaterial]:
    """Build a PyMaterial subclass from a glTF material and its VRM extension.

    mat is the plain glTF material dict; ext_mat is the matching entry of
    the VRM "materialProperties" extension. Returns a PyMaterialGltf for
    standard/unknown shaders, PyMaterialMtoon / PyMaterialTransparentZWrite
    for the two known VRM shaders, and None otherwise.
    """
    shader = ext_mat.get("shader")

    # standard, or VRM unsupported shader (not saved)
    if shader not in ["VRM/MToon", "VRM/UnlitTransparentZWrite"]:
        gltf = PyMaterialGltf()
        gltf.name = mat.get("name", "")
        gltf.shader_name = "gltf"
        if "pbrMetallicRoughness" in mat:
            pbrmat = mat["pbrMetallicRoughness"]
            if "baseColorTexture" in pbrmat and isinstance(
                pbrmat["baseColorTexture"], dict
            ):
                texture_index = pbrmat["baseColorTexture"].get("index")
                gltf.color_texture_index = texture_index
                gltf.color_texcoord_index = pbrmat["baseColorTexture"].get("texCoord")
            if "baseColorFactor" in pbrmat:
                gltf.base_color = pbrmat["baseColorFactor"]
            if "metallicFactor" in pbrmat:
                gltf.metallic_factor = pbrmat["metallicFactor"]
            if "roughnessFactor" in pbrmat:
                gltf.roughness_factor = pbrmat["roughnessFactor"]
            if "metallicRoughnessTexture" in pbrmat and isinstance(
                pbrmat["metallicRoughnessTexture"], dict
            ):
                texture_index = pbrmat["metallicRoughnessTexture"].get("index")
                gltf.metallic_roughness_texture_index = texture_index
                gltf.metallic_roughness_texture_texcoord = pbrmat[
                    "metallicRoughnessTexture"
                ].get("texCoord")

        if "normalTexture" in mat and isinstance(mat["normalTexture"], dict):
            gltf.normal_texture_index = mat["normalTexture"].get("index")
            gltf.normal_texture_texcoord_index = mat["normalTexture"].get("texCoord")
        if "emissiveTexture" in mat and isinstance(mat["emissiveTexture"], dict):
            gltf.emissive_texture_index = mat["emissiveTexture"].get("index")
            gltf.emissive_texture_texcoord_index = mat["emissiveTexture"].get(
                "texCoord"
            )
        if "occlusionTexture" in mat and isinstance(mat["occlusionTexture"], dict):
            gltf.occlusion_texture_index = mat["occlusionTexture"].get("index")
            gltf.occlusion_texture_texcoord_index = mat["occlusionTexture"].get(
                "texCoord"
            )
        if "emissiveFactor" in mat:
            gltf.emissive_factor = mat["emissiveFactor"]

        if "doubleSided" in mat:
            gltf.double_sided = mat["doubleSided"]

        if "alphaMode" in mat:
            if mat["alphaMode"] == "MASK":
                gltf.alpha_mode = "MASK"
                # BUGFIX: use a presence check via .get's default so an
                # explicit alphaCutoff of 0.0 is kept; the old truthiness
                # test silently replaced 0.0 with the glTF default of 0.5.
                gltf.alphaCutoff = mat.get("alphaCutoff", 0.5)
            elif mat["alphaMode"] == "BLEND":
                gltf.alpha_mode = "Z_TRANSPARENCY"
            elif mat["alphaMode"] == "OPAQUE":
                gltf.alpha_mode = "OPAQUE"

        if "extensions" in mat and "KHR_materials_unlit" in mat["extensions"]:
            gltf.shadeless = 1  # 0 is shaded, 1 is shadeless

        if isinstance(ext_mat.get("extras"), dict) and isinstance(
            ext_mat["extras"].get("VRM_Addon_for_Blender_legacy_gltf_material"), dict
        ):
            gltf.vrm_addon_for_blender_legacy_gltf_material = True
        return gltf

    # MToon or Transparent_Zwrite
    if shader == "VRM/MToon":
        mtoon = PyMaterialMtoon()
        mtoon.name = ext_mat.get("name", "")
        mtoon.shader_name = ext_mat.get("shader", "")
        # region check that no unknown props exist
        subset = {
            "float": ext_mat.get("floatProperties", {}).keys()
            - mtoon.float_props_dic.keys(),
            "vector": ext_mat.get("vectorProperties", {}).keys()
            - mtoon.vector_props_dic.keys(),
            "texture": ext_mat.get("textureProperties", {}).keys()
            - mtoon.texture_index_dic.keys(),
            "keyword": ext_mat.get("keywordMap", {}).keys() - mtoon.keyword_dic.keys(),
        }
        for k, _subset in subset.items():
            if _subset:
                print(
                    "unknown {} properties {} in {}".format(
                        k, _subset, ext_mat.get("name")
                    )
                )
        # endregion check that no unknown props exist

        mtoon.float_props_dic.update(ext_mat.get("floatProperties", {}))
        mtoon.vector_props_dic.update(ext_mat.get("vectorProperties", {}))
        mtoon.texture_index_dic.update(ext_mat.get("textureProperties", {}))
        mtoon.keyword_dic.update(ext_mat.get("keywordMap", {}))
        mtoon.tag_dic.update(ext_mat.get("tagMap", {}))
        return mtoon

    if shader == "VRM/UnlitTransparentZWrite":
        transparent_z_write = PyMaterialTransparentZWrite()
        transparent_z_write.name = ext_mat.get("name", "")
        transparent_z_write.shader_name = ext_mat.get("shader", "")
        transparent_z_write.float_props_dic = ext_mat.get("floatProperties", {})
        transparent_z_write.vector_props_dic = ext_mat.get("vectorProperties", {})
        transparent_z_write.texture_index_dic = ext_mat.get("textureProperties", {})
        return transparent_z_write

    # Should be unreachable given the branches above; kept defensively.
    # BUGFIX: use .get so a malformed entry cannot raise KeyError while
    # reporting the unknown shader.
    print(
        f"Unknown(or legacy) shader :material {ext_mat.get('name')} is {ext_mat.get('shader')}"
    )
    return None
def validate_license_url(
    url_str: str, json_key: str, props: List[LicenseConfirmationRequiredProp]
) -> None:
    """Inspect a license URL; append a confirmation unless a known validator
    recognizes the URL as unproblematic."""
    if not url_str:
        return
    parsed = None
    with contextlib.suppress(ValueError):
        parsed = urlparse(url_str)
    if parsed is not None:
        query = dict(parse_qsl(parsed.query))
        recognized = validate_vroid_hub_license_url(
            parsed, query, json_key, props
        ) or validate_uni_virtual_license_url(parsed, query, json_key, props)
        if recognized:
            return
    # Unknown license URL: always ask the user to review it.
    props.append(
        LicenseConfirmationRequiredProp(
            url_str,
            json_key,
            "Is this VRM allowed to edited? Please check its copyright license.",
            "独自のライセンスが記載されています。",
        )
    )
def validate_vroid_hub_license_url(
    url: ParseResult,
    query_dict: Dict[str, str],
    json_key: str,
    props: List[LicenseConfirmationRequiredProp],
) -> bool:
    """Recognize a VRoid Hub license URL; queue a confirmation when the
    license forbids alterations. Returns True when the URL was recognized.

    Example: https://hub.vroid.com/en/license?allowed_to_use_user=everyone&characterization_allowed_user=everyone&corporate_commercial_use=allow&credit=unnecessary&modification=allow&personal_commercial_use=profit&redistribution=allow&sexual_expression=allow&version=1&violent_expression=allow
    """
    if url.hostname != "hub.vroid.com":
        return False
    if not url.path.endswith("/license"):
        return False
    forbids_modification = query_dict.get("modification") == "disallow"
    if forbids_modification:
        props.append(
            LicenseConfirmationRequiredProp(
                url.geturl(),
                json_key,
                'This VRM is licensed by VRoid Hub License "Alterations: No".',
                "このVRMにはVRoid Hubの「改変: NG」ライセンスが設定されています。",
            )
        )
    return True
def validate_uni_virtual_license_url(
    url: ParseResult,
    query_dict: Dict[str, str],
    json_key: str,
    props: List[LicenseConfirmationRequiredProp],
) -> bool:
    """Recognize a UV License URL; queue a confirmation when the license
    carries remarks. Returns True when the URL was recognized.

    Example: https://uv-license.com/en/license?utf8=%E2%9C%93&pcu=true
    """
    if url.hostname != "uv-license.com":
        return False
    if not url.path.endswith("/license"):
        return False
    has_remarks = query_dict.get("remarks") == "true"
    if has_remarks:
        props.append(
            LicenseConfirmationRequiredProp(
                url.geturl(),
                json_key,
                'This VRM is licensed by UV License with "Remarks".',
                "このVRMには特記事項(Remarks)付きのUVライセンスが設定されています。",
            )
        )
    return True
def validate_license(py_model: PyModel) -> None:
    """Raise LicenseConfirmationRequired when the model's license terms
    require explicit user confirmation before editing."""
    confirmations: List[LicenseConfirmationRequiredProp] = []

    # Reject known no-derivatives licenses, e.g. Creative Commons *-ND.
    license_name = str(
        deep.get(py_model.json, ["extensions", "VRM", "meta", "licenseName"], "")
    )
    if re.match("CC(.*)ND(.*)", license_name):
        confirmations.append(
            LicenseConfirmationRequiredProp(
                None,
                None,
                # BUGFIX: the English message was a plain string, so the
                # {license_name} placeholder was never interpolated.
                f'The VRM is licensed by "{license_name}".\nNo derivative works are allowed.',
                f"指定されたVRMは改変不可ライセンス「{license_name}」が設定されています。\n改変することはできません。",
            )
        )

    validate_license_url(
        str(
            deep.get(
                py_model.json, ["extensions", "VRM", "meta", "otherPermissionUrl"], ""
            )
        ),
        "otherPermissionUrl",
        confirmations,
    )

    if license_name == "Other":
        other_license_url_str = str(
            deep.get(
                py_model.json, ["extensions", "VRM", "meta", "otherLicenseUrl"], ""
            )
        )
        if not other_license_url_str:
            confirmations.append(
                LicenseConfirmationRequiredProp(
                    None,
                    None,
                    'The VRM selects "Other" license but no license url is found.',
                    "このVRMには「Other」ライセンスが指定されていますが、URLが設定されていません。",
                )
            )
        else:
            validate_license_url(
                other_license_url_str, "otherLicenseUrl", confirmations
            )

    if confirmations:
        raise LicenseConfirmationRequired(confirmations)
# Characters that must not appear in a file name: all ASCII control characters
# (0x00-0x1F, including tab/newline/CR) plus the characters reserved on
# Windows/POSIX file systems. Space (0x20) and '!' (0x21) remain allowed.
_UNSAFE_FILENAME_CHARS = "".join(chr(code) for code in range(0x20)) + '"*/:<>?\\|'
_UNSAFE_FILENAME_TABLE = str.maketrans(
    _UNSAFE_FILENAME_CHARS, "_" * len(_UNSAFE_FILENAME_CHARS)
)


def remove_unsafe_path_chars(filename: str) -> str:
    """Return ``filename`` with every unsafe path character replaced by '_'.

    The original implementation built the same translation table from a
    verbose codepoint->character dict on every call; the table is now a
    module-level constant built with str.maketrans.
    """
    return filename.translate(_UNSAFE_FILENAME_TABLE)
def texture_rip(
    py_model: PyModel,
    body_binary: bytes,
    extract_textures_into_folder: bool,
    make_new_texture_folder: bool,
) -> None:
    """Extract every embedded image from the glb binary chunk to disk and
    record each one in py_model.image_properties.

    Images go next to the .vrm file (in "<file>.textures[.N]") when
    extract_textures_into_folder is set, otherwise to a temp directory.
    """
    buffer_views = py_model.json["bufferViews"]
    binary_reader = BinaryReader(body_binary)
    if "images" not in py_model.json:
        return
    if extract_textures_into_folder:
        dir_path = os.path.abspath(py_model.filepath) + ".textures"
        if make_new_texture_folder:
            # Find the first unused ".N" suffix so earlier extractions survive.
            for i in range(100001):
                checking_dir_path = dir_path if i == 0 else f"{dir_path}.{i}"
                if not os.path.exists(checking_dir_path):
                    os.mkdir(checking_dir_path)
                    dir_path = checking_dir_path
                    break
    else:
        dir_path = tempfile.mkdtemp()  # TODO: cleanup
    for image_id, image_prop in enumerate(py_model.json["images"]):
        if "extra" in image_prop:
            image_name = image_prop["extra"]["name"]
        else:
            image_name = image_prop["name"]
        binary_reader.set_pos(buffer_views[image_prop["bufferView"]]["byteOffset"])
        image_binary = binary_reader.read_binary(
            buffer_views[image_prop["bufferView"]]["byteLength"]
        )
        # e.g. "image/png" -> "png"; used as the file extension.
        image_type = image_prop["mimeType"].split("/")[-1]
        if image_name == "":
            image_name = "texture_" + str(image_id)
            print("no name image is named {}".format(image_name))
        elif len(image_name) >= 50:
            print(
                "too long name image: {} is named {}".format(
                    image_name, "tex_2longname_" + str(image_id)
                )
            )
            image_name = "tex_2longname_" + str(image_id)
        image_name = remove_unsafe_path_chars(image_name)
        image_path = os.path.join(dir_path, image_name)
        if os.path.splitext(image_name)[1].lower() != ("." + image_type).lower():
            image_path += "." + image_type
        if not os.path.exists(image_path):  # as a rule, never overwrite an existing image of the same name
            with open(image_path, "wb") as image_writer:
                image_writer.write(image_binary)
        elif image_name in [
            img.name for img in py_model.image_properties
        ]:  # but when the clash is with an image written for THIS VRM, retry under a numbered name
            written_flag = False
            for i in range(100000):
                second_image_name = image_name + "_" + str(i)
                image_path = os.path.join(
                    dir_path, second_image_name + "." + image_type
                )
                if not os.path.exists(image_path):
                    with open(image_path, "wb") as image_writer:
                        image_writer.write(image_binary)
                    image_name = second_image_name
                    written_flag = True
                    break
            if not written_flag:
                print(
                    "There are more than 100000 images with the same name in the folder."
                    + f" Failed to write file: {image_name}"
                )
        else:
            print(image_name + " Image already exists. Was not overwritten.")
        image_property = ImageProps(image_name, image_path, image_type)
        py_model.image_properties.append(image_property)
# "accessorの順に" データを読み込んでリストにしたものを返す
def decode_bin(json_data: Dict[str, Any], binary: bytes) -> List[Any]:
br = BinaryReader(binary)
# This list indexed by accessor index
decoded_binary: List[Any] = []
buffer_views = json_data["bufferViews"]
accessors = json_data["accessors"]
type_num_dict = {"SCALAR": 1, "VEC2": 2, "VEC3": 3, "VEC4": 4, "MAT4": 16}
for accessor_index, accessor in enumerate(accessors):
type_num = type_num_dict[accessor["type"]]
if "bufferView" not in accessor:
print(
f"WARNING: accessors[{accessor_index}] doesn't have bufferView that is not implemented yet"
)
decoded_binary.append([])
continue
br.set_pos(buffer_views[accessor["bufferView"]]["byteOffset"])
data_list = []
for _ in range(accessor["count"]):
if type_num == 1:
data = br.read_as_data_type(accessor["componentType"])
else:
data = [] # type: ignore[assignment]
for _ in range(type_num):
data.append(br.read_as_data_type(accessor["componentType"])) # type: ignore[union-attr]
data_list.append(data)
decoded_binary.append(data_list)
return decoded_binary
def mesh_read(py_model: PyModel) -> None:
    """Parse glTF meshes into PyMesh primitives on py_model.meshes."""
    # Parse the meshes
    for n, mesh in enumerate(py_model.json.get("meshes", [])):
        primitives = []
        for j, primitive in enumerate(mesh.get("primitives", [])):
            vrm_mesh = PyMesh(object_id=n)
            if j == 0:  # keep the plain name for mesh-annotation matching
                vrm_mesh.name = mesh["name"]
            else:
                vrm_mesh.name = mesh["name"] + str(j)
            # region vertex indices
            if primitive.get("mode", 4) != GlConstants.TRIANGLES:
                # TODO: support other primitive modes
                raise Exception(
                    "Unsupported polygon type(:{}) Exception".format(primitive["mode"])
                )
            scalar_face_indices = py_model.decoded_binary[primitive["indices"]]
            while len(scalar_face_indices) % 3 != 0:
                print(f"meshes[{n}]primitives[{j}] length is not a multiple of 3")
                scalar_face_indices.append(0)
            # Regroup the flat list into triples (TRIANGLES mode is assumed).
            vrm_mesh.face_indices = [
                scalar_face_indices[x : x + 3]
                for x in range(0, len(scalar_face_indices), 3)
            ]
            # endregion vertex indices
            # Vertex attributes follow. Depending on the exporter some may be
            # absent (e.g. JOINTS_0/WEIGHTS_0), and UV/vertex-color sets can be
            # numbered 0..N (skinning is limited to one set of four bones).
            vertex_attributes = primitive.get("attributes", {})
            for attr in vertex_attributes.keys():
                vrm_mesh.__setattr__(
                    attr, py_model.decoded_binary[vertex_attributes[attr]]
                )
            # region TEXCOORD_FIX [old UniVRM bug: uv.y = -uv.y -> repair uv.y = 1 - ( -uv.y ) => uv.y=1+uv.y]
            legacy_uv_flag = False  # set for files written by old UniGLTF exporters
            gen = str(deep.get(py_model.json, ["assets", "generator"], ""))
            if re.match("UniGLTF", gen):
                with contextlib.suppress(ValueError):
                    if float("".join(gen[-4:])) < 1.16:
                        legacy_uv_flag = True
            uv_count = 0
            while True:
                texcoord_name = "TEXCOORD_{}".format(uv_count)
                if hasattr(vrm_mesh, texcoord_name):
                    texcoord = getattr(vrm_mesh, texcoord_name)
                    if legacy_uv_flag:
                        for uv in texcoord:
                            uv[1] = 1 + uv[1]
                    uv_count += 1
                else:
                    break
            # UVs are vertically flipped relative to Blender; that is corrected
            # later when writing into Blender.
            # endregion TEXCOORD_FIX
            # Record which material this mesh uses.
            vrm_mesh.material_index = primitive["material"]
            # Kept so the conversion step can reuse cached accessor data.
            vrm_mesh.POSITION_accessor = primitive.get("attributes", {}).get("POSITION")
            # Morph targets: VRM targets are relative positions; normals ignored.
            if "targets" in primitive:
                morph_target_point_list_and_accessor_index_dict = OrderedDict()
                for i, morph_target in enumerate(primitive["targets"]):
                    pos_array = py_model.decoded_binary[morph_target["POSITION"]]
                    if "extra" in morph_target:  # for old AliciaSolid
                        # The accessor index is kept for conversion-time caching.
                        morph_name = str(primitive["targets"][i]["extra"]["name"])
                    else:
                        morph_name = str(primitive["extras"]["targetNames"][i])
                        # Same as above.
                    morph_target_point_list_and_accessor_index_dict[morph_name] = [
                        pos_array,
                        primitive["targets"][i]["POSITION"],
                    ]
                vrm_mesh.morph_target_point_list_and_accessor_index_dict = (
                    morph_target_point_list_and_accessor_index_dict
                )
            primitives.append(vrm_mesh)
        py_model.meshes.append(primitives)
# Materials
def material_read(py_model: PyModel) -> None:
    """Convert glTF materials plus the VRM materialProperties extension into
    PyMaterial objects on py_model.materials."""
    json_materials = py_model.json.get("materials", [])
    # When the VRM extension is absent, fall back to plain glTF shading.
    fallback = [{"shader": "VRM_USE_GLTFSHADER"}] * len(json_materials)
    vrm_material_properties = deep.get(
        py_model.json,
        ["extensions", "VRM", "materialProperties"],
        default=fallback,
    )
    if not isinstance(vrm_material_properties, list):
        return
    for mat, ext_mat in zip(json_materials, vrm_material_properties):
        parsed = create_py_material(mat, ext_mat)
        if parsed is not None:
            py_model.materials.append(parsed)
# Parse skins. (Reading the binary contents directly is a shortcut for the
# skinning implementation.)
# JOINTS_0 on the vertices indexes into skin["joints"], which holds node indices.
# inverseBindMatrices: simply the inverse matrices used for skinning; not read
# here since they are not needed (they could be recomputed by hand, tediously).
# Note: the translation lives in [3][i] (with negated x, y, z), not [i][3] -
# a pitfall encountered when exporting.
# joints: node indices referenced by JOINTS_0
def skin_read(py_model: PyModel) -> None:
    """Collect joint lists and optional skeleton roots from glTF skins."""
    for skin in py_model.json.get("skins", []):
        py_model.skins_joints_list.append(skin["joints"])
        if "skeleton" in skin:
            py_model.skins_root_node_list.append(skin["skeleton"])
# Parse the nodes (bones); positions are stored relative to the parent.
def node_read(py_model: PyModel) -> None:
    """Fill nodes_dict for every glTF node and origin_nodes_dict for nodes
    that carry a mesh (plus their skin index when present)."""
    for i, node in enumerate(py_model.json["nodes"]):
        py_model.nodes_dict[i] = create_py_bone(node)
        # TODO: origin_bone handling starts here
        if "mesh" in node:
            py_model.origin_nodes_dict[i] = [py_model.nodes_dict[i], node["mesh"]]
            if "skin" in node:
                py_model.origin_nodes_dict[i].append(node["skin"])
            else:
                # BUGFIX: use .get so unnamed nodes cannot raise KeyError, and
                # fix the broken diagnostic message ("...is not have skin").
                print(node.get("name", "") + " does not have a skin")
def create_vrm_dict(data: bytes) -> Dict[str, Any]:
    """Parse raw .vrm bytes into their JSON dict, attaching decoded accessor
    data under the non-standard "~accessors_decoded" key (used by vrm_diff)."""
    vrm_json, binary_chunk = parse_glb(data)
    vrm_json["~accessors_decoded"] = decode_bin(vrm_json, binary_chunk)
    return vrm_json
def vrm_dict_diff(
    left: Any, right: Any, path: str, float_tolerance: float
) -> List[str]:
    """Recursively diff two parsed VRM structures.

    Returns a list of human-readable difference descriptions; an empty list
    means the values are equal (floats compared within float_tolerance).
    Raises Exception for type combinations it does not know how to compare.
    """
    if isinstance(left, list):
        if not isinstance(right, list):
            return [f"{path}: left is list but right is {type(right)}"]
        if len(left) != len(right):
            return [
                f"{path}: left length is {len(left)} but right length is {len(right)}"
            ]
        diffs = []
        for i, _ in enumerate(left):
            diffs.extend(
                vrm_dict_diff(left[i], right[i], f"{path}[{i}]", float_tolerance)
            )
        return diffs

    if isinstance(left, dict):
        if not isinstance(right, dict):
            return [f"{path}: left is dict but right is {type(right)}"]
        diffs = []
        for key in sorted(set(list(left.keys()) + list(right.keys()))):
            if key not in left:
                diffs.append(f"{path}: {key} not in left")
                continue
            if key not in right:
                diffs.append(f"{path}: {key} not in right")
                continue
            diffs.extend(
                vrm_dict_diff(
                    left[key], right[key], f'{path}["{key}"]', float_tolerance
                )
            )
        return diffs

    # bool must be tested before int: isinstance(True, int) is True.
    if isinstance(left, bool):
        if not isinstance(right, bool):
            return [f"{path}: left is bool but right is {type(right)}"]
        if left != right:
            return [f"{path}: left is {left} but right is {right}"]
        return []

    if isinstance(left, str):
        if not isinstance(right, str):
            return [f"{path}: left is str but right is {type(right)}"]
        if left != right:
            return [f'{path}: left is "{left}" but right is "{right}"']
        return []

    # BUGFIX: two matching JSON nulls used to fall through every branch and
    # hit the final raise; treat None == None as equal.
    if left is None and right is None:
        return []
    if left is None and right is not None:
        return [f"{path}: left is None but right is {type(right)}"]

    if isinstance(left, int) and isinstance(right, int):
        if left != right:
            return [f"{path}: left is {left} but right is {right}"]
        return []

    if isinstance(left, (int, float)) and isinstance(right, (int, float)):
        error = math.fabs(float(left) - float(right))
        if error > float_tolerance:
            return [
                f"{path}: left is {float(left):20.17f} but right is {float(right):20.17f}, error={error:19.17f}"
            ]
        return []

    raise Exception(f"{path}: unexpected type left={type(left)} right={type(right)}")
def vrm_diff(before: bytes, after: bytes, float_tolerance: float) -> List[str]:
    """Parse two VRM binaries and report their structural differences."""
    left_dict = create_vrm_dict(before)
    right_dict = create_vrm_dict(after)
    return vrm_dict_diff(left_dict, right_dict, "", float_tolerance)
if __name__ == "__main__":
    # CLI entry point: import the VRM file given as the first command-line
    # argument with the strictest importer options enabled.
    PyModel(
        sys.argv[1],
        extract_textures_into_folder=True,
        make_new_texture_folder=True,
        license_check=True,
        legacy_importer=True,
    )
| 35.982495 | 286 | 0.589972 |
df75012898b5d451a2b416ab0eabbe0b9f6a16e6 | 18,391 | py | Python | test/functional/interface_zmq_ttm.py | mytitanium/Titanium-Core-1.0 | 470e6a0a23de1ea867d693e362d1a0f6ccc12aa7 | [
"MIT"
] | 2 | 2020-12-01T17:15:50.000Z | 2020-12-11T13:29:54.000Z | test/functional/interface_zmq_ttm.py | mytitanium/Titanium-Core-1.0 | 470e6a0a23de1ea867d693e362d1a0f6ccc12aa7 | [
"MIT"
] | 1 | 2020-07-27T10:54:07.000Z | 2020-08-28T05:37:26.000Z | test/functional/interface_zmq_ttm.py | mytitanium/Titanium-Core-1.0 | 470e6a0a23de1ea867d693e362d1a0f6ccc12aa7 | [
"MIT"
] | 2 | 2020-11-09T16:38:04.000Z | 2021-04-02T05:27:36.000Z | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Ttm Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ttm specific ZMQ notification interfaces."""
import configparser
from enum import Enum
import io
import json
import random
import struct
import time
try:
import zmq
finally:
pass
from test_framework.test_framework import (
TtmTestFramework, skip_if_no_bitcoind_zmq, skip_if_no_py3_zmq)
from test_framework.mininode import P2PInterface, network_thread_start
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str
from test_framework.messages import (
CBlock,
CGovernanceObject,
CGovernanceVote,
CInv,
COutPoint,
CRecoveredSig,
CTransaction,
FromHex,
hash256,
msg_clsig,
msg_inv,
msg_islock,
msg_tx,
ser_string,
uint256_from_str,
uint256_to_string
)
class ZMQPublisher(Enum):
    """ZMQ notification topics the node can publish.

    Each member's value is the exact topic string passed to the node via
    ``-zmqpub<topic>=<address>`` and received as the first multipart frame.
    ``hash_*`` topics carry a 32-byte hash; ``raw_*`` topics carry the
    serialized object itself.
    """
    hash_chain_lock = "hashchainlock"
    hash_tx_lock = "hashtxlock"
    hash_governance_vote = "hashgovernancevote"
    hash_governance_object = "hashgovernanceobject"
    hash_instantsend_doublespend = "hashinstantsenddoublespend"
    hash_recovered_sig = "hashrecoveredsig"
    raw_chain_lock = "rawchainlock"
    raw_chain_lock_sig = "rawchainlocksig"
    raw_tx_lock = "rawtxlock"
    raw_tx_lock_sig = "rawtxlocksig"
    raw_governance_vote = "rawgovernancevote"
    raw_governance_object = "rawgovernanceobject"
    raw_instantsend_doublespend = "rawinstantsenddoublespend"
    raw_recovered_sig = "rawrecoveredsig"
class TestP2PConn(P2PInterface):
    """Minimal peer that announces islocks/txes via inv and serves getdata."""

    def __init__(self):
        super().__init__()
        # Objects we are ready to serve, keyed by their uint256 hash.
        self.islocks = {}
        self.txes = {}

    def send_islock(self, islock):
        """Announce *islock* to the node with an inv message (type 30)."""
        obj_hash = uint256_from_str(hash256(islock.serialize()))
        self.islocks[obj_hash] = islock
        self.send_message(msg_inv([CInv(30, obj_hash)]))

    def send_tx(self, tx):
        """Announce *tx* to the node with an inv message (type 30)."""
        obj_hash = uint256_from_str(hash256(tx.serialize()))
        self.txes[obj_hash] = tx
        self.send_message(msg_inv([CInv(30, obj_hash)]))

    def on_getdata(self, message):
        """Serve any previously announced object the node requests."""
        for inv in message.inv:
            islock = self.islocks.get(inv.hash)
            if islock is not None:
                self.send_message(islock)
            tx = self.txes.get(inv.hash)
            if tx is not None:
                self.send_message(tx)
class TtmZMQTest (TtmTestFramework):
    """Functional test of the ttm-specific ZMQ notification publishers.

    node0 is started with every publisher from ZMQPublisher enabled; the test
    subscribes with a single SUB socket and validates each notification
    against the corresponding RPC result.
    """
    def set_test_params(self):
        """Configure a 4-node network; only node0 publishes ZMQ messages."""
        # That's where the zmq publisher will listen for subscriber
        self.address = "tcp://127.0.0.1:28333"
        # node0 creates all available ZMQ publisher
        node0_extra_args = ["-zmqpub%s=%s" % (pub.value, self.address) for pub in ZMQPublisher]
        node0_extra_args.append("-whitelist=127.0.0.1")
        self.set_ttm_test_params(4, 3, fast_dip3_enforcement=True, extra_args=[node0_extra_args, [], [], []])
    def run_test(self):
        """Set up the subscriber socket and a quorum, then run all sub-tests."""
        # Check that ttmd has been built with ZMQ enabled.
        config = configparser.ConfigParser()
        config.read_file(open(self.options.configfile))
        skip_if_no_py3_zmq()
        skip_if_no_bitcoind_zmq(self)
        try:
            # Setup the ZMQ subscriber socket
            self.zmq_context = zmq.Context()
            self.socket = self.zmq_context.socket(zmq.SUB)
            self.socket.connect(self.address)
            # Initialize the network
            self.activate_dip8()
            self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
            self.wait_for_sporks_same()
            # Create an LLMQ for testing
            self.quorum_type = 100 # llmq_test
            self.quorum_hash = self.mine_quorum()
            self.sync_blocks()
            self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
            # Wait a moment to avoid subscribing to recovered sig in the test before the one from the chainlock
            # has been sent which leads to test failure.
            time.sleep(1)
            # Test all ttm related ZMQ publisher
            self.test_recovered_signature_publishers()
            self.test_chainlock_publishers()
            self.test_instantsend_publishers()
            self.test_governance_publishers()
        finally:
            # Destroy the ZMQ context.
            self.log.debug("Destroying ZMQ context")
            self.zmq_context.destroy(linger=None)
    def subscribe(self, publishers):
        """Subscribe the SUB socket to each topic in *publishers*."""
        # Subscribe to a list of ZMQPublishers
        for pub in publishers:
            self.socket.subscribe(pub.value)
    def unsubscribe(self, publishers):
        """Unsubscribe the SUB socket from each topic in *publishers*."""
        # Unsubscribe from a list of ZMQPublishers
        for pub in publishers:
            self.socket.unsubscribe(pub.value)
    def receive(self, publisher, flags=0):
        """Receive one message and return its body as a BytesIO stream.

        Asserts the topic frame matches *publisher*; *flags* is forwarded to
        recv_multipart (e.g. zmq.NOBLOCK).
        """
        # Receive a ZMQ message and validate it's sent from the correct ZMQPublisher
        topic, body, seq = self.socket.recv_multipart(flags)
        # Topic should match the publisher value
        assert_equal(topic.decode(), publisher.value)
        return io.BytesIO(body)
    def test_recovered_signature_publishers(self):
        """Validate hashrecoveredsig/rawrecoveredsig for chainlock and ad-hoc signing sessions."""
        def validate_recovered_sig(request_id, msg_hash):
            # Make sure the recovered sig exists by RPC
            rpc_recovered_sig = self.get_recovered_sig(request_id, msg_hash)
            # Validate hashrecoveredsig
            zmq_recovered_sig_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_recovered_sig).read(32))
            assert_equal(zmq_recovered_sig_hash, msg_hash)
            # Validate rawrecoveredsig
            zmq_recovered_sig_raw = CRecoveredSig()
            zmq_recovered_sig_raw.deserialize(self.receive(ZMQPublisher.raw_recovered_sig))
            assert_equal(zmq_recovered_sig_raw.llmqType, rpc_recovered_sig['llmqType'])
            assert_equal(uint256_to_string(zmq_recovered_sig_raw.quorumHash), rpc_recovered_sig['quorumHash'])
            assert_equal(uint256_to_string(zmq_recovered_sig_raw.id), rpc_recovered_sig['id'])
            assert_equal(uint256_to_string(zmq_recovered_sig_raw.msgHash), rpc_recovered_sig['msgHash'])
            assert_equal(bytes_to_hex_str(zmq_recovered_sig_raw.sig), rpc_recovered_sig['sig'])
        recovered_sig_publishers = [
            ZMQPublisher.hash_recovered_sig,
            ZMQPublisher.raw_recovered_sig
        ]
        self.log.info("Testing %d recovered signature publishers" % len(recovered_sig_publishers))
        # Subscribe to recovered signature messages
        self.subscribe(recovered_sig_publishers)
        # Generate a ChainLock and make sure this leads to valid recovered sig ZMQ messages
        rpc_last_block_hash = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(rpc_last_block_hash)
        height = self.nodes[0].getblockcount()
        rpc_request_id = hash256(ser_string(b"clsig") + struct.pack("<I", height))[::-1].hex()
        validate_recovered_sig(rpc_request_id, rpc_last_block_hash)
        # Sign an arbitrary and make sure this leads to valid recovered sig ZMQ messages
        sign_id = uint256_to_string(random.getrandbits(256))
        sign_msg_hash = uint256_to_string(random.getrandbits(256))
        for mn in self.get_quorum_masternodes(self.quorum_hash):
            mn.node.quorum("sign", self.quorum_type, sign_id, sign_msg_hash)
        validate_recovered_sig(sign_id, sign_msg_hash)
        # Unsubscribe from recovered signature messages
        self.unsubscribe(recovered_sig_publishers)
    def test_chainlock_publishers(self):
        """Validate hashchainlock/rawchainlock/rawchainlocksig against getbestchainlock RPC."""
        chain_lock_publishers = [
            ZMQPublisher.hash_chain_lock,
            ZMQPublisher.raw_chain_lock,
            ZMQPublisher.raw_chain_lock_sig
        ]
        self.log.info("Testing %d ChainLock publishers" % len(chain_lock_publishers))
        # Subscribe to ChainLock messages
        self.subscribe(chain_lock_publishers)
        # Generate ChainLock
        generated_hash = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(generated_hash)
        rpc_best_chain_lock = self.nodes[0].getbestchainlock()
        rpc_best_chain_lock_hash = rpc_best_chain_lock["blockhash"]
        rpc_best_chain_lock_sig = rpc_best_chain_lock["signature"]
        assert_equal(generated_hash, rpc_best_chain_lock_hash)
        rpc_chain_locked_block = self.nodes[0].getblock(rpc_best_chain_lock_hash)
        rpc_chain_lock_height = rpc_chain_locked_block["height"]
        rpc_chain_lock_hash = rpc_chain_locked_block["hash"]
        assert_equal(generated_hash, rpc_chain_lock_hash)
        # Validate hashchainlock
        zmq_chain_lock_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_chain_lock).read(32))
        assert_equal(zmq_chain_lock_hash, rpc_best_chain_lock_hash)
        # Validate rawchainlock
        zmq_chain_locked_block = CBlock()
        zmq_chain_locked_block.deserialize(self.receive(ZMQPublisher.raw_chain_lock))
        assert(zmq_chain_locked_block.is_valid())
        assert_equal(zmq_chain_locked_block.hash, rpc_chain_lock_hash)
        # Validate rawchainlocksig
        # The stream contains the block first, then the clsig message.
        zmq_chain_lock_sig_stream = self.receive(ZMQPublisher.raw_chain_lock_sig)
        zmq_chain_locked_block = CBlock()
        zmq_chain_locked_block.deserialize(zmq_chain_lock_sig_stream)
        assert(zmq_chain_locked_block.is_valid())
        zmq_chain_lock = msg_clsig()
        zmq_chain_lock.deserialize(zmq_chain_lock_sig_stream)
        assert_equal(zmq_chain_lock.height, rpc_chain_lock_height)
        assert_equal(uint256_to_string(zmq_chain_lock.blockHash), rpc_chain_lock_hash)
        assert_equal(zmq_chain_locked_block.hash, rpc_chain_lock_hash)
        assert_equal(bytes_to_hex_str(zmq_chain_lock.sig), rpc_best_chain_lock_sig)
        # Unsubscribe from ChainLock messages
        self.unsubscribe(chain_lock_publishers)
    def test_instantsend_publishers(self):
        """Validate tx-lock and double-spend publishers, including the no-tx case."""
        instantsend_publishers = [
            ZMQPublisher.hash_tx_lock,
            ZMQPublisher.raw_tx_lock,
            ZMQPublisher.raw_tx_lock_sig,
            ZMQPublisher.hash_instantsend_doublespend,
            ZMQPublisher.raw_instantsend_doublespend
        ]
        self.log.info("Testing %d InstantSend publishers" % len(instantsend_publishers))
        # Subscribe to InstantSend messages
        self.subscribe(instantsend_publishers)
        # Initialize test node
        self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
        network_thread_start()
        self.nodes[0].p2p.wait_for_verack()
        # Make sure all nodes agree
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
        # Create two raw TXs, they will conflict with each other
        rpc_raw_tx_1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)
        rpc_raw_tx_2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)
        # Send the first transaction and wait for the InstantLock
        rpc_raw_tx_1_hash = self.nodes[0].sendrawtransaction(rpc_raw_tx_1['hex'])
        self.wait_for_instantlock(rpc_raw_tx_1_hash, self.nodes[0])
        # Validate hashtxlock
        zmq_tx_lock_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_tx_lock).read(32))
        assert_equal(zmq_tx_lock_hash, rpc_raw_tx_1['txid'])
        # Validate rawtxlock
        zmq_tx_lock_raw = CTransaction()
        zmq_tx_lock_raw.deserialize(self.receive(ZMQPublisher.raw_tx_lock))
        assert(zmq_tx_lock_raw.is_valid())
        assert_equal(zmq_tx_lock_raw.hash, rpc_raw_tx_1['txid'])
        # Validate rawtxlocksig
        # The stream contains the transaction first, then the islock message.
        zmq_tx_lock_sig_stream = self.receive(ZMQPublisher.raw_tx_lock_sig)
        zmq_tx_lock_tx = CTransaction()
        zmq_tx_lock_tx.deserialize(zmq_tx_lock_sig_stream)
        assert(zmq_tx_lock_tx.is_valid())
        assert_equal(zmq_tx_lock_tx.hash, rpc_raw_tx_1['txid'])
        zmq_tx_lock = msg_islock()
        zmq_tx_lock.deserialize(zmq_tx_lock_sig_stream)
        assert_equal(uint256_to_string(zmq_tx_lock.txid), rpc_raw_tx_1['txid'])
        # Try to send the second transaction. This must throw an RPC error because it conflicts with rpc_raw_tx_1
        # which already got the InstantSend lock.
        assert_raises_rpc_error(-26, "tx-txlock-conflict", self.nodes[0].sendrawtransaction, rpc_raw_tx_2['hex'])
        # Validate hashinstantsenddoublespend
        # The conflicting (newer) tx hash is published first, then the locked one.
        zmq_double_spend_hash2 = bytes_to_hex_str(self.receive(ZMQPublisher.hash_instantsend_doublespend).read(32))
        zmq_double_spend_hash1 = bytes_to_hex_str(self.receive(ZMQPublisher.hash_instantsend_doublespend).read(32))
        assert_equal(zmq_double_spend_hash2, rpc_raw_tx_2['txid'])
        assert_equal(zmq_double_spend_hash1, rpc_raw_tx_1['txid'])
        # Validate rawinstantsenddoublespend
        zmq_double_spend_tx_2 = CTransaction()
        zmq_double_spend_tx_2.deserialize(self.receive(ZMQPublisher.raw_instantsend_doublespend))
        assert (zmq_double_spend_tx_2.is_valid())
        assert_equal(zmq_double_spend_tx_2.hash, rpc_raw_tx_2['txid'])
        zmq_double_spend_tx_1 = CTransaction()
        zmq_double_spend_tx_1.deserialize(self.receive(ZMQPublisher.raw_instantsend_doublespend))
        assert(zmq_double_spend_tx_1.is_valid())
        assert_equal(zmq_double_spend_tx_1.hash, rpc_raw_tx_1['txid'])
        # No islock notifications when tx is not received yet
        self.nodes[0].generate(1)
        rpc_raw_tx_3 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)
        islock = self.create_islock(rpc_raw_tx_3['hex'])
        self.test_node.send_islock(islock)
        # Validate NO hashtxlock
        time.sleep(1)
        try:
            self.receive(ZMQPublisher.hash_tx_lock, zmq.NOBLOCK)
            assert(False)
        except zmq.ZMQError:
            # this is expected
            pass
        # Now send the tx itself
        self.test_node.send_tx(FromHex(msg_tx(), rpc_raw_tx_3['hex']))
        self.wait_for_instantlock(rpc_raw_tx_3['txid'], self.nodes[0])
        # Validate hashtxlock
        zmq_tx_lock_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_tx_lock).read(32))
        assert_equal(zmq_tx_lock_hash, rpc_raw_tx_3['txid'])
        # Drop test node connection
        self.nodes[0].disconnect_p2ps()
        # Unsubscribe from InstantSend messages
        self.unsubscribe(instantsend_publishers)
    def test_governance_publishers(self):
        """Validate governance object and governance vote publishers end to end."""
        governance_publishers = [
            ZMQPublisher.hash_governance_object,
            ZMQPublisher.raw_governance_object,
            ZMQPublisher.hash_governance_vote,
            ZMQPublisher.raw_governance_vote
        ]
        self.log.info("Testing %d governance publishers" % len(governance_publishers))
        # Subscribe to governance messages
        self.subscribe(governance_publishers)
        # Create a proposal and submit it to the network
        proposal_rev = 1
        proposal_time = int(time.time())
        proposal_data = {
            "type": 1, # GOVERNANCE_OBJECT_PROPOSAL
            "name": "Test",
            "start_epoch": proposal_time,
            "end_epoch": proposal_time + 60,
            "payment_amount": 5,
            "payment_address": self.nodes[0].getnewaddress(),
            "url": "https://titanium-blocks.org"
        }
        # Governance payloads are submitted as hex-encoded JSON.
        proposal_hex = ''.join(format(x, '02x') for x in json.dumps(proposal_data).encode())
        collateral = self.nodes[0].gobject("prepare", "0", proposal_rev, proposal_time, proposal_hex)
        self.wait_for_instantlock(collateral, self.nodes[0])
        self.nodes[0].generate(6)
        self.sync_blocks()
        rpc_proposal_hash = self.nodes[0].gobject("submit", "0", proposal_rev, proposal_time, proposal_hex, collateral)
        # Validate hashgovernanceobject
        zmq_governance_object_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_governance_object).read(32))
        assert_equal(zmq_governance_object_hash, rpc_proposal_hash)
        zmq_governance_object_raw = CGovernanceObject()
        zmq_governance_object_raw.deserialize(self.receive(ZMQPublisher.raw_governance_object))
        assert_equal(zmq_governance_object_raw.nHashParent, 0)
        assert_equal(zmq_governance_object_raw.nRevision, proposal_rev)
        assert_equal(zmq_governance_object_raw.nTime, proposal_time)
        assert_equal(json.loads(zmq_governance_object_raw.vchData.decode()), proposal_data)
        assert_equal(zmq_governance_object_raw.nObjectType, proposal_data["type"])
        assert_equal(zmq_governance_object_raw.masternodeOutpoint.hash, COutPoint().hash)
        assert_equal(zmq_governance_object_raw.masternodeOutpoint.n, COutPoint().n)
        # Vote for the proposal and validate the governance vote message
        map_vote_outcomes = {
            0: "none",
            1: "yes",
            2: "no",
            3: "abstain"
        }
        map_vote_signals = {
            0: "none",
            1: "funding",
            2: "valid",
            3: "delete",
            4: "endorsed"
        }
        self.nodes[0].gobject("vote-many", rpc_proposal_hash, map_vote_signals[1], map_vote_outcomes[1])
        rpc_proposal_votes = self.nodes[0].gobject('getcurrentvotes', rpc_proposal_hash)
        # Validate hashgovernancevote
        zmq_governance_vote_hash = bytes_to_hex_str(self.receive(ZMQPublisher.hash_governance_vote).read(32))
        assert(zmq_governance_vote_hash in rpc_proposal_votes)
        # Validate rawgovernancevote
        zmq_governance_vote_raw = CGovernanceVote()
        zmq_governance_vote_raw.deserialize(self.receive(ZMQPublisher.raw_governance_vote))
        assert_equal(uint256_to_string(zmq_governance_vote_raw.nParentHash), rpc_proposal_hash)
        # RPC vote string format: "<txid>-<n>:<time>:<outcome>:<signal>"
        rpc_vote_parts = rpc_proposal_votes[zmq_governance_vote_hash].split(':')
        rpc_outpoint_parts = rpc_vote_parts[0].split('-')
        assert_equal(uint256_to_string(zmq_governance_vote_raw.masternodeOutpoint.hash), rpc_outpoint_parts[0])
        assert_equal(zmq_governance_vote_raw.masternodeOutpoint.n, int(rpc_outpoint_parts[1]))
        assert_equal(zmq_governance_vote_raw.nTime, int(rpc_vote_parts[1]))
        assert_equal(map_vote_outcomes[zmq_governance_vote_raw.nVoteOutcome], rpc_vote_parts[2])
        assert_equal(map_vote_signals[zmq_governance_vote_raw.nVoteSignal], rpc_vote_parts[3])
        # Unsubscribe from governance messages
        self.unsubscribe(governance_publishers)
if __name__ == '__main__':
    # Run the full ZMQ notification test suite.
    TtmZMQTest().main()
| 47.399485 | 119 | 0.701756 |
410d08f1420b85c8ea2550903f474849b6901daf | 2,760 | py | Python | tests/data/test_agrawal_generator.py | Darkmyter/scikit-multiflow | e455cb9023bda8c2a5a4ec7917c9b3c7ab8586a9 | [
"BSD-3-Clause"
] | null | null | null | tests/data/test_agrawal_generator.py | Darkmyter/scikit-multiflow | e455cb9023bda8c2a5a4ec7917c9b3c7ab8586a9 | [
"BSD-3-Clause"
] | null | null | null | tests/data/test_agrawal_generator.py | Darkmyter/scikit-multiflow | e455cb9023bda8c2a5a4ec7917c9b3c7ab8586a9 | [
"BSD-3-Clause"
] | null | null | null | import os
import numpy as np
from skmultiflow.data.agrawal_generator import AGRAWALGenerator
def test_agrawal_generator(test_path):
    """Check AGRAWALGenerator metadata and its samples against a stored fixture.

    test_path: directory containing the 'agrawal_stream.npz' reference data.
    """
    stream = AGRAWALGenerator(classification_function=2, random_state=112, balance_classes=False, perturbation=0.28)
    stream.prepare_for_use()
    # -1 means the generator can produce an unbounded number of samples.
    assert stream.n_remaining_samples() == -1
    expected_names = ["salary", "commission", "age", "elevel", "car", "zipcode", "hvalue", "hyears", "loan"]
    assert stream.feature_names == expected_names
    expected_targets = [0, 1]
    assert stream.target_values == expected_targets
    assert stream.target_names == ['target']
    assert stream.n_features == 9
    assert stream.n_cat_features == 3
    assert stream.n_num_features == 6
    assert stream.n_targets == 1
    assert stream.get_data_info() == 'AGRAWAL Generator - 1 target(s), 2 classes, 9 features'
    assert stream.has_more_samples() is True
    assert stream.is_restartable() is True
    # Load test data corresponding to first 10 instances
    test_file = os.path.join(test_path, 'agrawal_stream.npz')
    data = np.load(test_file)
    X_expected = data['X']
    y_expected = data['y']
    X, y = stream.next_sample()
    assert np.alltrue(X[0] == X_expected[0])
    assert np.alltrue(y[0] == y_expected[0])
    # last_sample() must return the sample that next_sample() just produced.
    X, y = stream.last_sample()
    assert np.alltrue(X[0] == X_expected[0])
    assert np.alltrue(y[0] == y_expected[0])
    # After restart the same seed must reproduce the same 10 samples.
    stream.restart()
    X, y = stream.next_sample(10)
    assert np.alltrue(X == X_expected)
    assert np.alltrue(y == y_expected)
    assert stream.n_targets == np.array(y).ndim
    assert stream.n_features == X.shape[1]
def test_agrawal_generator_all_functions(test_path):
    """Check the first ten samples of every classification function against fixtures."""
    for function_idx in range(10):
        stream = AGRAWALGenerator(classification_function=function_idx, random_state=1)
        stream.prepare_for_use()
        # Reference data for the first 10 instances of this function.
        fixture_path = os.path.join(test_path, 'agrawal_stream_{}.npz'.format(function_idx))
        fixture = np.load(fixture_path)
        X, y = stream.next_sample(10)
        assert np.alltrue(X == fixture['X'])
        assert np.alltrue(y == fixture['y'])
def test_agrawal_drift(test_path):
    """Samples taken before and after generate_drift() must match the stored fixture."""
    stream = AGRAWALGenerator(random_state=1)
    stream.prepare_for_use()
    X_before, y_before = stream.next_sample(10)
    stream.generate_drift()
    X_after, y_after = stream.next_sample(10)
    # Reference data: 10 pre-drift followed by 10 post-drift instances.
    fixture = np.load(os.path.join(test_path, 'agrawal_stream_drift.npz'))
    X = np.concatenate((X_before, X_after))
    y = np.concatenate((y_before, y_after))
    assert np.alltrue(X == fixture['X'])
    assert np.alltrue(y == fixture['y'])
73686f5910a4c1f0a4332723c75b2454106b20de | 3,096 | py | Python | corefacility/core/entity/entity_fields/field_managers/public_file_manager.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | corefacility/core/entity/entity_fields/field_managers/public_file_manager.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | corefacility/core/entity/entity_fields/field_managers/public_file_manager.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | from django.core.files import File
from django.db import transaction
from django.conf import settings
from core.entity.entity_fields.field_managers.entity_value_manager import EntityValueManager
from core.entity.entity_exceptions import EntityOperationNotPermitted
class PublicFileManager(EntityValueManager):
    """
    Manager attaching and detaching of public file.
    We call 'public files' such files that are:
    (a) attached to a certain field of the entity
    (b) can be uploaded by everybody who has entity access
    (c) can be downloaded by everybody including non-authorized users
    (d) doesn't require the corefacility application to be downloaded (i.e., they will be downloaded faster).
    """
    def attach_file(self, file: File) -> None:
        """
        Attaches external file to the entity.
        No any additional save/retrieve is needed but the entity state must be either 'loaded' or 'saved'.
        The function does not validate or process the file: this is a view responsibility to do this
        :param file: an instance of django.core.files.File object containing the attached file
        :return: nothing
        """
        if self.entity.state != "loaded" and self.entity.state != "saved":
            raise EntityOperationNotPermitted()
        self.entity.check_entity_providers_defined()
        # Detach + attach run in one transaction so a failed attach cannot
        # leave the entity without its previous file.
        with transaction.atomic():
            self.detach_file()
            for provider in self.entity._entity_provider_list:
                provider.attach_file(self.entity, self.field_name, file)
        # Refresh the cached field value from the entity after the providers ran.
        self._field_value = getattr(self.entity, '_' + self.field_name)
    def detach_file(self) -> None:
        """
        Detaches external file from the entity.
        No any additional save/retrieve is needed but the entity state must be either 'loaded' or 'saved'.
        :return: nothing
        """
        # Nothing to do when no file is attached.
        if self.is_detached:
            return
        if self.entity.state != "loaded" and self.entity.state != "saved":
            raise EntityOperationNotPermitted()
        self.entity.check_entity_providers_defined()
        with transaction.atomic():
            for provider in self.entity._entity_provider_list:
                provider.detach_file(self.entity, self.field_name)
        self._field_value = None
    @property
    def is_detached(self):
        """
        Checks whether some file attached to this field
        :return: True is not files attached to this field
        """
        return self._field_value is None or self._field_value.name is None or self._field_value.name == ""
    @property
    def url(self) -> str:
        """
        Returns the file URL (or the field's default value when detached)
        """
        if self.is_detached:
            return self._default_value
        else:
            filename = self._field_value.name
            return settings.MEDIA_URL + filename
    def __eq__(self, other):
        """
        Two file fields are equal if and only if their URLs are equal
        :param other: the other file field
        :return: True if two files are equal
        """
        return self.url == other.url
d3fe4c57c86e0a68f581adf84a94103acc57ea46 | 1,837 | py | Python | tests/test_connection_manager.py | Sceptre/sceptre-core | 83818e69d4e3c35c2a048240f7bc35f66d989db5 | [
"Apache-2.0"
] | null | null | null | tests/test_connection_manager.py | Sceptre/sceptre-core | 83818e69d4e3c35c2a048240f7bc35f66d989db5 | [
"Apache-2.0"
] | 1 | 2019-10-22T08:52:23.000Z | 2019-10-22T08:52:23.000Z | tests/test_connection_manager.py | Sceptre/sceptre-core | 83818e69d4e3c35c2a048240f7bc35f66d989db5 | [
"Apache-2.0"
] | null | null | null | import pytest
from unittest import mock
from sceptre.provider.connection_manager import ConnectionManager
from sceptre.exceptions import ClientError
from sceptre.exceptions import RetryLimitExceededError
class TestConnectionManager:
    """Unit tests for the sceptre ConnectionManager base class."""
    def test_connection_manager_instantiates_with_config(self):
        """A dict config is accepted and stored on the instance."""
        connection_config = {"profile": "prod", "region": "eu-west-1"}
        # Concrete subclass: ConnectionManager is abstract, so `call` must be defined.
        class ExampleConnectionManager(ConnectionManager):
            def call(self):
                pass
        connection_manager = ExampleConnectionManager(connection_config)
        assert connection_manager.config == connection_config
    def test_connection_manager_raises_type_error_with_invalid_config(self):
        """A non-dict config (here a tuple) must raise TypeError."""
        connection_config = ("region", "eu-west-1")
        class ExampleConnectionManager(ConnectionManager):
            def call(self):
                pass
        with pytest.raises(TypeError):
            ExampleConnectionManager(connection_config)
    def test_connection_manager_raises_value_error_with_empty_config(self):
        """An empty dict config must raise ValueError."""
        connection_config = {}
        class ExampleConnectionManager(ConnectionManager):
            def call(self):
                pass
        with pytest.raises(ValueError):
            ExampleConnectionManager(connection_config)
    def test_retry_provider_call_retries_max_attemps(self):
        """_retry_provider_call gives up after the retry limit is reached.

        NOTE(review): 29 calls implies 1 initial attempt + 28 retries —
        confirm against the implementation's retry constant.
        """
        MAX_RETRY_COUNT = 29
        connection_config = {"region": "eu-west-1"}
        # The wrapped function always fails with ClientError to force retries.
        mock_fn = mock.Mock()
        mock_fn.side_effect = ClientError
        class ExampleConnectionManager(ConnectionManager):
            def call(self):
                pass
        with pytest.raises(RetryLimitExceededError):
            connection_manager = ExampleConnectionManager(connection_config)
            connection_manager._retry_provider_call(mock_fn)()
        assert MAX_RETRY_COUNT == mock_fn.call_count
9818f72487db6371b0a65c5bc276eae478dc61a3 | 1,200 | gyp | Python | node_modules/node-ios-device/binding.gyp | GertjanSmits/titanium_mobile | 5320df5bfe2a5953879a46b89906c6e73ba1e714 | [
"Apache-2.0"
] | null | null | null | node_modules/node-ios-device/binding.gyp | GertjanSmits/titanium_mobile | 5320df5bfe2a5953879a46b89906c6e73ba1e714 | [
"Apache-2.0"
] | null | null | null | node_modules/node-ios-device/binding.gyp | GertjanSmits/titanium_mobile | 5320df5bfe2a5953879a46b89906c6e73ba1e714 | [
"Apache-2.0"
] | null | null | null | {
  'targets': [
    {
      # Helper executable used at build time to report the Node module ABI version.
      'target_name': 'node_module_version',
      'type': 'executable',
      'sources': [
        'src/node-module-version.cpp'
      ]
    },
    {
      # The native addon itself; links against Apple's private MobileDevice framework.
      'target_name': 'node_ios_device',
      'dependencies': [ 'node_module_version' ],
      'sources': [
        'src/ios-device.cpp',
        'src/mobiledevice.h'
      ],
      'libraries': [
        '/System/Library/Frameworks/CoreFoundation.framework',
        '/System/Library/PrivateFrameworks/MobileDevice.framework',
        '../deps/boost/lib/libboost_system-mt.a',
        '../deps/boost/lib/libboost_thread-mt.a'
      ],
      'mac_framework_dirs': [
        '/System/Library/PrivateFrameworks'
      ],
      'include_dirs': [
        # Resolve the NAN headers from the installed npm package at gyp time.
        '<!(node -e "require(\'nan\')")',
        'deps/boost/include'
      ],
      'cflags': [
        '-Wl,-whole-archive -lboost_system -Wl,--no-whole-archive'
      ],
      'xcode_settings': {
        'OTHER_CPLUSPLUSFLAGS' : [ '-std=c++11', '-stdlib=libc++' ],
        'OTHER_LDFLAGS': [ '-stdlib=libc++' ],
        'MACOSX_DEPLOYMENT_TARGET': '10.7'
      },
      'postbuilds': [
        {
          # Copy the built binary into the distributable 'out' directory.
          'postbuild_name': 'Copy release to output directory',
          'action': [
            'sh',
            '../dist.sh',
            '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}',
            '${SRCROOT}/out'
          ]
        }
      ]
    }
  ]
}
| 23.076923 | 64 | 0.575833 |
211c5ee0ef79854bf2b61e2c836dceb68c5c77db | 5,568 | py | Python | syspy/pycube/_line.py | systragroup/quetzal | bb7934bcae588cddf0f0da810d75114d1c64768f | [
"CECILL-B"
] | 25 | 2018-11-20T16:33:02.000Z | 2022-03-03T12:46:52.000Z | syspy/pycube/_line.py | systragroup/quetzal | bb7934bcae588cddf0f0da810d75114d1c64768f | [
"CECILL-B"
] | 14 | 2019-06-13T13:26:20.000Z | 2022-01-13T03:51:07.000Z | syspy/pycube/_line.py | systragroup/quetzal | bb7934bcae588cddf0f0da810d75114d1c64768f | [
"CECILL-B"
] | 4 | 2020-01-31T18:34:59.000Z | 2022-03-18T17:22:45.000Z | __author__ = 'qchasserieau'
import re
class Line:
    """A Cube transit line, held as its raw text chunk.

    All mutators edit ``self.chunk`` in place with the module-level regex
    helpers; ``N=`` entries are stops (negative ids are checkpoints) and
    ``RT=`` entries are cumulative run times.
    """
    def __init__(self, chunk):
        # Raw text of the line record.
        self.chunk = chunk
        try:
            # The line name is the first single-quoted token of the chunk.
            self.name = self.chunk.split("'")[1]
        except Exception:
            self.name = 'not_a_line'
    def change_time(self, time_factor=1, offset=0):
        """Scale every run time by *time_factor* then shift it by *offset*."""
        self.chunk = _change_time(self.chunk, time_factor, offset)
        self.format_chunk()
    def add_line(self, line, start='left'):
        """Concatenate another Line at this line's terminus (see _add_chunk)."""
        self.chunk = _add_chunk(self.chunk, line.chunk, start)
        self.format_chunk()
    def cut_at_node(self, n, keep='left'):
        """Truncate the line at node *n*, keeping the 'left' or 'right' part."""
        self.chunk = _cut_at_node(self.chunk, n, keep)
        self.format_chunk()
    def cut_between(self, from_node, to_node):
        """Keep only the section between *from_node* and *to_node*."""
        # TODO understand and kill the fix below
        self.chunk = _cut_between(self.chunk, from_node, to_node).replace('RT=-', 'RT=')
        self.format_chunk()
    def format_chunk(self):
        """Normalise spacing around '=' and ',' and collapse empty entries."""
        self.chunk = coma.sub(', ', equal.sub('=', self.chunk.replace('\n', ''))) + '\n'
        self.chunk = self.chunk.replace(', ,', ',')
    def formated_chunk(self):
        """Return the normalised chunk without mutating the line."""
        return coma.sub(', ', equal.sub('=', self.chunk.replace('\n', ''))) + '\n'
    def set_parameter(self, parameter, value):
        """Set a numeric or T/F keyword *parameter* of the line to *value*."""
        to_sub = re.compile(',[ ]*' + parameter + '[ ]*=[0-9TFtf]*')
        self.chunk = to_sub.sub(', %s=%s' % (parameter, str(value)), self.chunk)
    def drop_checkpoints(self):
        """Remove every checkpoint entry (negative node id with its RT)."""
        self.chunk = checkpoint_re.sub('', self.chunk)
    def set_direct(self, from_stop, to_stop):
        """Drop all intermediate stops between *from_stop* and *to_stop*."""
        self.chunk = _set_direct(self.chunk, from_stop, to_stop)
    def change_stop(self, from_stop, to_stop):
        """Replace stop number *from_stop* by *to_stop* everywhere in the line."""
        self.chunk = re.compile('N=[ ]*' + str(from_stop) + '[ ]*,').sub('N=' + str(to_stop) + ',', self.chunk)
    def __repr__(self):
        return self.chunk
# Normalisation patterns: collapse any spacing around '=' and ','.
equal = re.compile('[ ]*[=]+[ ]*')
coma = re.compile('[ ]*[,]+[ ]*')
# A checkpoint entry: negative node id followed by its run time.
checkpoint_re = re.compile('N=-[0-9]*,[ ]*RT=[0-9.]+[ ]*,')
# A node entry (optionally negative) that is immediately followed by a run time.
regex_node_rt = 'N=[-]?[0-9]{4,6},[ ]?RT='
node_re = re.compile(regex_node_rt)
# A run-time value, e.g. RT=12 or RT=12.5
regex_time = 'RT=[0-9]{1,6}[.]?[0-9]{0,6}'
time_re = re.compile(regex_time)
def _stop_list(text, regex='N=[0-9]{4,6}'):
stop_re = re.compile(regex)
return [int(f[2:]) for f in stop_re.findall(text)]
def _add_chunk(left, right, start='left'):
    """Concatenate two line chunks, re-basing the appended chunk's run times.

    With start='left' the *right* chunk is appended after *left*; with
    start='right' it is prepended. The shared terminus node must match,
    otherwise a warning is printed and None is returned (original behaviour).
    """
    left_offset = _chunk_times(left)[-1]
    right_nodes_and_times = ', N=' + 'N='.join(_change_time(right, 1, left_offset).split('N=')[2:]) + '\n'
    if start == 'left':
        if _stop_list(left)[-1] == _stop_list(right)[0]:
            return left.replace('\n', '') + right_nodes_and_times
        else:
            print('terminus do not match : %i -- %i | %i -- %i' % (
                _stop_list(left)[0], _stop_list(left)[-1], _stop_list(right)[0], _stop_list(right)[-1]))
    if start == 'right':
        if _stop_list(right)[-1] == _stop_list(left)[0]:
            return left.split('N=')[0] + ', N=' + 'N='.join(_add_chunk(right, left, start='left').split('N=')[1:]) + '\n'
        else:
            # bug fix: all four values must live inside the %-format tuple;
            # previously two of them were passed as extra print() arguments,
            # which made the format raise "not enough arguments for format
            # string" on this path.
            print('terminus do not match : %i -- %i | %i -- %i' % (
                _stop_list(right)[0], _stop_list(right)[-1], _stop_list(left)[0], _stop_list(left)[-1]))
def _chunk_times(chunk):
    """Return every run time (RT=...) of *chunk* as a list of floats."""
    stripped = chunk.replace(' ', '')
    return [float(token[3:]) for token in time_re.findall(stripped)]
def _chunk_nodes(chunk):
    """Return every node number of *chunk* that carries a run time."""
    stripped = chunk.replace(' ', '')
    # node_re matches 'N=<id>,RT='; strip the 'N=' prefix and ',RT=' suffix.
    return [int(token[2:-4]) for token in node_re.findall(stripped)]
def _change_time(chunk, time_factor=1, time_offset=0):
    """Rebuild *chunk* with every run time scaled by *time_factor* then shifted by *time_offset*."""
    stripped = chunk.replace(' ', '')
    nodes = [int(token[2:-4]) for token in node_re.findall(stripped)]
    times = [float(token[3:]) for token in time_re.findall(stripped)]
    entries = []
    for node, time in zip(nodes, times):
        adjusted = round(time * time_factor + time_offset, 2)
        entries.append('N=' + str(node) + ',RT=' + str(adjusted))
    # Keep everything before the first node entry (the line header) untouched.
    return chunk.split('N=')[0] + ','.join(entries) + ' '
def _zip_rt_times(chunk, time_factor=1, time_offset=0):
    """Return an iterator of (node, adjusted run time) pairs for *chunk*."""
    stripped = chunk.replace(' ', '')
    nodes = [int(token[2:-4]) for token in node_re.findall(stripped)]
    raw_times = [float(token[3:]) for token in time_re.findall(stripped)]
    adjusted = [round(t * time_factor + time_offset, 2) for t in raw_times]
    return zip(nodes, adjusted)
def _cut_at_node(chunk, n, keep='left'):
    """Truncate *chunk* at node *n*, keeping the 'left' or 'right' part."""
    marker = 'N=' + str(n)
    header = chunk.split('N=')[0]
    # Negated run time at n, used to re-base the kept part at RT=0.
    offset = -1 * dict(_zip_rt_times(chunk))[n]
    if keep == 'right':
        tail = chunk.split(str(n))[1]
        return _change_time(header + marker + tail, 1, time_offset=offset)
    if keep == 'left':
        return chunk.split(marker)[0] + marker + ', RT=' + str(offset)
def _cut_between(chunk, na, nb, failed=False):
    """Keep only the part of *chunk* between nodes *na* and *nb*.

    The cut is attempted in both node orders; on failure the call retries
    once with the arguments swapped (*failed* guards against infinite
    recursion) and finally falls back to returning *chunk* unchanged.
    """
    try:
        test = _cut_at_node(_cut_at_node(chunk, na, keep='right'), nb, keep='left')
        # Sanity check: both nodes must survive the cut, otherwise try the
        # opposite order.
        if (str(na) in chunk) and (str(nb) in test):
            return test
        else:
            return _cut_at_node(_cut_at_node(chunk, nb, keep='right'), na, keep='left')
    except Exception:
        if not failed:
            # Retry once with the nodes swapped.
            return _cut_between(chunk, nb, na, failed=True)
        else:
            # Give up: return the chunk untouched.
            return chunk
def _set_direct(chunk, from_stop, to_stop):
try:
a, b = str(from_stop), str(to_stop)
regex = 'N=[ ]*' + a + '[ ]*,' + '.+' + 'N=[ ]*' + b + '[ ]*,'
match = re.compile(regex).findall(chunk)[0]
_chunk = chunk.replace(match, 'N='.join(match.split('N=')[:2]) + 'N=' + b + ', ')
except IndexError:
a, b = str(to_stop), str(from_stop)
regex = 'N=[ ]*' + a + '[ ]*,' + '.+' + 'N=[ ]*' + b + '[ ]*,'
match = re.compile(regex).findall(chunk)[0]
_chunk = chunk.replace(match, 'N='.join(match.split('N=')[:2]) + 'N=' + b + ', ')
return _chunk
| 35.922581 | 121 | 0.571121 |
cbd79ccea177250abb74080b6b2f3b7e3ac779c5 | 1,830 | py | Python | packager/third_party/yasm/run_yasm.py | koln67/shaka-packager | 5b9fd409a5de502e8af2e46ee12840bd2226874d | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | packager/third_party/yasm/run_yasm.py | koln67/shaka-packager | 5b9fd409a5de502e8af2e46ee12840bd2226874d | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 894 | 2016-05-17T00:39:30.000Z | 2022-03-02T18:46:21.000Z | packager/third_party/yasm/run_yasm.py | koln67/shaka-packager | 5b9fd409a5de502e8af2e46ee12840bd2226874d | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 400 | 2016-05-25T01:20:35.000Z | 2022-03-03T02:12:00.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper to run yasm.
Its main job is to provide a Python wrapper for GN integration, and to write
the makefile-style output yasm generates in stdout to a .d file for dependency
management of .inc files.
Run with:
python run_yasm.py <yasm_binary_path> <all other yasm args>
Note that <all other yasm args> must include an explicit output file (-o). This
script will append a ".d" to this and write the dependencies there. This script
will add "-M" to cause yasm to write the deps to stdout, so you don't need to
specify that.
"""
import argparse
import sys
import subprocess
# Extract the output file name from the yasm command line so we can generate a
# .d file with the same base name.
# parse_known_args ignores everything it does not recognise, including
# sys.argv[1] (the yasm binary path) and the assembler flags.
parser = argparse.ArgumentParser()
parser.add_argument("-o", dest="objfile")
options, _ = parser.parse_known_args()

objfile = options.objfile
depfile = objfile + '.d'

# Assemble.  sys.argv[1:] is the complete yasm invocation passed through.
result_code = subprocess.call(sys.argv[1:])
if result_code != 0:
    # Propagate yasm's failure status to the caller.
    sys.exit(result_code)

# Now generate the .d file listing the dependencies. The -M option makes yasm
# write the Makefile-style dependencies to stdout, but it seems that inhibits
# generating any compiled output so we need to do this in a separate pass.
# However, outputting deps seems faster than actually assembling, and yasm is
# so fast anyway this is not a big deal.
#
# This guarantees proper dependency management for assembly files. Otherwise,
# we would have to require people to manually specify the .inc files they
# depend on in the build file, which will surely be wrong or out-of-date in
# some cases.
deps = subprocess.check_output(sys.argv[1:] + ['-M'])
with open(depfile, "wb") as f:
    f.write(deps)
| 35.192308 | 79 | 0.754098 |
8d8a696ecdf83ebbf6b6d030e0950e2616979178 | 1,596 | py | Python | tests/test_index.py | brunobord/static-markdown | 7d9d7f3b76068087b3754cf15d2ae4d2dc2a5531 | [
"MIT"
] | 5 | 2019-06-14T10:10:07.000Z | 2021-12-20T17:46:53.000Z | tests/test_index.py | brunobord/static-markdown | 7d9d7f3b76068087b3754cf15d2ae4d2dc2a5531 | [
"MIT"
] | 13 | 2019-06-13T21:00:58.000Z | 2021-05-12T19:35:40.000Z | tests/test_index.py | brunobord/static-markdown | 7d9d7f3b76068087b3754cf15d2ae4d2dc2a5531 | [
"MIT"
] | null | null | null | import requests
def test_home(http_server):
    """The site root must serve the same document as /index.html."""
    index_page = requests.get("http://127.0.0.1:8080/index.html")
    assert index_page.status_code == 200
    expected_body = index_page.content
    root_page = requests.get("http://127.0.0.1:8080/")
    assert root_page.status_code == 200
    assert root_page.content == expected_body
def test_subdir(http_server):
    """A subdirectory URL must serve that directory's index.html."""
    index_page = requests.get("http://127.0.0.1:8080/subdir/index.html")
    assert index_page.status_code == 200
    expected_body = index_page.content
    dir_page = requests.get("http://127.0.0.1:8080/subdir/")
    assert dir_page.status_code == 200
    assert dir_page.content == expected_body
def test_alternate_index(http_server):
    """index.htm is honoured as a fallback index document."""
    dir_page = requests.get("http://127.0.0.1:8080/alternate-index/")
    assert dir_page.status_code == 200
    expected_body = dir_page.content
    file_page = requests.get("http://127.0.0.1:8080/alternate-index/index.htm")
    assert file_page.status_code == 200
    assert file_page.content == expected_body
    assert file_page.content == b"I am the alternate index\n"
def test_empty_404(http_server):
    # NOTE(review): despite the name, this asserts a 200 for an empty
    # directory -- presumably the server renders an (empty) listing page
    # rather than returning 404; confirm the intended behaviour.
    response = requests.get("http://127.0.0.1:8080/empty/")
    assert response.status_code == 200
def test_dirlist(http_server):
    """A directory without an index document gets an auto-generated listing."""
    listing = requests.get("http://127.0.0.1:8080/no-index/")
    assert listing.status_code == 200
    body = listing.content
    # The listing must link the sub-directory...
    assert b'<a href="/no-index/subdir/">subdir/</a>' in body
    # ...and both plain files.
    assert b'<a href="/no-index/file.html">file.html</a>' in body
    assert b'<a href="/no-index/other-file.html">other-file.html</a>' in body
| 34.695652 | 80 | 0.697995 |
fb80c00ee57d3260f146a3282ae2ae87fe941ec9 | 7,552 | py | Python | python/ccxt/async/coinmate.py | hippylover/ccxt | db304e95b699c1971ad37b9053ae71fcb5dc3b03 | [
"MIT"
] | 2 | 2018-02-28T02:51:59.000Z | 2018-02-28T03:25:51.000Z | python/ccxt/async/coinmate.py | August-Ghost/ccxt | 886c596ffde611b5a92cb5b6e3788ff010324c74 | [
"MIT"
] | null | null | null | python/ccxt/async/coinmate.py | August-Ghost/ccxt | 886c596ffde611b5a92cb5b6e3788ff010324c74 | [
"MIT"
] | 9 | 2018-02-20T18:24:00.000Z | 2019-06-18T14:23:11.000Z | # -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class coinmate (Exchange):
    """ccxt adapter for the CoinMate exchange (UK / Czech Republic)."""

    def describe(self):
        """Return the static exchange description (endpoints, markets, fees)."""
        return self.deep_extend(super(coinmate, self).describe(), {
            'id': 'coinmate',
            'name': 'CoinMate',
            'countries': ['GB', 'CZ', 'EU'],  # UK, Czech Republic
            'rateLimit': 1000,
            'has': {
                'CORS': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27811229-c1efb510-606c-11e7-9a36-84ba2ce412d8.jpg',
                'api': 'https://coinmate.io/api',
                'www': 'https://coinmate.io',
                'doc': [
                    'http://docs.coinmate.apiary.io',
                    'https://coinmate.io/developers',
                ],
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'api': {
                'public': {
                    'get': [
                        'orderBook',
                        'ticker',
                        'transactions',
                    ],
                },
                'private': {
                    'post': [
                        'balances',
                        'bitcoinWithdrawal',
                        'bitcoinDepositAddresses',
                        'buyInstant',
                        'buyLimit',
                        'cancelOrder',
                        'cancelOrderWithInfo',
                        'createVoucher',
                        'openOrders',
                        'redeemVoucher',
                        'sellInstant',
                        'sellLimit',
                        'transactionHistory',
                        'unconfirmedBitcoinDeposits',
                    ],
                },
            },
            'markets': {
                'BTC/EUR': {'id': 'BTC_EUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'precision': {'amount': 4, 'price': 2}},
                'BTC/CZK': {'id': 'BTC_CZK', 'symbol': 'BTC/CZK', 'base': 'BTC', 'quote': 'CZK', 'precision': {'amount': 4, 'price': 2}},
                'LTC/BTC': {'id': 'LTC_BTC', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'precision': {'amount': 4, 'price': 5}},
            },
            'fees': {
                'trading': {
                    'maker': 0.0005,
                    'taker': 0.0035,
                },
            },
        })

    async def fetch_balance(self, params={}):
        """Fetch account balances and return them in unified ccxt form."""
        response = await self.privatePostBalances()
        balances = response['data']
        result = {'info': balances}
        currencies = list(self.currencies.keys())
        for i in range(0, len(currencies)):
            currency = currencies[i]
            account = self.account()
            if currency in balances:
                account['free'] = balances[currency]['available']
                account['used'] = balances[currency]['reserved']
                account['total'] = balances[currency]['balance']
            result[currency] = account
        return self.parse_balance(result)

    async def fetch_order_book(self, symbol, params={}):
        """Fetch the (ungrouped) order book for *symbol*."""
        response = await self.publicGetOrderBook(self.extend({
            'currencyPair': self.market_id(symbol),
            'groupByPriceLimit': 'False',
        }, params))
        orderbook = response['data']
        timestamp = orderbook['timestamp'] * 1000  # exchange reports seconds
        return self.parse_order_book(orderbook, timestamp, 'bids', 'asks', 'price', 'amount')

    async def fetch_ticker(self, symbol, params={}):
        """Fetch a price ticker and map it onto the unified ticker structure."""
        response = await self.publicGetTicker(self.extend({
            'currencyPair': self.market_id(symbol),
        }, params))
        ticker = response['data']
        timestamp = ticker['timestamp'] * 1000  # exchange reports seconds
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['high']),
            'low': float(ticker['low']),
            'bid': float(ticker['bid']),
            'ask': float(ticker['ask']),
            'vwap': None,
            'open': None,
            'close': None,
            'first': None,
            'last': float(ticker['last']),
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': float(ticker['amount']),
            'quoteVolume': None,
            'info': ticker,
        }

    def parse_trade(self, trade, market=None):
        """Convert one raw exchange trade record into the unified structure."""
        if not market:
            market = self.markets_by_id[trade['currencyPair']]
        return {
            'id': trade['transactionId'],
            'info': trade,
            'timestamp': trade['timestamp'],
            'datetime': self.iso8601(trade['timestamp']),
            'symbol': market['symbol'],
            'type': None,
            'side': None,
            'price': trade['price'],
            'amount': trade['amount'],
        }

    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades (last 10 minutes of history)."""
        market = self.market(symbol)
        response = await self.publicGetTransactions(self.extend({
            'currencyPair': market['id'],
            'minutesIntoHistory': 10,
        }, params))
        return self.parse_trades(response['data'], market, since, limit)

    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order; market orders use the *Instant endpoints."""
        method = 'privatePost' + self.capitalize(side)
        order = {
            'currencyPair': self.market_id(symbol),
        }
        if type == 'market':
            if side == 'buy':
                order['total'] = amount  # amount in fiat
            else:
                # NOTE(review): the original comment said "amount in fiat",
                # but sellInstant presumably takes the amount in crypto --
                # confirm against the CoinMate API docs.
                order['amount'] = amount
            method += 'Instant'
        else:
            order['amount'] = amount  # amount in crypto
            order['price'] = price
            method += self.capitalize(type)
        response = await getattr(self, method)(self.extend(order, params))
        return {
            'info': response,
            'id': str(response['data']),
        }

    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by exchange id (symbol is unused here)."""
        return await self.privatePostCancelOrder({'orderId': id})

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request url/body; private calls get an HMAC signature
        over nonce + uid + apiKey."""
        url = self.urls['api'] + '/' + path
        if api == 'public':
            if params:
                url += '?' + self.urlencode(params)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            auth = nonce + self.uid + self.apiKey
            signature = self.hmac(self.encode(auth), self.encode(self.secret))
            body = self.urlencode(self.extend({
                'clientId': self.uid,
                'nonce': nonce,
                'publicKey': self.apiKey,
                'signature': signature.upper(),
            }, params))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Issue the request and raise ExchangeError on an API-level error."""
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'error' in response:
            if response['error']:
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
| 38.335025 | 137 | 0.476165 |
021dc3a19644ba8086ed5934b550cab5b20ac488 | 254 | py | Python | example_projects/basic/skeleton/version.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 12 | 2018-06-28T13:40:53.000Z | 2022-01-07T12:46:15.000Z | skeleton/version.py | elsampsa/skeleton | 30679fc787014e347e0b21a6f74193237c0ffe61 | [
"MIT"
] | 6 | 2019-04-29T16:55:38.000Z | 2022-03-04T17:00:15.000Z | skeleton/version.py | elsampsa/skeleton | 30679fc787014e347e0b21a6f74193237c0ffe61 | [
"MIT"
] | 5 | 2019-04-21T15:42:55.000Z | 2021-08-16T10:53:30.000Z | """This module has the version number. Automatically changed with the "setver.bash" script. Don't touch!
"""
VERSION_MAJOR=1
VERSION_MINOR=0
VERSION_PATCH=0

def getVersionTag():
    """Return the version as a parenthesised dotted string, e.g. "(1.0.0)"."""
    version_fields = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
    return "(%i.%i.%i)" % version_fields
| 25.4 | 106 | 0.73622 |
821e828ed68d72ac7dfa353aed38eb4937015657 | 6,483 | py | Python | test/decoders/test_rnn_transducer_decoder.py | ishine/neural_sp | 7995613541d994976b00d80dcc12e2835163acfb | [
"Apache-2.0"
] | 577 | 2018-09-17T14:39:34.000Z | 2022-03-29T10:48:09.000Z | test/decoders/test_rnn_transducer_decoder.py | ishine/neural_sp | 7995613541d994976b00d80dcc12e2835163acfb | [
"Apache-2.0"
] | 221 | 2019-04-21T01:44:09.000Z | 2022-02-10T02:08:47.000Z | test/decoders/test_rnn_transducer_decoder.py | ishine/neural_sp | 7995613541d994976b00d80dcc12e2835163acfb | [
"Apache-2.0"
] | 139 | 2019-01-09T02:18:00.000Z | 2022-03-29T07:40:08.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test for RNN Transducer."""
import argparse
import importlib
import numpy as np
import pytest
import torch
from neural_sp.datasets.token_converter.character import Idx2char
from neural_sp.models.torch_utils import (
np2tensor,
pad_list
)
ENC_N_UNITS = 16
VOCAB = 10
idx2token = Idx2char('test/decoders/dict.txt')
def make_args(**kwargs):
args = dict(
special_symbols={'blank': 0, 'unk': 1, 'eos': 2, 'pad': 3},
enc_n_units=ENC_N_UNITS,
n_units=16,
n_projs=0,
n_layers=2,
bottleneck_dim=8,
emb_dim=8,
vocab=VOCAB,
dropout=0.1,
dropout_emb=0.1,
ctc_weight=0.1,
ctc_lsm_prob=0.1,
ctc_fc_list='16_16',
external_lm=None,
global_weight=1.0,
mtl_per_batch=False,
param_init=0.1,
)
args.update(kwargs)
return args
@pytest.mark.parametrize(
"args",
[
({'n_layers': 1}),
({'n_layers': 2}),
# projection
({'n_projs': 8}),
# CTC
({'ctc_weight': 0.5}),
({'ctc_weight': 1.0}),
({'ctc_weight': 1.0, 'ctc_lsm_prob': 0.0}),
]
)
def test_forward(args):
args = make_args(**args)
bs = 4
emax = 40
device = "cpu"
eouts = np.random.randn(bs, emax, ENC_N_UNITS).astype(np.float32)
elens = torch.IntTensor([len(x) for x in eouts])
eouts = pad_list([np2tensor(x, device).float() for x in eouts], 0.)
ylens = [4, 5, 3, 7]
ys = [np.random.randint(0, VOCAB, ylen).astype(np.int32) for ylen in ylens]
module = importlib.import_module('neural_sp.models.seq2seq.decoders.rnn_transducer')
dec = module.RNNTransducer(**args)
dec = dec.to(device)
loss, observation = dec(eouts, elens, ys, task='all')
assert loss.dim() == 1
assert loss.size(0) == 1
assert loss.item() >= 0
assert isinstance(observation, dict)
def make_decode_params(**kwargs):
args = dict(
recog_batch_size=1,
recog_beam_width=1,
recog_ctc_weight=0.0,
recog_lm_weight=0.0,
recog_lm_second_weight=0.0,
recog_lm_bwd_weight=0.0,
recog_cache_embedding=True,
recog_max_len_ratio=1.0,
recog_lm_state_carry_over=False,
recog_softmax_smoothing=1.0,
recog_rnnt_beam_search_type='time_sync_mono',
nbest=1,
)
args.update(kwargs)
return args
def make_args_rnnlm(**kwargs):
args = dict(
n_units=16,
n_projs=0,
n_layers=2,
residual=False,
use_glu=False,
n_units_null_context=0,
bottleneck_dim=8,
emb_dim=8,
vocab=VOCAB,
dropout_in=0.1,
dropout_hidden=0.1,
lsm_prob=0.0,
param_init=0.1,
adaptive_softmax=False,
tie_embedding=False,
)
args.update(kwargs)
return argparse.Namespace(**args)
@pytest.mark.parametrize(
"params",
[
# greedy decoding
({'recog_beam_width': 1}),
({'recog_beam_width': 1, 'recog_batch_size': 4}),
# beam search
({'recog_beam_width': 4}),
({'recog_beam_width': 4, 'recog_batch_size': 4}),
({'recog_beam_width': 4, 'nbest': 2}),
({'recog_beam_width': 4, 'nbest': 4}),
({'recog_beam_width': 4, 'recog_softmax_smoothing': 0.8}),
({'recog_beam_width': 4, 'recog_rnnt_beam_search_type': 'time_sync'}),
# ({'recog_beam_width': 4, 'recog_ctc_weight': 0.1}),
# shallow fusion
({'recog_beam_width': 4, 'recog_lm_weight': 0.1}),
({'recog_beam_width': 4, 'recog_lm_weight': 0.1, 'recog_cache_embedding': False}),
# rescoring
({'recog_beam_width': 4, 'recog_lm_second_weight': 0.1}),
({'recog_beam_width': 4, 'recog_lm_bwd_weight': 0.1}),
]
)
def test_decoding(params):
args = make_args()
params = make_decode_params(**params)
bs = params['recog_batch_size']
emax = 40
device = "cpu"
eouts = np.random.randn(bs, emax, ENC_N_UNITS).astype(np.float32)
elens = torch.IntTensor([len(x) for x in eouts])
eouts = pad_list([np2tensor(x, device).float() for x in eouts], 0.)
ctc_log_probs = None
if params['recog_ctc_weight'] > 0:
ctc_logits = torch.FloatTensor(bs, emax, VOCAB, device=device)
ctc_log_probs = torch.softmax(ctc_logits, dim=-1)
lm = None
if params['recog_lm_weight'] > 0:
args_lm = make_args_rnnlm()
module = importlib.import_module('neural_sp.models.lm.rnnlm')
lm = module.RNNLM(args_lm).to(device)
lm_second = None
if params['recog_lm_second_weight'] > 0:
args_lm = make_args_rnnlm()
module = importlib.import_module('neural_sp.models.lm.rnnlm')
lm_second = module.RNNLM(args_lm).to(device)
lm_second_bwd = None
if params['recog_lm_bwd_weight'] > 0:
args_lm = make_args_rnnlm()
module = importlib.import_module('neural_sp.models.lm.rnnlm')
lm_second_bwd = module.RNNLM(args_lm).to(device)
ylens = [4, 5, 3, 7]
ys = [np.random.randint(0, VOCAB, ylen).astype(np.int32) for ylen in ylens]
module = importlib.import_module('neural_sp.models.seq2seq.decoders.rnn_transducer')
dec = module.RNNTransducer(**args)
dec = dec.to(device)
dec.eval()
with torch.no_grad():
if params['recog_beam_width'] == 1:
out = dec.greedy(eouts, elens, max_len_ratio=params['recog_max_len_ratio'],
idx2token=idx2token, exclude_eos=False,
refs_id=ys, utt_ids=None, speakers=None)
assert len(out) == 2
hyps, aws = out
assert isinstance(hyps, list)
assert len(hyps) == bs
assert aws is None
else:
out = dec.beam_search(eouts, elens, params, idx2token,
lm, lm_second, lm_second_bwd, ctc_log_probs,
nbest=params['nbest'], exclude_eos=False,
refs_id=None, utt_ids=None, speakers=None,
ensmbl_eouts=None, ensmbl_elens=None, ensmbl_decs=[])
assert len(out) == 3
nbest_hyps, aws, scores = out
assert isinstance(nbest_hyps, list)
assert len(nbest_hyps) == bs
assert len(nbest_hyps[0]) == params['nbest']
assert aws is None
assert scores is None
| 30.580189 | 90 | 0.592627 |
45d8bf0f51af468f019d852a9425b5aa807b3131 | 118 | py | Python | oldpost/urls.py | AmanSangwan127306/cricket-match-tip-website | b74fb2473312a9a87ac7b00e5bdf6d6bdbf1649b | [
"MIT"
] | null | null | null | oldpost/urls.py | AmanSangwan127306/cricket-match-tip-website | b74fb2473312a9a87ac7b00e5bdf6d6bdbf1649b | [
"MIT"
] | null | null | null | oldpost/urls.py | AmanSangwan127306/cricket-match-tip-website | b74fb2473312a9a87ac7b00e5bdf6d6bdbf1649b | [
"MIT"
] | null | null | null | from django.urls import path , include
from . import views
urlpatterns=[
path("",views.oldpost,name="oldpost")
]
| 16.857143 | 41 | 0.711864 |
d11ef76451e6da0a63b331ffdd029027894b338c | 2,002 | py | Python | mykoreanromanizer/syllable.py | teresagarcia/my-korean-romanizer | 0bc9813ee4b5ceb2739b58c421cedcd071fc4006 | [
"MIT"
] | 2 | 2021-08-01T13:50:51.000Z | 2022-03-30T11:21:53.000Z | mykoreanromanizer/syllable.py | teresagarcia/my-korean-romanizer | 0bc9813ee4b5ceb2739b58c421cedcd071fc4006 | [
"MIT"
] | 4 | 2020-06-28T10:32:35.000Z | 2020-10-27T10:05:20.000Z | mykoreanromanizer/syllable.py | teresagarcia/my-korean-romanizer | 0bc9813ee4b5ceb2739b58c421cedcd071fc4006 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from jamo import h2j, j2hcj
class Syllable(object):
def __init__(self, char):
self.char = char
self.initial = ''
self.medial = ''
self.final = ''
self.double_final = ''
self.separate()
def separate(self):
result = list(j2hcj(h2j(self.char)))
self.initial = result[0]
self.medial = result[1]
self.final = result[2] if(len(result) > 2) else ''
return result
def search_key(self):
functions_search = {
"before_i": self.starts_with_i,
"before_wi" : self.starts_with_wi,
"before_vowel" : self.starts_with_vowel,
"before_n" : self.initial_is_n,
"before_s" : self.initial_is_s,
"before_h" : self.initial_is_h
}
equiv_key = []
for key in functions_search:
if(functions_search[key]()):
equiv_key.append(key)
return equiv_key
def starts_with_vowel(self):
return self.initial == 'ㅇ'
def starts_with_i(self):
return (self.starts_with_vowel() and self.medial_is_i())
def starts_with_wi(self):
return (self.starts_with_vowel() and self.medial_is_wi())
def medial_is_i_or_wi(self):
return self.medial_is_i() or self.medial_is_wi()
def medial_is_i(self):
return self.medial == 'ㅣ'
def medial_is_wi(self):
return self.medial == 'ㅟ'
def initial_is_s(self):
return self.initial == 'ㅅ'
def initial_is_n(self):
return self.initial == 'ㄴ'
def initial_is_d(self):
return self.initial == 'ㄷ'
def final_is_ps(self):
return self.final == 'ㅄ'
def final_is_ss(self):
return self.final == 'ㅆ'
def final_is_h(self):
return self.final == 'ㅎ'
def initial_is_g(self):
return self.initial == 'ㄱ'
def initial_is_h(self):
return self.initial == 'ㅎ'
| 21.76087 | 65 | 0.564935 |
6675a2daabdd07c02385bb3ecda979c44b90e39b | 134,715 | py | Python | test/cpython/test_datetime.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:28:45.000Z | 2020-02-06T14:28:45.000Z | test/cpython/test_datetime.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/cpython/test_datetime.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:29:00.000Z | 2020-02-06T14:29:00.000Z | # expected: fail
"""Test date/time type.
See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases
"""
from __future__ import division
import sys
import pickle
import cPickle
import unittest
from test import test_support
from datetime import MINYEAR, MAXYEAR
from datetime import timedelta
from datetime import tzinfo
from datetime import time
from datetime import date, datetime
# Every combination of pickle implementation (pickle vs cPickle) on each
# side of a dump/load round-trip, for pickle protocols 0-2.
pickle_choices = [(pickler, unpickler, proto)
                  for pickler in pickle, cPickle
                  for unpickler in pickle, cPickle
                  for proto in range(3)]
assert len(pickle_choices) == 2*2*3

# An arbitrary collection of objects of non-datetime types, for testing
# mixed-type comparisons.
OTHERSTUFF = (10, 10L, 34.5, "abc", {}, [], ())
#############################################################################
# module tests
class TestModule(unittest.TestCase):
    """Sanity checks on module-level datetime constants."""

    def test_constants(self):
        import datetime
        # The supported year range is fixed by the documentation.
        for name, expected in (("MINYEAR", 1), ("MAXYEAR", 9999)):
            self.assertEqual(getattr(datetime, name), expected)
#############################################################################
# tzinfo tests
class FixedOffset(tzinfo):
    """tzinfo whose utcoffset, tzname and dst never vary.

    Plain int arguments are understood as a number of minutes; timedelta
    arguments are stored as-is.
    """

    def __init__(self, offset, name, dstoffset=42):
        self.__offset = (timedelta(minutes=offset)
                         if isinstance(offset, int) else offset)
        self.__name = name
        self.__dstoffset = (timedelta(minutes=dstoffset)
                            if isinstance(dstoffset, int) else dstoffset)

    def __repr__(self):
        # Lower-cased name doubles as the repr in the tests.
        return self.__name.lower()

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        return self.__dstoffset
class PicklableFixedOffset(FixedOffset):
    """FixedOffset variant whose __init__ has defaults, so instances can
    be reconstructed by pickle."""

    def __init__(self, offset=None, name=None, dstoffset=None):
        super(PicklableFixedOffset, self).__init__(offset, name, dstoffset)
class TestTZInfo(unittest.TestCase):
    """Behaviour of the abstract tzinfo base class and its subclasses."""

    def test_non_abstractness(self):
        # In order to allow subclasses to get pickled, the C implementation
        # wasn't able to get away with having __init__ raise
        # NotImplementedError.
        useless = tzinfo()
        dt = datetime.max
        self.assertRaises(NotImplementedError, useless.tzname, dt)
        self.assertRaises(NotImplementedError, useless.utcoffset, dt)
        self.assertRaises(NotImplementedError, useless.dst, dt)

    def test_subclass_must_override(self):
        # A subclass that stores state but overrides none of the three
        # tzinfo methods still inherits the NotImplementedError behaviour.
        class NotEnough(tzinfo):
            def __init__(self, offset, name):
                self.__offset = offset
                self.__name = name
        self.assertTrue(issubclass(NotEnough, tzinfo))
        ne = NotEnough(3, "NotByALongShot")
        self.assertIsInstance(ne, tzinfo)
        dt = datetime.now()
        self.assertRaises(NotImplementedError, ne.tzname, dt)
        self.assertRaises(NotImplementedError, ne.utcoffset, dt)
        self.assertRaises(NotImplementedError, ne.dst, dt)

    def test_normal(self):
        # FixedOffset supplies constant values for all three methods,
        # regardless of the dt argument (including None).
        fo = FixedOffset(3, "Three")
        self.assertIsInstance(fo, tzinfo)
        for dt in datetime.now(), None:
            self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3))
            self.assertEqual(fo.tzname(dt), "Three")
            self.assertEqual(fo.dst(dt), timedelta(minutes=42))

    def test_pickling_base(self):
        # There's no point to pickling tzinfo objects on their own (they
        # carry no data), but they need to be picklable anyway else
        # concrete subclasses can't be pickled.
        orig = tzinfo.__new__(tzinfo)
        self.assertIs(type(orig), tzinfo)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertIs(type(derived), tzinfo)

    def test_pickling_subclass(self):
        # Make sure we can pickle/unpickle an instance of a subclass.
        offset = timedelta(minutes=-300)
        orig = PicklableFixedOffset(offset, 'cookie')
        self.assertIsInstance(orig, tzinfo)
        self.assertTrue(type(orig) is PicklableFixedOffset)
        self.assertEqual(orig.utcoffset(None), offset)
        self.assertEqual(orig.tzname(None), 'cookie')
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertIsInstance(derived, tzinfo)
            self.assertTrue(type(derived) is PicklableFixedOffset)
            self.assertEqual(derived.utcoffset(None), offset)
            self.assertEqual(derived.tzname(None), 'cookie')
#############################################################################
# Base class for testing a particular aspect of timedelta, time, date and
# datetime comparisons.
class HarmlessMixedComparison:
    # Test that __eq__ and __ne__ don't complain for mixed-type comparisons.

    # Subclasses must define 'theclass', and theclass(1, 1, 1) must be a
    # legit constructor.

    def test_harmless_mixed_comparison(self):
        # Equality against unrelated types must answer False/True, not raise.
        me = self.theclass(1, 1, 1)
        self.assertFalse(me == ())
        self.assertTrue(me != ())
        self.assertFalse(() == me)
        self.assertTrue(() != me)
        # Membership uses == under the hood and must work in mixed lists too.
        self.assertIn(me, [1, 20L, [], me])
        self.assertIn([], [me, 1, 20L, []])

    def test_harmful_mixed_comparison(self):
        # Ordering against unrelated types must raise TypeError, in both
        # operand orders and via cmp().
        me = self.theclass(1, 1, 1)
        self.assertRaises(TypeError, lambda: me < ())
        self.assertRaises(TypeError, lambda: me <= ())
        self.assertRaises(TypeError, lambda: me > ())
        self.assertRaises(TypeError, lambda: me >= ())
        self.assertRaises(TypeError, lambda: () < me)
        self.assertRaises(TypeError, lambda: () <= me)
        self.assertRaises(TypeError, lambda: () > me)
        self.assertRaises(TypeError, lambda: () >= me)
        self.assertRaises(TypeError, cmp, (), me)
        self.assertRaises(TypeError, cmp, me, ())
#############################################################################
# timedelta tests
class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
theclass = timedelta
    def test_constructor(self):
        """Positional, keyword and float arguments all normalise equally."""
        eq = self.assertEqual
        td = timedelta
        # Check keyword args to constructor
        eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0,
                    milliseconds=0, microseconds=0))
        eq(td(1), td(days=1))
        eq(td(0, 1), td(seconds=1))
        eq(td(0, 0, 1), td(microseconds=1))
        eq(td(weeks=1), td(days=7))
        eq(td(days=1), td(hours=24))
        eq(td(hours=1), td(minutes=60))
        eq(td(minutes=1), td(seconds=60))
        eq(td(seconds=1), td(milliseconds=1000))
        eq(td(milliseconds=1), td(microseconds=1000))
        # Check float args to constructor
        eq(td(weeks=1.0/7), td(days=1))
        eq(td(days=1.0/24), td(hours=1))
        eq(td(hours=1.0/60), td(minutes=1))
        eq(td(minutes=1.0/60), td(seconds=1))
        eq(td(seconds=0.001), td(milliseconds=1))
        eq(td(milliseconds=0.001), td(microseconds=1))
    def test_computations(self):
        """Exercise timedelta arithmetic: +, -, unary ops, abs, * and //."""
        eq = self.assertEqual
        td = timedelta
        a = td(7) # One week
        b = td(0, 60) # One minute
        c = td(0, 0, 1000) # One millisecond
        eq(a+b+c, td(7, 60, 1000))
        eq(a-b, td(6, 24*3600 - 60))
        eq(-a, td(-7))
        eq(+a, td(7))
        eq(-b, td(-1, 24*3600 - 60))
        eq(-c, td(-1, 24*3600 - 1, 999000))
        eq(abs(a), a)
        eq(abs(-a), a)
        eq(td(6, 24*3600), a)
        eq(td(0, 0, 60*1000000), b)
        eq(a*10, td(70))
        eq(a*10, 10*a)
        eq(a*10L, 10*a)
        eq(b*10, td(0, 600))
        eq(10*b, td(0, 600))
        eq(b*10L, td(0, 600))
        eq(c*10, td(0, 0, 10000))
        eq(10*c, td(0, 0, 10000))
        eq(c*10L, td(0, 0, 10000))
        eq(a*-1, -a)
        eq(b*-2, -b-b)
        eq(c*-2, -c+-c)
        eq(b*(60*24), (b*60)*24)
        eq(b*(60*24), (60*b)*24)
        eq(c*1000, td(0, 1))
        eq(1000*c, td(0, 1))
        eq(a//7, td(1))
        eq(b//10, td(0, 6))
        eq(c//1000, td(0, 0, 1))
        eq(a//10, td(0, 7*24*360))
        eq(a//3600000, td(0, 0, 7*24*1000))
        # Issue #11576
        eq(td(999999999, 86399, 999999) - td(999999999, 86399, 999998),
           td(0, 0, 1))
        eq(td(999999999, 1, 1) - td(999999999, 1, 0),
           td(0, 0, 1))
    def test_disallowed_computations(self):
        """Mixing timedelta with bare numbers in +,-,*,/,// must raise."""
        a = timedelta(42)
        # Add/sub ints, longs, floats should be illegal
        for i in 1, 1L, 1.0:
            self.assertRaises(TypeError, lambda: a+i)
            self.assertRaises(TypeError, lambda: a-i)
            self.assertRaises(TypeError, lambda: i+a)
            self.assertRaises(TypeError, lambda: i-a)
        # Mul/div by float isn't supported.
        x = 2.3
        self.assertRaises(TypeError, lambda: a*x)
        self.assertRaises(TypeError, lambda: x*a)
        self.assertRaises(TypeError, lambda: a/x)
        self.assertRaises(TypeError, lambda: x/a)
        self.assertRaises(TypeError, lambda: a // x)
        self.assertRaises(TypeError, lambda: x // a)
        # Division of int by timedelta doesn't make sense.
        # Division by zero doesn't make sense.
        for zero in 0, 0L:
            self.assertRaises(TypeError, lambda: zero // a)
            self.assertRaises(ZeroDivisionError, lambda: a // zero)
    def test_basic_attributes(self):
        """days/seconds/microseconds round-trip through the constructor."""
        days, seconds, us = 1, 7, 31
        td = timedelta(days, seconds, us)
        self.assertEqual(td.days, days)
        self.assertEqual(td.seconds, seconds)
        self.assertEqual(td.microseconds, us)
    def test_total_seconds(self):
        """total_seconds() agrees with manual microsecond arithmetic."""
        td = timedelta(days=365)
        self.assertEqual(td.total_seconds(), 31536000.0)
        for total_seconds in [123456.789012, -123456.789012, 0.123456, 0, 1e6]:
            td = timedelta(seconds=total_seconds)
            self.assertEqual(td.total_seconds(), total_seconds)
        # Issue8644: Test that td.total_seconds() has the same
        # accuracy as td / timedelta(seconds=1).
        for ms in [-1, -2, -123]:
            td = timedelta(microseconds=ms)
            self.assertEqual(td.total_seconds(),
                             ((24*3600*td.days + td.seconds)*10**6
                              + td.microseconds)/10**6)
    def test_carries(self):
        """Overflow in individual fields carries correctly across units."""
        # All the fields below cancel out except for a single microsecond.
        t1 = timedelta(days=100,
                       weeks=-7,
                       hours=-24*(100-49),
                       minutes=-3,
                       seconds=12,
                       microseconds=(3*60 - 12) * 1e6 + 1)
        t2 = timedelta(microseconds=1)
        self.assertEqual(t1, t2)
    def test_hash_equality(self):
        """Equal timedeltas hash equal and collide as dict keys."""
        # Fields cancel out to exactly zero, so t1 == timedelta().
        t1 = timedelta(days=100,
                       weeks=-7,
                       hours=-24*(100-49),
                       minutes=-3,
                       seconds=12,
                       microseconds=(3*60 - 12) * 1000000)
        t2 = timedelta()
        self.assertEqual(hash(t1), hash(t2))
        t1 += timedelta(weeks=7)
        t2 += timedelta(days=7*7)
        self.assertEqual(t1, t2)
        self.assertEqual(hash(t1), hash(t2))
        d = {t1: 1}
        d[t2] = 2
        self.assertEqual(len(d), 1)
        self.assertEqual(d[t1], 2)
    def test_pickling(self):
        """timedelta survives dump/load on every pickler/protocol combination."""
        args = 12, 34, 56
        orig = timedelta(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)
    def test_compare(self):
        """All six comparisons plus cmp(), including mixed-type operands."""
        t1 = timedelta(2, 3, 4)
        t2 = timedelta(2, 3, 4)
        self.assertTrue(t1 == t2)
        self.assertTrue(t1 <= t2)
        self.assertTrue(t1 >= t2)
        self.assertFalse(t1 != t2)
        self.assertFalse(t1 < t2)
        self.assertFalse(t1 > t2)
        self.assertEqual(cmp(t1, t2), 0)
        self.assertEqual(cmp(t2, t1), 0)
        for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
            t2 = timedelta(*args) # this is larger than t1
            self.assertTrue(t1 < t2)
            self.assertTrue(t2 > t1)
            self.assertTrue(t1 <= t2)
            self.assertTrue(t2 >= t1)
            self.assertTrue(t1 != t2)
            self.assertTrue(t2 != t1)
            self.assertFalse(t1 == t2)
            self.assertFalse(t2 == t1)
            self.assertFalse(t1 > t2)
            self.assertFalse(t2 < t1)
            self.assertFalse(t1 >= t2)
            self.assertFalse(t2 <= t1)
            self.assertEqual(cmp(t1, t2), -1)
            self.assertEqual(cmp(t2, t1), 1)
        # Equality with unrelated types answers False; ordering raises.
        for badarg in OTHERSTUFF:
            self.assertEqual(t1 == badarg, False)
            self.assertEqual(t1 != badarg, True)
            self.assertEqual(badarg == t1, False)
            self.assertEqual(badarg != t1, True)
            self.assertRaises(TypeError, lambda: t1 <= badarg)
            self.assertRaises(TypeError, lambda: t1 < badarg)
            self.assertRaises(TypeError, lambda: t1 > badarg)
            self.assertRaises(TypeError, lambda: t1 >= badarg)
            self.assertRaises(TypeError, lambda: badarg <= t1)
            self.assertRaises(TypeError, lambda: badarg < t1)
            self.assertRaises(TypeError, lambda: badarg > t1)
            self.assertRaises(TypeError, lambda: badarg >= t1)
    def test_str(self):
        """str() formatting: day pluralisation, H:MM:SS and microseconds."""
        td = timedelta
        eq = self.assertEqual
        eq(str(td(1)), "1 day, 0:00:00")
        eq(str(td(-1)), "-1 day, 0:00:00")
        eq(str(td(2)), "2 days, 0:00:00")
        eq(str(td(-2)), "-2 days, 0:00:00")
        eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59")
        eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04")
        eq(str(td(weeks=-30, hours=23, minutes=12, seconds=34)),
           "-210 days, 23:12:34")
        eq(str(td(milliseconds=1)), "0:00:00.001000")
        eq(str(td(microseconds=3)), "0:00:00.000003")
        eq(str(td(days=999999999, hours=23, minutes=59, seconds=59,
                  microseconds=999999)),
           "999999999 days, 23:59:59.999999")
    def test_roundtrip(self):
        """repr() of a timedelta eval()s back to an equal timedelta, and the
        (days, seconds, microseconds) triple reconstructs it exactly."""
        for td in (timedelta(days=999999999, hours=23, minutes=59,
                             seconds=59, microseconds=999999),
                   timedelta(days=-999999999),
                   timedelta(days=1, seconds=2, microseconds=3)):
            # Verify td -> string -> td identity.
            s = repr(td)
            self.assertTrue(s.startswith('datetime.'))
            s = s[9:]   # strip the "datetime." module prefix before eval
            td2 = eval(s)
            self.assertEqual(td, td2)
            # Verify identity via reconstructing from pieces.
            td2 = timedelta(td.days, td.seconds, td.microseconds)
            self.assertEqual(td, td2)
def test_resolution_info(self):
self.assertIsInstance(timedelta.min, timedelta)
self.assertIsInstance(timedelta.max, timedelta)
self.assertIsInstance(timedelta.resolution, timedelta)
self.assertTrue(timedelta.max > timedelta.min)
self.assertEqual(timedelta.min, timedelta(-999999999))
self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1))
self.assertEqual(timedelta.resolution, timedelta(0, 0, 1))
    def test_overflow(self):
        """Arithmetic stepping one resolution unit past timedelta.min/max
        raises OverflowError; stepping back inside the range does not."""
        tiny = timedelta.resolution
        td = timedelta.min + tiny
        td -= tiny  # no problem
        self.assertRaises(OverflowError, td.__sub__, tiny)
        self.assertRaises(OverflowError, td.__add__, -tiny)
        td = timedelta.max - tiny
        td += tiny  # no problem
        self.assertRaises(OverflowError, td.__add__, tiny)
        self.assertRaises(OverflowError, td.__sub__, -tiny)
        # -timedelta.max overflows because |min| == |max| + resolution.
        self.assertRaises(OverflowError, lambda: -timedelta.max)
    def test_microsecond_rounding(self):
        """Fractional constructor arguments round to the nearest microsecond,
        with contributions from multiple fields summed before rounding."""
        td = timedelta
        eq = self.assertEqual
        # Single-field rounding.
        eq(td(milliseconds=0.4/1000), td(0))    # rounds to 0
        eq(td(milliseconds=-0.4/1000), td(0))    # rounds to 0
        eq(td(milliseconds=0.6/1000), td(microseconds=1))
        eq(td(milliseconds=-0.6/1000), td(microseconds=-1))
        # Rounding due to contributions from more than one field.
        # Each field alone contributes less than half a microsecond (so
        # rounds to 0), but their sum crosses the rounding threshold.
        us_per_hour = 3600e6
        us_per_day = us_per_hour * 24
        eq(td(days=.4/us_per_day), td(0))
        eq(td(hours=.2/us_per_hour), td(0))
        eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1))
        eq(td(days=-.4/us_per_day), td(0))
        eq(td(hours=-.2/us_per_hour), td(0))
        eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1))
def test_massive_normalization(self):
td = timedelta(microseconds=-1)
self.assertEqual((td.days, td.seconds, td.microseconds),
(-1, 24*3600-1, 999999))
def test_bool(self):
self.assertTrue(timedelta(1))
self.assertTrue(timedelta(0, 1))
self.assertTrue(timedelta(0, 0, 1))
self.assertTrue(timedelta(microseconds=1))
self.assertFalse(timedelta(0))
    def test_subclass_timedelta(self):
        """The constructor of a timedelta subclass returns the subclass, but
        arithmetic between subclass instances returns plain timedelta."""
        class T(timedelta):
            @staticmethod
            def from_td(td):
                # Alternate constructor: rebuild a T from any timedelta.
                return T(td.days, td.seconds, td.microseconds)

            def as_hours(self):
                # Subclass-only behavior, used to prove the type survived.
                sum = (self.days * 24 +
                       self.seconds / 3600.0 +
                       self.microseconds / 3600e6)
                return round(sum)

        t1 = T(days=1)
        self.assertIs(type(t1), T)
        self.assertEqual(t1.as_hours(), 24)

        t2 = T(days=-1, seconds=-3600)
        self.assertIs(type(t2), T)
        self.assertEqual(t2.as_hours(), -25)

        # Addition falls back to the base class.
        t3 = t1 + t2
        self.assertIs(type(t3), timedelta)
        t4 = T.from_td(t3)
        self.assertIs(type(t4), T)
        self.assertEqual(t3.days, t4.days)
        self.assertEqual(t3.seconds, t4.seconds)
        self.assertEqual(t3.microseconds, t4.microseconds)
        self.assertEqual(str(t3), str(t4))
        self.assertEqual(t4.as_hours(), -1)
#############################################################################
# date tests
class TestDateOnly(unittest.TestCase):
    # Date-specific tests: these would fail if also run on datetime objects,
    # so this class is deliberately not reused as a datetime mixin.

    def test_delta_non_days_ignored(self):
        """date +/- timedelta must use only the delta's day component."""
        start = date(2000, 1, 2)
        delta = timedelta(days=1, hours=2, minutes=3, seconds=4,
                          microseconds=5)
        days_only = timedelta(delta.days)
        self.assertEqual(days_only, timedelta(1))

        self.assertEqual(start + delta, start + days_only)
        self.assertEqual(delta + start, start + days_only)
        self.assertEqual(start - delta, start - days_only)

        # Same property for a negative delta, whose normalized .days is -2.
        delta = -delta
        days_only = timedelta(delta.days)
        self.assertEqual(days_only, timedelta(-2))

        self.assertEqual(start + delta, start + days_only)
        self.assertEqual(delta + start, start + days_only)
        self.assertEqual(start - delta, start - days_only)
class SubclassDate(date):
    # Trivial date subclass used by the pickling tests to verify that a
    # subclass instance survives a pickle round trip.
    sub_var = 1
class TestDate(HarmlessMixedComparison, unittest.TestCase):
    # Tests here should pass for both dates and datetimes, except for a
    # few tests that TestDateTime overrides.
    # The class under test; TestDateTime subclasses this and overrides
    # theclass with datetime so the whole suite runs against both types.
    theclass = date
    def test_basic_attributes(self):
        """Constructor arguments map onto the year/month/day attributes."""
        dt = self.theclass(2002, 3, 1)
        self.assertEqual(dt.year, 2002)
        self.assertEqual(dt.month, 3)
        self.assertEqual(dt.day, 1)
    def test_roundtrip(self):
        """repr() eval()s back to an equal object, and the year/month/day
        triple reconstructs it exactly."""
        for dt in (self.theclass(1, 2, 3),
                   self.theclass.today()):
            # Verify dt -> string -> date identity.
            s = repr(dt)
            self.assertTrue(s.startswith('datetime.'))
            s = s[9:]   # strip the "datetime." module prefix before eval
            dt2 = eval(s)
            self.assertEqual(dt, dt2)
            # Verify identity via reconstructing from pieces.
            dt2 = self.theclass(dt.year, dt.month, dt.day)
            self.assertEqual(dt, dt2)
    def test_ordinal_conversions(self):
        """toordinal()/fromordinal() round-trip: known fixed values, year
        boundaries across the full supported range, and every day of one
        leap and one non-leap year."""
        # Check some fixed values.
        for y, m, d, n in [(1, 1, 1, 1),      # calendar origin
                           (1, 12, 31, 365),
                           (2, 1, 1, 366),
                           # first example from "Calendrical Calculations"
                           (1945, 11, 12, 710347)]:
            d = self.theclass(y, m, d)
            self.assertEqual(n, d.toordinal())
            fromord = self.theclass.fromordinal(n)
            self.assertEqual(d, fromord)
            if hasattr(fromord, "hour"):
            # if we're checking something fancier than a date, verify
            # the extra fields have been zeroed out
                self.assertEqual(fromord.hour, 0)
                self.assertEqual(fromord.minute, 0)
                self.assertEqual(fromord.second, 0)
                self.assertEqual(fromord.microsecond, 0)

        # Check first and last days of year spottily across the whole
        # range of years supported.
        for year in xrange(MINYEAR, MAXYEAR+1, 7):
            # Verify (year, 1, 1) -> ordinal -> y, m, d is identity.
            d = self.theclass(year, 1, 1)
            n = d.toordinal()
            d2 = self.theclass.fromordinal(n)
            self.assertEqual(d, d2)
            # Verify that moving back a day gets to the end of year-1.
            if year > 1:
                d = self.theclass.fromordinal(n-1)
                d2 = self.theclass(year-1, 12, 31)
                self.assertEqual(d, d2)
                self.assertEqual(d2.toordinal(), n-1)

        # Test every day in a leap-year and a non-leap year.
        dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        for year, isleap in (2000, True), (2002, False):
            n = self.theclass(year, 1, 1).toordinal()
            for month, maxday in zip(range(1, 13), dim):
                if month == 2 and isleap:
                    maxday += 1
                for day in range(1, maxday+1):
                    d = self.theclass(year, month, day)
                    self.assertEqual(d.toordinal(), n)
                    self.assertEqual(d, self.theclass.fromordinal(n))
                    n += 1
def test_extreme_ordinals(self):
a = self.theclass.min
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))
b = a + timedelta(days=1)
self.assertEqual(b.toordinal(), aord + 1)
self.assertEqual(b, self.theclass.fromordinal(aord + 1))
a = self.theclass.max
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))
b = a - timedelta(days=1)
self.assertEqual(b.toordinal(), aord - 1)
self.assertEqual(b, self.theclass.fromordinal(aord - 1))
    def test_bad_constructor_arguments(self):
        """Out-of-range year, month, or day raises ValueError; boundary
        values (including leap-day rules) are accepted."""
        # bad years
        self.theclass(MINYEAR, 1, 1)  # no exception
        self.theclass(MAXYEAR, 1, 1)  # no exception
        self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
        self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
        # bad months
        self.theclass(2000, 1, 1)    # no exception
        self.theclass(2000, 12, 1)   # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
        self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
        # bad days: Feb 29 exists only in leap years (divisible by 4,
        # except century years not divisible by 400).
        self.theclass(2000, 2, 29)   # no exception
        self.theclass(2004, 2, 29)   # no exception
        self.theclass(2400, 2, 29)   # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
        self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
        self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
        self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
    def test_hash_equality(self):
        """Equal dates hash equal and collapse to one dict key."""
        d = self.theclass(2000, 12, 31)
        # same thing
        e = self.theclass(2000, 12, 31)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2   # overwrites the entry keyed by d
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)

        d = self.theclass(2001, 1, 1)
        # same thing
        e = self.theclass(2001, 1, 1)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2   # overwrites the entry keyed by d
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)
    def test_computations(self):
        """date/timedelta arithmetic: subtraction of dates, addition and
        subtraction of deltas (both operand orders), and TypeErrors for
        the senseless combinations."""
        a = self.theclass(2002, 1, 31)
        b = self.theclass(1956, 1, 31)

        diff = a-b
        # 46 years plus one extra day per leap year in [1956, 2002).
        self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
        self.assertEqual(diff.seconds, 0)
        self.assertEqual(diff.microseconds, 0)

        day = timedelta(1)
        week = timedelta(7)
        a = self.theclass(2002, 3, 2)
        self.assertEqual(a + day, self.theclass(2002, 3, 3))
        self.assertEqual(day + a, self.theclass(2002, 3, 3))
        self.assertEqual(a - day, self.theclass(2002, 3, 1))
        self.assertEqual(-day + a, self.theclass(2002, 3, 1))
        self.assertEqual(a + week, self.theclass(2002, 3, 9))
        self.assertEqual(a - week, self.theclass(2002, 2, 23))
        self.assertEqual(a + 52*week, self.theclass(2003, 3, 1))
        self.assertEqual(a - 52*week, self.theclass(2001, 3, 3))
        self.assertEqual((a + week) - a, week)
        self.assertEqual((a + day) - a, day)
        self.assertEqual((a - week) - a, -week)
        self.assertEqual((a - day) - a, -day)
        self.assertEqual(a - (a + week), -week)
        self.assertEqual(a - (a + day), -day)
        self.assertEqual(a - (a - week), week)
        self.assertEqual(a - (a - day), day)

        # Add/sub ints, longs, floats should be illegal
        for i in 1, 1L, 1.0:
            self.assertRaises(TypeError, lambda: a+i)
            self.assertRaises(TypeError, lambda: a-i)
            self.assertRaises(TypeError, lambda: i+a)
            self.assertRaises(TypeError, lambda: i-a)

        # delta - date is senseless.
        self.assertRaises(TypeError, lambda: day - a)
        # mixing date and (delta or date) via * or // is senseless
        self.assertRaises(TypeError, lambda: day * a)
        self.assertRaises(TypeError, lambda: a * day)
        self.assertRaises(TypeError, lambda: day // a)
        self.assertRaises(TypeError, lambda: a // day)
        self.assertRaises(TypeError, lambda: a * a)
        self.assertRaises(TypeError, lambda: a // a)
        # date + date is senseless
        self.assertRaises(TypeError, lambda: a + a)
    def test_overflow(self):
        """Adding/subtracting a delta that crosses theclass.min/max raises
        OverflowError; staying inside the range does not."""
        tiny = self.theclass.resolution

        for delta in [tiny, timedelta(1), timedelta(2)]:
            dt = self.theclass.min + delta
            dt -= delta  # no problem
            self.assertRaises(OverflowError, dt.__sub__, delta)
            self.assertRaises(OverflowError, dt.__add__, -delta)

            dt = self.theclass.max - delta
            dt += delta  # no problem
            self.assertRaises(OverflowError, dt.__add__, delta)
            self.assertRaises(OverflowError, dt.__sub__, -delta)
    def test_fromtimestamp(self):
        """fromtimestamp() of a mktime()-built timestamp recovers the date."""
        import time

        # Try an arbitrary fixed value.
        year, month, day = 1999, 9, 19
        # tm_isdst=-1 lets the platform decide DST for this local time.
        ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1))
        d = self.theclass.fromtimestamp(ts)
        self.assertEqual(d.year, year)
        self.assertEqual(d.month, month)
        self.assertEqual(d.day, day)
    def test_insane_fromtimestamp(self):
        """Absurdly large-magnitude timestamps raise ValueError."""
        # It's possible that some platform maps time_t to double,
        # and that this test will fail there.  This test should
        # exempt such platforms (provided they return reasonable
        # results!).
        for insane in -1e200, 1e200:
            self.assertRaises(ValueError, self.theclass.fromtimestamp,
                              insane)
    def test_today(self):
        """today() agrees with fromtimestamp(time.time()), retrying around
        clock-resolution and midnight races."""
        import time

        # We claim that today() is like fromtimestamp(time.time()), so
        # prove it.
        for dummy in range(3):
            today = self.theclass.today()
            ts = time.time()
            todayagain = self.theclass.fromtimestamp(ts)
            if today == todayagain:
                break
            # There are several legit reasons that could fail:
            # 1. It recently became midnight, between the today() and the
            #    time() calls.
            # 2. The platform time() has such fine resolution that we'll
            #    never get the same value twice.
            # 3. The platform time() has poor resolution, and we just
            #    happened to call today() right before a resolution quantum
            #    boundary.
            # 4. The system clock got fiddled between calls.
            # In any case, wait a little while and try again.
            time.sleep(0.1)

        # It worked or it didn't.  If it didn't, assume it's reason #2, and
        # let the test pass if they're within half a second of each other.
        if today != todayagain:
            self.assertAlmostEqual(todayagain, today,
                                   delta=timedelta(seconds=0.5))
def test_weekday(self):
for i in range(7):
# March 4, 2002 is a Monday
self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i)
self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1)
# January 2, 1956 is a Monday
self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i)
self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1)
def test_isocalendar(self):
# Check examples from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
for i in range(7):
d = self.theclass(2003, 12, 22+i)
self.assertEqual(d.isocalendar(), (2003, 52, i+1))
d = self.theclass(2003, 12, 29) + timedelta(i)
self.assertEqual(d.isocalendar(), (2004, 1, i+1))
d = self.theclass(2004, 1, 5+i)
self.assertEqual(d.isocalendar(), (2004, 2, i+1))
d = self.theclass(2009, 12, 21+i)
self.assertEqual(d.isocalendar(), (2009, 52, i+1))
d = self.theclass(2009, 12, 28) + timedelta(i)
self.assertEqual(d.isocalendar(), (2009, 53, i+1))
d = self.theclass(2010, 1, 4+i)
self.assertEqual(d.isocalendar(), (2010, 1, i+1))
def test_iso_long_years(self):
# Calculate long ISO years and compare to table from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
ISO_LONG_YEARS_TABLE = """
4 32 60 88
9 37 65 93
15 43 71 99
20 48 76
26 54 82
105 133 161 189
111 139 167 195
116 144 172
122 150 178
128 156 184
201 229 257 285
207 235 263 291
212 240 268 296
218 246 274
224 252 280
303 331 359 387
308 336 364 392
314 342 370 398
320 348 376
325 353 381
"""
iso_long_years = map(int, ISO_LONG_YEARS_TABLE.split())
iso_long_years.sort()
L = []
for i in range(400):
d = self.theclass(2000+i, 12, 31)
d1 = self.theclass(1600+i, 12, 31)
self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:])
if d.isocalendar()[1] == 53:
L.append(i)
self.assertEqual(L, iso_long_years)
def test_isoformat(self):
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02")
def test_ctime(self):
t = self.theclass(2002, 3, 2)
self.assertEqual(t.ctime(), "Sat Mar 2 00:00:00 2002")
def test_strftime(self):
t = self.theclass(2005, 3, 2)
self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05")
self.assertEqual(t.strftime(""), "") # SF bug #761337
self.assertEqual(t.strftime('x'*1000), 'x'*1000) # SF bug #1556784
self.assertRaises(TypeError, t.strftime) # needs an arg
self.assertRaises(TypeError, t.strftime, "one", "two") # too many args
self.assertRaises(TypeError, t.strftime, 42) # arg wrong type
# test that unicode input is allowed (issue 2782)
self.assertEqual(t.strftime(u"%m"), "03")
# A naive object replaces %z and %Z w/ empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
#make sure that invalid format specifiers are handled correctly
#self.assertRaises(ValueError, t.strftime, "%e")
#self.assertRaises(ValueError, t.strftime, "%")
#self.assertRaises(ValueError, t.strftime, "%#")
#oh well, some systems just ignore those invalid ones.
#at least, excercise them to make sure that no crashes
#are generated
for f in ["%e", "%", "%#"]:
try:
t.strftime(f)
except ValueError:
pass
#check that this standard extension works
t.strftime("%f")
def test_format(self):
dt = self.theclass(2007, 9, 10)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_resolution_info(self):
self.assertIsInstance(self.theclass.min, self.theclass)
self.assertIsInstance(self.theclass.max, self.theclass)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_extreme_timedelta(self):
big = self.theclass.max - self.theclass.min
# 3652058 days, 23 hours, 59 minutes, 59 seconds, 999999 microseconds
n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds
# n == 315537897599999999 ~= 2**58.13
justasbig = timedelta(0, 0, n)
self.assertEqual(big, justasbig)
self.assertEqual(self.theclass.min + big, self.theclass.max)
self.assertEqual(self.theclass.max - big, self.theclass.min)
def test_timetuple(self):
for i in range(7):
# January 2, 1956 is a Monday (0)
d = self.theclass(1956, 1, 2+i)
t = d.timetuple()
self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1))
# February 1, 1956 is a Wednesday (2)
d = self.theclass(1956, 2, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1))
# March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day
# of the year.
d = self.theclass(1956, 3, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1))
self.assertEqual(t.tm_year, 1956)
self.assertEqual(t.tm_mon, 3)
self.assertEqual(t.tm_mday, 1+i)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 0)
self.assertEqual(t.tm_sec, 0)
self.assertEqual(t.tm_wday, (3+i)%7)
self.assertEqual(t.tm_yday, 61+i)
self.assertEqual(t.tm_isdst, -1)
def test_pickling(self):
args = 6, 7, 23
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = self.theclass(2, 3, 4)
t2 = self.theclass(2, 3, 4)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = self.theclass(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_mixed_compare(self):
our = self.theclass(2000, 4, 5)
self.assertRaises(TypeError, cmp, our, 1)
self.assertRaises(TypeError, cmp, 1, our)
class AnotherDateTimeClass(object):
def __cmp__(self, other):
# Return "equal" so calling this can't be confused with
# compare-by-address (which never says "equal" for distinct
# objects).
return 0
__hash__ = None # Silence Py3k warning
# This still errors, because date and datetime comparison raise
# TypeError instead of NotImplemented when they don't know what to
# do, in order to stop comparison from falling back to the default
# compare-by-address.
their = AnotherDateTimeClass()
self.assertRaises(TypeError, cmp, our, their)
# Oops: The next stab raises TypeError in the C implementation,
# but not in the Python implementation of datetime. The difference
# is due to that the Python implementation defines __cmp__ but
# the C implementation defines tp_richcompare. This is more pain
# to fix than it's worth, so commenting out the test.
# self.assertEqual(cmp(their, our), 0)
# But date and datetime comparison return NotImplemented instead if the
# other object has a timetuple attr. This gives the other object a
# chance to do the comparison.
class Comparable(AnotherDateTimeClass):
def timetuple(self):
return ()
their = Comparable()
self.assertEqual(cmp(our, their), 0)
self.assertEqual(cmp(their, our), 0)
self.assertTrue(our == their)
self.assertTrue(their == our)
def test_bool(self):
# All dates are considered true.
self.assertTrue(self.theclass.min)
self.assertTrue(self.theclass.max)
def test_strftime_out_of_range(self):
# For nasty technical reasons, we can't handle years before 1900.
cls = self.theclass
self.assertEqual(cls(1900, 1, 1).strftime("%Y"), "1900")
for y in 1, 49, 51, 99, 100, 1000, 1899:
self.assertRaises(ValueError, cls(y, 1, 1).strftime, "%Y")
def test_replace(self):
cls = self.theclass
args = [1, 2, 3]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_subclass_date(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month
args = 2003, 4, 14
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7)
def test_pickling_subclass_date(self):
args = 6, 7, 23
orig = SubclassDate(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_backdoor_resistance(self):
# For fast unpickling, the constructor accepts a pickle string.
# This is a low-overhead backdoor. A user can (by intent or
# mistake) pass a string directly, which (if it's the right length)
# will get treated like a pickle, and bypass the normal sanity
# checks in the constructor. This can create insane objects.
# The constructor doesn't want to burn the time to validate all
# fields, but does check the month field. This stops, e.g.,
# datetime.datetime('1995-03-25') from yielding an insane object.
base = '1995-03-25'
if not issubclass(self.theclass, datetime):
base = base[:4]
for month_byte in '9', chr(0), chr(13), '\xff':
self.assertRaises(TypeError, self.theclass,
base[:2] + month_byte + base[3:])
for ord_byte in range(1, 13):
# This shouldn't blow up because of the month byte alone. If
# the implementation changes to do more-careful checking, it may
# blow up because other fields are insane.
self.theclass(base[:2] + chr(ord_byte) + base[3:])
#############################################################################
# datetime tests
class SubclassDatetime(datetime):
    # Trivial datetime subclass used by the pickling tests to verify that a
    # subclass instance survives a pickle round trip.
    sub_var = 1
class TestDateTime(TestDate):
    # Reruns every TestDate test against datetime, overriding the tests
    # whose expectations differ for the richer type.
    theclass = datetime

    def test_basic_attributes(self):
        """Constructor arguments map onto the date and time attributes;
        unspecified time fields default to zero."""
        dt = self.theclass(2002, 3, 1, 12, 0)
        self.assertEqual(dt.year, 2002)
        self.assertEqual(dt.month, 3)
        self.assertEqual(dt.day, 1)
        self.assertEqual(dt.hour, 12)
        self.assertEqual(dt.minute, 0)
        self.assertEqual(dt.second, 0)
        self.assertEqual(dt.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 59)
self.assertEqual(dt.second, 59)
self.assertEqual(dt.microsecond, 8000)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7),
self.theclass.now()):
# Verify dt -> string -> datetime identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.microsecond)
self.assertEqual(dt, dt2)
def test_isoformat(self):
t = self.theclass(2, 3, 2, 4, 5, 1, 123)
self.assertEqual(t.isoformat(), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat('T'), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat(' '), "0002-03-02 04:05:01.000123")
self.assertEqual(t.isoformat('\x00'), "0002-03-02\x0004:05:01.000123")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 04:05:01.000123")
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 00:00:00")
def test_format(self):
dt = self.theclass(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_more_ctime(self):
# Test fields that TestDate doesn't touch.
import time
t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
self.assertEqual(t.ctime(), "Sat Mar 2 18:03:05 2002")
# Oops! The next line fails on Win2K under MSVC 6, so it's commented
# out. The difference is that t.ctime() produces " 2" for the day,
# but platform ctime() produces "02" for the day. According to
# C99, t.ctime() is correct here.
# self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
# So test a case where that difference doesn't matter.
t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
def test_tz_independent_comparing(self):
dt1 = self.theclass(2002, 3, 1, 9, 0, 0)
dt2 = self.theclass(2002, 3, 1, 10, 0, 0)
dt3 = self.theclass(2002, 3, 1, 9, 0, 0)
self.assertEqual(dt1, dt3)
self.assertTrue(dt2 > dt3)
# Make sure comparison doesn't forget microseconds, and isn't done
# via comparing a float timestamp (an IEEE double doesn't have enough
# precision to span microsecond resolution across years 1 thru 9999,
# so comparing via timestamp necessarily calls some distinct values
# equal).
dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998)
us = timedelta(microseconds=1)
dt2 = dt1 + us
self.assertEqual(dt2 - dt1, us)
self.assertTrue(dt1 < dt2)
def test_strftime_with_bad_tzname_replace(self):
# verify ok if tzinfo.tzname().replace() returns a non-string
class MyTzInfo(FixedOffset):
def tzname(self, dt):
class MyStr(str):
def replace(self, *args):
return None
return MyStr('name')
t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
self.assertRaises(TypeError, t.strftime, '%Z')
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
# bad hours
self.theclass(2000, 1, 31, 0) # no exception
self.theclass(2000, 1, 31, 23) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24)
# bad minutes
self.theclass(2000, 1, 31, 23, 0) # no exception
self.theclass(2000, 1, 31, 23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60)
# bad seconds
self.theclass(2000, 1, 31, 23, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60)
# bad microseconds
self.theclass(2000, 1, 31, 23, 59, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59,
1000000)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31, 23, 30, 17)
e = self.theclass(2000, 12, 31, 23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1, 0, 5, 17)
e = self.theclass(2001, 1, 1, 0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
a = self.theclass(2002, 3, 2, 17, 6)
millisec = timedelta(0, 0, 1000)
hour = timedelta(0, 3600)
day = timedelta(1)
week = timedelta(7)
self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(a + 10*hour, self.theclass(2002, 3, 3, 3, 6))
self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(a - hour, a + -hour)
self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6))
self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6))
self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6))
self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6))
self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a + hour) - a, hour)
self.assertEqual((a + millisec) - a, millisec)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual((a - hour) - a, -hour)
self.assertEqual((a - millisec) - a, -millisec)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a + hour), -hour)
self.assertEqual(a - (a + millisec), -millisec)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
self.assertEqual(a - (a - hour), hour)
self.assertEqual(a - (a - millisec), millisec)
self.assertEqual(a + (week + day + hour + millisec),
self.theclass(2002, 3, 10, 18, 6, 0, 1000))
self.assertEqual(a + (week + day + hour + millisec),
(((a + week) + day) + hour) + millisec)
self.assertEqual(a - (week + day + hour + millisec),
self.theclass(2002, 2, 22, 16, 5, 59, 999000))
self.assertEqual(a - (week + day + hour + millisec),
(((a - week) - day) - hour) - millisec)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - datetime is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing datetime and (delta or datetime) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# datetime + datetime is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_pickling(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_pickling(self):
a = self.theclass(2003, 2, 7, 16, 48, 37, 444116)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(a, proto)
b = pickle.loads(s)
self.assertEqual(b.year, 2003)
self.assertEqual(b.month, 2)
self.assertEqual(b.day, 7)
def test_pickling_subclass_datetime(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = SubclassDatetime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_compare(self):
# The test_compare() inherited from TestDate covers the error cases.
# We just want to test lexicographic ordering on the members datetime
# has that date lacks.
args = [2000, 11, 29, 20, 58, 16, 999998]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
# A helper for timestamp constructor tests.
def verify_field_equality(self, expected, got):
self.assertEqual(expected.tm_year, got.year)
self.assertEqual(expected.tm_mon, got.month)
self.assertEqual(expected.tm_mday, got.day)
self.assertEqual(expected.tm_hour, got.hour)
self.assertEqual(expected.tm_min, got.minute)
self.assertEqual(expected.tm_sec, got.second)
def test_fromtimestamp(self):
import time
ts = time.time()
expected = time.localtime(ts)
got = self.theclass.fromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_utcfromtimestamp(self):
import time
ts = time.time()
expected = time.gmtime(ts)
got = self.theclass.utcfromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_microsecond_rounding(self):
# Test whether fromtimestamp "rounds up" floats that are less
# than one microsecond smaller than an integer.
self.assertEqual(self.theclass.fromtimestamp(0.9999999),
self.theclass.fromtimestamp(1))
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_insane_utcfromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.utcfromtimestamp,
insane)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_fromtimestamp(self):
# The result is tz-dependent; at least test that this doesn't
# fail (like it did before bug 1646728 was fixed).
self.theclass.fromtimestamp(-1.05)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_utcfromtimestamp(self):
d = self.theclass.utcfromtimestamp(-1.05)
self.assertEqual(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000))
def test_utcnow(self):
import time
# Call it a success if utcnow() and utcfromtimestamp() are within
# a second of each other.
tolerance = timedelta(seconds=1)
for dummy in range(3):
from_now = self.theclass.utcnow()
from_timestamp = self.theclass.utcfromtimestamp(time.time())
if abs(from_timestamp - from_now) <= tolerance:
break
# Else try again a few times.
self.assertLessEqual(abs(from_timestamp - from_now), tolerance)
def test_strptime(self):
import _strptime
string = '2004-12-01 13:02:47.197'
format = '%Y-%m-%d %H:%M:%S.%f'
result, frac = _strptime._strptime(string, format)
expected = self.theclass(*(result[0:6]+(frac,)))
got = self.theclass.strptime(string, format)
self.assertEqual(expected, got)
    def test_more_timetuple(self):
        # This tests fields beyond those tested by the TestDate.test_timetuple.
        t = self.theclass(2004, 12, 31, 6, 22, 33)
        # Tuple form: (year, month, day, hour, min, sec, weekday, yearday, isdst).
        self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1))
        self.assertEqual(t.timetuple(),
                         (t.year, t.month, t.day,
                          t.hour, t.minute, t.second,
                          t.weekday(),
                          t.toordinal() - date(t.year, 1, 1).toordinal() + 1,
                          -1))
        # Same values must also be reachable via the named struct_time fields.
        tt = t.timetuple()
        self.assertEqual(tt.tm_year, t.year)
        self.assertEqual(tt.tm_mon, t.month)
        self.assertEqual(tt.tm_mday, t.day)
        self.assertEqual(tt.tm_hour, t.hour)
        self.assertEqual(tt.tm_min, t.minute)
        self.assertEqual(tt.tm_sec, t.second)
        self.assertEqual(tt.tm_wday, t.weekday())
        self.assertEqual(tt.tm_yday, t.toordinal() -
                                     date(t.year, 1, 1).toordinal() + 1)
        # A naive datetime reports "unknown" DST.
        self.assertEqual(tt.tm_isdst, -1)
def test_more_strftime(self):
# This tests fields beyond those tested by the TestDate.test_strftime.
t = self.theclass(2004, 12, 31, 6, 22, 33, 47)
self.assertEqual(t.strftime("%m %d %y %f %S %M %H %j"),
"12 31 04 000047 33 22 06 366")
def test_extract(self):
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
def test_combine(self):
d = date(2002, 3, 4)
t = time(18, 45, 3, 1234)
expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
combine = self.theclass.combine
dt = combine(d, t)
self.assertEqual(dt, expected)
dt = combine(time=t, date=d)
self.assertEqual(dt, expected)
self.assertEqual(d, dt.date())
self.assertEqual(t, dt.time())
self.assertEqual(dt, combine(dt.date(), dt.time()))
self.assertRaises(TypeError, combine) # need an arg
self.assertRaises(TypeError, combine, d) # need two args
self.assertRaises(TypeError, combine, t, d) # args reversed
self.assertRaises(TypeError, combine, d, t, 1) # too many args
self.assertRaises(TypeError, combine, "date", "time") # wrong types
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4, 5, 6, 7]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
    def test_astimezone(self):
        # Pretty boring!  The TZ test is more interesting here.  astimezone()
        # simply can't be applied to a naive object.
        dt = self.theclass.now()
        f = FixedOffset(44, "")
        self.assertRaises(TypeError, dt.astimezone) # not enough args
        self.assertRaises(TypeError, dt.astimezone, f, f) # too many args
        self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type
        self.assertRaises(ValueError, dt.astimezone, f) # naive
        self.assertRaises(ValueError, dt.astimezone, tz=f) # naive
        # A tzinfo whose utcoffset() returns None still counts as naive.
        class Bogus(tzinfo):
            def utcoffset(self, dt): return None
            def dst(self, dt): return timedelta(0)
        bog = Bogus()
        self.assertRaises(ValueError, dt.astimezone, bog)   # naive
        # Likewise when dst() is the one returning None.
        class AlsoBogus(tzinfo):
            def utcoffset(self, dt): return timedelta(0)
            def dst(self, dt): return None
        alsobog = AlsoBogus()
        self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive
    def test_subclass_datetime(self):
        # A datetime subclass with an extra constructor keyword and an extra
        # method must keep both its own additions and the base behavior.
        class C(self.theclass):
            theAnswer = 42
            def __new__(cls, *args, **kws):
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result
            def newmeth(self, start):
                return start + self.year + self.month + self.second
        args = 2003, 4, 14, 12, 13, 41
        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})
        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        # Base-class behavior is preserved alongside the subclass extras.
        self.assertEqual(dt1.toordinal(), dt2.toordinal())
        self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month +
                                          dt1.second - 7)
# Trivial time subclass used by the pickling-of-subclasses tests below.
class SubclassTime(time):
    sub_var = 1
class TestTime(HarmlessMixedComparison, unittest.TestCase):
    """Tests for naive (tzinfo-less) behavior of datetime.time.

    HarmlessMixedComparison mixes in checks that comparing against
    unrelated types fails in the expected, harmless ways.
    """
    theclass = time
    def test_basic_attributes(self):
        t = self.theclass(12, 0)
        self.assertEqual(t.hour, 12)
        self.assertEqual(t.minute, 0)
        self.assertEqual(t.second, 0)
        self.assertEqual(t.microsecond, 0)
    def test_basic_attributes_nonzero(self):
        # Make sure all attributes are non-zero so bugs in
        # bit-shifting access show up.
        t = self.theclass(12, 59, 59, 8000)
        self.assertEqual(t.hour, 12)
        self.assertEqual(t.minute, 59)
        self.assertEqual(t.second, 59)
        self.assertEqual(t.microsecond, 8000)
    def test_roundtrip(self):
        t = self.theclass(1, 2, 3, 4)
        # Verify t -> string -> time identity.
        s = repr(t)
        self.assertTrue(s.startswith('datetime.'))
        s = s[9:]
        t2 = eval(s)
        self.assertEqual(t, t2)
        # Verify identity via reconstructing from pieces.
        t2 = self.theclass(t.hour, t.minute, t.second,
                           t.microsecond)
        self.assertEqual(t, t2)
    def test_comparing(self):
        args = [1, 2, 3, 4]
        t1 = self.theclass(*args)
        t2 = self.theclass(*args)
        # Equal times: all equality-flavored relations hold.
        self.assertTrue(t1 == t2)
        self.assertTrue(t1 <= t2)
        self.assertTrue(t1 >= t2)
        self.assertFalse(t1 != t2)
        self.assertFalse(t1 < t2)
        self.assertFalse(t1 > t2)
        self.assertEqual(cmp(t1, t2), 0)
        self.assertEqual(cmp(t2, t1), 0)
        # Bumping any one field makes the bumped value strictly larger.
        for i in range(len(args)):
            newargs = args[:]
            newargs[i] = args[i] + 1
            t2 = self.theclass(*newargs)   # this is larger than t1
            self.assertTrue(t1 < t2)
            self.assertTrue(t2 > t1)
            self.assertTrue(t1 <= t2)
            self.assertTrue(t2 >= t1)
            self.assertTrue(t1 != t2)
            self.assertTrue(t2 != t1)
            self.assertFalse(t1 == t2)
            self.assertFalse(t2 == t1)
            self.assertFalse(t1 > t2)
            self.assertFalse(t2 < t1)
            self.assertFalse(t1 >= t2)
            self.assertFalse(t2 <= t1)
            self.assertEqual(cmp(t1, t2), -1)
            self.assertEqual(cmp(t2, t1), 1)
        # Non-time operands: == / != are harmless, ordering raises.
        for badarg in OTHERSTUFF:
            self.assertEqual(t1 == badarg, False)
            self.assertEqual(t1 != badarg, True)
            self.assertEqual(badarg == t1, False)
            self.assertEqual(badarg != t1, True)
            self.assertRaises(TypeError, lambda: t1 <= badarg)
            self.assertRaises(TypeError, lambda: t1 < badarg)
            self.assertRaises(TypeError, lambda: t1 > badarg)
            self.assertRaises(TypeError, lambda: t1 >= badarg)
            self.assertRaises(TypeError, lambda: badarg <= t1)
            self.assertRaises(TypeError, lambda: badarg < t1)
            self.assertRaises(TypeError, lambda: badarg > t1)
            self.assertRaises(TypeError, lambda: badarg >= t1)
    def test_bad_constructor_arguments(self):
        # bad hours
        self.theclass(0, 0)    # no exception
        self.theclass(23, 0)   # no exception
        self.assertRaises(ValueError, self.theclass, -1, 0)
        self.assertRaises(ValueError, self.theclass, 24, 0)
        # bad minutes
        self.theclass(23, 0)    # no exception
        self.theclass(23, 59)   # no exception
        self.assertRaises(ValueError, self.theclass, 23, -1)
        self.assertRaises(ValueError, self.theclass, 23, 60)
        # bad seconds
        self.theclass(23, 59, 0)    # no exception
        self.theclass(23, 59, 59)   # no exception
        self.assertRaises(ValueError, self.theclass, 23, 59, -1)
        self.assertRaises(ValueError, self.theclass, 23, 59, 60)
        # bad microseconds
        self.theclass(23, 59, 59, 0)        # no exception
        self.theclass(23, 59, 59, 999999)   # no exception
        self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1)
        self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000)
    def test_hash_equality(self):
        # Equal times must hash equal and collapse to one dict key.
        d = self.theclass(23, 30, 17)
        e = self.theclass(23, 30, 17)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))
        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)
        d = self.theclass(0,  5, 17)
        e = self.theclass(0,  5, 17)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))
        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)
    def test_isoformat(self):
        # Microseconds are rendered as a full 6-digit field, or omitted
        # entirely when zero.
        t = self.theclass(4, 5, 1, 123)
        self.assertEqual(t.isoformat(), "04:05:01.000123")
        self.assertEqual(t.isoformat(), str(t))
        t = self.theclass()
        self.assertEqual(t.isoformat(), "00:00:00")
        self.assertEqual(t.isoformat(), str(t))
        t = self.theclass(microsecond=1)
        self.assertEqual(t.isoformat(), "00:00:00.000001")
        self.assertEqual(t.isoformat(), str(t))
        t = self.theclass(microsecond=10)
        self.assertEqual(t.isoformat(), "00:00:00.000010")
        self.assertEqual(t.isoformat(), str(t))
        t = self.theclass(microsecond=100)
        self.assertEqual(t.isoformat(), "00:00:00.000100")
        self.assertEqual(t.isoformat(), str(t))
        t = self.theclass(microsecond=1000)
        self.assertEqual(t.isoformat(), "00:00:00.001000")
        self.assertEqual(t.isoformat(), str(t))
        t = self.theclass(microsecond=10000)
        self.assertEqual(t.isoformat(), "00:00:00.010000")
        self.assertEqual(t.isoformat(), str(t))
        t = self.theclass(microsecond=100000)
        self.assertEqual(t.isoformat(), "00:00:00.100000")
        self.assertEqual(t.isoformat(), str(t))
    def test_1653736(self):
        # Regression test for bug 1653736:
        # verify it doesn't accept extra keyword arguments
        t = self.theclass(second=1)
        self.assertRaises(TypeError, t.isoformat, foo=3)
    def test_strftime(self):
        t = self.theclass(1, 2, 3, 4)
        self.assertEqual(t.strftime('%H %M %S %f'), "01 02 03 000004")
        # A naive object replaces %z and %Z with empty strings.
        self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
    def test_format(self):
        t = self.theclass(1, 2, 3, 4)
        self.assertEqual(t.__format__(''), str(t))
        # check that a derived class's __str__() gets called
        class A(self.theclass):
            def __str__(self):
                return 'A'
        a = A(1, 2, 3, 4)
        self.assertEqual(a.__format__(''), 'A')
        # check that a derived class's strftime gets called
        class B(self.theclass):
            def strftime(self, format_spec):
                return 'B'
        b = B(1, 2, 3, 4)
        # B doesn't override __str__, so an empty spec still yields str().
        self.assertEqual(b.__format__(''), str(t))
        for fmt in ['%H %M %S',
                    ]:
            self.assertEqual(t.__format__(fmt), t.strftime(fmt))
            self.assertEqual(a.__format__(fmt), t.strftime(fmt))
            self.assertEqual(b.__format__(fmt), 'B')
    def test_str(self):
        self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004")
        self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000")
        self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000")
        self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03")
        self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00")
    def test_repr(self):
        # Trailing zero fields are dropped from the repr.
        name = 'datetime.' + self.theclass.__name__
        self.assertEqual(repr(self.theclass(1, 2, 3, 4)),
                         "%s(1, 2, 3, 4)" % name)
        self.assertEqual(repr(self.theclass(10, 2, 3, 4000)),
                         "%s(10, 2, 3, 4000)" % name)
        self.assertEqual(repr(self.theclass(0, 2, 3, 400000)),
                         "%s(0, 2, 3, 400000)" % name)
        self.assertEqual(repr(self.theclass(12, 2, 3, 0)),
                         "%s(12, 2, 3)" % name)
        self.assertEqual(repr(self.theclass(23, 15, 0, 0)),
                         "%s(23, 15)" % name)
    def test_resolution_info(self):
        self.assertIsInstance(self.theclass.min, self.theclass)
        self.assertIsInstance(self.theclass.max, self.theclass)
        self.assertIsInstance(self.theclass.resolution, timedelta)
        self.assertTrue(self.theclass.max > self.theclass.min)
    def test_pickling(self):
        args = 20, 59, 16, 64**2
        orig = self.theclass(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)
    def test_pickling_subclass_time(self):
        args = 20, 59, 16, 64**2
        orig = SubclassTime(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)
    def test_bool(self):
        # A time is falsy iff all of its fields are zero (midnight).
        cls = self.theclass
        self.assertTrue(cls(1))
        self.assertTrue(cls(0, 1))
        self.assertTrue(cls(0, 0, 1))
        self.assertTrue(cls(0, 0, 0, 1))
        self.assertFalse(cls(0))
        self.assertFalse(cls())
    def test_replace(self):
        cls = self.theclass
        args = [1, 2, 3, 4]
        base = cls(*args)
        self.assertEqual(base, base.replace())
        i = 0
        for name, newval in (("hour", 5),
                             ("minute", 6),
                             ("second", 7),
                             ("microsecond", 8)):
            newargs = args[:]
            newargs[i] = newval
            expected = cls(*newargs)
            got = base.replace(**{name: newval})
            self.assertEqual(expected, got)
            i += 1
        # Out of bounds.
        base = cls(1)
        self.assertRaises(ValueError, base.replace, hour=24)
        self.assertRaises(ValueError, base.replace, minute=-1)
        self.assertRaises(ValueError, base.replace, second=100)
        self.assertRaises(ValueError, base.replace, microsecond=1000000)
    def test_subclass_time(self):
        # A time subclass with an extra constructor keyword and method must
        # keep both its own additions and the base behavior.
        class C(self.theclass):
            theAnswer = 42
            def __new__(cls, *args, **kws):
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result
            def newmeth(self, start):
                return start + self.hour + self.second
        args = 4, 5, 6
        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})
        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.isoformat(), dt2.isoformat())
        self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
    def test_backdoor_resistance(self):
        # see TestDate.test_backdoor_resistance().
        base = '2:59.0'
        for hour_byte in ' ', '9', chr(24), '\xff':
            self.assertRaises(TypeError, self.theclass,
                              hour_byte + base[1:])
# A mixin for classes with a tzinfo= argument.  Subclasses must define
# theclass as a class attribute, and theclass(1, 1, 1, tzinfo=whatever)
# must be legit (which is true for time and datetime).
class TZInfoBase:
    """Mixin exercising tzinfo handling common to time and datetime.

    Subclasses must supply ``theclass`` as a class attribute; see the
    comment above for the constructor contract they must satisfy.
    """
    def test_argument_passing(self):
        cls = self.theclass
        # A datetime passes itself on, a time passes None.
        class introspective(tzinfo):
            def tzname(self, dt):    return dt and "real" or "none"
            def utcoffset(self, dt):
                return timedelta(minutes = dt and 42 or -42)
            dst = utcoffset
        obj = cls(1, 2, 3, tzinfo=introspective())
        expected = cls is time and "none" or "real"
        self.assertEqual(obj.tzname(), expected)
        expected = timedelta(minutes=(cls is time and -42 or 42))
        self.assertEqual(obj.utcoffset(), expected)
        self.assertEqual(obj.dst(), expected)
    def test_bad_tzinfo_classes(self):
        cls = self.theclass
        self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12)
        # Not a tzinfo subclass at all -- must be rejected even though it
        # quacks like one.
        class NiceTry(object):
            def __init__(self): pass
            def utcoffset(self, dt): pass
        self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry)
        # A genuine tzinfo subclass is accepted and stored as-is.
        class BetterTry(tzinfo):
            def __init__(self): pass
            def utcoffset(self, dt): pass
        b = BetterTry()
        t = cls(1, 1, 1, tzinfo=b)
        self.assertIs(t.tzinfo, b)
    def test_utc_offset_out_of_bounds(self):
        class Edgy(tzinfo):
            def __init__(self, offset):
                self.offset = timedelta(minutes=offset)
            def utcoffset(self, dt):
                return self.offset
        cls = self.theclass
        # Offsets must lie strictly between -1440 and +1440 minutes.
        for offset, legit in ((-1440, False),
                              (-1439, True),
                              (1439, True),
                              (1440, False)):
            if cls is time:
                t = cls(1, 2, 3, tzinfo=Edgy(offset))
            elif cls is datetime:
                t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset))
            else:
                assert 0, "impossible"
            if legit:
                aofs = abs(offset)
                h, m = divmod(aofs, 60)
                tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m)
                if isinstance(t, datetime):
                    t = t.timetz()
                self.assertEqual(str(t), "01:02:03" + tag)
            else:
                self.assertRaises(ValueError, str, t)
    def test_tzinfo_classes(self):
        cls = self.theclass
        # All-None results: behaves the same as having no tzinfo at all.
        class C1(tzinfo):
            def utcoffset(self, dt): return None
            def dst(self, dt): return None
            def tzname(self, dt): return None
        for t in (cls(1, 1, 1),
                  cls(1, 1, 1, tzinfo=None),
                  cls(1, 1, 1, tzinfo=C1())):
            self.assertIsNone(t.utcoffset())
            self.assertIsNone(t.dst())
            self.assertIsNone(t.tzname())
        # Extreme but legal offsets are passed through unchanged.
        class C3(tzinfo):
            def utcoffset(self, dt): return timedelta(minutes=-1439)
            def dst(self, dt): return timedelta(minutes=1439)
            def tzname(self, dt): return "aname"
        t = cls(1, 1, 1, tzinfo=C3())
        self.assertEqual(t.utcoffset(), timedelta(minutes=-1439))
        self.assertEqual(t.dst(), timedelta(minutes=1439))
        self.assertEqual(t.tzname(), "aname")
        # Wrong types.
        class C4(tzinfo):
            def utcoffset(self, dt): return "aname"
            def dst(self, dt): return 7
            def tzname(self, dt): return 0
        t = cls(1, 1, 1, tzinfo=C4())
        self.assertRaises(TypeError, t.utcoffset)
        self.assertRaises(TypeError, t.dst)
        self.assertRaises(TypeError, t.tzname)
        # Offset out of range.
        class C6(tzinfo):
            def utcoffset(self, dt): return timedelta(hours=-24)
            def dst(self, dt): return timedelta(hours=24)
        t = cls(1, 1, 1, tzinfo=C6())
        self.assertRaises(ValueError, t.utcoffset)
        self.assertRaises(ValueError, t.dst)
        # Not a whole number of minutes.
        class C7(tzinfo):
            def utcoffset(self, dt): return timedelta(seconds=61)
            def dst(self, dt): return timedelta(microseconds=-81)
        t = cls(1, 1, 1, tzinfo=C7())
        self.assertRaises(ValueError, t.utcoffset)
        self.assertRaises(ValueError, t.dst)
    def test_aware_compare(self):
        cls = self.theclass
        # Ensure that utcoffset() gets ignored if the comparands have
        # the same tzinfo member.
        class OperandDependentOffset(tzinfo):
            def utcoffset(self, t):
                if t.minute < 10:
                    # d0 and d1 equal after adjustment
                    return timedelta(minutes=t.minute)
                else:
                    # d2 off in the weeds
                    return timedelta(minutes=59)
        base = cls(8, 9, 10, tzinfo=OperandDependentOffset())
        d0 = base.replace(minute=3)
        d1 = base.replace(minute=9)
        d2 = base.replace(minute=11)
        for x in d0, d1, d2:
            for y in d0, d1, d2:
                got = cmp(x, y)
                expected = cmp(x.minute, y.minute)
                self.assertEqual(got, expected)
        # However, if they're different members, utcoffset is not ignored.
        # Note that a time can't actually have an operand-dependent offset,
        # though (and time.utcoffset() passes None to tzinfo.utcoffset()),
        # so skip this test for time.
        if cls is not time:
            d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
            d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
            d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
            for x in d0, d1, d2:
                for y in d0, d1, d2:
                    got = cmp(x, y)
                    if (x is d0 or x is d1) and (y is d0 or y is d1):
                        expected = 0
                    elif x is y is d2:
                        expected = 0
                    elif x is d2:
                        expected = -1
                    else:
                        assert y is d2
                        expected = 1
                    self.assertEqual(got, expected)
# Testing time objects with a non-None tzinfo.
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
    """Tests for time objects carrying a non-None tzinfo.

    Inherits all of TestTime's naive-time checks plus TZInfoBase's
    tzinfo-protocol checks, then adds aware-specific cases.
    """
    theclass = time
    def test_empty(self):
        t = self.theclass()
        self.assertEqual(t.hour, 0)
        self.assertEqual(t.minute, 0)
        self.assertEqual(t.second, 0)
        self.assertEqual(t.microsecond, 0)
        self.assertIsNone(t.tzinfo)
    def test_zones(self):
        # Three fixed zones with distinct offsets, tznames and dst values.
        est = FixedOffset(-300, "EST", 1)
        utc = FixedOffset(0, "UTC", -2)
        met = FixedOffset(60, "MET", 3)
        t1 = time( 7, 47, tzinfo=est)
        t2 = time(12, 47, tzinfo=utc)
        t3 = time(13, 47, tzinfo=met)
        t4 = time(microsecond=40)
        t5 = time(microsecond=40, tzinfo=utc)
        self.assertEqual(t1.tzinfo, est)
        self.assertEqual(t2.tzinfo, utc)
        self.assertEqual(t3.tzinfo, met)
        self.assertIsNone(t4.tzinfo)
        self.assertEqual(t5.tzinfo, utc)
        self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
        self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
        self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
        self.assertIsNone(t4.utcoffset())
        self.assertRaises(TypeError, t1.utcoffset, "no args")
        self.assertEqual(t1.tzname(), "EST")
        self.assertEqual(t2.tzname(), "UTC")
        self.assertEqual(t3.tzname(), "MET")
        self.assertIsNone(t4.tzname())
        self.assertRaises(TypeError, t1.tzname, "no args")
        self.assertEqual(t1.dst(), timedelta(minutes=1))
        self.assertEqual(t2.dst(), timedelta(minutes=-2))
        self.assertEqual(t3.dst(), timedelta(minutes=3))
        self.assertIsNone(t4.dst())
        self.assertRaises(TypeError, t1.dst, "no args")
        # t1/t2/t3 denote the same UTC instant, so hash and == agree.
        self.assertEqual(hash(t1), hash(t2))
        self.assertEqual(hash(t1), hash(t3))
        self.assertEqual(hash(t2), hash(t3))
        self.assertEqual(t1, t2)
        self.assertEqual(t1, t3)
        self.assertEqual(t2, t3)
        self.assertRaises(TypeError, lambda: t4 == t5) # mixed tz-aware & naive
        self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive
        self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive
        self.assertEqual(str(t1), "07:47:00-05:00")
        self.assertEqual(str(t2), "12:47:00+00:00")
        self.assertEqual(str(t3), "13:47:00+01:00")
        self.assertEqual(str(t4), "00:00:00.000040")
        self.assertEqual(str(t5), "00:00:00.000040+00:00")
        self.assertEqual(t1.isoformat(), "07:47:00-05:00")
        self.assertEqual(t2.isoformat(), "12:47:00+00:00")
        self.assertEqual(t3.isoformat(), "13:47:00+01:00")
        self.assertEqual(t4.isoformat(), "00:00:00.000040")
        self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00")
        d = 'datetime.time'
        self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)")
        self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)")
        self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)")
        self.assertEqual(repr(t4), d + "(0, 0, 0, 40)")
        self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)")
        self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"),
                                     "07:47:00 %Z=EST %z=-0500")
        self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000")
        self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100")
        # %-signs in a tzname must survive strftime's format expansion.
        yuck = FixedOffset(-1439, "%z %Z %%z%%Z")
        t1 = time(23, 59, tzinfo=yuck)
        self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"),
                         "23:59 %Z='%z %Z %%z%%Z' %z='-2359'")
        # Check that an invalid tzname result raises an exception.
        class Badtzname(tzinfo):
            def tzname(self, dt): return 42
        t = time(2, 3, 4, tzinfo=Badtzname())
        self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04")
        self.assertRaises(TypeError, t.strftime, "%Z")
    def test_hash_edge_cases(self):
        # Offsets that overflow a basic time.
        t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, ""))
        t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, ""))
        self.assertEqual(hash(t1), hash(t2))
        t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, ""))
        t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, ""))
        self.assertEqual(hash(t1), hash(t2))
    def test_pickling(self):
        # Try one without a tzinfo.
        args = 20, 59, 16, 64**2
        orig = self.theclass(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)
        # Try one with a tzinfo.
        tinfo = PicklableFixedOffset(-300, 'cookie')
        orig = self.theclass(5, 6, 7, tzinfo=tinfo)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)
            self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
            self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
            self.assertEqual(derived.tzname(), 'cookie')
    def test_more_bool(self):
        # Test cases with non-None tzinfo: truth is decided after
        # subtracting the UTC offset.
        cls = self.theclass
        t = cls(0, tzinfo=FixedOffset(-300, ""))
        self.assertTrue(t)
        t = cls(5, tzinfo=FixedOffset(-300, ""))
        self.assertTrue(t)
        t = cls(5, tzinfo=FixedOffset(300, ""))
        self.assertFalse(t)
        t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
        self.assertFalse(t)
        # Mostly ensuring this doesn't overflow internally.
        t = cls(0, tzinfo=FixedOffset(23*60 + 59, ""))
        self.assertTrue(t)
        # But this should yield a value error -- the utcoffset is bogus.
        t = cls(0, tzinfo=FixedOffset(24*60, ""))
        self.assertRaises(ValueError, lambda: bool(t))
        # Likewise.
        t = cls(0, tzinfo=FixedOffset(-24*60, ""))
        self.assertRaises(ValueError, lambda: bool(t))
    def test_replace(self):
        cls = self.theclass
        z100 = FixedOffset(100, "+100")
        zm200 = FixedOffset(timedelta(minutes=-200), "-200")
        args = [1, 2, 3, 4, z100]
        base = cls(*args)
        self.assertEqual(base, base.replace())
        i = 0
        for name, newval in (("hour", 5),
                             ("minute", 6),
                             ("second", 7),
                             ("microsecond", 8),
                             ("tzinfo", zm200)):
            newargs = args[:]
            newargs[i] = newval
            expected = cls(*newargs)
            got = base.replace(**{name: newval})
            self.assertEqual(expected, got)
            i += 1
        # Ensure we can get rid of a tzinfo.
        self.assertEqual(base.tzname(), "+100")
        base2 = base.replace(tzinfo=None)
        self.assertIsNone(base2.tzinfo)
        self.assertIsNone(base2.tzname())
        # Ensure we can add one.
        base3 = base2.replace(tzinfo=z100)
        self.assertEqual(base, base3)
        self.assertIs(base.tzinfo, base3.tzinfo)
        # Out of bounds.
        base = cls(1)
        self.assertRaises(ValueError, base.replace, hour=24)
        self.assertRaises(ValueError, base.replace, minute=-1)
        self.assertRaises(ValueError, base.replace, second=100)
        self.assertRaises(ValueError, base.replace, microsecond=1000000)
    def test_mixed_compare(self):
        t1 = time(1, 2, 3)
        t2 = time(1, 2, 3)
        self.assertEqual(t1, t2)
        t2 = t2.replace(tzinfo=None)
        self.assertEqual(t1, t2)
        # A tzinfo returning a None offset still compares as naive.
        t2 = t2.replace(tzinfo=FixedOffset(None, ""))
        self.assertEqual(t1, t2)
        t2 = t2.replace(tzinfo=FixedOffset(0, ""))
        self.assertRaises(TypeError, lambda: t1 == t2)
        # In time w/ identical tzinfo objects, utcoffset is ignored.
        class Varies(tzinfo):
            def __init__(self):
                self.offset = timedelta(minutes=22)
            def utcoffset(self, t):
                self.offset += timedelta(minutes=1)
                return self.offset
        v = Varies()
        t1 = t2.replace(tzinfo=v)
        t2 = t2.replace(tzinfo=v)
        self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
        self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
        self.assertEqual(t1, t2)
        # But if they're not identical, it isn't ignored.
        t2 = t2.replace(tzinfo=Varies())
        self.assertTrue(t1 < t2)  # t1's offset counter still going up
    def test_subclass_timetz(self):
        # A subclass with an extra constructor keyword and an extra method
        # must keep both its own additions and the aware-time behavior.
        class C(self.theclass):
            theAnswer = 42
            def __new__(cls, *args, **kws):
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result
            def newmeth(self, start):
                return start + self.hour + self.second
        args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})
        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
        self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
# Testing datetime objects with a non-None tzinfo.
class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
theclass = datetime
def test_trivial(self):
dt = self.theclass(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(dt.year, 1)
self.assertEqual(dt.month, 2)
self.assertEqual(dt.day, 3)
self.assertEqual(dt.hour, 4)
self.assertEqual(dt.minute, 5)
self.assertEqual(dt.second, 6)
self.assertEqual(dt.microsecond, 7)
self.assertEqual(dt.tzinfo, None)
    def test_even_more_compare(self):
        # The test_compare() and test_more_compare() inherited from TestDate
        # and TestDateTime covered non-tzinfo cases.
        # Smallest possible after UTC adjustment.
        t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
        # Largest possible after UTC adjustment.
        t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
                           tzinfo=FixedOffset(-1439, ""))
        # Make sure those compare correctly, and w/o overflow.
        self.assertTrue(t1 < t2)
        self.assertTrue(t1 != t2)
        self.assertTrue(t2 > t1)
        self.assertTrue(t1 == t1)
        self.assertTrue(t2 == t2)
        # Equal after adjustment.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""))
        t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, ""))
        self.assertEqual(t1, t2)
        # Change t1 not to subtract a minute, and t1 should be larger.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, ""))
        self.assertTrue(t1 > t2)
        # Change t1 to subtract 2 minutes, and t1 should be smaller.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, ""))
        self.assertTrue(t1 < t2)
        # Back to the original t1, but make seconds resolve it.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
                           second=1)
        self.assertTrue(t1 > t2)
        # Likewise, but make microseconds resolve it.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
                           microsecond=1)
        self.assertTrue(t1 > t2)
        # Make t2 naive and it should fail.
        t2 = self.theclass.min
        self.assertRaises(TypeError, lambda: t1 == t2)
        self.assertEqual(t2, t2)
        # It's also naive if it has tzinfo but tzinfo.utcoffset() is None.
        class Naive(tzinfo):
            def utcoffset(self, dt): return None
        t2 = self.theclass(5, 6, 7, tzinfo=Naive())
        self.assertRaises(TypeError, lambda: t1 == t2)
        self.assertEqual(t2, t2)
        # OTOH, it's OK to compare two of these mixing the two ways of being
        # naive.
        t1 = self.theclass(5, 6, 7)
        self.assertEqual(t1, t2)
        # Try a bogus utcoffset.
        class Bogus(tzinfo):
            def utcoffset(self, dt):
                return timedelta(minutes=1440) # out of bounds
        t1 = self.theclass(2, 2, 2, tzinfo=Bogus())
        t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, ""))
        self.assertRaises(ValueError, lambda: t1 == t2)
def test_pickling(self):
# Try one without a tzinfo.
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(*args, **{'tzinfo': tinfo})
derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0))
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_extreme_hashes(self):
# If an attempt is made to hash these via subtracting the offset
# then hashing a datetime object, OverflowError results. The
# Python implementation used to blow up here.
t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
hash(t)
t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
hash(t)
# OTOH, an OOB offset should blow up.
t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, ""))
self.assertRaises(ValueError, hash, t)
    def test_zones(self):
        """Three spellings of one instant (EST/UTC/MET) must report their
        own tzinfo/offset/name, yet compare equal, hash equal, and render
        with the expected ISO-8601 offset suffixes."""
        est = FixedOffset(-300, "EST")
        utc = FixedOffset(0, "UTC")
        met = FixedOffset(60, "MET")
        # All three denote 12:47 UTC on 2002-03-19.
        t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est)
        t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc)
        t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met)
        self.assertEqual(t1.tzinfo, est)
        self.assertEqual(t2.tzinfo, utc)
        self.assertEqual(t3.tzinfo, met)
        self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
        self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
        self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
        self.assertEqual(t1.tzname(), "EST")
        self.assertEqual(t2.tzname(), "UTC")
        self.assertEqual(t3.tzname(), "MET")
        # Equal instants must hash equal and compare equal.
        self.assertEqual(hash(t1), hash(t2))
        self.assertEqual(hash(t1), hash(t3))
        self.assertEqual(hash(t2), hash(t3))
        self.assertEqual(t1, t2)
        self.assertEqual(t1, t3)
        self.assertEqual(t2, t3)
        self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00")
        self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00")
        self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00")
        d = 'datetime.datetime(2002, 3, 19, '
        self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)")
        self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)")
        self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)")
def test_combine(self):
met = FixedOffset(60, "MET")
d = date(2002, 3, 4)
tz = time(18, 45, 3, 1234, tzinfo=met)
dt = datetime.combine(d, tz)
self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234,
tzinfo=met))
def test_extract(self):
met = FixedOffset(60, "MET")
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met))
def test_tz_aware_arithmetic(self):
import random
now = self.theclass.now()
tz55 = FixedOffset(-330, "west 5:30")
timeaware = now.time().replace(tzinfo=tz55)
nowaware = self.theclass.combine(now.date(), timeaware)
self.assertIs(nowaware.tzinfo, tz55)
self.assertEqual(nowaware.timetz(), timeaware)
# Can't mix aware and non-aware.
self.assertRaises(TypeError, lambda: now - nowaware)
self.assertRaises(TypeError, lambda: nowaware - now)
# And adding datetime's doesn't make sense, aware or not.
self.assertRaises(TypeError, lambda: now + nowaware)
self.assertRaises(TypeError, lambda: nowaware + now)
self.assertRaises(TypeError, lambda: nowaware + nowaware)
# Subtracting should yield 0.
self.assertEqual(now - now, timedelta(0))
self.assertEqual(nowaware - nowaware, timedelta(0))
# Adding a delta should preserve tzinfo.
delta = timedelta(weeks=1, minutes=12, microseconds=5678)
nowawareplus = nowaware + delta
self.assertIs(nowaware.tzinfo, tz55)
nowawareplus2 = delta + nowaware
self.assertIs(nowawareplus2.tzinfo, tz55)
self.assertEqual(nowawareplus, nowawareplus2)
# that - delta should be what we started with, and that - what we
# started with should be delta.
diff = nowawareplus - delta
self.assertIs(diff.tzinfo, tz55)
self.assertEqual(nowaware, diff)
self.assertRaises(TypeError, lambda: delta - nowawareplus)
self.assertEqual(nowawareplus - nowaware, delta)
# Make up a random timezone.
tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone")
# Attach it to nowawareplus.
nowawareplus = nowawareplus.replace(tzinfo=tzr)
self.assertIs(nowawareplus.tzinfo, tzr)
# Make sure the difference takes the timezone adjustments into account.
got = nowaware - nowawareplus
# Expected: (nowaware base - nowaware offset) -
# (nowawareplus base - nowawareplus offset) =
# (nowaware base - nowawareplus base) +
# (nowawareplus offset - nowaware offset) =
# -delta + nowawareplus offset - nowaware offset
expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta
self.assertEqual(got, expected)
# Try max possible difference.
min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min"))
max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, "max"))
maxdiff = max - min
self.assertEqual(maxdiff, self.theclass.max - self.theclass.min +
timedelta(minutes=2*1439))
    def test_tzinfo_now(self):
        """now() works without a tz, accepts a tzinfo positionally or as
        tz=, rejects non-tzinfo arguments, and actually converts."""
        meth = self.theclass.now
        # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
        base = meth()
        # Try with and without naming the keyword.
        off42 = FixedOffset(42, "42")
        another = meth(off42)
        again = meth(tz=off42)
        self.assertIs(another.tzinfo, again.tzinfo)
        self.assertEqual(another.utcoffset(), timedelta(minutes=42))
        # Bad argument with and w/o naming the keyword.
        self.assertRaises(TypeError, meth, 16)
        self.assertRaises(TypeError, meth, tzinfo=16)
        # Bad keyword name.
        self.assertRaises(TypeError, meth, tinfo=off42)
        # Too many args.
        self.assertRaises(TypeError, meth, off42, off42)
        # We don't know which time zone we're in, and don't have a tzinfo
        # class to represent it, so seeing whether a tz argument actually
        # does a conversion is tricky.
        weirdtz = FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0)
        utc = FixedOffset(0, "utc", 0)
        # Retry loop: a large wall-clock jump between the two now() calls
        # would defeat the comparison, so allow up to three attempts with
        # 30 seconds of slop each.
        for dummy in range(3):
            now = datetime.now(weirdtz)
            self.assertIs(now.tzinfo, weirdtz)
            utcnow = datetime.utcnow().replace(tzinfo=utc)
            now2 = utcnow.astimezone(weirdtz)
            if abs(now - now2) < timedelta(seconds=30):
                break
            # Else the code is broken, or more than 30 seconds passed between
            # calls; assuming the latter, just try again.
        else:
            # Three strikes and we're out.
            self.fail("utcnow(), now(tz), or astimezone() may be broken")
    def test_tzinfo_fromtimestamp(self):
        """fromtimestamp() argument handling, and that tz= really shifts
        the result by the tz's utcoffset relative to utcfromtimestamp()."""
        import time
        meth = self.theclass.fromtimestamp
        ts = time.time()
        # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
        base = meth(ts)
        # Try with and without naming the keyword.
        off42 = FixedOffset(42, "42")
        another = meth(ts, off42)
        again = meth(ts, tz=off42)
        self.assertIs(another.tzinfo, again.tzinfo)
        self.assertEqual(another.utcoffset(), timedelta(minutes=42))
        # Bad argument with and w/o naming the keyword.
        self.assertRaises(TypeError, meth, ts, 16)
        self.assertRaises(TypeError, meth, ts, tzinfo=16)
        # Bad keyword name.
        self.assertRaises(TypeError, meth, ts, tinfo=off42)
        # Too many args.
        self.assertRaises(TypeError, meth, ts, off42, off42)
        # Too few args.
        self.assertRaises(TypeError, meth)
        # Try to make sure tz= actually does some conversion.
        timestamp = 1000000000
        utcdatetime = datetime.utcfromtimestamp(timestamp)
        # In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take.
        # But on some flavor of Mac, it's nowhere near that.  So we can't have
        # any idea here what time that actually is, we can only test that
        # relative changes match.
        utcoffset = timedelta(hours=-15, minutes=39) # arbitrary, but not zero
        tz = FixedOffset(utcoffset, "tz", 0)
        expected = utcdatetime + utcoffset
        got = datetime.fromtimestamp(timestamp, tz)
        self.assertEqual(expected, got.replace(tzinfo=None))
def test_tzinfo_utcnow(self):
meth = self.theclass.utcnow
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword; for whatever reason,
# utcnow() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, off42)
self.assertRaises(TypeError, meth, tzinfo=off42)
def test_tzinfo_utcfromtimestamp(self):
import time
meth = self.theclass.utcfromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword; for whatever reason,
# utcfromtimestamp() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, ts, off42)
self.assertRaises(TypeError, meth, ts, tzinfo=off42)
    def test_tzinfo_timetuple(self):
        """timetuple() must derive tm_isdst from dst(): nonzero -> 1,
        zero -> 0, None -> -1, with ValueError for an out-of-range offset
        and TypeError for a non-timedelta return."""
        # TestDateTime tested most of this.  datetime adds a twist to the
        # DST flag.
        class DST(tzinfo):
            # Minimal tzinfo whose dst() parrots back the configured value.
            def __init__(self, dstvalue):
                if isinstance(dstvalue, int):
                    dstvalue = timedelta(minutes=dstvalue)
                self.dstvalue = dstvalue
            def dst(self, dt):
                return self.dstvalue
        cls = self.theclass
        for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1):
            d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue))
            t = d.timetuple()
            self.assertEqual(1, t.tm_year)
            self.assertEqual(1, t.tm_mon)
            self.assertEqual(1, t.tm_mday)
            self.assertEqual(10, t.tm_hour)
            self.assertEqual(20, t.tm_min)
            self.assertEqual(30, t.tm_sec)
            self.assertEqual(0, t.tm_wday)
            self.assertEqual(1, t.tm_yday)
            self.assertEqual(flag, t.tm_isdst)
        # dst() returns wrong type.
        self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple)
        # dst() at the edge.
        self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1)
        self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1)
        # dst() out of range.
        self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple)
        self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple)
    def test_utctimetuple(self):
        """utctimetuple() converts to UTC, forces tm_isdst to 0 regardless
        of dst(), and may normalize into years outside [MINYEAR, MAXYEAR]."""
        class DST(tzinfo):
            # dst()-only tzinfo; deliberately lacks utcoffset() so the
            # first assertion below can check the NotImplementedError path.
            def __init__(self, dstvalue):
                if isinstance(dstvalue, int):
                    dstvalue = timedelta(minutes=dstvalue)
                self.dstvalue = dstvalue
            def dst(self, dt):
                return self.dstvalue
        cls = self.theclass
        # This can't work:  DST didn't implement utcoffset.
        self.assertRaises(NotImplementedError,
                          cls(1, 1, 1, tzinfo=DST(0)).utcoffset)
        class UOFS(DST):
            # Adds a fixed utcoffset() on top of DST's configurable dst().
            def __init__(self, uofs, dofs=None):
                DST.__init__(self, dofs)
                self.uofs = timedelta(minutes=uofs)
            def utcoffset(self, dt):
                return self.uofs
        # Ensure tm_isdst is 0 regardless of what dst() says:  DST is never
        # in effect for a UTC time.
        for dstvalue in -33, 33, 0, None:
            d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue))
            t = d.utctimetuple()
            self.assertEqual(d.year, t.tm_year)
            self.assertEqual(d.month, t.tm_mon)
            self.assertEqual(d.day, t.tm_mday)
            self.assertEqual(11, t.tm_hour) # 20mm + 53mm = 1hr + 13mm
            self.assertEqual(13, t.tm_min)
            self.assertEqual(d.second, t.tm_sec)
            self.assertEqual(d.weekday(), t.tm_wday)
            self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1,
                             t.tm_yday)
            self.assertEqual(0, t.tm_isdst)
        # At the edges, UTC adjustment can normalize into years out-of-range
        # for a datetime object.  Ensure that a correct timetuple is
        # created anyway.
        tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439))
        # That goes back 1 minute less than a full day.
        t = tiny.utctimetuple()
        self.assertEqual(t.tm_year, MINYEAR-1)
        self.assertEqual(t.tm_mon, 12)
        self.assertEqual(t.tm_mday, 31)
        self.assertEqual(t.tm_hour, 0)
        self.assertEqual(t.tm_min, 1)
        self.assertEqual(t.tm_sec, 37)
        self.assertEqual(t.tm_yday, 366)    # "year 0" is a leap year
        self.assertEqual(t.tm_isdst, 0)
        huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439))
        # That goes forward 1 minute less than a full day.
        t = huge.utctimetuple()
        self.assertEqual(t.tm_year, MAXYEAR+1)
        self.assertEqual(t.tm_mon, 1)
        self.assertEqual(t.tm_mday, 1)
        self.assertEqual(t.tm_hour, 23)
        self.assertEqual(t.tm_min, 58)
        self.assertEqual(t.tm_sec, 37)
        self.assertEqual(t.tm_yday, 1)
        self.assertEqual(t.tm_isdst, 0)
    def test_tzinfo_isoformat(self):
        """isoformat()/str() append an offset suffix exactly when the
        tzinfo yields a real utcoffset.

        Each FixedOffset is named after its expected suffix, so tzname()
        doubles as the expected string; `unknown` (offset None) and a bare
        None tzinfo must produce no suffix at all.
        """
        zero = FixedOffset(0, "+00:00")
        plus = FixedOffset(220, "+03:40")
        minus = FixedOffset(-231, "-03:51")
        unknown = FixedOffset(None, "")
        cls = self.theclass
        datestr = '0001-02-03'
        for ofs in None, zero, plus, minus, unknown:
            for us in 0, 987001:
                d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs)
                timestr = '04:05:59' + (us and '.987001' or '')
                ofsstr = ofs is not None and d.tzname() or ''
                tailstr = timestr + ofsstr
                iso = d.isoformat()
                self.assertEqual(iso, datestr + 'T' + tailstr)
                self.assertEqual(iso, d.isoformat('T'))
                # Any one-character separator is accepted.
                self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr)
                self.assertEqual(str(d), datestr + ' ' + tailstr)
    def test_replace(self):
        """replace() swaps exactly the named field, can drop or attach a
        tzinfo, and revalidates the resulting date."""
        cls = self.theclass
        z100 = FixedOffset(100, "+100")
        zm200 = FixedOffset(timedelta(minutes=-200), "-200")
        args = [1, 2, 3, 4, 5, 6, 7, z100]
        base = cls(*args)
        self.assertEqual(base, base.replace())
        i = 0
        # Replace each constructor field in turn and compare against a
        # datetime built directly from the modified argument list; `i`
        # tracks the positional slot matching each keyword.
        for name, newval in (("year", 2),
                             ("month", 3),
                             ("day", 4),
                             ("hour", 5),
                             ("minute", 6),
                             ("second", 7),
                             ("microsecond", 8),
                             ("tzinfo", zm200)):
            newargs = args[:]
            newargs[i] = newval
            expected = cls(*newargs)
            got = base.replace(**{name: newval})
            self.assertEqual(expected, got)
            i += 1
        # Ensure we can get rid of a tzinfo.
        self.assertEqual(base.tzname(), "+100")
        base2 = base.replace(tzinfo=None)
        self.assertIsNone(base2.tzinfo)
        self.assertIsNone(base2.tzname())
        # Ensure we can add one.
        base3 = base2.replace(tzinfo=z100)
        self.assertEqual(base, base3)
        self.assertIs(base.tzinfo, base3.tzinfo)
        # Out of bounds.
        base = cls(2000, 2, 29)
        # Feb 29 does not exist in 2001, so replace() must revalidate.
        self.assertRaises(ValueError, base.replace, year=2001)
    def test_more_astimezone(self):
        """astimezone(): identity on the same tz, errors on degenerate or
        None tz, and correct adjustment to a different fixed offset."""
        # The inherited test_astimezone covered some trivial and error cases.
        fnone = FixedOffset(None, "None")
        f44m = FixedOffset(44, "44")
        fm5h = FixedOffset(-timedelta(hours=5), "m300")
        dt = self.theclass.now(tz=f44m)
        self.assertIs(dt.tzinfo, f44m)
        # Replacing with degenerate tzinfo raises an exception.
        self.assertRaises(ValueError, dt.astimezone, fnone)
        # Ditto with None tz.
        self.assertRaises(TypeError, dt.astimezone, None)
        # Replacing with same tzinfo makes no change.
        x = dt.astimezone(dt.tzinfo)
        self.assertIs(x.tzinfo, f44m)
        self.assertEqual(x.date(), dt.date())
        self.assertEqual(x.time(), dt.time())
        # Replacing with different tzinfo does adjust.
        got = dt.astimezone(fm5h)
        self.assertIs(got.tzinfo, fm5h)
        self.assertEqual(got.utcoffset(), timedelta(hours=-5))
        expected = dt - dt.utcoffset()  # in effect, convert to UTC
        expected += fm5h.utcoffset(dt)  # and from there to local time
        expected = expected.replace(tzinfo=fm5h) # and attach new tzinfo
        self.assertEqual(got.date(), expected.date())
        self.assertEqual(got.time(), expected.time())
        self.assertEqual(got.timetz(), expected.timetz())
        self.assertIs(got.tzinfo, expected.tzinfo)
        self.assertEqual(got, expected)
    def test_aware_subtract(self):
        """Subtraction ignores utcoffset() when both operands share the
        SAME tzinfo object, but applies it when the objects differ."""
        cls = self.theclass
        # Ensure that utcoffset() is ignored when the operands have the
        # same tzinfo member.
        class OperandDependentOffset(tzinfo):
            # Offset depends on the operand's own minute field, so any
            # code path that consulted it would skew the result.
            def utcoffset(self, t):
                if t.minute < 10:
                    # d0 and d1 equal after adjustment
                    return timedelta(minutes=t.minute)
                else:
                    # d2 off in the weeds
                    return timedelta(minutes=59)
        base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset())
        d0 = base.replace(minute=3)
        d1 = base.replace(minute=9)
        d2 = base.replace(minute=11)
        for x in d0, d1, d2:
            for y in d0, d1, d2:
                got = x - y
                # Shared tzinfo object: plain wall-clock difference.
                expected = timedelta(minutes=x.minute - y.minute)
                self.assertEqual(got, expected)
        # OTOH, if the tzinfo members are distinct, utcoffsets aren't
        # ignored.
        base = cls(8, 9, 10, 11, 12, 13, 14)
        d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
        d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
        d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
        for x in d0, d1, d2:
            for y in d0, d1, d2:
                got = x - y
                if (x is d0 or x is d1) and (y is d0 or y is d1):
                    expected = timedelta(0)
                elif x is y is d2:
                    expected = timedelta(0)
                elif x is d2:
                    expected = timedelta(minutes=(11-59)-0)
                else:
                    assert y is d2
                    expected = timedelta(minutes=0-(11-59))
                self.assertEqual(got, expected)
    def test_mixed_compare(self):
        """Naive/aware comparison rules; identical tzinfo objects make
        utcoffset() irrelevant to comparison, distinct objects don't."""
        t1 = datetime(1, 2, 3, 4, 5, 6, 7)
        t2 = datetime(1, 2, 3, 4, 5, 6, 7)
        self.assertEqual(t1, t2)
        t2 = t2.replace(tzinfo=None)
        self.assertEqual(t1, t2)
        # A tzinfo whose utcoffset() is None still counts as naive.
        t2 = t2.replace(tzinfo=FixedOffset(None, ""))
        self.assertEqual(t1, t2)
        # A real offset makes t2 aware; aware vs naive is a TypeError.
        t2 = t2.replace(tzinfo=FixedOffset(0, ""))
        self.assertRaises(TypeError, lambda: t1 == t2)
        # In datetime w/ identical tzinfo objects, utcoffset is ignored.
        class Varies(tzinfo):
            # utcoffset() grows by one minute per call, so any comparison
            # path that consulted it would see changing values.
            def __init__(self):
                self.offset = timedelta(minutes=22)
            def utcoffset(self, t):
                self.offset += timedelta(minutes=1)
                return self.offset
        v = Varies()
        t1 = t2.replace(tzinfo=v)
        t2 = t2.replace(tzinfo=v)
        self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
        self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
        self.assertEqual(t1, t2)
        # But if they're not identical, it isn't ignored.
        t2 = t2.replace(tzinfo=Varies())
        self.assertTrue(t1 < t2)    # t1's offset counter still going up
    def test_subclass_datetimetz(self):
        """A datetime subclass with extra constructor state and methods
        must still construct correctly and keep tz-aware semantics."""
        class C(self.theclass):
            theAnswer = 42
            def __new__(cls, *args, **kws):
                # Pop the subclass-only 'extra' kwarg before delegating.
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result
            def newmeth(self, start):
                return start + self.hour + self.year
        args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})
        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
        self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7)
# Pain to set up DST-aware tzinfo classes.
def first_sunday_on_or_after(dt):
    """Return dt advanced to the first Sunday on or after it.

    dt is returned unchanged when it already falls on a Sunday
    (weekday() == 6); the time-of-day fields are never touched.
    """
    shift = 6 - dt.weekday()    # 0 when dt is already a Sunday
    return dt + timedelta(shift) if shift else dt
# Shared fixtures for the DST-aware tzinfo classes below.
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
# Year 1 is a placeholder; USTimeZone.dst() substitutes the real year via
# .replace(year=...).
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct,
# which is the first Sunday on or after Oct 25.  Because we view 1:MM as
# being standard time on that day, there is no spelling in local time of
# the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time).
DSTEND = datetime(1, 10, 25, 1)
class USTimeZone(tzinfo):
    """US-style DST tzinfo: a fixed standard offset plus a one-hour DST
    bump between the first Sunday in April and the last Sunday in October
    (the pre-2007 US rules)."""
    def __init__(self, hours, reprname, stdname, dstname):
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname
    def __repr__(self):
        return self.reprname
    def tzname(self, dt):
        # The name depends on whether dt falls inside the DST window.
        if self.dst(dt):
            return self.dstname
        else:
            return self.stdname
    def utcoffset(self, dt):
        return self.stdoffset + self.dst(dt)
    def dst(self, dt):
        """Return HOUR inside the DST window for dt's year, else ZERO."""
        if dt is None or dt.tzinfo is None:
            # An exception instead may be sensible here, in one or more of
            # the cases.
            return ZERO
        assert dt.tzinfo is self
        # Find first Sunday in April.
        start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
        assert start.weekday() == 6 and start.month == 4 and start.day <= 7
        # Find last Sunday in October.
        end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
        assert end.weekday() == 6 and end.month == 10 and end.day >= 25
        # Can't compare naive to aware objects, so strip the timezone from
        # dt first.
        if start <= dt.replace(tzinfo=None) < end:
            return HOUR
        else:
            return ZERO
# The mainland-US DST-aware zones exercised by the conversion tests.
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
utc_real = FixedOffset(0, "UTC", 0)
# For better test coverage, we want another flavor of UTC that's west of
# the Eastern and Pacific timezones.
utc_fake = FixedOffset(-12*60, "UTCfake", 0)
class TestTimezoneConversions(unittest.TestCase):
    """End-to-end astimezone() checks around the 2002 US DST transitions,
    covering the redundant spring-forward hour (2:MM spelled as 1:MM std)
    and the unspellable hour when DST ends."""
    # The DST switch times for 2002, in std time.
    dston = datetime(2002, 4, 7, 2)
    dstoff = datetime(2002, 10, 27, 1)
    theclass = datetime
    # Check a time that's inside DST.
    def checkinside(self, dt, tz, utc, dston, dstoff):
        """Assert round-trip behavior for an aware dt inside tz's DST window."""
        self.assertEqual(dt.dst(), HOUR)
        # Conversion to our own timezone is always an identity.
        self.assertEqual(dt.astimezone(tz), dt)
        asutc = dt.astimezone(utc)
        there_and_back = asutc.astimezone(tz)
        # Conversion to UTC and back isn't always an identity here,
        # because there are redundant spellings (in local time) of
        # UTC time when DST begins:  the clock jumps from 1:59:59
        # to 3:00:00, and a local time of 2:MM:SS doesn't really
        # make sense then.  The classes above treat 2:MM:SS as
        # daylight time then (it's "after 2am"), really an alias
        # for 1:MM:SS standard time.  The latter form is what
        # conversion back from UTC produces.
        if dt.date() == dston.date() and dt.hour == 2:
            # We're in the redundant hour, and coming back from
            # UTC gives the 1:MM:SS standard-time spelling.
            self.assertEqual(there_and_back + HOUR, dt)
            # Although during was considered to be in daylight
            # time, there_and_back is not.
            self.assertEqual(there_and_back.dst(), ZERO)
            # They're the same times in UTC.
            self.assertEqual(there_and_back.astimezone(utc),
                             dt.astimezone(utc))
        else:
            # We're not in the redundant hour.
            self.assertEqual(dt, there_and_back)
        # Because we have a redundant spelling when DST begins, there is
        # (unfortunately) an hour when DST ends that can't be spelled at all in
        # local time.  When DST ends, the clock jumps from 1:59 back to 1:00
        # again.  The hour 1:MM DST has no spelling then:  1:MM is taken to be
        # standard time.  1:MM DST == 0:MM EST, but 0:MM is taken to be
        # daylight time.  The hour 1:MM daylight == 0:MM standard can't be
        # expressed in local time.  Nevertheless, we want conversion back
        # from UTC to mimic the local clock's "repeat an hour" behavior.
        nexthour_utc = asutc + HOUR
        nexthour_tz = nexthour_utc.astimezone(tz)
        if dt.date() == dstoff.date() and dt.hour == 0:
            # We're in the hour before the last DST hour.  The last DST hour
            # is ineffable.  We want the conversion back to repeat 1:MM.
            self.assertEqual(nexthour_tz, dt.replace(hour=1))
            nexthour_utc += HOUR
            nexthour_tz = nexthour_utc.astimezone(tz)
            self.assertEqual(nexthour_tz, dt.replace(hour=1))
        else:
            self.assertEqual(nexthour_tz - dt, HOUR)
    # Check a time that's outside DST.
    def checkoutside(self, dt, tz, utc):
        """Assert round-trip identity for an aware dt outside tz's DST window."""
        self.assertEqual(dt.dst(), ZERO)
        # Conversion to our own timezone is always an identity.
        self.assertEqual(dt.astimezone(tz), dt)
        # Converting to UTC and back is an identity too.
        asutc = dt.astimezone(utc)
        there_and_back = asutc.astimezone(tz)
        self.assertEqual(dt, there_and_back)
    def convert_between_tz_and_utc(self, tz, utc):
        """Drive checkinside/checkoutside at several distances (13 weeks
        down to 1 microsecond) from the 2002 transition instants."""
        dston = self.dston.replace(tzinfo=tz)
        # Because 1:MM on the day DST ends is taken as being standard time,
        # there is no spelling in tz for the last hour of daylight time.
        # For purposes of the test, the last hour of DST is 0:MM, which is
        # taken as being daylight time (and 1:MM is taken as being standard
        # time).
        dstoff = self.dstoff.replace(tzinfo=tz)
        for delta in (timedelta(weeks=13),
                      DAY,
                      HOUR,
                      timedelta(minutes=1),
                      timedelta(microseconds=1)):
            self.checkinside(dston, tz, utc, dston, dstoff)
            for during in dston + delta, dstoff - delta:
                self.checkinside(during, tz, utc, dston, dstoff)
            self.checkoutside(dstoff, tz, utc)
            for outside in dston - delta, dstoff + delta:
                self.checkoutside(outside, tz, utc)
    def test_easy(self):
        """Pairwise conversions between the DST zones and (real or fake) UTC."""
        # Despite the name of this test, the endcases are excruciating.
        self.convert_between_tz_and_utc(Eastern, utc_real)
        self.convert_between_tz_and_utc(Pacific, utc_real)
        self.convert_between_tz_and_utc(Eastern, utc_fake)
        self.convert_between_tz_and_utc(Pacific, utc_fake)
        # The next is really dancing near the edge.  It works because
        # Pacific and Eastern are far enough apart that their "problem
        # hours" don't overlap.
        self.convert_between_tz_and_utc(Eastern, Pacific)
        self.convert_between_tz_and_utc(Pacific, Eastern)
        # OTOH, these fail!  Don't enable them.  The difficulty is that
        # the edge case tests assume that every hour is representable in
        # the "utc" class.  This is always true for a fixed-offset tzinfo
        # class (lke utc_real and utc_fake), but not for Eastern or Central.
        # For these adjacent DST-aware time zones, the range of time offsets
        # tested ends up creating hours in the one that aren't representable
        # in the other.  For the same reason, we would see failures in the
        # Eastern vs Pacific tests too if we added 3*HOUR to the list of
        # offset deltas in convert_between_tz_and_utc().
        #
        # self.convert_between_tz_and_utc(Eastern, Central)  # can't work
        # self.convert_between_tz_and_utc(Central, Eastern)  # can't work
    def test_tricky(self):
        """The spring-forward '3:00 spelling' and the fall-back
        'repeat an hour' behaviors of astimezone()."""
        # 22:00 on day before daylight starts.
        fourback = self.dston - timedelta(hours=4)
        ninewest = FixedOffset(-9*60, "-0900", 0)
        fourback = fourback.replace(tzinfo=ninewest)
        # 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST.  Since it's "after
        # 2", we should get the 3 spelling.
        # If we plug 22:00 the day before into Eastern, it "looks like std
        # time", so its offset is returned as -5, and -5 - -9 = 4.  Adding 4
        # to 22:00 lands on 2:00, which makes no sense in local time (the
        # local clock jumps from 1 to 3).  The point here is to make sure we
        # get the 3 spelling.
        expected = self.dston.replace(hour=3)
        got = fourback.astimezone(Eastern).replace(tzinfo=None)
        self.assertEqual(expected, got)
        # Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST.  In that
        # case we want the 1:00 spelling.
        sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
        # Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
        # and adding -4-0 == -4 gives the 2:00 spelling.  We want the 1:00 EST
        # spelling.
        expected = self.dston.replace(hour=1)
        got = sixutc.astimezone(Eastern).replace(tzinfo=None)
        self.assertEqual(expected, got)
        # Now on the day DST ends, we want "repeat an hour" behavior.
        #       UTC  4:MM  5:MM  6:MM  7:MM  checking these
        #       EST 23:MM  0:MM  1:MM  2:MM
        #       EDT  0:MM  1:MM  2:MM  3:MM
        #      wall  0:MM  1:MM  1:MM  2:MM  against these
        for utc in utc_real, utc_fake:
            for tz in Eastern, Pacific:
                first_std_hour = self.dstoff - timedelta(hours=2) # 23:MM
                # Convert that to UTC.
                first_std_hour -= tz.utcoffset(None)
                # Adjust for possibly fake UTC.
                asutc = first_std_hour + utc.utcoffset(None)
                # First UTC hour to convert; this is 4:00 when utc=utc_real &
                # tz=Eastern.
                asutcbase = asutc.replace(tzinfo=utc)
                # The repeated 1 in (0, 1, 1, 2) is deliberate: wall-clock
                # hour 1 occurs twice when the clocks fall back.
                for tzhour in (0, 1, 1, 2):
                    expectedbase = self.dstoff.replace(hour=tzhour)
                    for minute in 0, 30, 59:
                        expected = expectedbase.replace(minute=minute)
                        asutc = asutcbase.replace(minute=minute)
                        astz = asutc.astimezone(tz)
                        self.assertEqual(astz.replace(tzinfo=None), expected)
                    asutcbase += HOUR
    def test_bogus_dst(self):
        """astimezone() must reject a tzinfo whose dst() returns None."""
        class ok(tzinfo):
            def utcoffset(self, dt): return HOUR
            def dst(self, dt): return HOUR
        now = self.theclass.now().replace(tzinfo=utc_real)
        # Doesn't blow up.
        now.astimezone(ok())
        # Does blow up.
        class notok(ok):
            def dst(self, dt): return None
        self.assertRaises(ValueError, now.astimezone, notok())
    def test_fromutc(self):
        """fromutc() argument checking, and astimezone() delegating to a
        custom fromutc() override."""
        self.assertRaises(TypeError, Eastern.fromutc) # not enough args
        now = datetime.utcnow().replace(tzinfo=utc_real)
        self.assertRaises(ValueError, Eastern.fromutc, now) # wrong tzinfo
        now = now.replace(tzinfo=Eastern) # insert correct tzinfo
        enow = Eastern.fromutc(now) # doesn't blow up
        self.assertEqual(enow.tzinfo, Eastern) # has right tzinfo member
        self.assertRaises(TypeError, Eastern.fromutc, now, now) # too many args
        self.assertRaises(TypeError, Eastern.fromutc, date.today()) # wrong type
        # Always converts UTC to standard time.
        class FauxUSTimeZone(USTimeZone):
            def fromutc(self, dt):
                return dt + self.stdoffset
        FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")
        #  UTC  4:MM  5:MM  6:MM  7:MM  8:MM  9:MM
        #  EST 23:MM  0:MM  1:MM  2:MM  3:MM  4:MM
        #  EDT  0:MM  1:MM  2:MM  3:MM  4:MM  5:MM
        # Check around DST start.
        start = self.dston.replace(hour=4, tzinfo=Eastern)
        fstart = start.replace(tzinfo=FEastern)
        # Wall-clock hour 2 is absent from the list: the clock springs
        # straight from 1 to 3 at the transition.
        for wall in 23, 0, 1, 3, 4, 5:
            expected = start.replace(hour=wall)
            if wall == 23:
                expected -= timedelta(days=1)
            got = Eastern.fromutc(start)
            self.assertEqual(expected, got)
            expected = fstart + FEastern.stdoffset
            got = FEastern.fromutc(fstart)
            self.assertEqual(expected, got)
            # Ensure astimezone() calls fromutc() too.
            got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
            self.assertEqual(expected, got)
            start += HOUR
            fstart += HOUR
        # Check around DST end.
        start = self.dstoff.replace(hour=4, tzinfo=Eastern)
        fstart = start.replace(tzinfo=FEastern)
        # Wall-clock hour 1 repeats when the clock falls back.
        for wall in 0, 1, 1, 2, 3, 4:
            expected = start.replace(hour=wall)
            got = Eastern.fromutc(start)
            self.assertEqual(expected, got)
            expected = fstart + FEastern.stdoffset
            got = FEastern.fromutc(fstart)
            self.assertEqual(expected, got)
            # Ensure astimezone() calls fromutc() too.
            got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
            self.assertEqual(expected, got)
            start += HOUR
            fstart += HOUR
#############################################################################
# oddballs
class Oddballs(unittest.TestCase):
    """Regression tests that don't fit the other suites."""
    def test_bug_1028306(self):
        """date vs datetime comparison acts as a mixed-type comparison
        even though datetime subclasses date (bug #1028306)."""
        # Trying to compare a date to a datetime should act like a mixed-
        # type comparison, despite that datetime is a subclass of date.
        as_date = date.today()
        as_datetime = datetime.combine(as_date, time())
        self.assertTrue(as_date != as_datetime)
        self.assertTrue(as_datetime != as_date)
        self.assertFalse(as_date == as_datetime)
        self.assertFalse(as_datetime == as_date)
        self.assertRaises(TypeError, lambda: as_date < as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime < as_date)
        self.assertRaises(TypeError, lambda: as_date <= as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime <= as_date)
        self.assertRaises(TypeError, lambda: as_date > as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime > as_date)
        self.assertRaises(TypeError, lambda: as_date >= as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime >= as_date)
        # Nevertheless, comparison should work with the base-class (date)
        # projection if use of a date method is forced.
        self.assertTrue(as_date.__eq__(as_datetime))
        # (day+1) % 20 + 1 is guaranteed to differ from as_date.day for
        # any day in 1..31.
        different_day = (as_date.day + 1) % 20 + 1
        self.assertFalse(as_date.__eq__(as_datetime.replace(day=different_day)))
        # And date should compare with other subclasses of date.  If a
        # subclass wants to stop this, it's up to the subclass to do so.
        date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
        self.assertEqual(as_date, date_sc)
        self.assertEqual(date_sc, as_date)
        # Ditto for datetimes.
        datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
                                       as_date.day, 0, 0, 0)
        self.assertEqual(as_datetime, datetime_sc)
        self.assertEqual(datetime_sc, as_datetime)
def test_main():
    """Run every TestCase in this module via the test_support helper."""
    test_support.run_unittest(__name__)
# Support running this test file directly.
if __name__ == "__main__":
    test_main()
| 39.974777 | 91 | 0.572653 |
7dd3878dadf27938dbde011e287e66a4ae78bc22 | 1,114 | py | Python | appleusergroup/meeting/tests.py | jenslauterbach/appleusergroup | c13b80e66c6054a243acebab40fb9c49ed6708db | [
"MIT"
] | null | null | null | appleusergroup/meeting/tests.py | jenslauterbach/appleusergroup | c13b80e66c6054a243acebab40fb9c49ed6708db | [
"MIT"
] | null | null | null | appleusergroup/meeting/tests.py | jenslauterbach/appleusergroup | c13b80e66c6054a243acebab40fb9c49ed6708db | [
"MIT"
] | null | null | null | from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from .models import Meeting
class MeetingTestCase(TestCase):
    """Exercises Meeting.meetings.upcoming_meeting()."""

    def test_upcoming_meeting(self):
        """The nearest future meeting wins when past and future meetings coexist."""
        reference = timezone.now()
        # A meeting 10 days in the past -- must never be returned.
        Meeting.objects.create(start=reference - timedelta(days=10))
        # Two future meetings; the closer one (in 10 days) is the expected hit.
        closest = Meeting.objects.create(start=reference + timedelta(days=10))
        Meeting.objects.create(start=reference + timedelta(days=20))
        found = Meeting.meetings.upcoming_meeting()
        self.assertIsNotNone(found, "The found upcoming meeting should not be 'None'!")
        self.assertEqual(closest, found, "The found upcoming meeting is wrong!")

    def test_upcoming_meeting_with_no_upcoming_meetings(self):
        """With no meetings stored there is no upcoming meeting."""
        found = Meeting.meetings.upcoming_meeting()
        self.assertIsNone(found, "The upcoming meeting should be 'None'!")
| 34.8125 | 114 | 0.696589 |
2c750b0c503819e0357b52527e82c2ac04d15e83 | 6,161 | py | Python | Test/spec/simd/meta/simd_i16x8_arith.py | WebAssembly/wasm-jit-prototype | 65ca25f8e6578ffc3bcf09c10c80af4f1ba443b2 | [
"BSD-3-Clause"
] | 327 | 2015-10-22T16:48:07.000Z | 2021-12-31T14:47:02.000Z | Test/spec/simd/meta/simd_i16x8_arith.py | WebAssembly/wasm-jit-prototype | 65ca25f8e6578ffc3bcf09c10c80af4f1ba443b2 | [
"BSD-3-Clause"
] | 10 | 2015-12-22T12:11:25.000Z | 2019-10-14T22:42:21.000Z | Test/spec/simd/meta/simd_i16x8_arith.py | WebAssembly/wasm-jit-prototype | 65ca25f8e6578ffc3bcf09c10c80af4f1ba443b2 | [
"BSD-3-Clause"
] | 30 | 2015-10-22T16:48:12.000Z | 2022-01-18T16:37:10.000Z | #!/usr/bin/env python3
"""
Generate i16x8 integer arithmetic operation cases.
"""
from simd_arithmetic import SimdArithmeticCase
class SimdI16x8ArithmeticCase(SimdArithmeticCase):
    """Test-data provider for i16x8 SIMD integer arithmetic (add/sub/mul).

    Each *_test_data property yields rows of the form
    [[operand(s)], expected_result, [param lane type, ..., result lane type]].
    """
    # 8 lanes of 16-bit integers.
    LANE_LEN = 8
    LANE_TYPE = 'i16x8'
    @property
    def hex_binary_op_test_data(self):
        """Hex operand pairs exercising i16 boundary values (0x7fff/0x8000 wrap, etc.)."""
        return [
            ('0x3fff', '0x4000'),
            ('0x4000', '0x4000'),
            ('-0x3fff', '-0x4000'),
            ('-0x4000', '-0x4000'),
            ('-0x4000', '-0x4001'),
            ('0x7fff', '0x7fff'),
            ('0x7fff', '0x01'),
            ('0x8000', '-0x01'),
            ('0x7fff', '0x8000'),
            ('0x8000', '0x8000'),
            ('0xffff', '0x01'),
            ('0xffff', '0xffff')
        ]
    @property
    def hex_unary_op_test_data(self):
        """Hex single operands around the signed 16-bit limits."""
        return ['0x01', '-0x01', '-0x8000', '-0x7fff', '0x7fff', '0x8000', '0xffff']
    @property
    def i16x8_i8x16_test_data(self):
        """Mixed-shape cases: i16x8 op with an i8x16 operand, i16x8 result."""
        return {
            'i16x8.add': [
                [['0x7fff', ['0', '0x80'] * 8], '-1', ['i16x8', 'i8x16', 'i16x8']],
                [['1', '255'], '0', ['i16x8', 'i8x16', 'i16x8']]
            ],
            'i16x8.sub': [
                [['0x7fff', ['0', '0x80'] * 8], '-1', ['i16x8', 'i8x16', 'i16x8']],
                [['1', '255'], '0x02', ['i16x8', 'i8x16', 'i16x8']]
            ],
            'i16x8.mul': [
                [['0x1000', '0x10'], '0', ['i16x8', 'i8x16', 'i16x8']],
                [['65535', '255'], '0x01', ['i16x8', 'i8x16', 'i16x8']]
            ]
        }
    @property
    def i16x8_i32x4_test_data(self):
        """Mixed-shape cases: i16x8 op with an i32x4 operand, i16x8 result."""
        return {
            'i16x8.add': [
                [['0x7fff', '0x80008000'], '-1', ['i16x8', 'i32x4', 'i16x8']],
                [['1', '0xffffffff'], '0', ['i16x8', 'i32x4', 'i16x8']]
            ],
            'i16x8.sub': [
                [['0x7fff', '0x80008000'], '-1', ['i16x8', 'i32x4', 'i16x8']],
                [['1', '0xffffffff'], '0x02', ['i16x8', 'i32x4', 'i16x8']]
            ],
            'i16x8.mul': [
                [['0x8000', '0x00020002'], '0', ['i16x8', 'i32x4', 'i16x8']],
                [['65535', '0xffffffff'], '0x01', ['i16x8', 'i32x4', 'i16x8']]
            ]
        }
    @property
    def i16x8_f32x4_test_data(self):
        """Mixed-shape cases: i16x8 op with an f32x4 operand (bit patterns of special floats)."""
        return {
            'i16x8.add': [
                [['0x8000', '+0.0'], '0x8000', ['i16x8', 'f32x4', 'i16x8']],
                [['0x8000', '-0.0'], ['0x8000', '0'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['0x8000', '1.0'], ['0x8000', '0xbf80'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['0x8000', '-1.0'], ['0x8000', '0x3f80'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['1', '+inf'], ['0x01', '0x7f81'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['1', '-inf'], ['0x01', '0xff81'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['1', 'nan'], ['0x01', '0x7fc1'] * 4, ['i16x8', 'f32x4', 'i16x8']]
            ],
            'i16x8.sub': [
                [['0x8000', '+0.0'], '0x8000', ['i16x8', 'f32x4', 'i16x8']],
                [['0x8000', '-0.0'], ['0x8000', '0'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['0x8000', '1.0'], ['0x8000', '0x4080'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['0x8000', '-1.0'], ['0x8000', '0xc080'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['1', '+inf'], ['0x01', '0x8081'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['1', '-inf'], ['0x01', '0x81'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['1', 'nan'], ['0x01', '0x8041'] * 4, ['i16x8', 'f32x4', 'i16x8']]
            ],
            'i16x8.mul': [
                [['0x8000', '+0.0'], '0', ['i16x8', 'f32x4', 'i16x8']],
                [['0x8000', '-0.0'], '0', ['i16x8', 'f32x4', 'i16x8']],
                [['0x8000', '1.0'], '0', ['i16x8', 'f32x4', 'i16x8']],
                [['0x8000', '-1.0'], '0', ['i16x8', 'f32x4', 'i16x8']],
                [['1', '+inf'], ['0', '0x7f80'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['1', '-inf'], ['0', '0xff80'] * 4, ['i16x8', 'f32x4', 'i16x8']],
                [['1', 'nan'], ['0', '0x7fc0'] * 4, ['i16x8', 'f32x4', 'i16x8']]
            ]
        }
    @property
    def combine_dec_hex_test_data(self):
        """Cases mixing decimal and hex lane literals within one vector."""
        return {
            'i16x8.add': [
                [[['0', '1', '2', '3', '4', '5', '6', '7'],
                  ['0', '0xffff', '0xfffe', '0xfffd', '0xfffc', '0xfffb', '0xfffa', '0xfff9']],
                 ['0'] * 8, ['i16x8'] * 3]
            ],
            'i16x8.sub': [
                [[['0', '1', '2', '3', '4', '5', '6', '7'],
                  ['0', '0xffff', '0xfffe', '0xfffd', '0xfffc', '0xfffb', '0xfffa', '0xfff9']],
                 ['0', '0x02', '0x04', '0x06', '0x08', '0x0a', '0x0c', '0x0e'], ['i16x8'] * 3]
            ],
            'i16x8.mul': [
                [[['0', '1', '2', '3', '4', '5', '6', '7'],
                  ['0', '0xffff', '0xfffe', '0xfffd', '0xfffc', '0xfffb', '0xfffa', '0xfff9']],
                 ['0', '0xffff', '0xfffc', '0xfff7', '0xfff0', '0xffe7', '0xffdc', '0xffcf'],
                 ['i16x8'] * 3]
            ]
        }
    @property
    def range_test_data(self):
        """Programmatically generated per-lane ranges for each operation."""
        return {
            'i16x8.add': [
                [[[str(i) for i in range(8)], [str(i * 2) for i in range(8)]],
                 [str(i * 3) for i in range(8)], ['i16x8'] * 3]
            ],
            'i16x8.sub': [
                [[[str(i) for i in range(8)], [str(i * 2) for i in range(8)]],
                 [str(-i) for i in range(8)], ['i16x8'] * 3]
            ],
            'i16x8.mul': [
                [[[str(i) for i in range(8)], [str(i * 2) for i in range(8)]],
                 ['0', '0x02', '0x08', '0x12', '0x20', '0x32', '0x48', '0x62'],
                 ['i16x8'] * 3]
            ]
        }
    @property
    def full_bin_test_data(self):
        """All binary-op data sets combined, consumed by the base-class generator."""
        return [
            self.i16x8_i8x16_test_data,
            self.i16x8_i32x4_test_data,
            self.i16x8_f32x4_test_data,
            self.combine_dec_hex_test_data,
            self.range_test_data
        ]
def gen_test_cases():
    """Generate and emit the i16x8 integer arithmetic test cases."""
    SimdI16x8ArithmeticCase().gen_test_cases()


if __name__ == '__main__':
    gen_test_cases()
b04e65b82446f695ae23e7f95974f7a9ebfca6d3 | 2,549 | py | Python | tests/scripts/test_partition_data.py | microsoft/lightgbm-benchmark | 286668d698d9d166857f924ecb775d5de224d489 | [
"MIT"
] | 13 | 2021-08-20T01:03:51.000Z | 2022-02-12T05:34:46.000Z | tests/scripts/test_partition_data.py | microsoft/lightgbm-benchmark | 286668d698d9d166857f924ecb775d5de224d489 | [
"MIT"
] | 199 | 2021-08-21T21:18:53.000Z | 2022-03-27T23:08:44.000Z | tests/scripts/test_partition_data.py | microsoft/lightgbm-benchmark | 286668d698d9d166857f924ecb775d5de224d489 | [
"MIT"
] | 4 | 2021-08-20T06:53:26.000Z | 2022-01-24T22:22:39.000Z | """
test src/scripts/partition_data/partition.py
"""
import os
import sys
import tempfile
import pytest
from unittest.mock import patch
from scripts.data_processing.partition_data import partition
# IMPORTANT: see conftest.py for fixtures
def verify_partitioned_files(partitioned_data_dir, expected_file_count, expected_file_length):
    """Utility for testing outputs.

    Asserts that ``partitioned_data_dir`` exists and contains files named
    part_000000 .. part_{expected_file_count-1:06d}, each containing exactly
    ``expected_file_length`` lines.
    """
    assert os.path.isdir(partitioned_data_dir)
    for index in range(expected_file_count):
        expected_file = os.path.join(partitioned_data_dir, "part_{:06d}".format(index))
        # f-string so the offending path shows up in the failure message
        # (the original used a plain string, leaving a literal
        # "{expected_file}" placeholder in the output).
        assert os.path.isfile(
            expected_file
        ), f"Script partition.py should generate partitioned data file {expected_file} in --output, but no output files were found"
        # Count lines lazily; also handles empty files (the original raised
        # NameError on `count` when the file had zero lines).
        with open(expected_file, 'r') as i_file:
            line_count = sum(1 for _ in i_file)
        assert line_count == expected_file_length  # expected size of each chunk
def test_partition_data_roundrobin(temporary_dir, regression_train_sample):
    """Tests src/scripts/data_processing/partition_data/partition.py"""
    output_dir = os.path.join(temporary_dir, "partitioned_data")
    # Simulate a CLI invocation: the 100-sample input is split round-robin
    # across 5 files, so each output file should hold 20 lines.
    cli_args = [
        "partition.py",
        "--input", regression_train_sample,
        "--output", output_dir,
        "--mode", "roundrobin",
        "--number", "5",
    ]
    # Swap sys.argv for the fake command line while the script runs.
    with patch.object(sys, "argv", cli_args):
        partition.main()
    verify_partitioned_files(output_dir,
                             expected_file_count=5,
                             expected_file_length=20)
def test_partition_data_chunk(temporary_dir, regression_train_sample):
    """Tests src/scripts/data_processing/partition_data/partition.py"""
    output_dir = os.path.join(temporary_dir, "partitioned_data")
    # Simulate a CLI invocation: chunk mode cuts the 100-sample input into
    # contiguous 5-line pieces, yielding 20 output files.
    cli_args = [
        "partition.py",
        "--input", regression_train_sample,
        "--output", output_dir,
        "--mode", "chunk",
        "--number", "5",
    ]
    # Swap sys.argv for the fake command line while the script runs.
    with patch.object(sys, "argv", cli_args):
        partition.main()
    verify_partitioned_files(output_dir,
                             expected_file_count=20,
                             expected_file_length=5)
| 31.469136 | 130 | 0.690467 |
3299f8668d3ec246d06cc4c13ed77ee418f915b5 | 3,058 | py | Python | FeedBackSystem/NewsReader.py | xzhuah/Domain-news-enhancement-system | 0ece35bb26b811c8b00895044da2d5b076a2c553 | [
"Apache-2.0"
] | 1 | 2020-11-28T04:33:27.000Z | 2020-11-28T04:33:27.000Z | FeedBackSystem/NewsReader.py | xzhuah/Domain-news-enhancement-system | 0ece35bb26b811c8b00895044da2d5b076a2c553 | [
"Apache-2.0"
] | null | null | null | FeedBackSystem/NewsReader.py | xzhuah/Domain-news-enhancement-system | 0ece35bb26b811c8b00895044da2d5b076a2c553 | [
"Apache-2.0"
] | null | null | null | #0~14
#14~19
import json
from flask import render_template
from flask import Flask,redirect
from flask import request
import feedback
import NewsRanker
# Flask application serving the news review / feedback UI.
app = Flask(__name__)
# Scored news records, one JSON object per line; presumably written by
# NewsRanker.scoreNews (see submitAndRerank) -- confirm against NewsRanker.
result_file_path = "../sortedResult.txt"
def loadNews(news_path):
    """Load scored news records and bucket them by score.

    Each non-empty line of ``news_path`` must be a JSON object carrying a
    numeric ``"score"`` key.  Buckets: score > 19 -> high,
    14 < score <= 19 -> mid, otherwise -> low.

    Returns a tuple ``(high, mid, low)`` of lists of parsed records.
    """
    high_score_news_ = []
    mid_score_news_ = []
    low_score_news_ = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(news_path) as f:
        content = f.read().split("\n")
    for c in content:
        if c == "":
            continue
        obj = json.loads(c)
        if obj["score"] > 19:
            high_score_news_.append(obj)
        elif obj["score"] > 14:
            mid_score_news_.append(obj)
        else:
            low_score_news_.append(obj)
    return high_score_news_, mid_score_news_, low_score_news_
# Module-level news buckets; reloaded from disk after each re-ranking.
high_score_news,mid_score_news ,low_score_news = loadNews(result_file_path)
# Pending keyword-score adjustments accumulated from user feedback,
# applied (and then cleared) by the /submit handler.
local_updater = {}
@app.route('/')
def redirectTo():
    """Landing page: send visitors straight to the high-rank listing."""
    return redirect("./highrank", code=302)
@app.route('/highrank')
def highrank():
    """Render the news bucket with scores above 19."""
    return render_template('./basicHtml.html', title='High Rank News',
                           all_news=high_score_news, keywords=local_updater)
@app.route('/midrank')
def midrank():
    """Render the news bucket with scores in the middle band."""
    return render_template('./basicHtml.html', title='Mid Rank News',
                           all_news=mid_score_news, keywords=local_updater)
@app.route('/lowrank')
def lowrank():
    """Render the news bucket with the lowest scores."""
    return render_template('./basicHtml.html', title='Low Rank News',
                           all_news=low_score_news, keywords=local_updater)
@app.route('/submit')
def submitAndRerank():
    """Apply accumulated feedback, re-score all stored news, and reload the buckets."""
    global local_updater,high_score_news, mid_score_news, low_score_news
    # Persist the pending keyword-score adjustments, then re-rank everything.
    feedback.updateBasewordScore(local_updater)
    NewsRanker.scoreNews("../news_store.csv","../basewordScore.txt")
    # Reload the freshly scored results and clear the pending feedback.
    high_score_news, mid_score_news, low_score_news = loadNews(result_file_path)
    local_updater = {}
    return redirect("./highrank", code=302)
@app.route('/upgrade')
def upgradeNews():
    """Record positive feedback for one news item (query params: title, content)."""
    global local_updater
    item = {"title": request.args.get("title"),
            "content": request.args.get("content")}
    delta = feedback.upgradeNews([item])
    local_updater = feedback.combineUpdater(local_updater, delta)
    print(str(local_updater))
    return (''), 204
@app.route('/downgrade')
def downgradeNews():
    """Record negative feedback for one news item (query params: title, content)."""
    global local_updater
    item = {"title": request.args.get("title"),
            "content": request.args.get("content")}
    delta = feedback.downgradeNews([item])
    local_updater = feedback.combineUpdater(local_updater, delta)
    print(str(local_updater))
    return (''), 204


if __name__ == '__main__':
    app.run(host='0.0.0.0',port=8081)
| 30.888889 | 119 | 0.702093 |
3408e92ba358828dbc252a7698c9b7450eff78f6 | 7,021 | py | Python | h5plexos/query/solution.py | NREL/h5plexos | fa14e2f21d4710e05f058cd1cc3e714d5eab51b2 | [
"BSD-3-Clause"
] | 2 | 2021-09-09T05:29:07.000Z | 2021-12-06T05:45:54.000Z | h5plexos/query/solution.py | NREL/h5plexos | fa14e2f21d4710e05f058cd1cc3e714d5eab51b2 | [
"BSD-3-Clause"
] | 2 | 2021-07-08T14:42:41.000Z | 2022-01-18T20:33:51.000Z | h5plexos/query/solution.py | NREL/h5plexos | fa14e2f21d4710e05f058cd1cc3e714d5eab51b2 | [
"BSD-3-Clause"
] | 1 | 2021-10-08T16:23:33.000Z | 2021-10-08T16:23:33.000Z | import h5py
import numpy as np
import pandas as pd
import re
def issequence(x):
    """Return True when *x* is iterable, i.e. defines ``__iter__``.

    Note this also accepts strings and other non-sequence iterables.
    """
    return getattr(x, "__iter__", None) is not None
version_rgx = re.compile("^v(\d+)\.(\d+)\.(\d+)$")
class PLEXOSSolution:
    """Read-only query interface over an h5plexos-formatted HDF5 solution file.

    On construction the file's format version is detected and the metadata
    groups (objects, relations, times, and -- for v0.6.2+ -- blocks) are
    indexed into pandas structures for fast lookup.  Usable as a context
    manager; the HDF5 handle is closed on exit.
    """
    def __init__(self, h5filepath):
        """Open *h5filepath* read-only and build the metadata lookup tables."""
        self.h5file = h5py.File(h5filepath, "r")
        # Files without an "h5plexos" attribute predate versioning; treat
        # them as v0.5.0.
        self.versionstring = self.h5file.attrs.get("h5plexos")
        if self.versionstring:
            self.versionstring = self.versionstring.decode("UTF8")
            v = version_rgx.match(self.versionstring)
            v = v.group(1,2,3)
            v = tuple(int(i) for i in v)
        else:
            v = (0,5,0)
        if ((0,6,0) <= v and v < (0,7,0)):
            print("Querying H5PLEXOS " + self.versionstring + " file")
        else:
            print("Querying H5PLEXOS v0.5.0 file")
        self.version = v
        # Map object-class name -> Series of row indices keyed by
        # (category, name), sorted for .loc slicing.
        self.objects = {}
        for name, dset in self.h5file["/metadata/objects"].items():
            idx = pd.MultiIndex.from_arrays(
                [dset["category"].astype("U"), dset["name"].astype("U")],
                names = ["category", "name"])
            self.objects[name] = pd.Series(range(len(idx)), index=idx).sort_index()
        # Map relation name -> Series of row indices keyed by (parent, child).
        self.relations = {}
        for name, dset in self.h5file["/metadata/relations"].items():
            idx = pd.MultiIndex.from_arrays(
                [dset["parent"].astype("U"), dset["child"].astype("U")],
                names = ["parent", "child"])
            self.relations[name] = pd.Series(range(len(idx)), index=idx)
        # Map timescale name -> DatetimeIndex parsed from ISO-8601 strings.
        self.timestamps = {}
        for name, dset in self.h5file["/metadata/times"].items():
            self.timestamps[name] = pd.to_datetime(dset[:].astype("U"),
                                                   format="%Y-%m-%dT%H:%M:%S")
        # Interval -> block mappings only exist from format v0.6.2 onwards.
        if self.version >= (0,6,2):
            self.blocks = {}
            for name, dset in self.h5file["/metadata/blocks"].items():
                mapping = pd.Series(
                    data=dset["block"],
                    index=pd.to_datetime(dset["interval"].astype("U"),
                                         format="%Y-%m-%dT%H:%M:%S"))
                mapping.index.name = "interval"
                mapping.name = "block"
                self.blocks[name] = mapping
    def close(self):
        """Close the underlying HDF5 file handle."""
        self.h5file.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def __getattr__(self, collection):
        """Expose collections as query shortcuts, e.g. sol.generators(...).

        Names containing "_" are treated as relation queries, others as
        object queries; the returned partial forwards remaining arguments.
        """
        # TODO: Something smarter to determine whether querying object or relation properties
        if "_" in collection:
            f = self.query_relation_property
        else:
            f = self.query_object_property
        def _partial_query(*args, **kwargs):
            return f(collection, *args, **kwargs)
        return _partial_query
    # Query methods
    def query_object_property(
            self, object_class, prop,
            names=slice(None), categories=slice(None),
            timescale="interval", timespan=slice(None), phase="ST"):
        """Return *prop* values for objects of *object_class* as a Series.

        The result is indexed by (category, name, property, time, band);
        NaN entries are dropped.  Raises ValueError when the data path is
        absent from the file.
        """
        # v0.6.x files pluralize object-class group names.
        if ((0,6,0) <= self.version and self.version < (0,7,0)):
            object_class += "s"
        obj_lookup = self.objects[object_class].loc[(categories, names),].sort_values()
        data_path = "/data/" + "/".join([phase, timescale, object_class, prop])
        if data_path not in self.h5file:
            raise ValueError(data_path + " does not exist in the solution file")
        dset = self.h5file[data_path]
        n_bands = dset.shape[2]
        n_periods = dset.shape[1]
        data = dset[obj_lookup.values, :, :]
        if ((0,6,0) <= self.version and self.version < (0,7,0)):
            if phase != "ST" and timescale == "interval":
                # Non-ST interval data is block-numbered rather than timestamped.
                timestamps = range(1, n_periods+1)
                timename = "block"
            else:
                # v0.6.x stores only a window of periods; offset into the
                # full timestamp index accordingly.
                period_offset = dset.attrs["period_offset"]
                timestamps = self.timestamps[timescale][period_offset:(period_offset+n_periods)]
                timename = "timestamp"
        else:
            timestamps = self.timestamps[timescale]
            timename = "timestamp"
        # Multiindex on category, name, property, time, band
        idx = pd.MultiIndex.from_product(
            [obj_lookup.index.get_level_values(1), # List object names
             [prop], # Report property (in preparation for multi-property queries)
             timestamps, # List all timestamps in data range
             range(1, n_bands+1)] # List all bands
        )
        # Prepend the category level (repeated per name/time/band combination).
        cidx = pd.CategoricalIndex(obj_lookup.index.get_level_values(0))
        cidx_codes = (cidx.codes.repeat(n_bands * len(timestamps)))
        idx = pd.MultiIndex(levels=[cidx.categories] + idx.levels, codes= [cidx_codes] + idx.codes,
                            names=["category", "name", "property", timename, "band"])
        return pd.Series(data=data.reshape(-1), index=idx).dropna().sort_index()
    def query_relation_property(
            self, relation, prop,
            parents=slice(None), children=slice(None),
            timescale="interval", timespan=slice(None), phase="ST"):
        """Return *prop* values for a parent-child *relation* as a Series.

        The result is indexed by (parent, child, property, time, band);
        NaN entries are dropped.  Raises ValueError when the data path is
        absent from the file.
        """
        relation_lookup = self.relations[relation].loc[(parents, children),].sort_values()
        data_path = "/data/" + "/".join([phase, timescale, relation, prop])
        if data_path not in self.h5file:
            raise ValueError(data_path + " does not exist in the solution file")
        dset = self.h5file[data_path]
        n_bands = dset.shape[2]
        n_periods = dset.shape[1]
        data = dset[relation_lookup.values, :, :]
        if ((0,6,0) <= self.version and self.version < (0,7,0)):
            if phase != "ST" and timescale == "interval":
                # Non-ST interval data is block-numbered rather than timestamped.
                timestamps = range(1, n_periods+1)
                timename = "block"
            else:
                period_offset = dset.attrs["period_offset"]
                timestamps = self.timestamps[timescale][period_offset:(period_offset+n_periods)]
                timename = "timestamp"
        else:
            timestamps = self.timestamps[timescale]
            timename = "timestamp"
        # Multiindex on parent, child, property, time, band
        idx = pd.MultiIndex.from_product(
            [relation_lookup.index.get_level_values(1), # List child names
             [prop], # Report property (in preparation for multi-property queries)
             timestamps, # List all timestamps (but eventually not)
             range(1, n_bands+1)] # List all bands
        )
        # Prepend the parent level (repeated per child/time/band combination).
        cidx = pd.CategoricalIndex(relation_lookup.index.get_level_values(0))
        cidx_codes = (cidx.codes.repeat(n_bands * len(timestamps)))
        idx = pd.MultiIndex(levels=[cidx.categories] + idx.levels, codes= [cidx_codes] + idx.codes,
                            names=["parent", "child", "property", timename, "band"])
        return pd.Series(data=data.reshape(-1), index=idx).dropna().sort_index()
| 37.951351 | 99 | 0.558183 |
ce2f620cb9f13a9ef613b8db664bf874f97eb0b7 | 1,234 | py | Python | torcms/script/autocrud/base_crud.py | bukun/TorCMS | 5d7480865fd46e706b84f5f65a5c24cd03bb2142 | [
"MIT"
] | 243 | 2015-02-11T03:22:19.000Z | 2022-03-02T11:13:27.000Z | torcms/script/autocrud/base_crud.py | bukun/TorCMS | 5d7480865fd46e706b84f5f65a5c24cd03bb2142 | [
"MIT"
] | 8 | 2015-09-09T10:49:52.000Z | 2020-08-30T08:52:48.000Z | torcms/script/autocrud/base_crud.py | bukun/TorCMS | 5d7480865fd46e706b84f5f65a5c24cd03bb2142 | [
"MIT"
] | 101 | 2015-02-12T02:17:16.000Z | 2021-11-19T09:20:10.000Z | # -*- coding:utf-8 -*-
'''
Basic configuration for CRUD.
'''
import os
# Output root for the generated CRUD templates.
CRUD_PATH = os.path.abspath('./templates/autogen')
# Directory holding the spreadsheet that describes the data model.
META_DIR = './database/meta'
XLSX_FILE = './database/meta/info_tags.xlsx'
# Pick an .xlsx file found in META_DIR (skipping Office lock files that
# start with '~'); the default above is kept if none is found.  Note that
# when several match, the last one wins.
for wfile in os.listdir(META_DIR):
    if wfile.startswith('~'):
        continue
    if wfile.lower().endswith('.xlsx'):
        XLSX_FILE = os.path.join(META_DIR, wfile)
# The filter keys are stored in the columns below.
RAW_LIST = [
    'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
    'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
]
# Excel-style column labels: E..Z plus the two-letter columns AA..DZ
# restricted to the same letter set.
FILTER_COLUMNS = RAW_LIST + ["A" + x for x in ['A', 'B', 'C', 'D'] + RAW_LIST] + \
                 ["B" + x for x in ['A', 'B', 'C', 'D'] + RAW_LIST] + \
                 ["C" + x for x in ['A', 'B', 'C', 'D'] + RAW_LIST] + \
                 ["D" + x for x in ['A', 'B', 'C', 'D'] + RAW_LIST]
def build_dir():
    '''
    Build the directories used for the generated templates.

    Creates CRUD_PATH/{add,edit,view,list,infolist}; directories that
    already exist are left untouched.
    '''
    tag_arr = ['add', 'edit', 'view', 'list', 'infolist']
    for wtag in tag_arr:
        # exist_ok avoids the check-then-create race of the original
        # (os.path.exists followed by os.makedirs could fail if the
        # directory appeared in between).
        os.makedirs(os.path.join(CRUD_PATH, wtag), exist_ok=True)
INPUT_ARR = ['digits', 'text', 'date', 'number', 'email', 'url', 'download']
| 27.422222 | 82 | 0.511345 |
931db214d8543db250fda3cc669da3334ed02ada | 10,483 | py | Python | setup.py | thomasjpfan/pyamg | b0904d31c8da0c29affcd7d0fcd2bb8cb910b42a | [
"MIT"
] | null | null | null | setup.py | thomasjpfan/pyamg | b0904d31c8da0c29affcd7d0fcd2bb8cb910b42a | [
"MIT"
] | null | null | null | setup.py | thomasjpfan/pyamg | b0904d31c8da0c29affcd7d0fcd2bb8cb910b42a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PyAMG: Algebraic Multigrid Solvers in Python
PyAMG is a library of Algebraic Multigrid (AMG)
solvers with a convenient Python interface.
PyAMG features implementations of:
- Ruge-Stuben (RS) or Classical AMG
- AMG based on Smoothed Aggregation (SA)
- Adaptive Smoothed Aggregation (αSA)
- Compatible Relaxation (CR)
- Krylov methods such as CG, GMRES, FGMRES, BiCGStab, MINRES, etc
PyAMG is primarily written in Python with
supporting C++ code for performance critical operations.
"""
import os
import sys
import subprocess
import setuptools
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test as TestCommand
# Base package version; `isreleased` switches between a plain release
# string and a dev version carrying the abbreviated git revision.
version = '4.0.0'
isreleased = False
install_requires = (
    'numpy>=1.7.0',
    'scipy>=0.12.0',
    'pytest>=2',
)
# set the version information
# https://github.com/numpy/numpy/commits/master/setup.py
# Return the git revision as a string
def git_version():
    """Return the current git revision (full SHA) as a string.

    Returns 'Unknown' when the git executable cannot be run.
    """
    def _minimal_ext_cmd(cmd):
        # construct minimal environment so git output is not localized
        env = {}
        for k in ['SYSTEMROOT', 'PATH']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               env=env).communicate()[0]
        return out
    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        GIT_REVISION = out.strip().decode('ascii')
        # NOTE: the original also ran `git rev-parse --abbrev-ref HEAD` to
        # compute a GIT_BRANCH that was never used; worse, an OSError from
        # that dead call clobbered a successfully obtained revision with
        # 'Unknown'.  The dead call has been removed.
    except OSError:
        GIT_REVISION = 'Unknown'
    return GIT_REVISION
def set_version_info(VERSION, ISRELEASED):
    """Compute (FULLVERSION, GIT_REVISION) for the build.

    The revision is taken from git when building from a checkout, or from
    a previously generated pyamg/version.py otherwise; 'Unknown' is used
    as a last resort.  Dev builds get a '.dev0+<sha7>' suffix.
    """
    if os.path.exists('.git'):
        GIT_REVISION = git_version()
    elif os.path.exists('pyamg/version.py'):
        try:
            # NOTE(review): `imp` is deprecated; kept as-is to avoid
            # changing build behavior here.
            import imp
            version = imp.load_source("pyamg.version", "pyamg/version.py")
            GIT_REVISION = version.git_revision
        except ImportError:
            raise ImportError('Unable to read version information.')
    else:
        GIT_REVISION = 'Unknown'
        GIT_BRANCH = ''
    FULLVERSION = VERSION
    if not ISRELEASED:
        FULLVERSION += '.dev0' + '+' + GIT_REVISION[:7]
    print(GIT_REVISION)
    print(FULLVERSION)
    return FULLVERSION, GIT_REVISION
def write_version_py(VERSION,
                     FULLVERSION,
                     GIT_REVISION,
                     ISRELEASED,
                     filename='pyamg/version.py'):
    """Write the generated pyamg/version.py module to *filename*.

    The file records the short/full version strings, the git revision and
    the release flag so installed packages can report how they were built.
    """
    cnt = """
# THIS FILE IS GENERATED FROM SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
    version = full_version
"""
    # `with` guarantees the handle is closed even if the write fails
    # (replaces the original try/finally pair).
    with open(filename, 'w') as a:
        a.write(cnt % {'version': VERSION,
                       'full_version': FULLVERSION,
                       'git_revision': GIT_REVISION,
                       'isrelease': str(ISRELEASED)})
# Compute the effective version at setup time and materialize it into
# pyamg/version.py so the installed package can report it later.
fullversion, git_revision = set_version_info(version, isreleased)
write_version_py(version, fullversion, git_revision, isreleased,
                 filename='pyamg/version.py')
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to pytest."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # No extra CLI args; let pytest discover the suite itself.
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        pytest.main(self.test_args)
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
    """Return True when *compiler* accepts the compile flag *flagname*.

    Probes support by compiling a trivial C++ translation unit with the
    flag appended to the compiler command line.
    """
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.cpp') as probe:
        probe.write('int main (int argc, char **argv) { return 0; }')
        try:
            compiler.compile([probe.name], extra_postargs=[flagname])
        except setuptools.distutils.errors.CompileError:
            return False
        return True
def cpp_flag(compiler):
    """Return the -std=c++[11/14] compiler flag.

    C++14 is preferred over C++11 when both are available.
    """
    for candidate in ('-std=c++14', '-std=c++11'):
        if has_flag(compiler, candidate):
            return candidate
    raise RuntimeError('Unsupported compiler -- at least C++11 support '
                       'is needed!')
class BuildExt(build_ext):
    """A custom build extension for adding compiler-specific options."""
    # Per-compiler-type compile options.
    c_opts = {
        'msvc': ['/EHsc'],
        'unix': [],
    }
    # Per-compiler-type link options.
    l_opts = {
        'msvc': [],
        'unix': [],
    }
    if sys.platform == 'darwin':
        # macOS needs libc++ and a minimum deployment target for C++11.
        l_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
        c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
    def build_extensions(self):
        try:
            # -Wstrict-prototypes is invalid for C++; drop it if present.
            self.compiler.compiler_so.remove("-Wstrict-prototypes")
        except (AttributeError, ValueError):
            pass
        ct = self.compiler.compiler_type
        c_opts = self.c_opts.get(ct, [])
        l_opts = self.l_opts.get(ct, [])
        if ct == 'unix':
            c_opts.append(cpp_flag(self.compiler))
            if has_flag(self.compiler, '-fvisibility=hidden'):
                c_opts.append('-fvisibility=hidden')
        # Apply the chosen options and embed the package version into
        # every extension via the VERSION_INFO macro.
        for ext in self.extensions:
            ext.extra_compile_args = c_opts
            ext.extra_link_args = l_opts
            ext.define_macros = [('VERSION_INFO', '"{}"'.format(self.distribution.get_version()))]
        build_ext.build_extensions(self)
    # identify extension modules
    # since numpy is needed (for the path), need to bootstrap the setup
    # http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
    def finalize_options(self):
        build_ext.finalize_options(self)
        __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
class get_pybind_include(object):
    """Helper class to determine the pybind11 include path
    The purpose of this class is to postpone importing pybind11
    until it is actually installed, so that the ``get_include()``
    method can be invoked. """
    def __init__(self, user=False):
        # Forwarded to pybind11.get_include() for user-site installs.
        self.user = user
    def __str__(self):
        import pybind11
        # The issue:
        # https://github.com/pybind/pybind11/issues/1067
        #
        # pybind11 will install files to
        # TMP/pybind11-version.egg/*.h
        # TMP/pybind11-version.egg/detail/*.h
        #
        # We need this to look like
        # TMP/pybind11/*.h
        # TMP/pybind11/detail/*.h
        # TMPDIR/pybind11-2.2.4-py3.7.egg/pybind11/__init__.py
        f = pybind11.__file__
        # TMPDIR/pybind11-2.2.4-py3.7.egg/pybind11/
        d = os.path.dirname(f)
        # TMPDIR/pybind11-2.2.4-py3.7.egg
        dd = os.path.dirname(d)
        # TMPDIR
        tmpdir = os.path.dirname(dd)
        # check if not a half-install
        if not os.path.exists(os.path.join(dd, 'pybind11.h')):
            return pybind11.get_include(self.user)
        # if it *is* a half-install
        # Then copy all files to
        # TMPDIR/pybind11
        if not os.path.isdir(os.path.join(tmpdir, 'pybind11')):
            import shutil
            shutil.copytree(dd, os.path.join(tmpdir, 'pybind11'))
        return tmpdir
amg_core_headers = ['evolution_strength.h',
'graph.h',
'krylov.h',
'linalg.h',
'relaxation.h',
'ruge_stuben.h',
'smoothed_aggregation.h']
amg_core_headers = [f.replace('.h', '') for f in amg_core_headers]
ext_modules = [Extension('pyamg.amg_core.%s' % f,
sources=['pyamg/amg_core/%s_bind.cpp' % f],
include_dirs=[get_pybind_include(), get_pybind_include(user=True)],
undef_macros=['NDEBUG'],
language='c++') for f in amg_core_headers]
ext_modules += [Extension('pyamg.amg_core.tests.bind_examples',
sources=['pyamg/amg_core/tests/bind_examples_bind.cpp'],
include_dirs=[get_pybind_include(), get_pybind_include(user=True)],
language='c++')]
setup(
    name='pyamg',
    version=fullversion,
    keywords=['algebraic multigrid AMG sparse matrix preconditioning'],
    author='Nathan Bell, Luke Olson, and Jacob Schroder',
    author_email='luke.olson@gmail.com',
    maintainer='Luke Olson',
    maintainer_email='luke.olson@gmail.com',
    url='https://github.com/pyamg/pyamg',
    download_url='https://github.com/pyamg/pyamg/releases',
    license='MIT',
    platforms=['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
    description=__doc__.split('\n')[0],
    long_description=__doc__,
    # package contents
    packages=find_packages(exclude=['doc']),
    package_data={'pyamg': ['gallery/example_data/*.mat', 'gallery/mesh_data/*.npz']},
    include_package_data=False,
    install_requires=install_requires,
    zip_safe=False,
    # compiled extensions and custom build/test commands
    ext_modules=ext_modules,
    cmdclass={'build_ext': BuildExt, 'test': PyTest},
    setup_requires=['numpy', 'pybind11'],
    # testing
    tests_require=['pytest'],
    # PyPI metadata
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: X11 Applications',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: C++',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Education',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| 31.575301 | 98 | 0.603739 |
5ff790839665c94b28ed5cd469abdba2bdb4370d | 3,259 | py | Python | chapter_3/moreplus.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | [
"MIT"
] | null | null | null | chapter_3/moreplus.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | [
"MIT"
] | null | null | null | chapter_3/moreplus.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | [
"MIT"
] | null | null | null | """
split and interactively page a string, file, or stream of
text to stdout; when run as a script, page stdin or file
whose name is passed on cmdline; if input is stdin, can't
use it for user reply--use platform-specific tools or GUI;
"""
import sys
def getreply():
    """
    read a reply key from an interactive user
    even if stdin redirected to a file or pipe

    Note the return type differs by branch: a str line when stdin is a
    console, but a bytes key (msvcrt.getche) when redirected on Windows.
    """
    if sys.stdin.isatty():                      # if stdin is console
        return input('?')                       # read reply line from stdin
    else:
        if sys.platform[:3] == 'win':           # if stdin was redirected
            import msvcrt                       # can't use stdin to ask a user
            msvcrt.putch(b'?')
            key = msvcrt.getche()               # use windows console tools
            msvcrt.putch(b'\n')                 # getch() does not echo key
            return key
        else:
            assert False, 'platform not supported'
            #linux?: open('/dev/tty').readline()[:-1]
def more(text, numlines=10):
    """
    page a multiline string to stdout, pausing for confirmation
    after every *numlines* lines while more text remains
    """
    remaining = text.splitlines()        # like split('\n') but no '' at end
    while remaining:
        chunk, remaining = remaining[:numlines], remaining[numlines:]
        print('\n'.join(chunk))
        if remaining and getreply() not in [b'y', b'Y']:
            break
if __name__ == '__main__':                     # when run, not when imported
    if len(sys.argv) == 1:                     # if no command-line arguments
        more(sys.stdin.read())                 # page stdin, no inputs
    else:
        more(open(sys.argv[1]).read())         # else page filename argument
'''
Most of the new code in this version shows up in its getreply function. The file’s
isatty method tells us whether stdin is connected to the console; if it is, we simply
read replies on stdin as before.
'''
"""
run with a command-line argument, this script interactively pages
through the named file’s text:
C:\...\PP4E\System\Streams> python moreplus.py adder.py
"""
"""
But now the script also correctly pages text redirected into stdin from either a file or a
command pipe, even if that text is too long to fit in a single display chunk.
C:\...\PP4E\System\Streams> python moreplus.py < moreplus.py
"""
"""
Finally, piping one Python script’s output into this script’s input now works as expected,
......\System\Streams> python teststreams.py < input.txt | python moreplus.py
Here, the standard output of one Python script is fed to the standard input of another
Python script located in the same directory: moreplus.py reads the output of
teststreams.py.
"""
'''
All of the redirections in such command lines work only because scripts don’t care what
standard input and output really are—interactive users, files, or pipes between programs.
We have just run this single more pager script in four
different ways: by importing and calling its function, by passing a filename commandline
argument, by redirecting stdin to a file, and by piping a command’s output to
stdin.
'''
| 36.211111 | 99 | 0.601105 |
0c015141fa322465b1476e035f87da223555b211 | 1,544 | py | Python | youtube_dl/extractor/goshgay.py | rajkotraja/YTDownloader | 01a0c511ebfa56699c1f58164c679b24f7972681 | [
"Unlicense"
] | 10 | 2020-05-29T03:20:03.000Z | 2022-03-29T01:05:20.000Z | youtube_dl/extractor/goshgay.py | huyangfeng/youtobedl | 7b0d1c28597bd38567e5b4e853f669a5a601c6e8 | [
"Unlicense"
] | 1 | 2016-05-18T01:27:28.000Z | 2016-05-18T05:00:36.000Z | PythonSamples/library/files/lib/python2.7/site-packages/youtube_dl/extractor/goshgay.py | jianglei12138/python2.7 | 280aa96d8cac98c03ca8c8ed71541f7ff7817055 | [
"PSF-2.0"
] | 9 | 2020-05-29T03:21:02.000Z | 2021-04-14T03:26:05.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
)
from ..utils import (
parse_duration,
)
class GoshgayIE(InfoExtractor):
    """youtube-dl info extractor for goshgay.com video pages.

    Scrapes the page HTML for the title and duration, then pulls the real
    video URL and thumbnail out of the flash player's ``flashvars`` query
    string.
    """
    _VALID_URL = r'https?://www\.goshgay\.com/video(?P<id>\d+?)($|/)'
    _TEST = {
        'url': 'http://www.goshgay.com/video299069/diesel_sfw_xxx_video',
        'md5': '4b6db9a0a333142eb9f15913142b0ed1',
        'info_dict': {
            'id': '299069',
            'ext': 'flv',
            'title': 'DIESEL SFW XXX Video',
            'thumbnail': 're:^http://.*\.jpg$',
            'duration': 80,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(
            r'<h2>(.*?)<', webpage, 'title')
        # duration is rendered like "- 1:20"; the regex tolerates the dash
        duration = parse_duration(self._html_search_regex(
            r'<span class="duration">\s*-?\s*(.*?)</span>',
            webpage, 'duration', fatal=False))
        # flashvars is a URL-encoded query string embedded in the player tag
        flashvars = compat_parse_qs(self._html_search_regex(
            r'<embed.+?id="flash-player-embed".+?flashvars="([^"]+)"',
            webpage, 'flashvars'))
        # compat_parse_qs maps each key to a list of values
        thumbnail = flashvars.get('url_bigthumb', [None])[0]
        video_url = flashvars['flv_url'][0]
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': 18,
        }
| 29.692308 | 73 | 0.542746 |
820d60bb930a5327289229d0f68314ccd3ee0736 | 1,737 | py | Python | clubsuite/suite/views/view_activity.py | lazyCodes7/Clubspot | 39ddee5adcd6e40f7552fd9048282225ff058abd | [
"MIT"
] | 5 | 2021-09-19T15:09:51.000Z | 2021-09-20T06:43:27.000Z | clubsuite/suite/views/view_activity.py | lazyCodes7/Clubspot | 39ddee5adcd6e40f7552fd9048282225ff058abd | [
"MIT"
] | null | null | null | clubsuite/suite/views/view_activity.py | lazyCodes7/Clubspot | 39ddee5adcd6e40f7552fd9048282225ff058abd | [
"MIT"
] | 1 | 2021-09-20T13:27:13.000Z | 2021-09-20T13:27:13.000Z | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import View
from django.urls import reverse
from django.contrib import messages
from suite.models import Club
from suite.forms import DivisionCreateForm, BudgetCreateForm
from guardian.shortcuts import get_perms
from django.core.exceptions import PermissionDenied
#importing the libraries we will need
import urllib
from fake_useragent import UserAgent
import requests
import re
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
class Activity(View, LoginRequiredMixin):
    """Scrape a Google search for links about a club's activities.

    GET renders ``club_activity.html`` with up to ``RESULT_COUNT`` result
    links for the query "<club name> activities".
    """

    RESULT_COUNT = 10  # number of Google results to request

    def get(self, request, club_id, *args, **kwargs):
        club: Club = get_object_or_404(Club, pk=club_id)
        keyword = club.club_name + ' activities'
        html_keyword = urllib.parse.quote_plus(keyword)
        google_url = ("https://www.google.com/search?q=" + html_keyword +
                      "&num=" + str(self.RESULT_COUNT))
        ua = UserAgent()
        # BUG FIX: the User-Agent dict was passed positionally, which
        # requests.get() treats as query *params*. It must be sent as
        # *headers* for the randomized user agent to have any effect.
        response = requests.get(google_url, headers={"User-Agent": ua.random})
        soup = BeautifulSoup(response.text, "html.parser")
        result = soup.find_all('div', attrs={'class': 'ZINbbc'})
        # Google wraps each target as /url?q=<target>&sa=...; extract <target>.
        results = [re.search(r'\/url\?q\=(.*)\&sa', str(i.find('a', href=True)['href']))
                   for i in result if "url" in str(i)]
        # In rare cases the URL cannot be extracted, so drop failed matches.
        links = [m.group(1) for m in results if m is not None]
        # NOTE(review): an unused template_name = 'suite/club_activity.html'
        # existed here while render() used 'club_activity.html'; the path
        # actually used at runtime is kept — confirm which one is intended.
        return render(request, 'club_activity.html', {'links': links, 'club': club})
| 34.74 | 122 | 0.716177 |
6563e82db9f2903fcedce04fb58bcfb9f2054b10 | 377 | py | Python | return_practice.py | Athenian-ComputerScience-Fall2020/functions-practice-Jackypop101 | f2b74bdbda0dc9fbc3ab94b18746c8c4443f49a0 | [
"Apache-2.0"
] | null | null | null | return_practice.py | Athenian-ComputerScience-Fall2020/functions-practice-Jackypop101 | f2b74bdbda0dc9fbc3ab94b18746c8c4443f49a0 | [
"Apache-2.0"
] | null | null | null | return_practice.py | Athenian-ComputerScience-Fall2020/functions-practice-Jackypop101 | f2b74bdbda0dc9fbc3ab94b18746c8c4443f49a0 | [
"Apache-2.0"
] | null | null | null | # Add comments to explain what the output from this program will be and how you know.
def math1():
    """Return the sum of 50 and 5."""
    first, second = 50, 5
    return first + second
# 50 + 5 = 55
def math2():
    """Return the difference of 50 and 5."""
    minuend = 50
    subtrahend = 5
    return minuend - subtrahend
# 50 - 5 = 45
def math3():
    """Return the product of 50 and 5."""
    num1 = 50
    num2 = 5
    return num1 * num2
# 50 * 5 = 250 (fixed: the original comment incorrectly claimed 150)
# Call math2() (50 - 5) and print the returned value.
output_num = math2()
print(output_num)
'''
Add prediction(s) here:
45
'''
| 11.78125 | 85 | 0.615385 |
af66b7ecab355f14f12c8293a035f0f48be8962b | 525 | py | Python | spacy/lang/it/examples.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 12 | 2019-03-20T20:43:47.000Z | 2020-04-13T11:10:52.000Z | spacy/lang/it/examples.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 13 | 2018-06-05T11:54:40.000Z | 2019-07-02T11:33:14.000Z | spacy/lang/it/examples.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 2 | 2021-12-09T07:23:21.000Z | 2022-03-31T06:13:10.000Z | # coding: utf8
from __future__ import unicode_literals
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.it.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"Apple vuole comprare una startup del Regno Unito per un miliardo di dollari",
"Le automobili a guida autonoma spostano la responsabilità assicurativa verso i produttori",
"San Francisco prevede di bandire i robot di consegna porta a porta",
"Londra è una grande città del Regno Unito.",
]
| 27.631579 | 96 | 0.742857 |
f21a740554226f5a4015b5fbabe6725fe37ff161 | 904 | py | Python | test/tts_demo/setup.py | marklogg/mini_demo | 71c71826e6819372e5e759d93eb5b4f338964c1e | [
"MIT"
] | 7 | 2020-07-02T08:03:17.000Z | 2022-03-11T12:52:00.000Z | test/tts_demo/setup.py | marklogg/mini_demo | 71c71826e6819372e5e759d93eb5b4f338964c1e | [
"MIT"
] | 2 | 2021-05-11T10:06:13.000Z | 2021-11-22T21:34:35.000Z | test/tts_demo/setup.py | marklogg/mini_demo | 71c71826e6819372e5e759d93eb5b4f338964c1e | [
"MIT"
] | 11 | 2020-07-02T06:42:03.000Z | 2021-05-31T02:10:39.000Z | #!/usr/bin/env python3
# coding=utf-8
import setuptools
# Package metadata for the tts_demo example (built on the alphamini SDK).
setuptools.setup(
    name="tts_demo",
    version="0.0.2",
    author='Gino Deng',
    author_email='jingjing.deng@ubtrobot.com',
    description="demo with mini_sdk",
    long_description='demo with mini_sdk,xxxxxxx',
    long_description_content_type="text/markdown",
    license="GPLv3",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        'alphamini > 0.1.3',
    ],
    # Installs a `tts_demo` console command that runs the TTS playback demo.
    entry_points={
        'console_scripts': [
            'tts_demo = play_tts.test_playTTS:main'
        ],
    },
    zip_safe=False
)
| 26.588235 | 51 | 0.610619 |
3861276a493679b37368c6d9b56591278223452d | 4,228 | py | Python | .circleci/deselect_tests.py | a-vasenin/scikit-learn-intelex | b81f81098a7f9302c6a052a5d22ecd372682844d | [
"Apache-2.0"
] | null | null | null | .circleci/deselect_tests.py | a-vasenin/scikit-learn-intelex | b81f81098a7f9302c6a052a5d22ecd372682844d | [
"Apache-2.0"
] | null | null | null | .circleci/deselect_tests.py | a-vasenin/scikit-learn-intelex | b81f81098a7f9302c6a052a5d22ecd372682844d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#===============================================================================
# Copyright 2020-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# coding: utf-8
import argparse
import os.path
from yaml import FullLoader, load as yaml_load
from distutils.version import LooseVersion
import sklearn
from sklearn import __version__ as sklearn_version
import warnings
def evaluate_cond(cond, v):
    """Evaluate a version condition such as '>=0.22' against version string *v*.

    Supported operators: >=, <=, ==, !=, <, > (two-character prefixes are
    tried first so '>=' is never misread as '>'). Returns the boolean result
    of the comparison; on an unrecognized prefix a warning is emitted and
    False is returned so the test is not deselected.
    """
    comparators = (
        (">=", lambda a, b: a >= b),
        ("<=", lambda a, b: a <= b),
        ("==", lambda a, b: a == b),
        ("!=", lambda a, b: a != b),
        ("<", lambda a, b: a < b),
        (">", lambda a, b: a > b),
    )
    for prefix, compare in comparators:
        if cond.startswith(prefix):
            return compare(LooseVersion(v), LooseVersion(cond[len(prefix):]))
    warnings.warn(
        'Test selection condition "{0}" should start with '
        '>=, <=, ==, !=, < or > to compare to version of scikit-learn run. '
        'The test will not be deselected'.format(cond))
    return False
def filter_by_version(entry, sk_ver):
    """Resolve one deselection entry against scikit-learn version *sk_ver*.

    An entry is either "test_name" (always deselected) or
    "test_name cond1,cond2" (deselected only when every condition holds).
    Returns the bare test name, or None when the entry is empty, malformed,
    or its version conditions are not all satisfied.
    """
    if not entry:
        return None
    parts = entry.split(' ')
    if len(parts) == 1:
        # Unconditional deselection.
        return entry
    if len(parts) != 2:
        # Malformed entry — ignore it.
        return None
    test_name, version_spec = parts
    satisfied = all(evaluate_cond(cond, sk_ver)
                    for cond in version_spec.split(','))
    return test_name if satisfied else None
def create_pytest_switches(filename, absolute, reduced, public, gpu):
    """Build a flat list of pytest ``--deselect <test>`` switches.

    Reads the YAML file at *filename*; each of the sections
    ``deselected_tests`` plus (optionally) ``reduced_tests``, ``public`` and
    ``gpu`` contributes entries filtered by the installed scikit-learn
    version via filter_by_version(). With *absolute* the test paths are
    prefixed by sklearn's install directory relative to the home directory.
    Returns an empty list when the file does not exist.
    """
    pytest_switches = []
    if os.path.exists(filename):
        with open(filename, 'r') as fh:
            dt = yaml_load(fh, Loader=FullLoader)

        if absolute:
            base_dir = os.path.relpath(
                os.path.dirname(sklearn.__file__),
                os.path.expanduser('~')) + '/'
        else:
            base_dir = ""

        filtered_deselection = [
            filter_by_version(test_name, sklearn_version)
            for test_name in dt.get('deselected_tests', [])]
        if reduced:
            filtered_deselection.extend(
                [filter_by_version(test_name, sklearn_version)
                 for test_name in dt.get('reduced_tests', [])])
        if public:
            filtered_deselection.extend(
                [filter_by_version(test_name, sklearn_version)
                 for test_name in dt.get('public', [])])
        if gpu:
            filtered_deselection.extend(
                [filter_by_version(test_name, sklearn_version)
                 for test_name in dt.get('gpu', [])])
        # NOTE(review): pytest_switches is re-initialized here; the first
        # assignment above is redundant but harmless.
        pytest_switches = []
        for test_name in filtered_deselection:
            if test_name:
                pytest_switches.extend(["--deselect", base_dir + test_name])

    return pytest_switches
if __name__ == '__main__':
    argParser = argparse.ArgumentParser(
        prog="deselect_tests.py",
        description="Produce pytest CLI options to deselect tests specified in yaml file",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    argParser.add_argument('conf_file', nargs=1, type=str)
    argParser.add_argument('--absolute', action='store_true')
    argParser.add_argument('--reduced', action='store_true')
    argParser.add_argument('--public', action='store_true')
    argParser.add_argument('--gpu', action='store_true')
    args = argParser.parse_args()

    fn = args.conf_file[0]
    if os.path.exists(fn):
        # Print the --deselect switches space-separated so callers can splice
        # them straight onto a pytest command line.
        print(" ".join(create_pytest_switches(fn, args.absolute, args.reduced,
                                              args.public, args.gpu)))
| 36.765217 | 90 | 0.613765 |
ed3a183446962ac18acd72bd23b851e3d75bf0ba | 10,611 | py | Python | exp/exp016_RoBERTa_large_ITPTv2.py | Hiroki29/kaggle2021-CommonLit-Readability-Prize | c588174c378f93890d6a16436050ec21f76cf919 | [
"MIT"
] | null | null | null | exp/exp016_RoBERTa_large_ITPTv2.py | Hiroki29/kaggle2021-CommonLit-Readability-Prize | c588174c378f93890d6a16436050ec21f76cf919 | [
"MIT"
] | null | null | null | exp/exp016_RoBERTa_large_ITPTv2.py | Hiroki29/kaggle2021-CommonLit-Readability-Prize | c588174c378f93890d6a16436050ec21f76cf919 | [
"MIT"
] | null | null | null | import logging
import math
import os
import datasets
import pandas as pd
import torch
import transformers
from accelerate import Accelerator
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (AdamW, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, CONFIG_MAPPING,
DataCollatorForLanguageModeling, MODEL_MAPPING, get_scheduler, set_seed)
class TrainConfig:
    """Hyper-parameters for in-task MLM pre-training of roberta-large."""
    train_file = '../input/mlm_data.csv'
    # NOTE(review): validation uses the same file as training — confirm intended.
    validation_file = '../input/mlm_data.csv'
    model_name_or_path = 'roberta-large'
    config_name = 'roberta-large'
    tokenizer_name = 'roberta-large'
    use_slow_tokenizer = True
    per_device_train_batch_size = 8
    per_device_eval_batch_size = 8
    learning_rate = 5e-5
    weight_decay = 0.0
    num_train_epochs = 20  # change to 5
    max_train_steps = None  # derived from num_train_epochs when None
    gradient_accumulation_steps = 1
    lr_scheduler_type = 'constant_with_warmup'
    num_warmup_steps = 0
    # Output path embeds the epoch count for bookkeeping.
    output_dir = '../out/exp016_RoBERTa_large_ITPTv2/roberta-large-' + str(num_train_epochs) + '-epochs/'
    seed = 42
    model_type = 'roberta'
    max_seq_length = 320
    preprocessing_num_workers = 4
    overwrite_cache = True
    mlm_probability = 0.15  # fraction of tokens masked for the MLM objective
def main():
    """Run masked-language-model pre-training (HF no-trainer style loop).

    Loads the CSV corpus, tokenizes and chunks it to max_seq_length, then
    trains with accelerate + AdamW and saves the final model to output_dir.
    """
    logger = logging.getLogger(__name__)
    args = TrainConfig()
    accelerator = Accelerator()
    if args.train_file is not None:
        extension = args.train_file.split(".")[-1]
        assert extension in ["csv", "json",
                             "txt"], "`train_file` should be a csv, json or txt file."
    if args.validation_file is not None:
        extension = args.validation_file.split(".")[-1]
        assert extension in ["csv", "json",
                             "txt"], "`validation_file` should be a csv, json or txt file."
    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    # Only the main process logs verbosely; workers are kept quiet.
    # (Original author note here read "not sure about this".)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    if args.seed is not None:
        set_seed(args.seed)
    data_files = {}
    if args.train_file is not None:  # 'mlm_data.csv'
        data_files["train"] = args.train_file
    if args.validation_file is not None:
        data_files["validation"] = args.validation_file
    extension = args.train_file.split(".")[-1]  # csv
    if extension == "txt":
        extension = "text"
    raw_datasets = load_dataset(extension, data_files=data_files)
    if args.config_name:  # 'roberta-large'
        config = AutoConfig.from_pretrained(args.config_name)
    elif args.model_name_or_path:
        config = AutoConfig.from_pretrained(args.model_name_or_path)
    else:
        config = CONFIG_MAPPING[args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if args.tokenizer_name:  # 'roberta-large'
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name,
                                                  use_fast=not args.use_slow_tokenizer)
    elif args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path,
                                                  use_fast=not args.use_slow_tokenizer)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    if args.model_name_or_path:  # 'roberta-large'
        model = AutoModelForMaskedLM.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    column_names = raw_datasets["train"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]
    if args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        # print(max_seq_length)
        if max_seq_length > 1024:
            logger.warning(
                f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                "Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
            )
            max_seq_length = 1024
    else:
        if args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)

    def tokenize_function(examples):
        # Tokenize raw text; special-token masks are needed by the collator.
        return tokenizer(examples[text_column_name], return_special_tokens_mask=True)

    tokenized_datasets = raw_datasets.map(
        tokenize_function,
        batched=True,
        num_proc=args.preprocessing_num_workers,
        remove_columns=column_names,
        load_from_cache_file=not args.overwrite_cache,
    )

    def group_texts(examples):
        # Concatenate all texts, then split into fixed max_seq_length chunks,
        # dropping the short remainder at the end.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        total_length = (total_length // max_seq_length) * max_seq_length
        result = {
            k: [t[i: i + max_seq_length] for i in range(0, total_length, max_seq_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    tokenized_datasets = tokenized_datasets.map(
        group_texts,
        batched=True,
        num_proc=args.preprocessing_num_workers,
        load_from_cache_file=not args.overwrite_cache,
    )
    train_dataset = tokenized_datasets["train"]
    eval_dataset = tokenized_datasets["validation"]
    # Collator performs dynamic random masking per batch.
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer,
                                                    mlm_probability=args.mlm_probability)
    train_dataloader = DataLoader(
        train_dataset, shuffle=True, collate_fn=data_collator,
        batch_size=args.per_device_train_batch_size
    )
    eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator,
                                 batch_size=args.per_device_eval_batch_size)
    # No weight decay on bias and LayerNorm parameters.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if
                       not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader
    )
    # args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # gradient_accumulation_steps = 1
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.max_train_steps,
    )
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("***** Running training *****")
    logger.info(f" Num examples = {len(train_dataset)}")
    logger.info(f" Num Epochs = {args.num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(
        f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f" Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    for epoch in range(args.num_train_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            # gradient_accumulation_steps is 1, so loss is unchanged here
            loss = loss / args.gradient_accumulation_steps
            accelerator.backward(loss)  # backpropagate to fill .grad
            # With accumulation steps == 1 this fires on every step.
            if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
                optimizer.step()  # apply the optimizer update to the weights
                lr_scheduler.step()
                optimizer.zero_grad()  # reset gradients to zero
                progress_bar.update(1)
                completed_steps += 1
            if completed_steps >= args.max_train_steps:
                break
        model.eval()
        losses = []
        for step, batch in enumerate(eval_dataloader):
            with torch.no_grad():
                outputs = model(**batch)
            loss = outputs.loss
            losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))
        losses = torch.cat(losses)
        # Trim padding introduced by gather/repeat on the last batch.
        losses = losses[: len(eval_dataset)]
        perplexity = math.exp(torch.mean(losses))
        logger.info(f"epoch {epoch}: perplexity: {perplexity}")
    if args.output_dir is not None:
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)  # save final model
if __name__ == "__main__":
main() | 40.655172 | 118 | 0.664782 |
e333c41d024a3ccbaa54c08a6664915dc6c442f4 | 5,970 | py | Python | goap/Automaton.py | leopepe/GOApy | 6cc3c355e5b286a6aef01b83a0c2373a58d01278 | [
"BSD-2-Clause"
] | 37 | 2016-09-17T16:25:27.000Z | 2022-01-28T17:06:42.000Z | goap/Automaton.py | leopepe/GOApy | 6cc3c355e5b286a6aef01b83a0c2373a58d01278 | [
"BSD-2-Clause"
] | 7 | 2017-02-22T03:24:38.000Z | 2021-10-02T07:35:47.000Z | goap/Automaton.py | leopepe/GOApy | 6cc3c355e5b286a6aef01b83a0c2373a58d01278 | [
"BSD-2-Clause"
] | 11 | 2017-02-03T11:31:20.000Z | 2021-08-02T19:33:29.000Z | from datetime import datetime
from automat import MethodicalMachine
from goap.Sensor import Sensors
from goap.Action import Actions
from goap.Planner import Planner
from goap.WorldState import WorldState
from time import sleep
class Fact(object):
    """A single timestamped sensor observation.

    Records which sensor produced the data, the data itself, and the
    world-state binding (attribute name) it maps to.
    """

    def __init__(self, sensor, data, binding):
        self.binding = binding
        self.data = data
        self.time_stamp = datetime.now()
        self.parent_sensor = sensor

    def __str__(self):
        return f"{self.binding}: {self.data}"

    def __repr__(self):
        return str(self)
class AutomatonPriorities:
    """Ordered container of priority items, iterable in insertion order."""

    def __init__(self, items: list):
        self._items = items

    def __iter__(self):
        # BUG FIX: __iter__ must return an iterator; returning the list
        # itself made `for x in priorities` raise
        # "TypeError: iter() returned non-iterator".
        return iter(self._items)

    def __repr__(self):
        return str(self.__dict__)

    def __str__(self):
        return self.__repr__()
class Automaton:
    """ A 3 State Machine Automaton: observing (aka monitor or patrol), planning and acting """

    # Class-level automat machine; states/inputs/outputs are declared below
    # via decorators and wired together with .upon() at class-body end.
    machine = MethodicalMachine()

    def __init__(
            self,
            name: str,
            sensors: Sensors,
            actions: Actions,
            world_state_facts: dict):
        # setup
        self.world_state = WorldState(world_state_facts)
        self.working_memory = []  # list of Fact gathered during sensing
        self.name = name
        self.sensors = sensors
        self.actions = actions
        self.planner = Planner(actions=actions)
        #
        self.action_plan = []
        self.action_plan_response = None
        self.sensors_responses = {}
        self.actions_response = []
        self.goal = {}

    def __sense_environment(self):
        # Run every sensor, store each result as a Fact, then copy the fact
        # data onto the world state under the fact's binding name.
        for sensor in self.sensors:
            self.working_memory.append(
                Fact(
                    sensor=sensor.name,
                    data=sensor.exec(),
                    binding=sensor.binding
                )
            )
        for fact in self.working_memory:
            setattr(
                self.world_state,
                fact.binding,
                fact.data.response
            )

    def __set_action_plan(self):
        # Ask the planner for a path from the current world state to the goal.
        self.action_plan = self.planner.plan(self.world_state, self.goal)
        return self.action_plan

    def __execute_action_plan(self):
        # Each plan entry is a graph edge; the action object lives in the
        # edge attributes at index 2 under 'object'.
        self.actions_response = [action[2]['object'].exec()
                                 for action in self.action_plan]
        return 'Action planning execution results: {}'.format(
            self.action_plan_response)

    @machine.state(initial=True)
    def waiting_orders(self):
        """ Waiting goal / orders """

    @machine.state()
    def sensing(self):
        """ Running sensors and assimilating sensor's responses """

    @machine.state()
    def planning(self):
        """ Generating action plan to change actual world state to achieve goal """

    @machine.state()
    def acting(self):
        """ Executing action plan"""

    @machine.input()
    def wait(self):
        """ Input waiting_orders state """

    @machine.input()
    def sense(self):
        """ Input sense state """

    @machine.output()
    def __sense(self):
        """ Execute sensors """
        self.__sense_environment()

    @machine.input()
    def plan(self):
        """ Input for planning state """

    @machine.output()
    def __plan(self):
        """ Generate action plan """
        self.__set_action_plan()

    @machine.input()
    def act(self):
        """ Input for acting state"""

    @machine.output()
    def __act(self):
        """ Execute action plan """
        self.__execute_action_plan()

    @machine.input()
    def input_goal(self, goal):
        """ Change / Set AI goal """

    @machine.output()
    def __input_goal(self, goal):
        """ Actually sets goal """
        self.goal = goal

    @machine.output()
    def __reset_working_memory(self):
        self.working_memory = []

    # Transition table.
    # cyclical main states
    waiting_orders.upon(sense, enter=sensing, outputs=[__sense])
    sensing.upon(plan, enter=planning, outputs=[__plan])
    planning.upon(act, enter=acting, outputs=[__act])
    acting.upon(
        sense,
        enter=sensing,
        outputs=[
            __reset_working_memory,
            __sense])
    # change orders
    waiting_orders.upon(
        input_goal,
        enter=waiting_orders,
        outputs=[__input_goal])
    planning.upon(input_goal, enter=waiting_orders, outputs=[__input_goal])
    acting.upon(input_goal, enter=waiting_orders, outputs=[__input_goal])
    # reset working memory from sensing
    sensing.upon(wait, enter=waiting_orders, outputs=[__reset_working_memory])
class AutomatonController(object):
    """Drives an Automaton in an endless sense -> plan -> act loop."""

    def __init__(
            self,
            actions: Actions,
            sensors: Sensors,
            name: str,
            world_state: dict):
        self.automaton = Automaton(
            actions=actions,
            sensors=sensors,
            name=name,
            world_state_facts=world_state)

    @property
    def world_state(self):
        return self.automaton.world_state

    @world_state.setter
    def world_state(self, value):
        self.automaton.world_state = value

    @property
    def goal(self):
        return self.automaton.goal

    @goal.setter
    def goal(self, value):
        # Goes through the state-machine input so the automaton also
        # transitions back to waiting_orders.
        self.automaton.input_goal(value)

    def start(self):
        """Loop forever: sense, then plan+act if the goal is unmet, polling every 5s."""
        while True:
            self.automaton.sense()
            if self.automaton.world_state != self.goal:
                print(
                    'World state differs from goal: \nState: {}\nGoal: {}'.format(
                        self.automaton.world_state, self.goal))
                print('Need to find an action plan')
                self.automaton.plan()
                # NOTE(review): "Plain" below is likely a typo for "Plan" in
                # the user-visible message (kept byte-identical here).
                print(
                    'Plain found. Will execute the action plan: {}'.format(
                        self.automaton.action_plan))
                self.automaton.act()
            else:
                print("World state equals to goal: {}".format(self.goal))
                self.automaton.wait()
            sleep(5)
| 27.511521 | 95 | 0.582245 |
704dcf0acbe14b487504a079c6e0f927895a8e88 | 956 | py | Python | samples/reporting/coreservices/delete_subscription_of_report_name_by_organization.py | shalltell/cybersource-rest-samples-python | 95b0557456d99538fca5d01c238d3600378930de | [
"MIT"
] | null | null | null | samples/reporting/coreservices/delete_subscription_of_report_name_by_organization.py | shalltell/cybersource-rest-samples-python | 95b0557456d99538fca5d01c238d3600378930de | [
"MIT"
] | null | null | null | samples/reporting/coreservices/delete_subscription_of_report_name_by_organization.py | shalltell/cybersource-rest-samples-python | 95b0557456d99538fca5d01c238d3600378930de | [
"MIT"
] | null | null | null | from CyberSource import *
import os
from importlib.machinery import SourceFileLoader
# Load merchant configuration from data/Configuration.py at import time.
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()


def delete_subscriptions_report(reportname):
    """Delete the report subscription named *reportname* via the CyberSource API.

    Prints the HTTP status and response body on success; on failure prints
    the exception (best-effort sample script, so no exception is re-raised).
    """
    try:
        report_name = reportname
        # Reading Merchant details from Configuration file
        config_obj = configuration.Configuration()
        details_dict1 = config_obj.get_configuration()
        report_subscription_obj = ReportSubscriptionsApi(details_dict1)
        return_data, status, body = report_subscription_obj.delete_subscription(report_name)
        print("API RESPONSE CODE : ", status)
        print("API RESPONSE BODY : ", body)
    except Exception as e:
        print("Exception when calling ReportSubscriptionsApi->delete_subscription: %s\n" % e)


if __name__ == "__main__":
    delete_subscriptions_report(reportname="Cybersource-rest-py")
| 35.407407 | 93 | 0.738494 |
38f9f6af9e6f1dfc3daca10322b07d4eb4cf065f | 8,008 | py | Python | blog/views.py | ProjectInAction/final-work-blog | 898dc45389fbd9302971b7e26558e4c31cda7d21 | [
"Apache-2.0"
] | null | null | null | blog/views.py | ProjectInAction/final-work-blog | 898dc45389fbd9302971b7e26558e4c31cda7d21 | [
"Apache-2.0"
] | null | null | null | blog/views.py | ProjectInAction/final-work-blog | 898dc45389fbd9302971b7e26558e4c31cda7d21 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from blog import app
from flask import Flask, render_template, session, redirect, url_for, request, g, flash, send_from_directory,abort
from .mylib import Comment,Article,ArtiList,blogInfo
from random import randint as randint
def getName():
    """Assemble a random guest nickname from an adjective and a noun."""
    prefixes = ['路过的', '隐匿的', '黯淡的', '蒙面的', '薄情的', '无名的']
    suffixes = ['茶杯', '毛线', '喵喵', '柚子', '拐杖', '硬币', '木剑']
    # Two randint draws, in the same order as before, keep RNG consumption identical.
    prefix = prefixes[randint(0, 5)]
    suffix = suffixes[randint(0, 6)]
    return prefix + suffix
# Error handlers: all render a shared error page with the blog chrome.
@app.errorhandler(404)
def page_not_found(e):
    """Render the shared error page with a 404 status."""
    return render_template('error.html', info=blogInfo()), 404


@app.errorhandler(500)
def internal_server_error(e):
    """Render the shared error page with a 500 status."""
    return render_template('error.html', info=blogInfo()), 500


@app.errorhandler(400)
def bad_request(e):
    """Render the shared error page with a 400 status."""
    return render_template('error.html', info=blogInfo()), 400


@app.errorhandler(403)
def forbidden(e):
    """Render the dedicated forbidden page with a 403 status."""
    return render_template('forbidden.html', info=blogInfo()), 403


# Automatically close the per-request database connection.
@app.teardown_appcontext
def close_db(error):
    """Close g.db at the end of every request, if one was opened."""
    if hasattr(g, 'db'):
        g.db.close()
@app.route('/')
def index():
    """Front page: render the first page of the article list."""
    info = blogInfo()
    curPage = ArtiList(page=1)
    curPage.getAl()
    curPage.getPagn()
    curPage.getRe()
    return render_template('index.html', results=curPage.results, pagn=curPage.pagn, info=info)


@app.route('/page/<int:pg>')
def page(pg):
    """Paginated article list; 404 when the page number is out of range."""
    info = blogInfo()
    curPage = ArtiList(page=pg)
    # getPagn() appears to report whether the requested page exists — TODO confirm
    if curPage.getPagn():
        curPage.getAl()
        curPage.getRe()
        return render_template('page.html', results=curPage.results, pagn=curPage.pagn, info=info)
    else:
        abort(404)


@app.route('/arch<int:file>/<int:pg>')
def arch(file, pg):
    """Articles filtered by category ('file') id, paginated; 404 when empty."""
    info = blogInfo()
    curPage = ArtiList('file', file, pg)
    if curPage.getPagn():
        curPage.getAl()
        curPage.getRe()
        return render_template('page.html', results=curPage.results, pagn=curPage.pagn, info=info)
    else:
        abort(404)


@app.route('/arch/<tag>/<int:pg>')
def tag(tag, pg):
    """Articles filtered by tag, paginated; 404 when empty."""
    info = blogInfo()
    curPage = ArtiList('tag', tag, pg)
    if curPage.getPagn():
        curPage.getAl()
        curPage.getRe()
        return render_template('page.html', results=curPage.results, pagn=curPage.pagn, info=info)
    else:
        abort(404)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Login page; on a correct password set the session flag and go to admin."""
    info = blogInfo()
    pwd = request.form.get('password')
    if pwd and info.verify(pwd):
        session['log'] = True
        return redirect(url_for('admin'))
    return render_template('login.html', info=info)


@app.route('/logout')
def logout():
    """Clear the admin session flag and return to the first article page."""
    session['log'] = False
    return redirect(url_for('page', pg=1))


@app.route('/admin')
def admin():
    """Admin dashboard with the newest comments; redirects to login when not authenticated."""
    if not session.get('log'):
        return redirect(url_for('login'))
    return render_template('admin.html', info=blogInfo(), cl=Comment().getNew())
@app.route('/article/<int:bg_id>', methods=['Get', 'POST'])
def article(bg_id):
    """Show an article with its comments; POST adds a new comment to it."""
    if request.method == 'POST':
        # NOTE(review): bare except hides the real failure; consider
        # narrowing to Exception and logging.
        try:
            author = request.form.get('author')
            if not author:
                author = session.get('name')
            if not author:
                # Anonymous visitors get a generated nickname, remembered
                # in the session for subsequent comments.
                author = getName()
                session['name'] = author
            content = request.form.get('content')
            rid = request.form.get('rid', type=int)
            Comment(bg_id).insert(content, author, rid)
            return redirect(url_for('article', bg_id=bg_id))
        except:
            abort(500)
    if bg_id == 0:
        # id 0 is reserved for the guestbook/memo page
        return redirect(url_for('memo'))
    try:
        curArti = Article(bg_id)
        curArti.getIt()
    except:
        abort(404)
    # Articles with file == 0 appear to be hidden/drafts — visible to admin only.
    if curArti.file != 0 or session.get('log'):
        info = blogInfo()
        # Show the article title as page title, blog title as subtitle.
        info.subtitle = info.title
        info.title = curArti.title
        curComm = Comment(bg_id)
        curComm.getIt()
        return render_template('article.html', curArti=curArti, info=info, cl=curComm.cl)
    abort(404)
@app.route('/post/comment', methods=['POST'])
def commentPost():
    """AJAX endpoint: add a comment to article *bid* (rid = reply target id)."""
    author = request.form.get('author')
    content = request.form.get('content')
    bid = request.form.get('bid', type=int)
    rid = request.form.get('rid', type=int)
    try:
        Comment(bid).insert(content, author, rid)
        return "Success"
    except:
        return "Error"


@app.route('/post/article', methods=['POST'])
def articlePost():
    """AJAX endpoint: create or update an article; returns its id. Admin only."""
    if not session.get('log'):
        abort(403)
    title = request.form.get('title')
    content = request.form.get('editor')
    img = request.form.get('img')
    file = request.form.get('file', type=int)
    tag = request.form.get('tags')
    id = request.form.get('id', type=int)
    try:
        curArti = Article(id)
        curArti.edit(title, tag, img, file, content)
        return str(curArti.id)
    except:
        return "Error"


@app.route('/del/comment', methods=['POST'])
def commentDel():
    """AJAX endpoint: delete a comment by id. Admin only."""
    if not session.get('log'):
        abort(403)
    try:
        cid = request.form.get('cid', type=int)
        Comment(0).delIt(cid)
        return "Success"
    except:
        return "Error"


@app.route('/del/article', methods=['POST'])
def articleDel():
    """AJAX endpoint: hide (soft-delete) an article by id. Admin only."""
    if not session.get('log'):
        abort(403)
    try:
        bid = request.form.get('bid', type=int)
        Article(bid).hideIt()
        return "Success"
    except:
        return "Error"
@app.route('/config/tcg0', methods=['POST'])
def tcg0():
    """Set the blog title; requires a logged-in session."""
    if not session.get('log'):
        abort(403)
    title = request.form.get('tcg0')
    try:
        blogInfo().config(title=title)
        return "Success"
    except Exception:  # narrowed from a bare `except:` (applies to all tcg* routes)
        return "Error"


@app.route('/config/tcg1', methods=['POST'])
def tcg1():
    """Set the blog subtitle; requires a logged-in session."""
    if not session.get('log'):
        abort(403)
    subtitle = request.form.get('tcg1')
    try:
        blogInfo().config(subtitle=subtitle)
        return "Success"
    except Exception:
        return "Error"


@app.route('/config/tcg2', methods=['POST'])
def tcg2():
    """Change the admin password; requires a logged-in session.

    Delegates old-password validation to blogInfo().setPwd and returns
    its result string directly.
    """
    if not session.get('log'):
        abort(403)
    old = request.form.get('old')
    new = request.form.get('new')
    try:
        return blogInfo().setPwd(old, new)
    except Exception:
        return "Error"


@app.route('/config/tcg3', methods=['POST'])
def tcg3():
    """Set the sidebar content; requires a logged-in session."""
    if not session.get('log'):
        abort(403)
    sidebar = request.form.get('tcg3')
    try:
        blogInfo().config(sidebar=sidebar)
        return "Success"
    except Exception:
        return "Error"


@app.route('/config/tcg4', methods=['POST'])
def tcg4():
    """Set the blog tag list; requires a logged-in session."""
    if not session.get('log'):
        abort(403)
    tags = request.form.get('tcg4')
    try:
        blogInfo().config(tags=tags)
        return "Success"
    except Exception:
        return "Error"
@app.route('/config/cate', methods=['POST'])
def cate():
    """Edit a category (rename / re-number); requires a logged-in session.

    Returns whatever result string blogInfo().setCate produces.
    """
    if not session.get('log'):
        abort(403)
    old_id = request.form.get('oldId', type=int)
    new_id = request.form.get('newId', type=int)
    text = request.form.get('content')
    return blogInfo().setCate(old_id, new_id, text)


@app.route('/memo')
def memo():
    """Render the memo board, backed by the comments of article 0."""
    board = Comment(0)
    board.getIt()
    return render_template('memo.html', cl=board.cl, info=blogInfo())
# @app.route('/wish')
# def wish():
# return render_template('wish.html',info=blogInfo())
@app.route('/robots.txt')
def static_from_root():
    # Serve /robots.txt straight from the static folder.  For this fixed
    # route request.path[1:] is always "robots.txt".
    return send_from_directory(app.static_folder, request.path[1:])
@app.route('/edit/<int:bg_id>')
def edit(bg_id):
    """Render the editor pre-filled with article `bg_id`; admin only.

    404s when the article cannot be loaded.
    """
    if not session.get('log'):
        abort(403)
    try:
        curArti = Article(bg_id)
        curArti.getIt()
    except Exception:  # narrowed from a bare `except:`
        abort(404)
    return render_template('edit.html', curArti=curArti, info=blogInfo())


@app.route('/edit')
def new():
    """Render an empty editor (article id 0 creates a new article); admin only."""
    if not session.get('log'):
        abort(403)
    curArti = Article(0)
    return render_template('edit.html', curArti=curArti, info=blogInfo())
'''
@app.route('/new', methods = ['GET', 'POST'])
def new():
if session.get('log'):
curArti = Article(0)
if request.method == 'POST':
curArti.update(request.form['title'],request.form['tags'],request.form['img'],request.form['file'],request.form['editor'])
return redirect(url_for('page',pg = 1))
return render_template('edit.html',curArti = curArti)
return redirect(url_for('page',pg = 1))
''' | 29.659259 | 134 | 0.611014 |
ef6d050b264fe1f8f3d9192f4d8dab65923dd510 | 5,625 | py | Python | homeassistant/components/solarlog/sensor.py | aaearon/core | b2c9bd2ca67df277093790e3fc0def34d9f7fb04 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/solarlog/sensor.py | aaearon/core | b2c9bd2ca67df277093790e3fc0def34d9f7fb04 | [
"Apache-2.0"
] | 43 | 2020-12-21T08:18:31.000Z | 2022-03-31T06:04:27.000Z | homeassistant/components/solarlog/sensor.py | aaearon/core | b2c9bd2ca67df277093790e3fc0def34d9f7fb04 | [
"Apache-2.0"
] | null | null | null | """Platform for solarlog sensors."""
import logging
from urllib.parse import ParseResult, urlparse
from requests.exceptions import HTTPError, Timeout
from sunwatcher.solarlog.solarlog import SolarLog
from homeassistant.const import CONF_HOST
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from .const import SCAN_INTERVAL, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the solarlog platform.

    YAML configuration is deprecated: this stub only logs a warning and
    creates no entities (entities come from async_setup_entry instead).
    """
    _LOGGER.warning(
        "Configuration of the solarlog platform in configuration.yaml is deprecated in Home Assistant 0.119. Please remove entry from your configuration"
    )
    return True
async def async_setup_entry(hass, entry, async_add_entities):
    """Add solarlog entry."""
    raw_host = entry.data[CONF_HOST]
    device_name = entry.title

    # Normalize whatever the user typed into a full http:// URL.  A bare
    # "1.2.3.4" parses with the address in `path`, hence the netloc/path swap.
    parsed = urlparse(raw_host, "http")
    netloc = parsed.netloc or parsed.path
    path = parsed.path if parsed.netloc else ""
    host = ParseResult("http", netloc, path, *parsed[3:]).geturl()

    try:
        api = await hass.async_add_executor_job(SolarLog, host)
        _LOGGER.debug("Connected to Solar-Log device, setting up entries")
    except (OSError, HTTPError, Timeout):
        _LOGGER.error(
            "Could not connect to Solar-Log device at %s, check host ip address", host
        )
        return

    # Shared data service; each sensor pulls its value from this object.
    data = await hass.async_add_executor_job(SolarlogData, hass, api, host)

    async_add_entities(
        [
            SolarlogSensor(entry.entry_id, device_name, sensor_key, data)
            for sensor_key in SENSOR_TYPES
        ],
        True,
    )
    return True
class SolarlogSensor(Entity):
    """A single Solar-Log reading exposed as a Home Assistant sensor."""

    def __init__(self, entry_id, device_name, sensor_key, data):
        """Initialize the sensor from its SENSOR_TYPES definition."""
        self.device_name = device_name
        self.sensor_key = sensor_key
        self.data = data
        self.entry_id = entry_id
        self._state = None
        # SENSOR_TYPES maps sensor_key -> (json key, label, unit, icon).
        sensor_def = SENSOR_TYPES[sensor_key]
        self._json_key = sensor_def[0]
        self._label = sensor_def[1]
        self._unit_of_measurement = sensor_def[2]
        self._icon = sensor_def[3]

    @property
    def unique_id(self):
        """Return the unique id."""
        return f"{self.entry_id}_{self.sensor_key}"

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self.device_name} {self._label}"

    @property
    def unit_of_measurement(self):
        """Return the unit the state is expressed in."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Return the sensor icon."""
        return self._icon

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    def update(self):
        """Refresh the shared data service and pull this sensor's value."""
        self.data.update()
        self._state = self.data.data[self._json_key]
class SolarlogData:
    """Get and update the latest data."""

    def __init__(self, hass, api, host):
        """Initialize the data object."""
        self.api = api
        self.hass = hass
        self.host = host
        # Rate-limit polling: Throttle makes repeated calls within
        # SCAN_INTERVAL no-ops, so many sensors can share one fetch.
        self.update = Throttle(SCAN_INTERVAL)(self._update)
        self.data = {}

    def _update(self):
        """Update the data from the SolarLog device."""
        try:
            # Re-create the client on every poll and read the device time
            # as a cheap connectivity probe before fetching everything.
            self.api = SolarLog(self.host)
            response = self.api.time
            _LOGGER.debug(
                "Connection to Solarlog successful. Retrieving latest Solarlog update of %s",
                response,
            )
        except (OSError, Timeout, HTTPError):
            _LOGGER.error("Connection error, Could not retrieve data, skipping update")
            return
        try:
            self.data["TIME"] = self.api.time
            self.data["powerAC"] = self.api.power_ac
            self.data["powerDC"] = self.api.power_dc
            self.data["voltageAC"] = self.api.voltage_ac
            self.data["voltageDC"] = self.api.voltage_dc
            # Yield/consumption values are divided by 1000 (presumably
            # Wh -> kWh -- confirm against the sunwatcher API docs).
            self.data["yieldDAY"] = self.api.yield_day / 1000
            self.data["yieldYESTERDAY"] = self.api.yield_yesterday / 1000
            self.data["yieldMONTH"] = self.api.yield_month / 1000
            self.data["yieldYEAR"] = self.api.yield_year / 1000
            self.data["yieldTOTAL"] = self.api.yield_total / 1000
            self.data["consumptionAC"] = self.api.consumption_ac
            self.data["consumptionDAY"] = self.api.consumption_day / 1000
            self.data["consumptionYESTERDAY"] = self.api.consumption_yesterday / 1000
            self.data["consumptionMONTH"] = self.api.consumption_month / 1000
            self.data["consumptionYEAR"] = self.api.consumption_year / 1000
            self.data["consumptionTOTAL"] = self.api.consumption_total / 1000
            self.data["totalPOWER"] = self.api.total_power
            self.data["alternatorLOSS"] = self.api.alternator_loss
            # Ratios are converted to whole percent.
            self.data["CAPACITY"] = round(self.api.capacity * 100, 0)
            self.data["EFFICIENCY"] = round(self.api.efficiency * 100, 0)
            self.data["powerAVAILABLE"] = self.api.power_available
            self.data["USAGE"] = self.api.usage
            _LOGGER.debug("Updated Solarlog overview data: %s", self.data)
        except AttributeError:
            _LOGGER.error("Missing details data in Solarlog response")
| 36.290323 | 153 | 0.645689 |
d87c7d0e0c062aa38797760fdaed67fc901a6e19 | 1,488 | py | Python | src/resources/baserouter.py | solnsumei/properties | 45361b7d46a5ac34931f3ed24bb6c5eb7fc8a81b | [
"MIT"
] | null | null | null | src/resources/baserouter.py | solnsumei/properties | 45361b7d46a5ac34931f3ed24bb6c5eb7fc8a81b | [
"MIT"
] | null | null | null | src/resources/baserouter.py | solnsumei/properties | 45361b7d46a5ac34931f3ed24bb6c5eb7fc8a81b | [
"MIT"
] | null | null | null | from fastapi import APIRouter
class BaseRouter(APIRouter):
    """An APIRouter that can register generic CRUD endpoints for one model."""

    def __init__(self, request_schema, response_schema, model):
        super().__init__()
        self.request_schema = request_schema
        self.response_schema = response_schema
        self.model = model

    def load_crud_routes(self):
        """Register list / fetch / create / update / delete routes on self."""
        # Local aliases so the closures (and their FastAPI annotations)
        # don't need to touch `self`.
        request_schema = self.request_schema
        response_schema = self.response_schema
        model = self.model

        @self.get("/", response_model=list[response_schema])
        async def fetch_all():
            """List every item."""
            return await response_schema.from_queryset(model.all())

        @self.get("/{item_id}", response_model=response_schema)
        async def fetch_one(item_id: int):
            """Fetch a single item by id."""
            queryset = model.get(id=item_id)
            return await response_schema.from_queryset_single(queryset)

        @self.post("/", status_code=201, response_model=response_schema)
        async def create(item: request_schema):
            """Create a new item and return it."""
            created = await model.create_one(item)
            return await response_schema.from_tortoise_orm(created)

        @self.put("/{item_id}", response_model=response_schema)
        async def update(item_id: int, item: request_schema):
            """Update an existing item and return it."""
            updated = await model.update_one(item_id, item)
            return await response_schema.from_queryset_single(updated)

        @self.delete("/{item_id}")
        async def delete(item_id: int):
            """Delete an item."""
            await model.delete_one(item_id)
            return {"message": "Item deleted successfully"}
ae2a3e91cc6000057372bf5bb793c33d21ff2440 | 43,779 | py | Python | src/toil/jobStores/abstractJobStore.py | smoe/toil | 3dae6d44d98516f0100f769aedfbfd57371ab458 | [
"Apache-2.0"
] | null | null | null | src/toil/jobStores/abstractJobStore.py | smoe/toil | 3dae6d44d98516f0100f769aedfbfd57371ab458 | [
"Apache-2.0"
] | null | null | null | src/toil/jobStores/abstractJobStore.py | smoe/toil | 3dae6d44d98516f0100f769aedfbfd57371ab458 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import map
from builtins import object
from builtins import super
import shutil
import re
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager, closing
from datetime import timedelta
from uuid import uuid4
# Python 3 compatibility imports
from six import itervalues
from six.moves.urllib.request import urlopen
import six.moves.urllib.parse as urlparse
from toil.lib.retry import retry_http
from toil.common import safeUnpickleFromStream
from toil.fileStores import FileID
from toil.job import JobException
from toil.lib.memoize import memoize
from toil.lib.objects import abstractclassmethod
from future.utils import with_metaclass
try:
import cPickle as pickle
except ImportError:
import pickle
import logging
logger = logging.getLogger(__name__)
class InvalidImportExportUrlException(Exception):
    """Raised when an import/export URL cannot be handled."""

    def __init__(self, url):
        """
        :param urlparse.ParseResult url: the rejected URL, already parsed
        """
        super().__init__("The URL '{}' is invalid.".format(url.geturl()))
class NoSuchJobException(Exception):
    """Indicates that the specified job does not exist."""

    def __init__(self, jobStoreID):
        """
        :param str jobStoreID: the ID of the job that could not be found
        """
        super().__init__("The job '{}' does not exist.".format(jobStoreID))
class ConcurrentFileModificationException(Exception):
    """Indicates that the file was attempted to be modified by multiple processes at once."""

    def __init__(self, jobStoreFileID):
        """
        :param str jobStoreFileID: the ID of the file that saw a concurrent update
        """
        super().__init__("Concurrent update to file {} detected.".format(jobStoreFileID))
class NoSuchFileException(Exception):
    """Indicates that the specified file does not exist."""

    def __init__(self, jobStoreFileID, customName=None, *extra):
        """
        :param str jobStoreFileID: the ID of the file that was mistakenly assumed to exist
        :param str customName: optionally, an alternate name for the nonexistent file
        :param list extra: optional extra information to add to the error message
        """
        # *extra exists to tolerate callers passing more positional args; see
        # https://github.com/DataBiosphere/toil/issues/2589#issuecomment-481912211
        if customName is None:
            message = "File '{}' does not exist.".format(jobStoreFileID)
        else:
            message = "File '{}' ({}) does not exist.".format(customName, jobStoreFileID)
        if extra:
            message += " Extra info: " + " ".join(str(x) for x in extra)
        super().__init__(message)
class NoSuchJobStoreException(Exception):
    """Indicates that the specified job store does not exist."""

    def __init__(self, locator):
        message = ("The job store '{}' does not exist, "
                   "so there is nothing to restart.".format(locator))
        super().__init__(message)
class JobStoreExistsException(Exception):
    """Indicates that the specified job store already exists."""

    def __init__(self, locator):
        message = (
            "The job store '{}' already exists. Use --restart to resume the workflow, or remove "
            "the job store with 'toil clean' to start the workflow from scratch.".format(locator)
        )
        super().__init__(message)
class AbstractJobStore(with_metaclass(ABCMeta, object)):
"""
Represents the physical storage for the jobs and files in a Toil workflow.
"""
    def __init__(self):
        """
        Create an instance of the job store. The instance will not be fully functional until
        either :meth:`.initialize` or :meth:`.resume` is invoked. Note that the :meth:`.destroy`
        method may be invoked on the object with or without prior invocation of either of these two
        methods.
        """
        # Double underscore => name-mangled; subclasses and callers access
        # it only through the `config` property.
        self.__config = None

    def initialize(self, config):
        """
        Create the physical storage for this job store, allocate a workflow ID and persist the
        given Toil configuration to the store.

        :param toil.common.Config config: the Toil configuration to initialize this job store
               with. The given configuration will be updated with the newly allocated workflow ID.

        :raises JobStoreExistsException: if the physical storage for this job store already exists
        """
        assert config.workflowID is None
        config.workflowID = str(uuid4())
        logger.debug("The workflow ID is: '%s'" % config.workflowID)
        self.__config = config
        self.writeConfig()

    def writeConfig(self):
        """
        Persists the value of the :attr:`AbstractJobStore.config` attribute to the
        job store, so that it can be retrieved later by other instances of this class.
        """
        # isProtected=False -- presumably so the config can be read before any
        # store-level protection is set up; confirm against writeSharedFileStream.
        with self.writeSharedFileStream('config.pickle', isProtected=False) as fileHandle:
            pickle.dump(self.__config, fileHandle, pickle.HIGHEST_PROTOCOL)

    def resume(self):
        """
        Connect this instance to the physical storage it represents and load the Toil configuration
        into the :attr:`AbstractJobStore.config` attribute.

        :raises NoSuchJobStoreException: if the physical storage for this job store doesn't exist
        """
        with self.readSharedFileStream('config.pickle') as fileHandle:
            config = safeUnpickleFromStream(fileHandle)
            assert config.workflowID is not None
            self.__config = config

    @property
    def config(self):
        """
        The Toil configuration associated with this job store.

        :rtype: toil.common.Config
        """
        return self.__config

    # Name of the shared file holding the ID of the workflow's root job.
    rootJobStoreIDFileName = 'rootJobStoreID'

    def setRootJob(self, rootJobStoreID):
        """
        Set the root job of the workflow backed by this job store

        :param str rootJobStoreID: The ID of the job to set as root
        """
        with self.writeSharedFileStream(self.rootJobStoreIDFileName) as f:
            f.write(rootJobStoreID.encode('utf-8'))

    def loadRootJob(self):
        """
        Loads the root job in the current job store.

        :raises toil.job.JobException: If no root job is set or if the root job doesn't exist in
                this job store
        :return: The root job.
        :rtype: toil.jobGraph.JobGraph
        """
        try:
            with self.readSharedFileStream(self.rootJobStoreIDFileName) as f:
                rootJobStoreID = f.read().decode('utf-8')
        except NoSuchFileException:
            raise JobException('No job has been set as the root in this job store')
        if not self.exists(rootJobStoreID):
            raise JobException("The root job '%s' doesn't exist. Either the Toil workflow "
                               "is finished or has never been started" % rootJobStoreID)
        return self.load(rootJobStoreID)

    # FIXME: This is only used in tests, why do we have it?
    def createRootJob(self, *args, **kwargs):
        """
        Create a new job and set it as the root job in this job store

        :rtype: toil.jobGraph.JobGraph
        """
        rootJob = self.create(*args, **kwargs)
        self.setRootJob(rootJob.jobStoreID)
        return rootJob

    def getRootJobReturnValue(self):
        """
        Parse the return value from the root job.

        Raises an exception if the root job hasn't fulfilled its promise yet.
        """
        # Parse out the return value from the root job
        with self.readSharedFileStream('rootJobReturnValue') as fH:
            return safeUnpickleFromStream(fH)

    @property
    @memoize
    def _jobStoreClasses(self):
        """
        A list of concrete AbstractJobStore implementations whose dependencies are installed.

        :rtype: list[AbstractJobStore]
        """
        jobStoreClassNames = (
            "toil.jobStores.azureJobStore.AzureJobStore",
            "toil.jobStores.fileJobStore.FileJobStore",
            "toil.jobStores.googleJobStore.GoogleJobStore",
            "toil.jobStores.aws.jobStore.AWSJobStore",
            "toil.jobStores.abstractJobStore.JobStoreSupport")
        jobStoreClasses = []
        for className in jobStoreClassNames:
            moduleName, className = className.rsplit('.', 1)
            from importlib import import_module
            try:
                module = import_module(moduleName)
            except ImportError:
                # Backends are optional extras; missing dependencies simply
                # exclude that backend from the list.
                logger.debug("Unable to import '%s' as is expected if the corresponding extra was "
                             "omitted at installation time.", moduleName)
            else:
                jobStoreClass = getattr(module, className)
                jobStoreClasses.append(jobStoreClass)
        return jobStoreClasses

    def _findJobStoreForUrl(self, url, export=False):
        """
        Returns the AbstractJobStore subclass that supports the given URL.

        :param urlparse.ParseResult url: The given URL
        :param bool export: Whether support for exporting (writing) is required
        :rtype: toil.jobStore.AbstractJobStore
        """
        for jobStoreCls in self._jobStoreClasses:
            if jobStoreCls._supportsUrl(url, export):
                return jobStoreCls
        raise RuntimeError("No job store implementation supports %sporting for URL '%s'" %
                           ('ex' if export else 'im', url.geturl()))

    def importFile(self, srcUrl, sharedFileName=None, hardlink=False):
        """
        Imports the file at the given URL into job store. The ID of the newly imported file is
        returned. If the name of a shared file name is provided, the file will be imported as
        such and None is returned.

        Currently supported schemes are:

            - 's3' for objects in Amazon S3
                e.g. s3://bucket/key

            - 'wasb' for blobs in Azure Blob Storage
                e.g. wasb://container/blob

            - 'file' for local files
                e.g. file:///local/file/path

            - 'http'
                e.g. http://someurl.com/path

            - 'gs'
                e.g. gs://bucket/file

        :param str srcUrl: URL that points to a file or object in the storage mechanism of a
                supported URL scheme e.g. a blob in an Azure Blob Storage container.

        :param str sharedFileName: Optional name to assign to the imported file within the job store

        :return: The jobStoreFileId of the imported file or None if sharedFileName was given
        :rtype: toil.fileStores.FileID or None
        """
        # Note that the helper method _importFile is used to read from the source and write to
        # destination (which is the current job store in this case). To implement any
        # optimizations that circumvent this, the _importFile method should be overridden by
        # subclasses of AbstractJobStore.
        srcUrl = urlparse.urlparse(srcUrl)
        otherCls = self._findJobStoreForUrl(srcUrl)
        return self._importFile(otherCls, srcUrl, sharedFileName=sharedFileName, hardlink=hardlink)

    def _importFile(self, otherCls, url, sharedFileName=None, hardlink=False):
        """
        Import the file at the given URL using the given job store class to retrieve that file.
        See also :meth:`.importFile`. This method applies a generic approach to importing: it
        asks the other job store class for a stream and writes that stream as either a regular or
        a shared file.

        :param AbstractJobStore otherCls: The concrete subclass of AbstractJobStore that supports
               reading from the given URL and getting the file size from the URL.

        :param urlparse.ParseResult url: The location of the file to import.

        :param str sharedFileName: Optional name to assign to the imported file within the job store

        :return The jobStoreFileId of imported file or None if sharedFileName was given
        :rtype: toil.fileStores.FileID or None
        """
        if sharedFileName is None:
            with self.writeFileStream() as (writable, jobStoreFileID):
                otherCls._readFromUrl(url, writable)
                return FileID(jobStoreFileID, otherCls.getSize(url))
        else:
            self._requireValidSharedFileName(sharedFileName)
            with self.writeSharedFileStream(sharedFileName) as writable:
                otherCls._readFromUrl(url, writable)
                return None

    def exportFile(self, jobStoreFileID, dstUrl):
        """
        Exports file to destination pointed at by the destination URL.

        Refer to :meth:`.AbstractJobStore.importFile` documentation for currently supported URL schemes.

        Note that the helper method _exportFile is used to read from the source and write to
        destination. To implement any optimizations that circumvent this, the _exportFile method
        should be overridden by subclasses of AbstractJobStore.

        :param str jobStoreFileID: The id of the file in the job store that should be exported.
        :param str dstUrl: URL that points to a file or object in the storage mechanism of a
                supported URL scheme e.g. a blob in an Azure Blob Storage container.
        """
        dstUrl = urlparse.urlparse(dstUrl)
        otherCls = self._findJobStoreForUrl(dstUrl, export=True)
        self._exportFile(otherCls, jobStoreFileID, dstUrl)

    def _exportFile(self, otherCls, jobStoreFileID, url):
        """
        Refer to exportFile docstring for information about this method.

        :param AbstractJobStore otherCls: The concrete subclass of AbstractJobStore that supports
               exporting to the given URL. Note that the type annotation here is not completely
               accurate. This is not an instance, it's a class, but there is no way to reflect
               that in :pep:`484` type hints.

        :param str jobStoreFileID: The id of the file that will be exported.

        :param urlparse.ParseResult url: The parsed URL of the file to export to.
        """
        with self.readFileStream(jobStoreFileID) as readable:
            otherCls._writeToUrl(readable, url)
@abstractclassmethod
def getSize(cls, url):
"""
returns the size in bytes of the file at the given URL
:param urlparse.ParseResult url: URL that points to a file or object in the storage
mechanism of a supported URL scheme e.g. a blob in an Azure Blob Storage container.
"""
raise NotImplementedError
@abstractclassmethod
def _readFromUrl(cls, url, writable):
"""
Reads the contents of the object at the specified location and writes it to the given
writable stream.
Refer to :func:`~AbstractJobStore.importFile` documentation for currently supported URL schemes.
:param urlparse.ParseResult url: URL that points to a file or object in the storage
mechanism of a supported URL scheme e.g. a blob in an Azure Blob Storage container.
:param writable: a writable stream
"""
raise NotImplementedError()
@abstractclassmethod
def _writeToUrl(cls, readable, url):
"""
Reads the contents of the given readable stream and writes it to the object at the
specified location.
Refer to AbstractJobStore.importFile documentation for currently supported URL schemes.
:param urlparse.ParseResult url: URL that points to a file or object in the storage
mechanism of a supported URL scheme e.g. a blob in an Azure Blob Storage container.
:param readable: a readable stream
"""
raise NotImplementedError()
@abstractclassmethod
def _supportsUrl(cls, url, export=False):
"""
Returns True if the job store supports the URL's scheme.
Refer to AbstractJobStore.importFile documentation for currently supported URL schemes.
:param bool export: Determines if the url is supported for exported
:param urlparse.ParseResult url: a parsed URL that may be supported
:return bool: returns true if the cls supports the URL
"""
raise NotImplementedError()
@abstractmethod
def destroy(self):
"""
The inverse of :meth:`.initialize`, this method deletes the physical storage represented
by this instance. While not being atomic, this method *is* at least idempotent,
as a means to counteract potential issues with eventual consistency exhibited by the
underlying storage mechanisms. This means that if the method fails (raises an exception),
it may (and should be) invoked again. If the underlying storage mechanism is eventually
consistent, even a successful invocation is not an ironclad guarantee that the physical
storage vanished completely and immediately. A successful invocation only guarantees that
the deletion will eventually happen. It is therefore recommended to not immediately reuse
the same job store location for a new Toil workflow.
"""
raise NotImplementedError()
def getEnv(self):
"""
Returns a dictionary of environment variables that this job store requires to be set in
order to function properly on a worker.
:rtype: dict[str,str]
"""
return {}
# Cleanup functions
def clean(self, jobCache=None):
"""
Function to cleanup the state of a job store after a restart.
Fixes jobs that might have been partially updated. Resets the try counts and removes jobs
that are not successors of the current root job.
:param dict[str,toil.jobGraph.JobGraph] jobCache: if a value it must be a dict
from job ID keys to JobGraph object values. Jobs will be loaded from the cache
(which can be downloaded from the job store in a batch) instead of piecemeal when
recursed into.
"""
if jobCache is None:
logger.warning("Cleaning jobStore recursively. This may be slow.")
# Functions to get and check the existence of jobs, using the jobCache
# if present
def getJob(jobId):
if jobCache is not None:
try:
return jobCache[jobId]
except KeyError:
return self.load(jobId)
else:
return self.load(jobId)
def haveJob(jobId):
if jobCache is not None:
if jobId in jobCache:
return True
else:
return self.exists(jobId)
else:
return self.exists(jobId)
def getJobs():
if jobCache is not None:
return itervalues(jobCache)
else:
return self.jobs()
# Iterate from the root jobGraph and collate all jobs that are reachable from it
# All other jobs returned by self.jobs() are orphaned and can be removed
reachableFromRoot = set()
def getConnectedJobs(jobGraph):
if jobGraph.jobStoreID in reachableFromRoot:
return
reachableFromRoot.add(jobGraph.jobStoreID)
# Traverse jobs in stack
for jobs in jobGraph.stack:
for successorJobStoreID in [x.jobStoreID for x in jobs]:
if (successorJobStoreID not in reachableFromRoot
and haveJob(successorJobStoreID)):
getConnectedJobs(getJob(successorJobStoreID))
# Traverse service jobs
for jobs in jobGraph.services:
for serviceJobStoreID in [x.jobStoreID for x in jobs]:
if haveJob(serviceJobStoreID):
assert serviceJobStoreID not in reachableFromRoot
reachableFromRoot.add(serviceJobStoreID)
logger.debug("Checking job graph connectivity...")
getConnectedJobs(self.loadRootJob())
logger.debug("%d jobs reachable from root." % len(reachableFromRoot))
# Cleanup jobs that are not reachable from the root, and therefore orphaned
jobsToDelete = [x for x in getJobs() if x.jobStoreID not in reachableFromRoot]
for jobGraph in jobsToDelete:
# clean up any associated files before deletion
for fileID in jobGraph.filesToDelete:
# Delete any files that should already be deleted
logger.warn("Deleting file '%s'. It is marked for deletion but has not yet been "
"removed.", fileID)
self.deleteFile(fileID)
# Delete the job
self.delete(jobGraph.jobStoreID)
jobGraphsReachableFromRoot = {id: getJob(id) for id in reachableFromRoot}
# Clean up any checkpoint jobs -- delete any successors it
# may have launched, and restore the job to a pristine
# state
jobsDeletedByCheckpoints = set()
for jobGraph in [jG for jG in jobGraphsReachableFromRoot.values() if jG.checkpoint is not None]:
if jobGraph.jobStoreID in jobsDeletedByCheckpoints:
# This is a checkpoint that was nested within an
# earlier checkpoint, so it and all its successors are
# already gone.
continue
logger.debug("Restarting checkpointed job %s" % jobGraph)
deletedThisRound = jobGraph.restartCheckpoint(self)
jobsDeletedByCheckpoints |= set(deletedThisRound)
for jobID in jobsDeletedByCheckpoints:
del jobGraphsReachableFromRoot[jobID]
# Clean up jobs that are in reachable from the root
for jobGraph in jobGraphsReachableFromRoot.values():
# jobGraphs here are necessarily in reachable from root.
changed = [False] # This is a flag to indicate the jobGraph state has
# changed
# If the job has files to delete delete them.
if len(jobGraph.filesToDelete) != 0:
# Delete any files that should already be deleted
for fileID in jobGraph.filesToDelete:
logger.critical("Removing file in job store: %s that was "
"marked for deletion but not previously removed" % fileID)
self.deleteFile(fileID)
jobGraph.filesToDelete = []
changed[0] = True
# For a job whose command is already executed, remove jobs from the stack that are
# already deleted. This cleans up the case that the jobGraph had successors to run,
# but had not been updated to reflect this.
if jobGraph.command is None:
stackSizeFn = lambda: sum(map(len, jobGraph.stack))
startStackSize = stackSizeFn()
# Remove deleted jobs
jobGraph.stack = [[y for y in x if self.exists(y.jobStoreID)] for x in jobGraph.stack]
# Remove empty stuff from the stack
jobGraph.stack = [x for x in jobGraph.stack if len(x) > 0]
# Check if anything got removed
if stackSizeFn() != startStackSize:
changed[0] = True
# Cleanup any services that have already been finished.
# Filter out deleted services and update the flags for services that exist
# If there are services then renew
# the start and terminate flags if they have been removed
def subFlagFile(jobStoreID, jobStoreFileID, flag):
if self.fileExists(jobStoreFileID):
return jobStoreFileID
# Make a new flag
newFlag = self.getEmptyFileStoreID(jobStoreID, cleanup=False)
# Load the jobGraph for the service and initialise the link
serviceJobGraph = getJob(jobStoreID)
if flag == 1:
logger.debug("Recreating a start service flag for job: %s, flag: %s",
jobStoreID, newFlag)
serviceJobGraph.startJobStoreID = newFlag
elif flag == 2:
logger.debug("Recreating a terminate service flag for job: %s, flag: %s",
jobStoreID, newFlag)
serviceJobGraph.terminateJobStoreID = newFlag
else:
logger.debug("Recreating a error service flag for job: %s, flag: %s",
jobStoreID, newFlag)
assert flag == 3
serviceJobGraph.errorJobStoreID = newFlag
# Update the service job on disk
self.update(serviceJobGraph)
changed[0] = True
return newFlag
servicesSizeFn = lambda: sum(map(len, jobGraph.services))
startServicesSize = servicesSizeFn()
def replaceFlagsIfNeeded(serviceJobNode):
serviceJobNode.startJobStoreID = subFlagFile(serviceJobNode.jobStoreID, serviceJobNode.startJobStoreID, 1)
serviceJobNode.terminateJobStoreID = subFlagFile(serviceJobNode.jobStoreID, serviceJobNode.terminateJobStoreID, 2)
serviceJobNode.errorJobStoreID = subFlagFile(serviceJobNode.jobStoreID, serviceJobNode.errorJobStoreID, 3)
# jobGraph.services is a list of lists containing serviceNodes
# remove all services that no longer exist
services = jobGraph.services
jobGraph.services = []
for serviceList in services:
existingServices = [service for service in serviceList if self.exists(service.jobStoreID)]
if existingServices:
jobGraph.services.append(existingServices)
list(map(lambda serviceList: list(map(replaceFlagsIfNeeded, serviceList)), jobGraph.services))
if servicesSizeFn() != startServicesSize:
changed[0] = True
# Reset the retry count of the jobGraph
if jobGraph.remainingRetryCount != self._defaultTryCount():
jobGraph.remainingRetryCount = self._defaultTryCount()
changed[0] = True
# This cleans the old log file which may
# have been left if the jobGraph is being retried after a jobGraph failure.
if jobGraph.logJobStoreFileID != None:
self.deleteFile(jobGraph.logJobStoreFileID)
jobGraph.logJobStoreFileID = None
changed[0] = True
if changed[0]: # Update, but only if a change has occurred
logger.critical("Repairing job: %s" % jobGraph.jobStoreID)
self.update(jobGraph)
# Remove any crufty stats/logging files from the previous run
logger.debug("Discarding old statistics and logs...")
# We have to manually discard the stream to avoid getting
# stuck on a blocking write from the job store.
def discardStream(stream):
"""Read the stream 4K at a time until EOF, discarding all input."""
while len(stream.read(4096)) != 0:
pass
self.readStatsAndLogging(discardStream)
logger.debug("Job store is clean")
# TODO: reloading of the rootJob may be redundant here
return self.loadRootJob()
##########################################
# The following methods deal with creating/loading/updating/writing/checking for the
# existence of jobs
##########################################
@contextmanager
def batch(self):
    """
    All calls to create() with this context manager active will be performed in a batch
    after the context manager is released.
    :rtype: None
    """
    # No-op by default; stores that support batching override this to buffer
    # create() calls and flush them when the context exits.
    yield
@abstractmethod
def create(self, jobNode):
    """
    Creates a job graph from the given job node & writes it to the job store.
    :rtype: toil.jobGraph.JobGraph
    """
    raise NotImplementedError()
@abstractmethod
def exists(self, jobStoreID):
    """
    Indicates whether the job with the specified jobStoreID exists in the job store
    :rtype: bool
    """
    raise NotImplementedError()
# One year should be sufficient to finish any pipeline ;-)
# Class-level upper bound implementations may use when signing public URLs.
publicUrlExpiration = timedelta(days=365)
@abstractmethod
def getPublicUrl(self, fileName):
    """
    Returns a publicly accessible URL to the given file in the job store. The returned URL may
    expire as early as 1h after its been returned. Throw an exception if the file does not
    exist.
    :param str fileName: the jobStoreFileID of the file to generate a URL for
    :raise NoSuchFileException: if the specified file does not exist in this job store
    :rtype: str
    """
    raise NotImplementedError()
@abstractmethod
def getSharedPublicUrl(self, sharedFileName):
    """
    Differs from :meth:`getPublicUrl` in that this method is for generating URLs for shared
    files written by :meth:`writeSharedFileStream`.
    Returns a publicly accessible URL to the given file in the job store. The returned URL
    starts with 'http:', 'https:' or 'file:'. The returned URL may expire as early as 1h
    after its been returned. Throw an exception if the file does not exist.
    :param str sharedFileName: The name of the shared file to generate a publically accessible url for.
    :raise NoSuchFileException: raised if the specified file does not exist in the store
    :rtype: str
    """
    raise NotImplementedError()
@abstractmethod
def load(self, jobStoreID):
    """
    Loads the job referenced by the given ID and returns it.
    :param str jobStoreID: the ID of the job to load
    :raise NoSuchJobException: if there is no job with the given ID
    :rtype: toil.jobGraph.JobGraph
    """
    raise NotImplementedError()
@abstractmethod
def update(self, job):
    """
    Persists the job in this store atomically.
    :param toil.jobGraph.JobGraph job: the job to write to this job store
    """
    raise NotImplementedError()
@abstractmethod
def delete(self, jobStoreID):
    """
    Removes from store atomically, can not then subsequently call load(), write(), update(),
    etc. with the job.
    This operation is idempotent, i.e. deleting a job twice or deleting a non-existent job
    will succeed silently.
    :param str jobStoreID: the ID of the job to delete from this job store
    """
    raise NotImplementedError()
# NOTE: intentionally not decorated @abstractmethod -- subclasses may leave it
# unimplemented, in which case calling it raises NotImplementedError at run time.
def jobs(self):
    """
    Best effort attempt to return iterator on all jobs in the store. The iterator may not
    return all jobs and may also contain orphaned jobs that have already finished successfully
    and should not be rerun. To guarantee you get any and all jobs that can be run instead
    construct a more expensive ToilState object
    :return: Returns iterator on jobs in the store. The iterator may or may not contain all jobs and may contain
             invalid jobs
    :rtype: Iterator[toil.jobGraph.JobGraph]
    """
    raise NotImplementedError()
##########################################
# The following provide a way of creating/reading/writing/updating files
# associated with a given job.
##########################################
@abstractmethod
def writeFile(self, localFilePath, jobStoreID=None, cleanup=False):
    """
    Takes a file (as a path) and places it in this job store. Returns an ID that can be used
    to retrieve the file at a later time.
    :param str localFilePath: the path to the local file that will be uploaded to the job store.
    :param str jobStoreID: the id of a job, or None. If specified, the may be associated
           with that job in a job-store-specific way. This may influence the returned ID.
    :param bool cleanup: Whether to attempt to delete the file when the job
           whose jobStoreID was given as jobStoreID is deleted with
           jobStore.delete(job). If jobStoreID was not given, does nothing.
    :raise ConcurrentFileModificationException: if the file was modified concurrently during
           an invocation of this method
    :raise NoSuchJobException: if the job specified via jobStoreID does not exist
    FIXME: some implementations may not raise this
    :return: an ID referencing the newly created file and can be used to read the
             file in the future.
    :rtype: str
    """
    raise NotImplementedError()
@abstractmethod
@contextmanager
def writeFileStream(self, jobStoreID=None, cleanup=False):
    """
    Similar to writeFile, but returns a context manager yielding a tuple of
    1) a file handle which can be written to and 2) the ID of the resulting
    file in the job store. The yielded file handle does not need to and
    should not be closed explicitly.
    :param str jobStoreID: the id of a job, or None. If specified, the may be associated
           with that job in a job-store-specific way. This may influence the returned ID.
    :param bool cleanup: Whether to attempt to delete the file when the job
           whose jobStoreID was given as jobStoreID is deleted with
           jobStore.delete(job). If jobStoreID was not given, does nothing.
    :raise ConcurrentFileModificationException: if the file was modified concurrently during
           an invocation of this method
    :raise NoSuchJobException: if the job specified via jobStoreID does not exist
    FIXME: some implementations may not raise this
    :return: an ID that references the newly created file and can be used to read the
             file in the future.
    :rtype: str
    """
    raise NotImplementedError()
@abstractmethod
def getEmptyFileStoreID(self, jobStoreID=None, cleanup=False):
    """
    Creates an empty file in the job store and returns its ID.
    Call to fileExists(getEmptyFileStoreID(jobStoreID)) will return True.
    :param str jobStoreID: the id of a job, or None. If specified, the may be associated
           with that job in a job-store-specific way. This may influence the returned ID.
    :param bool cleanup: Whether to attempt to delete the file when the job
           whose jobStoreID was given as jobStoreID is deleted with
           jobStore.delete(job). If jobStoreID was not given, does nothing.
    :return: a jobStoreFileID that references the newly created file and can be used to reference the
             file in the future.
    :rtype: str
    """
    raise NotImplementedError()
@abstractmethod
def readFile(self, jobStoreFileID, localFilePath, symlink=False):
    """
    Copies or hard links the file referenced by jobStoreFileID to the given
    local file path. The version will be consistent with the last copy of
    the file written/updated. If the file in the job store is later
    modified via updateFile or updateFileStream, it is
    implementation-defined whether those writes will be visible at
    localFilePath.
    The file at the given local path may not be modified after this method returns!
    :param str jobStoreFileID: ID of the file to be copied
    :param str localFilePath: the local path indicating where to place the contents of the
           given file in the job store
    :param bool symlink: whether the reader can tolerate a symlink. If set to true, the job
           store may create a symlink instead of a full copy of the file or a hard link.
    """
    raise NotImplementedError()
@abstractmethod
@contextmanager
def readFileStream(self, jobStoreFileID):
    """
    Similar to readFile, but returns a context manager yielding a file handle which can be
    read from. The yielded file handle does not need to and should not be closed explicitly.
    :param str jobStoreFileID: ID of the file to get a readable file handle for
    """
    raise NotImplementedError()
@abstractmethod
def deleteFile(self, jobStoreFileID):
    """
    Deletes the file with the given ID from this job store. This operation is idempotent, i.e.
    deleting a file twice or deleting a non-existent file will succeed silently.
    :param str jobStoreFileID: ID of the file to delete
    """
    raise NotImplementedError()
@abstractmethod
def fileExists(self, jobStoreFileID):
    """
    Determine whether a file exists in this job store.
    :param str jobStoreFileID: an ID referencing the file to be checked
    :rtype: bool
    """
    raise NotImplementedError()
@abstractmethod
def updateFile(self, jobStoreFileID, localFilePath):
    """
    Replaces the existing version of a file in the job store. Throws an exception if the file
    does not exist.
    :param str jobStoreFileID: the ID of the file in the job store to be updated
    :param str localFilePath: the local path to a file that will overwrite the current version
           in the job store
    :raise ConcurrentFileModificationException: if the file was modified concurrently during
           an invocation of this method
    :raise NoSuchFileException: if the specified file does not exist
    """
    raise NotImplementedError()
# NOTE(review): unlike readFileStream/writeFileStream, this one is not decorated
# with @contextmanager here, although its docstring says it returns one -- confirm
# that implementations are expected to supply the context manager themselves.
@abstractmethod
def updateFileStream(self, jobStoreFileID):
    """
    Replaces the existing version of a file in the job store. Similar to writeFile, but
    returns a context manager yielding a file handle which can be written to. The
    yielded file handle does not need to and should not be closed explicitly.
    :param str jobStoreFileID: the ID of the file in the job store to be updated
    :raise ConcurrentFileModificationException: if the file was modified concurrently during
           an invocation of this method
    :raise NoSuchFileException: if the specified file does not exist
    """
    raise NotImplementedError()
##########################################
# The following methods deal with shared files, i.e. files not associated
# with specific jobs.
##########################################
# Shared file names: one or more of letters, digits, '.', '_' or '-'
sharedFileNameRegex = re.compile(r'^[a-zA-Z0-9._-]+$')
# FIXME: Rename to updateSharedFileStream
@abstractmethod
@contextmanager
def writeSharedFileStream(self, sharedFileName, isProtected=None):
    """
    Returns a context manager yielding a writable file handle to the global file referenced
    by the given name.
    :param str sharedFileName: A file name matching AbstractJobStore.fileNameRegex, unique within
           this job store
    :param bool isProtected: True if the file must be encrypted, None if it may be encrypted or
           False if it must be stored in the clear.
    :raise ConcurrentFileModificationException: if the file was modified concurrently during
           an invocation of this method
    """
    raise NotImplementedError()
@abstractmethod
@contextmanager
def readSharedFileStream(self, sharedFileName):
    """
    Returns a context manager yielding a readable file handle to the global file referenced
    by the given name.
    :param str sharedFileName: A file name matching AbstractJobStore.fileNameRegex, unique within
           this job store
    """
    raise NotImplementedError()
@abstractmethod
def writeStatsAndLogging(self, statsAndLoggingString):
    """
    Adds the given statistics/logging string to the store of statistics info.
    :param str statsAndLoggingString: the string to be written to the stats file
    :raise ConcurrentFileModificationException: if the file was modified concurrently during
           an invocation of this method
    """
    raise NotImplementedError()
@abstractmethod
def readStatsAndLogging(self, callback, readAll=False):
    """
    Reads stats/logging strings accumulated by the writeStatsAndLogging() method. For each
    stats/logging string this method calls the given callback function with an open,
    readable file handle from which the stats string can be read. Returns the number of
    stats/logging strings processed. Each stats/logging string is only processed once unless
    the readAll parameter is set, in which case the given callback will be invoked for all
    existing stats/logging strings, including the ones from a previous invocation of this
    method.
    :param Callable callback: a function to be applied to each of the stats file handles found
    :param bool readAll: a boolean indicating whether to read the already processed stats files
           in addition to the unread stats files
    :raise ConcurrentFileModificationException: if the file was modified concurrently during
           an invocation of this method
    :return: the number of stats files processed
    :rtype: int
    """
    raise NotImplementedError()
## Helper methods for subclasses
def _defaultTryCount(self):
    # retryCount re-attempts plus the one initial attempt
    return int(self.config.retryCount + 1)
@classmethod
def _validateSharedFileName(cls, sharedFileName):
    # True iff the name matches sharedFileNameRegex (letters, digits, '.', '_', '-')
    return bool(cls.sharedFileNameRegex.match(sharedFileName))
@classmethod
def _requireValidSharedFileName(cls, sharedFileName):
    # Raise instead of returning False: used as a guard by subclasses.
    if not cls._validateSharedFileName(sharedFileName):
        raise ValueError("Not a valid shared file name: '%s'." % sharedFileName)
class JobStoreSupport(with_metaclass(ABCMeta, AbstractJobStore)):
    """Mixin adding read-only HTTP(S)/FTP URL import support to job stores."""

    @classmethod
    def _supportsUrl(cls, url, export=False):
        # These schemes can be imported from but never exported to.
        return url.scheme.lower() in ('http', 'https', 'ftp') and not export

    @classmethod
    def getSize(cls, url):
        """Return the size in bytes of the resource at ``url``, or None if unknown."""
        if url.scheme.lower() == 'ftp':
            # No cheap way to query the size over FTP here.
            return None
        for attempt in retry_http():
            with attempt:
                with closing(urlopen(url.geturl())) as readable:
                    # Just read the headers for the content length. Servers may
                    # omit the header; previously int(None) raised TypeError --
                    # treat a missing header as "size unknown", like FTP.
                    size = readable.info().get('content-length')
                    return int(size) if size is not None else None

    @classmethod
    def _readFromUrl(cls, url, writable):
        """Stream the resource at ``url`` into ``writable``, retrying on HTTP errors."""
        for attempt in retry_http():
            with attempt:
                with closing(urlopen(url.geturl())) as readable:
                    shutil.copyfileobj(readable, writable)
| 41.300943 | 130 | 0.645515 |
a73808d1cbadb32576a374d5d727e33c6fa880d9 | 222 | py | Python | lib/Gassmann.py | yohanesnuwara/CO2Inject | 0be66ca14e1eb3787701541fd91d2d190c1e50b9 | [
"MIT"
] | null | null | null | lib/Gassmann.py | yohanesnuwara/CO2Inject | 0be66ca14e1eb3787701541fd91d2d190c1e50b9 | [
"MIT"
] | null | null | null | lib/Gassmann.py | yohanesnuwara/CO2Inject | 0be66ca14e1eb3787701541fd91d2d190c1e50b9 | [
"MIT"
def Ks(Kd, Km, Kf, phi):
    """Gassmann fluid substitution: saturated-rock bulk modulus.

    Kd  -- dry-rock bulk modulus
    Km  -- mineral (grain) bulk modulus
    Kf  -- pore-fluid bulk modulus
    phi -- porosity (fraction, 0..1)
    """
    # Biot-type coefficient gamma = 1 - phi - Kd/Km
    biot = 1.0 - phi - Kd / Km
    return Kd + (biot + phi) ** 2 / (biot / Km + phi / Kf)
def Kd(Ks, Km, Kf, phi):
    """Inverse Gassmann: dry-rock bulk modulus from the saturated one.

    Ks  -- saturated-rock bulk modulus
    Km  -- mineral (grain) bulk modulus
    Kf  -- pore-fluid bulk modulus
    phi -- porosity (fraction, 0..1)
    """
    biot = phi * (Km / Kf - 1.0)
    return (Ks * (biot + 1.0) - Km) / (biot - 1.0 + Ks / Km)
| 24.666667 | 56 | 0.509009 |
e467627aceed4ba4feb788a5253732784b5ecff7 | 1,224 | py | Python | atest/testresources/testlibs/ArgumentsPython.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 7 | 2015-02-25T10:55:02.000Z | 2015-11-04T03:20:05.000Z | atest/testresources/testlibs/ArgumentsPython.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 12 | 2015-02-24T17:00:06.000Z | 2015-07-31T08:32:07.000Z | atest/testresources/testlibs/ArgumentsPython.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 2 | 2015-12-15T11:00:35.000Z | 2018-02-24T18:11:24.000Z | import sys
class ArgumentsPython:
    """Test-fixture keyword library exercising argument-count handling.

    Each method's docstring encodes its expected (min, max) argument counts
    and is consumed by unit tests, so the docstrings must stay exactly as is.
    """

    def a_0(self):
        """(0,0)"""
        return 'a_0'

    def a_1(self, arg):
        """(1,1)"""
        return 'a_1: ' + arg

    def a_3(self, arg1, arg2, arg3):
        """(3,3)"""
        return ' '.join(('a_3:', arg1, arg2, arg3))

    def a_0_1(self, arg='default'):
        """(0,1)"""
        return 'a_0_1: ' + arg

    def a_1_3(self, arg1, arg2='default', arg3='default'):
        """(1,3)"""
        return ' '.join(('a_1_3:', arg1, arg2, arg3))

    def a_0_n(self, *args):
        """(0,sys.maxint)"""
        return ' '.join(('a_0_n:', ' '.join(args)))

    def a_1_n(self, arg, *args):
        """(1,sys.maxint)"""
        return ' '.join(('a_1_n:', arg, ' '.join(args)))

    def a_1_2_n(self, arg1, arg2='default', *args):
        """(1,sys.maxint)"""
        return ' '.join(('a_1_2_n:', arg1, arg2, ' '.join(args)))

    # On Python 3 the advertised upper bound is sys.maxsize, not sys.maxint.
    # Patching __doc__ once here is equivalent to the original per-method checks.
    if sys.version_info[0] == 3:
        a_0_n.__doc__ = """(0,sys.maxsize)"""
        a_1_n.__doc__ = """(1,sys.maxsize)"""
        a_1_2_n.__doc__ = """(1,sys.maxsize)"""
| 25.5 | 69 | 0.482026 |
54515116899fa6adb55ce236d3ca284b639c067c | 8,604 | py | Python | train_abstractor.py | binhna/fast_abs_rl | f371696590ac7ab7116c729dbcde5f8992b69420 | [
"MIT"
] | null | null | null | train_abstractor.py | binhna/fast_abs_rl | f371696590ac7ab7116c729dbcde5f8992b69420 | [
"MIT"
] | null | null | null | train_abstractor.py | binhna/fast_abs_rl | f371696590ac7ab7116c729dbcde5f8992b69420 | [
"MIT"
] | null | null | null | """ train the abstractor"""
import argparse
import json
import os
from os.path import join, exists
import pickle as pkl
from cytoolz import compose
import torch
from torch import optim
from torch.nn import functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from model.copy_summ import CopySumm
from model.util import sequence_loss
from training import get_basic_grad_fn, basic_validate
from training import BasicPipeline, BasicTrainer
from data.data import CnnDmDataset
from data.batcher import coll_fn, prepro_fn
from data.batcher import convert_batch_copy, batchify_fn_copy
from data.batcher import BucketedGenerater
from utils import PAD, UNK, START, END
from utils import make_vocab, make_embedding
# NOTE: bucket size too large may sacrifice randomness,
# to low may increase # of PAD tokens
BUCKET_SIZE = 6400
# try:
# DATA_DIR = os.environ['DATA']
# except KeyError:
# print('please use environment variable to specify data directories')
class MatchDataset(CnnDmDataset):
    """ single article sentence -> single abstract sentence
    (dataset created by greedily matching ROUGE)
    """
    def __init__(self, args):
        # args.mode selects the split ('train'/'val'); args.data_dir is the root
        super().__init__(args.mode, args.data_dir)
    def __getitem__(self, i):
        js_data = super().__getitem__(i)
        art_sents, abs_sents, extracts = (
            js_data['article'], js_data['abstract'], js_data['extracted'])
        # keep only the article sentences chosen by the extractor and pair them
        # 1:1 with the first len(extracts) abstract sentences
        # (the comprehension's 'i' shadows the index argument; harmless in py3
        # since comprehensions have their own scope)
        matched_arts = [art_sents[i] for i in extracts]
        return matched_arts, abs_sents[:len(extracts)]
def configure_net(vocab_size, emb_dim,
                  n_hidden, bidirectional, n_layer):
    """Instantiate a CopySumm network.

    :return: (network, kwargs dict used to build it) -- the kwargs are also
             persisted in meta.json so the model can be re-created later.
    """
    # dict literal instead of item-by-item assignment (same insertion order)
    net_args = {
        'vocab_size': vocab_size,
        'emb_dim': emb_dim,
        'n_hidden': n_hidden,
        'bidirectional': bidirectional,
        'n_layer': n_layer,
    }
    net = CopySumm(**net_args)
    return net, net_args
def configure_training(opt, lr, clip_grad, lr_decay, batch_size):
    """ supports Adam optimizer only"""
    assert opt in ['adam']
    opt_kwargs = {}
    opt_kwargs['lr'] = lr
    train_params = {}
    train_params['optimizer'] = (opt, opt_kwargs)
    train_params['clip_grad_norm'] = clip_grad
    train_params['batch_size'] = batch_size
    train_params['lr_decay'] = lr_decay

    # PEP 8 (E731): named function instead of a lambda assigned to a name;
    # per-token NLL (reduction='none') so sequence_loss can mask PAD positions
    def nll(logit, target):
        return F.nll_loss(logit, target, reduction='none')

    def criterion(logits, targets):
        return sequence_loss(logits, targets, nll, pad_idx=PAD)
    return criterion, train_params
def build_batchers(word2id, args):
    """Construct bucketed train/val batch generators over MatchDataset."""
    cuda = args.cuda
    debug = args.debug
    prepro = prepro_fn(args.max_art, args.max_abs)
    def sort_key(sample):
        # sort by (target length, source length) so buckets hold similar sizes
        src, target = sample
        return (len(target), len(src))
    batchify = compose(
        batchify_fn_copy(PAD, START, END, cuda=cuda),
        convert_batch_copy(UNK, word2id)
    )
    # NOTE: MatchDataset reads args.mode in __init__, so the shared args object
    # is mutated to switch between the 'train' and 'val' splits below.
    setattr(args, 'mode', 'train')
    train_loader = DataLoader(
        MatchDataset(args), batch_size=BUCKET_SIZE,
        shuffle=not debug,
        num_workers=4 if cuda and not debug else 0,
        collate_fn=coll_fn
    )
    train_batcher = BucketedGenerater(train_loader, prepro, sort_key, batchify,
                                      single_run=False, fork=not debug)
    setattr(args, 'mode', 'val')
    val_loader = DataLoader(
        MatchDataset(args), batch_size=BUCKET_SIZE,
        shuffle=False, num_workers=4 if cuda and not debug else 0,
        collate_fn=coll_fn
    )
    # validation runs once per call (single_run=True), training loops forever
    val_batcher = BucketedGenerater(val_loader, prepro, sort_key, batchify,
                                    single_run=True, fork=not debug)
    return train_batcher, val_batcher
def main(args):
    """End-to-end abstractor training: build data, net, config, then train."""
    # create data batcher, vocabulary
    # batcher
    with open(join(args.data_dir, 'vocab_cnt.pkl'), 'rb') as f:
        wc = pkl.load(f)
    word2id = make_vocab(wc, args.vsize)
    train_batcher, val_batcher = build_batchers(word2id,
                                                args)
    # make net
    net, net_args = configure_net(len(word2id), args.emb_dim,
                                  args.n_hidden, args.bi, args.n_layer)
    if args.w2v:
        # NOTE: the pretrained embedding having the same dimension
        # as args.emb_dim should already be trained
        embedding, _ = make_embedding(
            {i: w for w, i in word2id.items()}, args.w2v)
        net.set_embedding(embedding)
    # configure training setting
    criterion, train_params = configure_training(
        'adam', args.lr, args.clip, args.decay, args.batch
    )
    # save experiment setting
    if not exists(args.path):
        os.makedirs(args.path)
    with open(join(args.path, 'vocab.pkl'), 'wb') as f:
        pkl.dump(word2id, f, pkl.HIGHEST_PROTOCOL)
    meta = {}
    meta['net'] = 'base_abstractor'
    meta['net_args'] = net_args
    # NOTE(review): key typo 'traing_params' kept as-is -- downstream readers
    # of meta.json may depend on it; confirm before renaming.
    meta['traing_params'] = train_params
    with open(join(args.path, 'meta.json'), 'w') as f:
        json.dump(meta, f, indent=4)
    # prepare trainer
    val_fn = basic_validate(net, criterion)
    grad_fn = get_basic_grad_fn(net, args.clip)
    optimizer = optim.Adam(net.parameters(), **train_params['optimizer'][1])
    # reduce LR when validation loss plateaus
    scheduler = ReduceLROnPlateau(optimizer, 'min', verbose=True,
                                  factor=args.decay, min_lr=0,
                                  patience=args.lr_p)
    if args.cuda:
        net = net.cuda()
    pipeline = BasicPipeline(meta['net'], net,
                             train_batcher, val_batcher, args.batch, val_fn,
                             criterion, optimizer, grad_fn)
    trainer = BasicTrainer(pipeline, args.path,
                           args.ckpt_freq, args.patience, scheduler)
    print('start training with the following hyper-parameters:')
    print(meta)
    trainer.train()
if __name__ == '__main__':
    # CLI for standalone abstractor (ML) training
    parser = argparse.ArgumentParser(
        description='training of the abstractor (ML)'
    )
    parser.add_argument('--path', required=True, help='root of the model')
    parser.add_argument('--vsize', type=int, action='store', default=30000,
                        help='vocabulary size')
    parser.add_argument('--emb_dim', type=int, action='store', default=300,
                        help='the dimension of word embedding')
    parser.add_argument('--w2v', action='store',
                        help='use pretrained word2vec embedding')
    parser.add_argument('--n_hidden', type=int, action='store', default=256,
                        help='the number of hidden units of LSTM')
    parser.add_argument('--n_layer', type=int, action='store', default=1,
                        help='the number of layers of LSTM')
    parser.add_argument('--no-bi', action='store_true',
                        help='disable bidirectional LSTM encoder')
    parser.add_argument('--data_dir', required=True,
                        help='path data which contains train, val, test folders and vocab_cnt.pkl')
    # length limit
    parser.add_argument('--max_art', type=int, action='store', default=100,
                        help='maximun words in a single article sentence')
    parser.add_argument('--max_abs', type=int, action='store', default=30,
                        help='maximun words in a single abstract sentence')
    # training options
    parser.add_argument('--lr', type=float, action='store', default=1e-3,
                        help='learning rate')
    parser.add_argument('--decay', type=float, action='store', default=0.5,
                        help='learning rate decay ratio')
    parser.add_argument('--lr_p', type=int, action='store', default=0,
                        help='patience for learning rate decay')
    parser.add_argument('--clip', type=float, action='store', default=2.0,
                        help='gradient clipping')
    parser.add_argument('--batch', type=int, action='store', default=32,
                        help='the training batch size')
    parser.add_argument(
        '--ckpt_freq', type=int, action='store', default=3000,
        help='number of update steps for checkpoint and validation'
    )
    parser.add_argument('--patience', type=int, action='store', default=5,
                        help='patience for early stopping')
    parser.add_argument('--debug', action='store_true',
                        help='run in debugging mode')
    parser.add_argument('--no-cuda', action='store_true',
                        help='disable GPU training')
    args = parser.parse_args()
    # derive the positive flags from the negative CLI switches
    args.bi = not args.no_bi
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    main(args)
| 37.903084 | 99 | 0.63331 |
90a73edfccd41d24f41722ce07c98b27ed940ea4 | 614 | py | Python | pysimpleframe/interface/input/linux/test.py | OriDevTeam/PySimpleFrame | 105654736a0ecc2ddb00921f1bc139faeaba2c84 | [
"BSD-3-Clause"
] | null | null | null | pysimpleframe/interface/input/linux/test.py | OriDevTeam/PySimpleFrame | 105654736a0ecc2ddb00921f1bc139faeaba2c84 | [
"BSD-3-Clause"
] | null | null | null | pysimpleframe/interface/input/linux/test.py | OriDevTeam/PySimpleFrame | 105654736a0ecc2ddb00921f1bc139faeaba2c84 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Name: TOFILL\n
Description: TOFILL
"""
"""PySimpleFrame
Author: Miguel Silva
License: Check LICENSE file
"""
import sys
import tty, termios, fcntl
import os
def smt():
    """Read one byte from stdin in raw, non-blocking mode; restore tty state."""
    fd = sys.stdin.fileno()
    # remember the terminal mode and fd flags so they can be restored
    old_settings = termios.tcgetattr(fd)
    old_flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    try:
        tty.setraw(fd)  # raw mode: bytes delivered immediately, no echo
        fcntl.fcntl(fd, fcntl.F_SETFL, old_flags | os.O_NONBLOCK)
        # non-blocking raw read; yields a falsy value when no byte is waiting
        return sys.stdin.buffer.raw.read(1)
    finally:
        # always restore flags and terminal settings, even on exceptions
        fcntl.fcntl(fd, fcntl.F_SETFL, old_flags)
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
# NOTE(review): busy-polls the terminal (100% CPU); prints each byte received
while True:
    k = smt()
    if k: print(k)
1d0f18d54bb0fb6a07989d6cf00ee97741bd9f4f | 128 | py | Python | wrappers/python/manual/virgil_crypto/phe/_c_bridge/__init__.py | odidev/virgil-crypto-c | 3d5d5cb19fdcf81eab08cdc63647f040117ecbd8 | [
"BSD-3-Clause"
] | 26 | 2018-12-17T13:45:25.000Z | 2022-01-16T20:00:04.000Z | wrappers/python/manual/virgil_crypto/phe/_c_bridge/__init__.py | odidev/virgil-crypto-c | 3d5d5cb19fdcf81eab08cdc63647f040117ecbd8 | [
"BSD-3-Clause"
] | 4 | 2019-01-03T12:08:52.000Z | 2021-12-02T05:21:13.000Z | wrappers/python/manual/virgil_crypto/phe/_c_bridge/__init__.py | odidev/virgil-crypto-c | 3d5d5cb19fdcf81eab08cdc63647f040117ecbd8 | [
"BSD-3-Clause"
] | 8 | 2019-01-24T08:22:06.000Z | 2022-02-07T11:37:00.000Z | from ._vsce_phe_server import *
from ._vsce_phe_client import *
from ._vsce_phe_common import *
from ._vsce_phe_cipher import *
| 25.6 | 31 | 0.8125 |
aaead560932eeb19eb50b8fda73e2ec57be748f6 | 1,841 | py | Python | BST_CRUD/BST_Delete.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | BST_CRUD/BST_Delete.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | BST_CRUD/BST_Delete.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | """基本操作之删除(Delete)
思路(最为复杂)
考虑待删除的节点为叶子节点,可以直接删除并修改父亲节点(Parent Node)的指针,需要区分待删节点是否为根节点
考虑待删除的节点为单支节点(只有一棵子树——左子树 or 右子树),与删除链表节点操作类似,同样的需要区分待删节点是否为根节点
考虑待删节点有两棵子树,可以将待删节点与左子树中的最大节点进行交换,由于左子树中的最大节点一定为叶子节点,所以这时再删除待删的节点可以参考第一条
详细的解释可以看 http://www.algolist.net/Data_structures/Binary_search_tree/Removal"""
class TreeNode:
    """A binary-tree node holding a value and left/right child links."""

    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None


class Solution:
    # NOTE: deliberately no `self` -- invoked as Solution.removeNode(root, value).
    def removeNode(root, value):
        """Delete the node carrying `value` (if present) and return the new root.

        A dummy parent above the root removes the delete-the-root special case.
        """
        dummy = TreeNode(0)
        dummy.left = root
        target_parent = findNode(dummy, root, value)
        # figure out which child of the returned parent actually holds the value
        if target_parent.left is not None and target_parent.left.val == value:
            target = target_parent.left
        elif target_parent.right is not None and target_parent.right.val == value:
            target = target_parent.right
        else:
            return dummy.left  # value not present: tree unchanged
        deleteNode(target_parent, target)
        return dummy.left


def findNode(parent, node, value):
    """Return the parent of the node holding `value` (or of the search dead end).

    Iterative (the original recursed; this avoids RecursionError on deep trees).
    """
    while node is not None and node.val != value:
        parent, node = node, (node.left if value < node.val else node.right)
    return parent


def deleteNode(parent, node):
    """Unlink `node` (known to be a direct child of `parent`) from the tree."""
    if node.right is None:
        # no right subtree: splice the left child into node's place
        if parent.left is node:
            parent.left = node.left
        else:
            parent.right = node.left
    else:
        # in-order successor: leftmost node of the right subtree (a node with
        # no left child), so it can safely replace the deleted node
        succ_parent, succ = node, node.right
        while succ.left is not None:
            succ_parent, succ = succ, succ.left
        # detach the successor from its parent
        if succ_parent.left is succ:
            succ_parent.left = succ.right
        else:
            succ_parent.right = succ.right
        # put the successor where node was
        if parent.left is node:
            parent.left = succ
        else:
            parent.right = succ
        succ.left = node.left
        succ.right = node.right
83b7a24dc72af1cbe81b7e3405b84b47288702fe | 8,713 | py | Python | supervisely/train/src/train_config.py | supervisely-ecosystem/mmsegmentation | 494333c74cbe9e919bc0b6dbe57d0bbbb8cee79b | [
"Apache-2.0"
] | null | null | null | supervisely/train/src/train_config.py | supervisely-ecosystem/mmsegmentation | 494333c74cbe9e919bc0b6dbe57d0bbbb8cee79b | [
"Apache-2.0"
] | 13 | 2022-02-15T20:05:18.000Z | 2022-02-15T20:05:21.000Z | supervisely/train/src/train_config.py | supervisely-ecosystem/mmsegmentation | 494333c74cbe9e919bc0b6dbe57d0bbbb8cee79b | [
"Apache-2.0"
] | null | null | null | import os
import re
import supervisely_lib as sly
from mmcv.utils.config import Config
import sly_globals as g
import architectures
import augs
# File names of the generated mmsegmentation config fragments
model_config_name = "model_config.py"
dataset_config_name = "dataset_config.py"
schedule_config_name = "schedule_config.py"
runtime_config_name = "runtime_config.py"
main_config_name = "train_config.py"
# All fragments are written under <artifacts>/configs
configs_dir = os.path.join(g.artifacts_dir, "configs")
model_config_path = os.path.join(configs_dir, model_config_name)
dataset_config_path = os.path.join(configs_dir, dataset_config_name)
schedule_config_path = os.path.join(configs_dir, schedule_config_name)
runtime_config_path = os.path.join(configs_dir, runtime_config_name)
main_config_path = os.path.join(configs_dir, main_config_name)
main_config_template = f"""
_base_ = [
'./{model_config_name}', './{dataset_config_name}',
'./{schedule_config_name}', './{runtime_config_name}'
]
"""
sly.fs.mkdir(configs_dir)
def _replace_function(var_name, var_value, template, match):
    """re.sub replacement callback: emit `template` filled with the variable
    name and its new value.

    The `match` argument is accepted because re.sub passes it, but it is
    intentionally unused (the whole matched assignment is overwritten).
    """
    # removed dead locals m0/m1 that only read match groups without using them
    return template.format(var_name, var_value)
def generate_model_config(state):
    """Write the model section of the mmseg config for the selected architecture.

    :param state: UI state dict (reads 'selectedArch')
    :return: (path to the written config, its text)
    """
    # take the 'model' section of the upstream config when present
    model_cfg = architectures.all_cfgs.model if 'model' in architectures.all_cfgs.keys() else architectures.all_cfgs
    if state['selectedArch'] in ['segformer', 'setr', 'swin', 'vit']:
        # weights for these architectures are loaded separately
        model_cfg['pretrained'] = None
    model_config = Config(dict(model=dict(model_cfg)))
    py_config = model_config.pretty_text
    # single-process training: SyncBN is unavailable, fall back to plain BN
    py_config = py_config.replace('SyncBN', 'BN')
    # +1 for the implicit background class
    num_classes = len(g.project_meta.obj_classes) + 1
    # bug fix: the pattern was r"num_classes*=(\d+)," where the bare '*' made the
    # final 's' optional instead of allowing whitespace around '='
    py_config = re.sub(r"num_classes\s*=\s*(\d+),",
                       lambda m: _replace_function("num_classes", num_classes, "{}={},", m),
                       py_config, 0, re.MULTILINE)
    with open(model_config_path, 'w') as f:
        f.write(py_config)
    return model_config_path, py_config
def generate_dataset_config(state):
    """Patch the dataset config template's placeholders with UI-chosen values
    and write it to dataset_config_path; returns (path, text)."""
    config_path = os.path.join(g.root_source_dir, "supervisely/train/configs/dataset.py")
    if augs.augs_config_path is None:
        # template without an augmentation pipeline
        config_path = os.path.join(g.root_source_dir, "supervisely/train/configs/dataset_no_augs.py")
    with open(config_path) as f:
        py_config = f.read()
    if augs.augs_config_path is not None:
        py_config = re.sub(r"augs_config_path\s*=\s*(None)",
                           lambda m: _replace_function("augs_config_path", augs.augs_config_path, "{} = '{}'", m),
                           py_config, 0, re.MULTILINE)
    py_config = re.sub(r"batch_size_per_gpu\s*=\s*(\d+)",
                       lambda m: _replace_function("batch_size_per_gpu", state["batchSizePerGPU"], "{} = {}", m),
                       py_config, 0, re.MULTILINE)
    py_config = re.sub(r"num_workers_per_gpu\s*=\s*(\d+)",
                       lambda m: _replace_function("num_workers_per_gpu", state["workersPerGPU"], "{} = {}", m),
                       py_config, 0, re.MULTILINE)
    py_config = re.sub(r"validation_interval\s*=\s*(\d+)",
                       lambda m: _replace_function("validation_interval", state["valInterval"], "{} = {}", m),
                       py_config, 0, re.MULTILINE)
    # https://mmcv.readthedocs.io/en/latest/_modules/mmcv/runner/hooks/evaluation.html#EvalHook
    # 'auto' lets the EvalHook pick the key metric; None disables best-ckpt saving
    save_best = None if state["saveBest"] is False else "'auto'"
    py_config = re.sub(r"save_best\s*=\s*([a-zA-Z]+)\s",
                       lambda m: _replace_function("save_best", save_best, "{} = {}\n", m),
                       py_config, 0, re.MULTILINE)
    py_config = re.sub(r"project_dir\s*=\s*(None)",
                       lambda m: _replace_function("project_dir", g.dataset_dir, "{} = '{}'", m),
                       py_config, 0, re.MULTILINE)
    # crop/scale come from the selected architecture's expected input geometry
    py_config = re.sub(r"crop_size\s*=\s*(None)",
                       lambda m: _replace_function("crop_size", architectures.img_crop, "{} = {}", m),
                       py_config, 0, re.MULTILINE)
    py_config = re.sub(r"img_scale\s*=\s*(None)",
                       lambda m: _replace_function("img_scale", architectures.img_scale, "{} = {}", m),
                       py_config, 0, re.MULTILINE)
    with open(dataset_config_path, 'w') as f:
        f.write(py_config)
    return dataset_config_path, py_config
def generate_schedule_config(state):
    """Build the optimizer / LR-schedule config text and write it to disk.

    Args:
        state: dict of UI state; reads 'optimizer', 'lr', 'momentum',
            'weightDecay', 'nesterov', 'gradClipEnabled', 'maxNorm',
            'lrPolicyEnabled', 'lrPolicyPyConfig' and 'epochs'.

    Returns:
        (schedule_config_path, rendered_config_text).

    Raises:
        ValueError: if the LR policy is enabled but zero or more than one
            policy line is uncommented in the user-supplied snippet.
    """
    # Only SGD accepts a momentum argument.
    momentum = f"momentum={state['momentum']}, " if state['optimizer'] == 'SGD' else ''
    # BUGFIX: `state.optimizer` raised AttributeError (`state` is a dict);
    # use subscript access like everywhere else in this function.
    optimizer = f"optimizer = dict(type='{state['optimizer']}', " \
                f"lr={state['lr']}, " \
                f"{momentum}" \
                f"weight_decay={state['weightDecay']}" \
                f"{', nesterov=True' if (state['nesterov'] is True and state['optimizer'] == 'SGD') else ''})"

    # Plain string (was an f-string with no placeholders).
    grad_clip = "optimizer_config = dict(grad_clip=None)"
    if state["gradClipEnabled"] is True:
        grad_clip = f"optimizer_config = dict(grad_clip=dict(max_norm={state['maxNorm']}))"

    # The user may paste a snippet with several commented-out LR policy
    # examples; exactly one line must be left uncommented.
    lr_updater = ""
    if state["lrPolicyEnabled"] is True:
        py_text = state["lrPolicyPyConfig"]
        py_lines = py_text.splitlines()
        num_uncommented = 0
        for line in py_lines:
            res_line = line.strip()
            if res_line != "" and res_line[0] != "#":
                lr_updater += res_line
                num_uncommented += 1
        if num_uncommented == 0:
            raise ValueError(
                "LR policy is enabled but not defined, please uncomment and modify one of the provided examples")
        if num_uncommented > 1:
            raise ValueError("several LR policies were uncommented, please keep only one")

    runner = f"runner = dict(type='EpochBasedRunner', max_epochs={state['epochs']})"

    # Default to a fixed LR when no policy was provided/enabled.
    if lr_updater == "":
        lr_updater = "lr_config = dict(policy='fixed')"

    py_config = optimizer + os.linesep + \
                grad_clip + os.linesep + \
                lr_updater + os.linesep + \
                runner + os.linesep
    with open(schedule_config_path, 'w') as f:
        f.write(py_config)
    return schedule_config_path, py_config
def generate_runtime_config(state):
    """Render the runtime .py config (checkpointing, logging, initial weights).

    Returns (runtime_config_path, rendered_config_text).
    """
    config_path = os.path.join(g.root_source_dir, "supervisely/train/configs/runtime.py")
    with open(config_path) as f:
        py_config = f.read()

    # https://mmcv.readthedocs.io/en/latest/_modules/mmcv/runner/hooks/checkpoint.html
    # NOTE(review): appended to below but never read afterwards — looks
    # vestigial; confirm before removing.
    add_ckpt_to_config = []

    def _get_ckpt_arg(arg_name, state_flag, state_field, suffix=","):
        # Render "arg_name=value<suffix>" when the controlling flag in
        # `state` is enabled; `state_flag=None` means "always include".
        flag = True if state_flag is None else state[state_flag]
        if flag is True:
            add_ckpt_to_config.append(True)
            return f" {arg_name}={state[state_field]}{suffix}"
        return ""

    checkpoint = "checkpoint_config = dict({interval}{max_keep_ckpts}{save_last})".format(
        interval=_get_ckpt_arg("interval", None, "checkpointInterval"),
        max_keep_ckpts=_get_ckpt_arg("max_keep_ckpts", "maxKeepCkptsEnabled", "maxKeepCkpts"),
        save_last=_get_ckpt_arg("save_last", "saveLast", "saveLast", suffix=""),
    )
    # Replace the template's default checkpoint config with the rendered one.
    py_config = re.sub(r"(checkpoint_config = dict\(interval=1\))",
                       lambda m: checkpoint,
                       py_config, 0, re.MULTILINE)

    # logger hook
    # https://mmcv.readthedocs.io/en/latest/_modules/mmcv/runner/hooks/logger/text.html
    py_config = re.sub(r"log_interval\s*=\s*(\d+)",
                       lambda m: _replace_function("log_interval", state["metricsPeriod"], "{} = {}", m),
                       py_config, 0, re.MULTILINE)
    # Point `load_from` at the locally downloaded pretrained weights.
    py_config = re.sub(r"load_from\s*=\s*(None)",
                       lambda m: _replace_function("load_from", architectures.local_weights_path, "{} = '{}'", m),
                       py_config, 0, re.MULTILINE)

    with open(runtime_config_path, 'w') as f:
        f.write(py_config)
    return runtime_config_path, py_config
def generate_main_config(state):
    """Write the static top-level config that ties the partial configs together.

    `state` is unused but kept for signature parity with the other generators.
    Returns (main_config_path, config_text).
    """
    with open(main_config_path, 'w') as f:
        f.write(main_config_template)
    return main_config_path, str(main_config_template)
def save_from_state(state):
    """Persist the user-edited config texts from `state` to their files.

    Writes each partial config (model, dataset, schedule, runtime, main)
    to its well-known path, in that order.
    """
    config_targets = (
        (model_config_path, "modelPyConfig"),
        (dataset_config_path, "datasetPyConfig"),
        (schedule_config_path, "schedulePyConfig"),
        (runtime_config_path, "runtimePyConfig"),
        (main_config_path, "mainPyConfig"),
    )
    for target_path, state_key in config_targets:
        with open(target_path, 'w') as f:
            f.write(state[state_key])
| 41.293839 | 116 | 0.63411 |
c120f8563cd73a0d8e91d9974a8a695a18ee508a | 7,384 | py | Python | DonkiDirector/DonkiOrchestraLib.py | ess-dmsc/do-ess-data-simulator | 37ef0d87ad0152b092e3a636ef8d080db0711aaa | [
"BSD-2-Clause"
] | null | null | null | DonkiDirector/DonkiOrchestraLib.py | ess-dmsc/do-ess-data-simulator | 37ef0d87ad0152b092e3a636ef8d080db0711aaa | [
"BSD-2-Clause"
] | null | null | null | DonkiDirector/DonkiOrchestraLib.py | ess-dmsc/do-ess-data-simulator | 37ef0d87ad0152b092e3a636ef8d080db0711aaa | [
"BSD-2-Clause"
] | null | null | null | import zmq
import traceback
import socket
import time
class CommunicationClass:
    """ZMQ pub/sub helper used by the DonkiOrchestra director (Python 2).

    One PUB socket, bound to a random port, broadcasts commands/triggers;
    one SUB socket per named player is registered on a shared Poller so
    replies can be awaited with a timeout.
    """

    def __init__(self, name='director'):
        self.context = zmq.Context()
        self.poller = zmq.Poller()
        self.pub_sock = None
        self.sub_socks = {}  # peer name -> SUB socket
        self.pub_tag = name
        #
        self.create_pub_socket()

    def create_pub_socket(self):
        """Bind the PUB socket to a random TCP port on all interfaces."""
        try:
            self.pub_sock = self.context.socket(zmq.PUB)
            self.pub_port = self.pub_sock.bind_to_random_port("tcp://0.0.0.0")
            print "PUB " + "tcp://" + str(self.pub_port)
        except:
            traceback.print_exc()
            self.pub_sock = None

    def create_sub_socket(self, name, url):
        """(Re)connect a SUB socket for peer `name` to `url` and register it.

        Returns True on success, False on failure (socket is dropped).
        """
        try:
            if name in self.sub_socks:
                # Replace any previous socket for this peer.
                self.poller.unregister(self.sub_socks[name])
                self.sub_socks[name].close()
            self.sub_socks[name] = self.context.socket(zmq.SUB)
            self.sub_socks[name].setsockopt(zmq.SUBSCRIBE, '')
            self.sub_socks[name].connect("tcp://"+str(url))
            self.poller.register(self.sub_socks[name], zmq.POLLIN)
            #print "SUB TO " + "tcp://" + str(url),self.sub_socks[name]
        except:
            traceback.print_exc()
            print "tcp://"+str(url)
            del self.sub_socks[name]
            return False
        return True

    def my_pub_socket_info(self):
        """Return "host:port" where this instance publishes."""
        return socket.gethostname()+":"+str(self.pub_port)

    def publish_ack(self, ack_tag, trg_start, trg_stop):
        """Broadcast an acknowledge for the given trigger range."""
        # At the moment just use send_pyobj
        self.pub_sock.send_pyobj([ack_tag, trg_start,trg_stop])

    def publish_data(self, tag, trg_start, trg_stop, data_value):
        """Broadcast a data payload tagged with its trigger range."""
        # At the moment just use send_pyobj
        self.pub_sock.send_pyobj(['data',tag.lower(), trg_start,trg_stop,data_value])

    def publish_info(self, priority = -1, data_names=[]):
        """Broadcast this node's priority and the data names it produces.

        NOTE(review): mutable default `data_names=[]` is shared across
        calls — safe only while it is never mutated.
        """
        # At the moment just use send_pyobj
        self.pub_sock.send_pyobj(['info',{'prio':priority,'data':data_names}])

    def ask_for_info(self, srv_name, timeout_sec=1):
        """Request an 'info' reply from `srv_name`, polling with retries.

        Returns the reply payload, or [] if nothing arrived within the
        overall timeout (split evenly across 5 poll attempts).
        """
        # At the moment just use send_pyobj
        self.pub_sock.send_pyobj(["info", srv_name])
        msg = []
        sub_socket = self.sub_socks[srv_name]
        max_retries = 5
        retry = 0
        while retry < max_retries and msg == []:
            socks = dict(self.poller.poll((1000./max_retries)*timeout_sec))
            #if len(socks) == 0:
            #    return msg
            if sub_socket in socks and socks[sub_socket] == zmq.POLLIN:
                try:
                    reply = sub_socket.recv_pyobj()
                    if reply[0] == 'info':
                        msg = reply[1]
                except:
                    traceback.print_exc()
                    msg = []
            retry += 1
        return msg

    def ask_for_log(self, srv_name, timeout_sec=1):
        """Request the player log from `srv_name`; [] on timeout/failure."""
        # At the moment just use send_pyobj
        self.pub_sock.send_pyobj(["playerlog", srv_name])
        msg = []
        sub_socket = self.sub_socks[srv_name]
        max_retries = 5
        retry = 0
        while retry < max_retries and msg == []:
            socks = dict(self.poller.poll((1000./max_retries)*timeout_sec))
            #if len(socks) == 0:
            #    return msg
            if sub_socket in socks and socks[sub_socket] == zmq.POLLIN:
                try:
                    reply = sub_socket.recv_pyobj()
                    if reply[0] == 'data' and reply[1] == 'playerlog':
                        msg = reply[4]
                except:
                    traceback.print_exc()
                    msg = []
            retry += 1
        return msg

    def wait_message(self, srv_names, timeout_sec=1):
        """Poll once and collect pending messages from the named peers.

        Returns {name: message} (possibly empty), or None on error.
        """
        try:
            msg = {}
            socks = dict(self.poller.poll(1000*timeout_sec))
            if len(socks) == 0:
                return msg
            for sn in srv_names:
                s = self.sub_socks[sn]
                if s in socks and socks[s] == zmq.POLLIN:
                    recv_msg = s.recv_pyobj()
                    msg[sn] = recv_msg
        except:
            traceback.print_exc()
            msg = None
        return msg

    def publish_command(self, command, srv_name, argin=None, timeout_sec=1):
        """Broadcast a command and wait for `srv_name` to acknowledge it.

        An ack is a reply of the form [command, -1, -1]. Returns True on
        ack, False on timeout or receive error.
        """
        # At the moment just use send_pyobj
        self.pub_sock.send_pyobj([command, srv_name, argin])
        print "Sent command:", command, srv_name, argin
        msg = []
        sub_socket = self.sub_socks[srv_name]
        max_retries = 5
        retry = 0
        while retry < max_retries and msg == []:
            socks = dict(self.poller.poll((1000./max_retries)*timeout_sec))
            if sub_socket in socks and socks[sub_socket] == zmq.POLLIN:
                try:
                    reply = sub_socket.recv_pyobj()
                    if reply[0] == command and reply[1] == reply[2] == -1:
                        return True
                except:
                    traceback.print_exc()
                    return False
            retry += 1
        return False

    def publish_trigger(self, trigger_value, priority):
        """Broadcast a trigger value for players at the given priority."""
        # At the moment just use send_pyobj
        self.pub_sock.send_pyobj(["trigger", trigger_value, priority])
| 38.061856 | 85 | 0.401002 |
ecba3f40d7e24426706982e2d2fe940b464ca8f6 | 1,906 | py | Python | tensorflow_federated/python/aggregators/factory.py | iahsanujunda/federated | 109a5653a305dc9d4bcbafc259257add4dc70365 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/aggregators/factory.py | iahsanujunda/federated | 109a5653a305dc9d4bcbafc259257add4dc70365 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/aggregators/factory.py | iahsanujunda/federated | 109a5653a305dc9d4bcbafc259257add4dc70365 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base factory class for creation of `AggregationProcess`."""
import abc
from typing import Union
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.templates import aggregation_process
# Shorthand for the (unplaced) value types a factory can aggregate.
_ValueType = Union[computation_types.TensorType, computation_types.StructType]


class AggregationProcessFactory(abc.ABC):
  """Factory for `tff.templates.AggregationProcess`."""

  @abc.abstractmethod
  def create(self,
             value_type: _ValueType) -> aggregation_process.AggregationProcess:
    """Creates a `tff.aggregators.AggregationProcess` aggregating `value_type`.

    The provided `value_type` is a non-federated `tff.Type` object, that is,
    `value_type.is_federated()` should return `False`. Provided `value_type`
    must be a `tff.TensorType` or a `tff.StructType`.

    The returned `tff.aggregators.AggregationProcess` will be created for
    aggregation of values matching `value_type`. That is, its `next` method will
    expect type `<S@SERVER, value_type@CLIENTS, *>`, where `S` is the unplaced
    return type of its `initialize` method, and * stands for optional additional
    input arguments.

    Args:
      value_type: A `tff.Type` without placement.

    Returns:
      A `tff.templates.AggregationProcess`.
    """
a578be22e1860695f55b8437bdc117528083ab27 | 7,508 | py | Python | tfx/dsl/components/base/base_driver_test.py | Saiprasad16/tfx | c1e0704b2a83232469f55598efcdb7808b6c909f | [
"Apache-2.0"
] | 1 | 2021-05-10T10:41:06.000Z | 2021-05-10T10:41:06.000Z | tfx/dsl/components/base/base_driver_test.py | Saiprasad16/tfx | c1e0704b2a83232469f55598efcdb7808b6c909f | [
"Apache-2.0"
] | null | null | null | tfx/dsl/components/base/base_driver_test.py | Saiprasad16/tfx | c1e0704b2a83232469f55598efcdb7808b6c909f | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.dsl.components.base.base_driver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from unittest import mock
import tensorflow as tf
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.io import fileio
from tfx.orchestration import data_types
from tfx.types import channel_utils
from tfx.types import standard_artifacts
from ml_metadata.proto import metadata_store_pb2
# Mock value for string artifact.
_STRING_VALUE = u'This is a string'
# Mock byte value for string artifact.
_BYTE_VALUE = b'This is a string'
def fake_read(self):
    """Stubbed ValueArtifact.read: decode the fixed byte payload once, then cache."""
    if self._has_value:
        return self._value
    self._has_value = True
    self._value = self.decode(_BYTE_VALUE)
    return self._value
# Minimal artifact type used as test input.
class _InputArtifact(types.Artifact):
    TYPE_NAME = 'InputArtifact'
# Minimal artifact type used as test output.
class _OutputArtifact(types.Artifact):
    TYPE_NAME = 'OutputArtifact'
class BaseDriverTest(tf.test.TestCase):
    """Unit tests for `base_driver.BaseDriver` pre-execution behaviour."""

    def setUp(self):
        super(BaseDriverTest, self).setUp()
        # All metadata-store interactions go through this mock handler.
        self._mock_metadata = tf.compat.v1.test.mock.Mock()
        self._input_dict = {
            'input_data':
                types.Channel(
                    type=_InputArtifact, producer_component_id='c', output_key='k'),
            'input_string':
                types.Channel(
                    type=standard_artifacts.String,
                    producer_component_id='c2',
                    output_key='k2').set_artifacts(
                        [standard_artifacts.String(),
                         standard_artifacts.String()]),
        }
        input_dir = os.path.join(
            os.environ.get('TEST_TMP_DIR', self.get_temp_dir()),
            self._testMethodName, 'input_dir')
        # valid input artifacts must have a uri pointing to an existing directory.
        for key, input_channel in self._input_dict.items():
            for index, artifact in enumerate(input_channel.get()):
                artifact.id = index + 1
                uri = os.path.join(input_dir, key, str(artifact.id))
                artifact.uri = uri
                fileio.makedirs(uri)
        self._output_dict = {
            'output_data': types.Channel(type=_OutputArtifact),
            'output_multi_data': types.Channel(type=_OutputArtifact)
        }
        # One output artifact is expected per artifact in the matching input
        # channel ('input_string' has two).
        self._output_dict[
            'output_multi_data'].matching_channel_name = 'input_string'
        self._input_artifacts = channel_utils.unwrap_channel_dict(self._input_dict)
        self._output_artifacts = channel_utils.unwrap_channel_dict(
            self._output_dict)
        self._exec_properties = {
            'key': 'value',
        }
        self._execution_id = 100
        self._execution = metadata_store_pb2.Execution()
        self._execution.id = self._execution_id
        self._context_id = 123
        self._driver_args = data_types.DriverArgs(enable_cache=True)
        self._pipeline_info = data_types.PipelineInfo(
            pipeline_name='my_pipeline_name',
            pipeline_root=os.environ.get('TEST_TMP_DIR', self.get_temp_dir()),
            run_id='my_run_id')
        self._component_info = data_types.ComponentInfo(
            component_type='a.b.c',
            component_id='my_component_id',
            pipeline_info=self._pipeline_info)

    @mock.patch(
        'tfx.dsl.components.base.base_driver.BaseDriver.verify_input_artifacts')
    @mock.patch.object(types.ValueArtifact, 'read', fake_read)
    def testPreExecutionNewExecution(self, mock_verify_input_artifacts_fn):
        """No cached outputs: a fresh execution with generated output URIs."""
        self._mock_metadata.search_artifacts.return_value = list(
            self._input_dict['input_string'].get())
        self._mock_metadata.register_execution.side_effect = [self._execution]
        # No cache hit, so the driver must schedule a new execution.
        self._mock_metadata.get_cached_outputs.side_effect = [None]
        self._mock_metadata.register_run_context_if_not_exists.side_effect = [
            metadata_store_pb2.Context()
        ]

        driver = base_driver.BaseDriver(metadata_handler=self._mock_metadata)
        execution_decision = driver.pre_execution(
            input_dict=self._input_dict,
            output_dict=self._output_dict,
            exec_properties=self._exec_properties,
            driver_args=self._driver_args,
            pipeline_info=self._pipeline_info,
            component_info=self._component_info)

        self.assertFalse(execution_decision.use_cached_results)
        self.assertEqual(execution_decision.execution_id, self._execution_id)
        self.assertCountEqual(execution_decision.exec_properties,
                              self._exec_properties)
        # Output URIs follow <pipeline_root>/<component_id>/<key>/<execution_id>.
        self.assertEqual(
            execution_decision.output_dict['output_data'][0].uri,
            os.path.join(self._pipeline_info.pipeline_root,
                         self._component_info.component_id, 'output_data',
                         str(self._execution_id)))
        # 'output_multi_data' mirrors the two artifacts of 'input_string',
        # each in its own indexed sub-directory.
        self.assertLen(execution_decision.output_dict['output_multi_data'], 2)
        for i in range(2):
            self.assertEqual(
                execution_decision.output_dict['output_multi_data'][i].uri,
                os.path.join(self._pipeline_info.pipeline_root,
                             self._component_info.component_id, 'output_multi_data',
                             str(self._execution_id), str(i)))
        # Value artifacts are resolved through the patched fake_read above.
        self.assertEqual(execution_decision.input_dict['input_string'][0].value,
                         _STRING_VALUE)

    @mock.patch(
        'tfx.dsl.components.base.base_driver.BaseDriver.verify_input_artifacts')
    @mock.patch.object(types.ValueArtifact, 'read', fake_read)
    def testPreExecutionCached(self, mock_verify_input_artifacts_fn):
        """Cached outputs present: the driver reuses them instead of re-running."""
        self._mock_metadata.search_artifacts.return_value = list(
            self._input_dict['input_string'].get())
        self._mock_metadata.register_run_context_if_not_exists.side_effect = [
            metadata_store_pb2.Context()
        ]
        self._mock_metadata.register_execution.side_effect = [self._execution]
        self._mock_metadata.get_cached_outputs.side_effect = [
            self._output_artifacts
        ]

        driver = base_driver.BaseDriver(metadata_handler=self._mock_metadata)
        execution_decision = driver.pre_execution(
            input_dict=self._input_dict,
            output_dict=self._output_dict,
            exec_properties=self._exec_properties,
            driver_args=self._driver_args,
            pipeline_info=self._pipeline_info,
            component_info=self._component_info)

        self.assertTrue(execution_decision.use_cached_results)
        self.assertEqual(execution_decision.execution_id, self._execution_id)
        self.assertCountEqual(execution_decision.exec_properties,
                              self._exec_properties)
        self.assertCountEqual(execution_decision.output_dict,
                              self._output_artifacts)

    def testVerifyInputArtifactsOk(self):
        """Artifacts whose URIs exist on disk pass verification."""
        driver = base_driver.BaseDriver(metadata_handler=self._mock_metadata)
        driver.verify_input_artifacts(self._input_artifacts)

    def testVerifyInputArtifactsNotExists(self):
        """An artifact with no backing URI fails verification."""
        driver = base_driver.BaseDriver(metadata_handler=self._mock_metadata)
        with self.assertRaises(RuntimeError):
            driver.verify_input_artifacts({'artifact': [_InputArtifact()]})
# Run the test suite when executed directly.
if __name__ == '__main__':
    tf.test.main()
| 39.93617 | 80 | 0.722829 |
f6bffc41805c6e6524a5569d6db64273a9eb42f6 | 4,397 | py | Python | bot3.py | ankitp544/ZulipBot | 83806be3182a83f8dc2ccbe5a9b9caea6437afa3 | [
"Apache-2.0"
] | null | null | null | bot3.py | ankitp544/ZulipBot | 83806be3182a83f8dc2ccbe5a9b9caea6437afa3 | [
"Apache-2.0"
] | null | null | null | bot3.py | ankitp544/ZulipBot | 83806be3182a83f8dc2ccbe5a9b9caea6437afa3 | [
"Apache-2.0"
] | null | null | null | import pprint
import zulip
import sys
import re
import json
from random import randrange
import requests
from textblob import TextBlob
from random import shuffle, choice
import sem
BOT_MAIL = "bruh-bot@chiru.zulipchat.com"
class ZulipBot(object):
    """Zulip stream bot replying with greetings, jokes, quotes and positive news.

    Listens on the "Bruh" stream and dispatches each message to a reply
    category chosen by keyword matching, with a sentiment-based override
    (via the `sem` module) for strongly negative messages.
    """

    def __init__(self):
        self.client = zulip.Client(site="https://chiru.zulipchat.com/api/")
        self.subscribe()
        print("Initialised!")

    def subscribe(self):
        """Subscribe the bot's client to the stream it serves."""
        self.client.add_subscriptions([{"name": "Bruh"}])

    def get_query_category(self, content):
        """Return the category index best matching the tokenized message.

        Args:
            content: list of whitespace-split tokens from the message body.

        Returns:
            0 greeting, 1 joke, 2 quote, 3 news; -1 when `content` is empty.
        """
        # BUGFIX: an empty token list used to raise ZeroDivisionError in the
        # normalisation below.
        if not content:
            return -1
        keywords_for_categories = {
            0: ["hello", "hi", "hey"],  # Greeting
            1: ["joke", "jokes"],  # Joke
            2: ["quote", "quotes", "motivational", "inspirational"],  # Quote
            3: ["news"],  # News
        }
        # Strip non-letters so punctuation/mentions don't break matching.
        regex = re.compile('[^a-zA-Z]')
        score = -1
        chosen_category = -1
        for category, keywords in keywords_for_categories.items():
            this_score = 0
            for word in content:
                if regex.sub('', word).lower() in keywords:
                    this_score += 1
            # Normalise by message length so long messages aren't favoured.
            this_score /= len(content)
            if this_score > score:
                score = this_score
                chosen_category = category
        return chosen_category

    def clean_message(self, message):
        """Strip mentions, URLs and non-alphanumeric characters from `message`."""
        return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", message).split())

    def is_message_extremely_negative(self, message):
        """True when `sem` flags the message as strongly negative."""
        polarity, intensity = sem.call(message)
        return polarity == -1 and intensity < -1

    def is_message_positive(self, message):
        """True when `sem` classifies the message as positive."""
        polarity, _ = sem.call(message)
        return polarity == 1

    def process(self, msg):
        """Handle one incoming Zulip message and post an appropriate reply."""
        content = msg["content"].split()
        sender_email = msg["sender_email"]
        # Only serve the "Bruh" stream, and never answer our own messages.
        if msg["display_recipient"] != "Bruh" or sender_email == BOT_MAIL:
            return
        print("Sucessfully heard.")
        query_category = self.get_query_category(content)
        print(query_category)
        if self.is_message_extremely_negative(msg['content']):
            # Cheer up strongly negative messages with a random quote.
            # BUGFIX: close the quotes file (the handle was leaked before).
            with open("quotes.txt") as quotes_file:
                quotes = quotes_file.read().splitlines()
            self.client.send_message({
                "type": "stream",
                "subject": msg["subject"],
                "to": msg["display_recipient"],
                "content": "OMG! I think you should read this - \"" + choice(quotes) + "\""
            })
        elif query_category == 0:  # Greeting
            replies = ["Heyya!", "Hello, I'm Bruh!", "Namaste!", "Hi!"]
            self.client.send_message({
                "type": "stream",
                "subject": msg["subject"],
                "to": msg["display_recipient"],
                "content": choice(replies)
            })
        elif query_category == 1:  # Joke
            reply = requests.get("https://official-joke-api.appspot.com/jokes/random")
            joke = reply.json()
            self.client.send_message({
                "type": "stream",
                "subject": msg["subject"],
                "to": msg["display_recipient"],
                "content": joke['setup'] + '\n' + joke['punchline']
            })
        elif query_category == 2:  # Quote of the day
            URL = "http://quotes.rest/qod.json"
            parameters = {'category': 'inspire'}
            reply = requests.get(url=URL, params=parameters).json()
            setup_lines = ["", "Here's a quote for you!\n", "A wise man once said:\n"]
            self.client.send_message({
                "type": "stream",
                "subject": msg["subject"],
                "to": msg["display_recipient"],
                "content": choice(setup_lines) + reply['contents']['quotes'][0]['quote']
            })
        elif query_category == 3:  # Positive news
            URL = "https://newsapi.org/v2/top-headlines"
            # NOTE(review): API key is hard-coded; move it to config/env.
            parameters = {'apiKey': '619dc2b423c142a29f4000799231a282',
                          "from": "2019-03-01", "to": "2019-03-16",
                          'language': 'en'}
            reply = requests.get(url=URL, params=parameters).json()
            # Shuffle so repeated queries don't always pick the same headline.
            shuffle(reply['articles'])
            setup_line = "Here's a good news for you -"
            for article in reply['articles']:
                if self.is_message_positive(article['title']):
                    # BUGFIX: previously indexed a single random *character*
                    # of `setup_line` (copy-paste from the list-based quote
                    # branch); prepend the whole lead-in instead.
                    self.client.send_message({
                        "type": "stream",
                        "subject": msg["subject"],
                        "to": msg["display_recipient"],
                        "content": setup_line + " " + article['title']
                    })
                    break
def main():
    """Create the bot and block forever, dispatching each incoming message."""
    bot = ZulipBot()
    bot.client.call_on_each_message(bot.process)
if __name__ == "__main__":
main() | 28.927632 | 106 | 0.656357 |
a461ecf6ced0b525bb77409952c9ae1f151aca6e | 4,851 | py | Python | studies/upgrade_neutrino_reconstruction/modelling/train_model.py | BozianuLeon/graphnet | 54c41a9486ba35fa3700c5ade3fddd69620bc721 | [
"Apache-2.0"
] | null | null | null | studies/upgrade_neutrino_reconstruction/modelling/train_model.py | BozianuLeon/graphnet | 54c41a9486ba35fa3700c5ade3fddd69620bc721 | [
"Apache-2.0"
] | 1 | 2022-03-15T11:01:47.000Z | 2022-03-15T11:01:47.000Z | studies/upgrade_neutrino_reconstruction/modelling/train_model.py | BozianuLeon/graphnet | 54c41a9486ba35fa3700c5ade3fddd69620bc721 | [
"Apache-2.0"
] | null | null | null | import logging
import os
from timer import timer
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.loggers import WandbLogger
import torch
from torch.optim.adam import Adam
from graphnet.components.loss_functions import (
LogCoshLoss,
VonMisesFisher2DLoss,
)
from graphnet.components.utils import fit_scaler
from graphnet.data.constants import FEATURES, TRUTH
from graphnet.data.utils import get_equal_proportion_neutrino_indices
from graphnet.models import Model
from graphnet.models.detector.icecube import IceCubeUpgrade
from graphnet.models.gnn import DynEdge_V2
from graphnet.models.graph_builders import KNNGraphBuilder
from graphnet.models.task.reconstruction import (
EnergyReconstruction,
ZenithReconstructionWithKappa,
)
from graphnet.models.training.callbacks import ProgressBar, PiecewiseLinearLR
from graphnet.models.training.utils import (
get_predictions,
make_train_validation_dataloader,
save_results,
)
# Configurations
timer.set_level(logging.INFO)
logging.basicConfig(level=logging.INFO)
# NOTE(review): "file_system" sharing strategy is typically chosen to avoid
# file-descriptor limits with many DataLoader workers — confirm.
torch.multiprocessing.set_sharing_strategy("file_system")

# Constants: IceCube-Upgrade feature and truth column sets.
features = FEATURES.UPGRADE
truth = TRUTH.UPGRADE

# Initialise Weights & Biases (W&B) run
wandb_logger = WandbLogger(
    project="upgrade-zenith",
    entity="graphnet-team",
    save_dir="./wandb/",
    log_model=True,
)

# Configuration: input database, pulsemaps, training hyper-parameters and
# hardware selection for this run.
config = {
    "db": "/groups/icecube/asogaard/data/sqlite/dev_upgrade_step4_preselection_decemberv2/data/dev_upgrade_step4_preselection_decemberv2.db",
    "pulsemaps": [
        "IceCubePulsesTWSRT",
        "I3RecoPulseSeriesMapRFCleaned_mDOM",
        "I3RecoPulseSeriesMapRFCleaned_DEgg",
    ],
    "batch_size": 256,
    "num_workers": 30,
    "gpus": [1],
    "target": "zenith",
    "n_epochs": 50,
    "patience": 5,  # early-stopping patience, in validation checks
    "gnn/type": "DynEdge_V2",
}
# Main function definition
def main():
    """Train a DynEdge GNN on IceCube-Upgrade data for zenith reconstruction."""
    # `interaction_time` is not a target for this run; drop it if present.
    try:
        del truth[truth.index("interaction_time")]
    except ValueError:
        # not found in list
        pass

    print(f"features: {features}")
    print(f"truth: {truth}")

    # Run management
    archive = "/groups/icecube/asogaard/gnn/results/upgrade_test_1/"
    run_name = "test_upgrade_{}_regression_v2".format(config["target"])

    # Log configuration to W&B
    wandb_logger.experiment.config.update(config)

    # Common variables: balanced neutrino selection and train/val loaders.
    train_selection, _ = get_equal_proportion_neutrino_indices(config["db"])
    (
        training_dataloader,
        validation_dataloader,
    ) = make_train_validation_dataloader(
        config["db"],
        train_selection,
        config["pulsemaps"],
        features,
        truth,
        batch_size=config["batch_size"],
        num_workers=config["num_workers"],
    )

    # Building model: detector graph definition, GNN backbone and the
    # zenith-with-uncertainty (kappa) regression task.
    detector = IceCubeUpgrade(
        graph_builder=KNNGraphBuilder(nb_nearest_neighbours=8),
    )
    gnn = DynEdge_V2(
        nb_inputs=detector.nb_outputs,
    )
    task = ZenithReconstructionWithKappa(
        hidden_size=gnn.nb_outputs,
        target_labels=config["target"],
        loss_function=VonMisesFisher2DLoss(),
    )
    model = Model(
        detector=detector,
        gnn=gnn,
        tasks=[task],
        optimizer_class=Adam,
        optimizer_kwargs={"lr": 1e-03, "eps": 1e-03},
        scheduler_class=PiecewiseLinearLR,
        # Piecewise-linear LR over steps: factor 1e-2 at step 0, 1 at half an
        # epoch, back to 1e-2 at the final step (warm-up then decay —
        # confirm against PiecewiseLinearLR's semantics).
        scheduler_kwargs={
            "milestones": [
                0,
                len(training_dataloader) / 2,
                len(training_dataloader) * config["n_epochs"],
            ],
            "factors": [1e-2, 1, 1e-02],
        },
        scheduler_config={
            "interval": "step",
        },
    )

    # Training model
    callbacks = [
        EarlyStopping(
            monitor="val_loss",
            patience=config["patience"],
        ),
        ProgressBar(),
    ]

    trainer = Trainer(
        default_root_dir=archive,
        gpus=config["gpus"],
        max_epochs=config["n_epochs"],
        callbacks=callbacks,
        log_every_n_steps=1,
        logger=wandb_logger,
    )

    # Allow manual interruption while still saving the model below.
    try:
        trainer.fit(model, training_dataloader, validation_dataloader)
    except KeyboardInterrupt:
        print("[ctrl+c] Exiting gracefully.")
        pass

    # Saving model
    model.save(os.path.join(archive, f"{run_name}.pth"))
    model.save_state_dict(os.path.join(archive, f"{run_name}_state_dict.pth"))

    # Saving predictions to file
    results = get_predictions(
        trainer,
        model,
        validation_dataloader,
        [config["target"] + "_pred", config["target"] + "_kappa"],
        additional_attributes=[
            config["target"],
            "event_no",
            "energy",
            "n_pulses",
        ],
    )
    save_results(config["db"], run_name, results, archive, model)
# Entry point: run training when executed as a script.
if __name__ == "__main__":
    main()
| 26.508197 | 141 | 0.661101 |
848cbe3a4e3f31640965f85e2ed00b82bde9802e | 639 | py | Python | package_monitor/migrations/0003_packageversion_next_version.py | yunojuno-archive/django-package-monitor | 5e387c1274b707050dcb441dbfd5b6c0aa7c57dc | [
"MIT"
] | 4 | 2019-07-22T18:28:26.000Z | 2020-08-03T15:06:33.000Z | package_monitor/migrations/0003_packageversion_next_version.py | yunojuno-archive/django-package-monitor | 5e387c1274b707050dcb441dbfd5b6c0aa7c57dc | [
"MIT"
] | 4 | 2020-09-12T12:15:02.000Z | 2020-09-13T09:38:26.000Z | package_monitor/migrations/0003_packageversion_next_version.py | yunojuno-archive/django-package-monitor | 5e387c1274b707050dcb441dbfd5b6c0aa7c57dc | [
"MIT"
] | 2 | 2019-07-22T18:50:54.000Z | 2020-06-30T03:19:03.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import semantic_version.django_fields
from django.db import migrations
class Migration(migrations.Migration):
    """Add the nullable `next_version` field to `PackageVersion`."""

    dependencies = [
        ("package_monitor", "0002_auto_20151126_1453"),
    ]

    operations = [
        migrations.AddField(
            model_name="packageversion",
            name="next_version",
            # Nullable/blank: a package may have no newer release on PyPI yet.
            field=semantic_version.django_fields.VersionField(
                help_text="Next available version available from PyPI.",
                max_length=200,
                null=True,
                blank=True,
            ),
        ),
    ]
| 24.576923 | 72 | 0.597809 |
3919aefd2e49bc4d43f2525a3237106ebfab6004 | 1,655 | py | Python | icc/mongodb-example.py | jinhopark8345/ICC | be0ee8fb36399b156427749c867d64524dff786e | [
"MIT"
] | null | null | null | icc/mongodb-example.py | jinhopark8345/ICC | be0ee8fb36399b156427749c867d64524dff786e | [
"MIT"
] | 7 | 2020-09-28T07:17:58.000Z | 2020-11-25T08:00:08.000Z | icc/mongodb-example.py | jinhopark8345/icc | be0ee8fb36399b156427749c867d64524dff786e | [
"MIT"
] | null | null | null | # https://stackoverflow.com/questions/61074297/how-to-create-schema-in-mongodb-using-python
from pymongo import MongoClient
from pymongo.errors import CollectionInvalid
from collections import OrderedDict
# Remote connection string kept for reference:
# db = MongoClient("mongodb://localhost:27019/")['mydatabase']
db = MongoClient("localhost")['test']

# Mongoose-style schema description; translated below into a MongoDB
# $jsonSchema validator ('type' -> bsonType, 'minlength' -> minimum,
# 'required' -> the validator's required list).
user_schema = {
    'firstName': {
        'type': 'string',
        'minlength': 1,
        'required': True,
    },
    'lastName': {
        'type': 'string',
        'minlength': 1,
        'required': True,
    },
    'email': {
        'type': 'string',
        "required": False,
    },
    'phoneNo': {
        'type': 'int',
        'required': True,
    },
    'userId': {
        'type': 'int',
        'required': True,
    },
    'patientId': {
        'type': 'int',
        'required': True,
    },
    'age': {
        'type': 'int'
    },
    "userStatus": {
        "type": "int"
    }
}
collection = 'Userinformation'

# Translate the mongoose-style `user_schema` above into a MongoDB
# $jsonSchema validator and attach it to the collection via collMod.
validator = {'$jsonSchema': {'bsonType': 'object', 'properties': {}}}
required = []
for field_key, field in user_schema.items():
    properties = {'bsonType': field['type']}
    minimum = field.get('minlength')
    # Idiom fix: isinstance() instead of `type(minimum) == int`.
    if isinstance(minimum, int):
        properties['minimum'] = minimum
    if field.get('required') is True:
        required.append(field_key)
    validator['$jsonSchema']['properties'][field_key] = properties
if required:
    validator['$jsonSchema']['required'] = required

# collMod must come first in the command document, hence the ordered pairs.
query = [('collMod', collection),
         ('validator', validator)]

# Create the collection if it does not exist yet, then apply the validator.
try:
    db.create_collection(collection)
except CollectionInvalid:
    pass
command_result = db.command(OrderedDict(query))
| 22.671233 | 91 | 0.586103 |
b9d2ffe6ed2fa269a38c6160d63c6feb0461776b | 6,254 | py | Python | tests/test_click.py | InvestWeMust/lean-cli | a7241a0af6202dc7d56c0f35d09e51798cc5d426 | [
"Apache-2.0"
] | 76 | 2021-02-03T02:32:32.000Z | 2022-03-28T17:04:03.000Z | tests/test_click.py | InvestWeMust/lean-cli | a7241a0af6202dc7d56c0f35d09e51798cc5d426 | [
"Apache-2.0"
] | 64 | 2021-02-28T23:14:17.000Z | 2022-03-30T23:22:24.000Z | tests/test_click.py | InvestWeMust/lean-cli | a7241a0af6202dc7d56c0f35d09e51798cc5d426 | [
"Apache-2.0"
] | 50 | 2021-02-11T01:25:24.000Z | 2022-03-17T03:56:29.000Z | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from datetime import datetime
from pathlib import Path
from typing import Optional
from unittest import mock
import click
import pytest
from click.testing import CliRunner
from dependency_injector import providers
from lean.click import DateParameter, LeanCommand, PathParameter
from lean.container import container
def test_lean_command_enables_verbose_logging_when_verbose_option_given() -> None:
    """Passing --verbose should switch the injected logger into debug mode."""
    @click.command(cls=LeanCommand)
    def command() -> None:
        pass

    fake_logger = mock.Mock()
    container.logger.override(providers.Object(fake_logger))

    outcome = CliRunner().invoke(command, ["--verbose"])

    assert outcome.exit_code == 0
    assert fake_logger.debug_logging_enabled
def test_lean_command_sets_default_lean_config_path_when_lean_config_option_given() -> None:
    """--lean-config <file> should be registered as the default config path."""
    @click.command(cls=LeanCommand, requires_lean_config=True)
    def command() -> None:
        pass

    fake_config_manager = mock.Mock()
    container.lean_config_manager.override(providers.Object(fake_config_manager))

    config_path = Path.cwd() / "custom-config.json"
    with config_path.open("w+", encoding="utf-8") as file:
        file.write("{}")

    outcome = CliRunner().invoke(command, ["--lean-config", "custom-config.json"])

    assert outcome.exit_code == 0
    fake_config_manager.set_default_lean_config_path.assert_called_once_with(config_path)
def test_lean_command_fails_when_lean_config_not_available() -> None:
    """A command requiring a Lean config should fail when none can be found."""
    @click.command(cls=LeanCommand, requires_lean_config=True)
    def command() -> None:
        pass

    # The home directory is assumed to hold no Lean config file.
    os.chdir(Path.home())

    assert CliRunner().invoke(command).exit_code != 0
def test_lean_command_parses_unknown_options() -> None:
    """Unknown --key value pairs should be collected into the context params."""
    captured_ctx: Optional[click.Context] = None

    @click.command(cls=LeanCommand, allow_unknown_options=True)
    @click.pass_context
    def command(ctx: click.Context, **kwargs) -> None:
        nonlocal captured_ctx
        captured_ctx = ctx

    argv = ["--key1", "value1", "abc", "--key2=value2", "def", "--key3", "", "ghi"]
    outcome = CliRunner().invoke(command, argv)

    assert outcome.exit_code == 0
    assert captured_ctx is not None
    assert captured_ctx.params == {"key1": "value1", "key2": "value2", "key3": ""}
def test_lean_command_checks_for_cli_updates() -> None:
    """A command that succeeds should trigger the outdated-CLI check."""
    @click.command(cls=LeanCommand)
    def command() -> None:
        pass

    fake_update_manager = mock.Mock()
    container.update_manager.override(providers.Object(fake_update_manager))

    outcome = CliRunner().invoke(command)

    assert outcome.exit_code == 0
    fake_update_manager.warn_if_cli_outdated.assert_called_once()
def test_lean_command_does_not_check_for_cli_updates_when_command_raises() -> None:
    """A command that raises should skip the outdated-CLI check."""
    @click.command(cls=LeanCommand)
    def command() -> None:
        raise RuntimeError("Oops")

    fake_update_manager = mock.Mock()
    container.update_manager.override(providers.Object(fake_update_manager))

    outcome = CliRunner().invoke(command)

    assert outcome.exit_code != 0
    fake_update_manager.warn_if_cli_outdated.assert_not_called()
def test_path_parameter_fails_when_input_not_valid_path() -> None:
    """PathParameter should reject values the path manager marks invalid."""
    @click.command()
    @click.argument("arg", type=PathParameter(exists=False, file_okay=True, dir_okay=True))
    def command(arg: Path) -> None:
        pass

    fake_path_manager = mock.Mock()
    fake_path_manager.is_path_valid.return_value = False
    container.path_manager.override(providers.Object(fake_path_manager))

    assert CliRunner().invoke(command, ["invalid-path.txt"]).exit_code != 0
def test_path_parameter_fails_when_input_not_existent_and_exists_required() -> None:
    """With exists=True, a path that does not exist should be rejected."""
    @click.command()
    @click.argument("arg", type=PathParameter(exists=True, file_okay=True, dir_okay=True))
    def command(arg: Path) -> None:
        pass

    assert CliRunner().invoke(command, ["fake-file.txt"]).exit_code != 0
def test_path_parameter_fails_when_input_is_file_and_file_not_okay() -> None:
    """With file_okay=False, a path pointing at a file should be rejected."""
    @click.command()
    @click.argument("arg", type=PathParameter(exists=True, file_okay=False, dir_okay=True))
    def command(arg: Path) -> None:
        pass

    (Path.cwd() / "empty-file.txt").touch()

    assert CliRunner().invoke(command, ["empty-file.txt"]).exit_code != 0
def test_path_parameter_fails_when_input_is_directory_and_directory_not_okay() -> None:
    """With dir_okay=False, a path pointing at a directory should be rejected."""
    @click.command()
    @click.argument("arg", type=PathParameter(exists=True, file_okay=True, dir_okay=False))
    def command(arg: Path) -> None:
        pass

    (Path.cwd() / "Empty Directory").mkdir()

    assert CliRunner().invoke(command, ["Empty Directory"]).exit_code != 0
@pytest.mark.parametrize("input", ["20201231", "2020-12-31"])
def test_date_parameter_returns_datetime_object(input: str) -> None:
    """DateParameter should parse both compact and dashed dates to datetime."""
    parsed: Optional[datetime] = None

    @click.command()
    @click.argument("arg", type=DateParameter())
    def command(arg: datetime) -> None:
        nonlocal parsed
        parsed = arg

    outcome = CliRunner().invoke(command, [input])

    assert outcome.exit_code == 0
    assert parsed is not None
    assert (parsed.year, parsed.month, parsed.day) == (2020, 12, 31)
@pytest.mark.parametrize("input", ["20203112", "2020-31-12", "yyyymmdd", "this is invalid input"])
def test_date_parameter_fails_when_input_not_formatted_as_yyyymmdd(input: str) -> None:
    """Malformed date strings should make the command fail."""
    @click.command()
    @click.argument("arg", type=DateParameter())
    def command(arg: datetime) -> None:
        pass

    assert CliRunner().invoke(command, [input]).exit_code != 0
| 30.21256 | 114 | 0.713783 |
8cc739fefc22fd6323802ad6b7676ff7da7c74d1 | 3,164 | py | Python | HFSS-tests.py | jsidabras/GA-PMR | d0dbe823b5873e2a05176d405c42f0396c38f480 | [
"MIT"
] | null | null | null | HFSS-tests.py | jsidabras/GA-PMR | d0dbe823b5873e2a05176d405c42f0396c38f480 | [
"MIT"
] | null | null | null | HFSS-tests.py | jsidabras/GA-PMR | d0dbe823b5873e2a05176d405c42f0396c38f480 | [
"MIT"
] | null | null | null | # ----------------------------------------------
# Script Written by Jason W. Sidabras (jason.sidabras@cec.mpg.de)
# requires jsidabras/hycohanz as of 20-04-2017
# 20-04-2017: script runs 3m00s for 2490 element change. Hfss-Region
# (Model/NonModel change) Vacuum can produce non-solvable geometries. Meshed 1 pass
# 20-04-2017: script runs 3m12s for 2490 element change. Removed hfss-Region
# and replaced with a static subtracted vacuum volume (Model/NonModel change).
# 19-04-2017: script runs 4m44s for 2490 element change (material change).
# ----------------------------------------------
from random import *
import hycohanz as hfss
from datetime import datetime

# Wall-clock timing of the whole element-update / solve cycle
# (timing results are recorded in the header comments above).
startTime = datetime.now()

# Attach to the running ANSYS HFSS session and fetch COM handles for the
# active design, its 3D editor, and the fields/solutions modules.
[oAnsoftApp, oDesktop] = hfss.setup_interface()
oProject = hfss.get_active_project(oDesktop)
oDesign = hfss.set_active_design(oProject, 'HFSSDesign1')
oEditor = hfss.set_active_editor(oDesign)
oFieldsReporter = hfss.get_module(oDesign, 'FieldsReporter')
oSolution = oDesign.GetModule("Solutions")
oDesktop.EnableAutoSave(False)

# Random 0/1 chromosome: one gene per geometry element "Elm_<i>".
randBinList = lambda n: [randint(0,1) for b in range(1,n+1)]
thing = randBinList(1721)
print(thing)

# Split the elements into two material groups: 1 -> conductor, 0 -> vacuum.
index = 0
Vac = []
Silv = []
for i in thing:
    if i == 1:
        Silv.append("Elm_"+str(index))
        index += 1
    else:
        Vac.append("Elm_"+str(index))
        index += 1
oDesktop.ClearMessages("", "", 3)
if Vac:
    # Check if list is empty
    # hfss.assign_White(oEditor, Silv)
    hfss.assign_material(oEditor, Vac, MaterialName="vacuum", SolveInside=True)
if Silv:
    # hfss.assign_Orange(oEditor, Silv)
    hfss.assign_material(oEditor, Silv, MaterialName="pec", SolveInside=False)
# oProject.Save()
# Purge the modeler history so the material reassignments do not accumulate
# operations in the design tree between GA generations.
oEditor.PurgeHistory(["NAME:Selections", "Selections:=", Silv, "NewPartsModelFlag:=", "Model"])
oEditor.PurgeHistory(["NAME:Selections", "Selections:=", Vac, "NewPartsModelFlag:=", "Model"])
try:
    oDesign.Analyze("Setup1")
except:
    print("Simulation Error Set Fitness to 0")
    # return 0,
oFieldsReporter.CalcStack('clear')
# Load the pre solved calculator expressions. Some will delete when Fastlist is deleted
# Remember to set Ple to zero unless you are solving for the losses in the substrate
#oFieldsReporter.LoadNamedExpressions("E:\\MPI\\Maxwell\\Projects\\PersonalLib\\_Signal_14 - Xband - ICE.clc", "Fields", ["ImDieHold", "ImDieSam", "Frq", "H1r", "H1rMax", "IntH1r2dVs"])
# Push the pre-defined fitness expression onto the fields-calculator stack.
oFieldsReporter.CopyNamedExprToStack("IntH1r2dVs")
# Is there a solution present? If so clc_eval if not, run the Analyze again
# if there is still no solution, send it to zero
if oSolution.HasFields("Setup1:LastAdaptive", "x_size=2mm") == 1:
    hfss.clc_eval(
        oFieldsReporter,
        'Setup1',
        'LastAdaptive',
        9.7e9,
        0,
        {},
    )
else:
    oDesign.Analyze("Setup1")
    hfss.clc_eval(
        oFieldsReporter,
        'Setup1',
        'LastAdaptive',
        9.7e9,
        0,
        {},
    )
# Read the evaluated expression (the GA fitness value) off the calculator stack.
out = hfss.get_top_entry_value(
    oFieldsReporter,
    'Setup1',
    'LastAdaptive',
    9.7e9,
    0,
    {},
)
print(out[0])
print(datetime.now() - startTime)
| 31.959596 | 186 | 0.647914 |
5c491472dac56e65e28b6b6c7de60ae7a3865a38 | 2,221 | py | Python | testinfra/backend/salt.py | smarlowucf/pytest-testinfra | 468f1d002a1d5903d9063c4a4155594aff05d794 | [
"Apache-2.0"
] | null | null | null | testinfra/backend/salt.py | smarlowucf/pytest-testinfra | 468f1d002a1d5903d9063c4a4155594aff05d794 | [
"Apache-2.0"
] | null | null | null | testinfra/backend/salt.py | smarlowucf/pytest-testinfra | 468f1d002a1d5903d9063c4a4155594aff05d794 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import salt.client
except ImportError:
raise RuntimeError("You must install salt package to use the salt backend")
from testinfra.backend import base
class SaltBackend(base.BaseBackend):
    """Testinfra backend that runs commands on minions through salt's LocalClient."""

    HAS_RUN_SALT = True
    NAME = "salt"

    def __init__(self, host, *args, **kwargs):
        self.host = host
        self._client = None
        super().__init__(self.host, *args, **kwargs)

    @property
    def client(self):
        """Salt LocalClient, instantiated lazily on first access and reused."""
        if self._client is None:
            self._client = salt.client.LocalClient()
        return self._client

    def run(self, command, *args, **kwargs):
        """Run a shell command on the target minion and wrap its output."""
        full_command = self.get_command(command, *args)
        ret = self.run_salt("cmd.run_all", [full_command])
        return self.result(
            ret["retcode"], full_command, ret["stdout"], ret["stderr"])

    def run_salt(self, func, args=None):
        """Invoke a salt execution-module function on the target minion."""
        ret = self.client.cmd(self.host, func, args or [])
        if self.host not in ret:
            # A missing return for our minion usually means it is offline.
            raise RuntimeError(
                "Error while running {}({}): {}. "
                "Minion not connected ?".format(func, args, ret)
            )
        return ret[self.host]

    @classmethod
    def get_hosts(cls, host, **kwargs):
        """Expand a salt target expression into a sorted list of minion ids."""
        if host is None:
            host = "*"
        if not any(marker in host for marker in "@*[?"):
            # Plain host name: defer to the default implementation.
            return super().get_hosts(host, **kwargs)
        client = salt.client.LocalClient()
        if "@" in host:
            matched = client.cmd(host, "test.true", expr_form="compound").keys()
        else:
            matched = client.cmd(host, "test.true").keys()
        if not matched:
            raise RuntimeError("No host matching '{}'".format(host))
        return sorted(matched)
| 34.703125 | 82 | 0.615038 |
91150271775e1bcf188908a5352023d285ea5e40 | 363 | py | Python | src/python_package/__init__.py | microsoft/ai-python-package | 770f5167ebc32b5410739f04c5730e68f84785c9 | [
"MIT"
] | 3 | 2021-12-11T17:02:56.000Z | 2022-02-23T19:45:35.000Z | src/python_package/__init__.py | microsoft/ai-python-package | 770f5167ebc32b5410739f04c5730e68f84785c9 | [
"MIT"
] | 5 | 2022-03-24T13:21:21.000Z | 2022-03-31T13:21:39.000Z | src/python_package/__init__.py | microsoft/python-package-template | 770f5167ebc32b5410739f04c5730e68f84785c9 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.
# -------------------------------------------------------------
"""Python Package Template"""
from __future__ import annotations
__version__ = "0.0.2"  # distribution version string (PEP 440 format)
| 40.333333 | 80 | 0.484848 |
77effa961f64acaefcd2d4dc9d0ee220f0605d74 | 3,891 | py | Python | textmining/firstname.py | michelcaradec/textmining | b8261730e2205d8c988fdbaea27e11025bb66b68 | [
"MIT"
] | null | null | null | textmining/firstname.py | michelcaradec/textmining | b8261730e2205d8c988fdbaea27e11025bb66b68 | [
"MIT"
] | null | null | null | textmining/firstname.py | michelcaradec/textmining | b8261730e2205d8c988fdbaea27e11025bb66b68 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import codecs
import utils
from singleton import Singleton
from firstnameinfo import FirstNameInfo
class FirstName(Singleton):
    """
    First name manipulation class.

    Lazily loads a tab-separated dictionary of first names
    (assets/firstnames.txt) and answers queries about whether a token is a
    first name, its gender, and the confidence of that guess.  Matches can be
    filtered by the thresholds below and by a caller-supplied exclusion list.
    """

    # Minimum number of occurrences for a first name to be accepted.
    count_threshold = 0
    # Minimum reference year for a first name to be accepted.
    year_threshold = 0
    # Minimum gender-confidence for a first name to be accepted.
    confidence_threshold = 0

    def __init__(self):
        self.__firstnames = None  # lazily-loaded {name: FirstNameInfo}
        self.__exclusion = set()  # cleaned first names to ignore
        self.__filename = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "assets/firstnames.txt")
        self.__encoding = "utf-8"

    def __load(self):
        """
        Load the first names dictionary from the bundled asset file.

        Each data line is "name<TAB>m_year<TAB>m_count<TAB>f_year<TAB>f_count";
        empty fields default to 0.
        """
        firstnames = dict()
        with codecs.open(self.__filename, "r", self.__encoding) as stream:
            # Skip the header line.  The next() builtin replaces the original
            # Python 2-only stream.next() call, which breaks on Python 3
            # where iterators have no .next() method.
            next(stream)
            for line in stream:
                firstname, m_year, m_count, f_year, f_count = line.split("\t")
                f_count = f_count.strip()  # last field still carries the newline
                firstnames[firstname] = FirstNameInfo(
                    int(m_year) if m_year else 0,
                    int(m_count) if m_count else 0,
                    int(f_year) if f_year else 0,
                    int(f_count) if f_count else 0)
        return firstnames

    def __ensure_loaded(self):
        """
        Load the first names dictionary on first use (lazy initialisation).
        """
        if self.__firstnames is None:
            self.__firstnames = self.__load()

    def __clean_firstname(self, token):
        """
        Normalise a token for lookup: remove accents, convert to lower case.
        """
        return utils.to_lower(utils.substitute_accents(token))

    def reset_thresholds(self):
        """
        Reset all thresholds to 0 (accept every dictionary entry).
        """
        self.count_threshold = 0
        self.year_threshold = 0
        self.confidence_threshold = 0

    def set_exclusion_list(self, firstnames, clean_token=True):
        """
        Set first names exclusion list (ie first names to ignore).

        Tokens are normalised before being stored unless clean_token is False.
        Passing a falsy value clears the list.
        """
        self.__exclusion = set(
            (self.__clean_firstname(token) if clean_token else token for token in firstnames)
        ) if firstnames else set()

    def get_exclusion_list(self):
        """
        Get first names exclusion list (ie first names to ignore).
        """
        return self.__exclusion if self.__exclusion else set()

    def get_info(self, token, clean_token=True):
        """
        Get first name infos, or None when the token is unknown, excluded,
        or filtered out by the thresholds.
        """
        self.__ensure_loaded()
        token = self.__clean_firstname(token) if clean_token else token
        info = None if token in self.__exclusion else self.__firstnames.get(token)
        if not info:
            return None
        if (info.count >= self.count_threshold
                and info.confidence >= self.confidence_threshold
                and info.year >= self.year_threshold):
            return info
        return None

    def is_firstname(self, token, clean_token=True):
        """
        Check if first name.
        """
        return bool(self.get_info(token, clean_token))

    def get_gender(self, token, clean_token=True):
        """
        Get first name gender:
        GENDER_MALE = male
        GENDER_FEMALE = female
        GENDER_BOTH = used for both
        None = Not a first name
        """
        info = self.get_info(token, clean_token)
        return info.gender if info else None

    def get_gender_confidence(self, token, clean_token=True):
        """
        Get first name gender and confidence:
        GENDER_MALE = male
        GENDER_FEMALE = female
        GENDER_BOTH = used for both
        None = Not a first name
        """
        info = self.get_info(token, clean_token)
        return (info.gender, info.confidence) if info else (None, None)
| 26.469388 | 93 | 0.583912 |
38943f437c9cbba19cd112b4b3411076e45d39d2 | 208 | py | Python | pages/urls.py | NizarAlsaeed/blogx | 6e834c6553010074d18243a64693e749e59a41c5 | [
"MIT"
] | null | null | null | pages/urls.py | NizarAlsaeed/blogx | 6e834c6553010074d18243a64693e749e59a41c5 | [
"MIT"
] | null | null | null | pages/urls.py | NizarAlsaeed/blogx | 6e834c6553010074d18243a64693e749e59a41c5 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import HomePageView, AboutPageView
# URL routes for the "pages" app.
urlpatterns = [
    # path('', HomePageView.as_view(), name='home'),  # home route disabled; HomePageView import kept for it
    path('about/', AboutPageView.as_view(), name='about'),
]
| 23.111111 | 58 | 0.692308 |
09154f715c718aa9ce03a606ae941c1521983c90 | 2,550 | py | Python | clients/python/tyckiting_client/ai/myai.py | HimanshuSingal/space_mission | 7d032f02b6144f412e23cd012d87965f68484fea | [
"MIT"
] | null | null | null | clients/python/tyckiting_client/ai/myai.py | HimanshuSingal/space_mission | 7d032f02b6144f412e23cd012d87965f68484fea | [
"MIT"
] | null | null | null | clients/python/tyckiting_client/ai/myai.py | HimanshuSingal/space_mission | 7d032f02b6144f412e23cd012d87965f68484fea | [
"MIT"
] | null | null | null | import random
from tyckiting_client.ai import base
from tyckiting_client import actions
class Ai(base.BaseAi):
    """Evasive bot AI: flees on sighting an enemy, then fires at the sighting position next turn."""
    a = 0  # NOTE(review): appears unused in this file -- confirm before removing
    wait_list = []  # queued [bot_id, 'cannon', pos] shots; class attribute, shared by all instances
def move(self, bots, events):
"""
Move the bot to a random legal positon.
Args:
bots: List of bot states for own team
events: List of events form previous round
Returns:
List of actions to perform this round.
"""
for e in events:
print e.__dict
response = []
if len(Ai.wait_list) != 0:
for wait in Ai.wait_list:
b_id = wait[0];pos = wait[-1]
b = (b for b in bots if b_id == b.bot_id).next()
bots.remove(b) # remove the intereted bot from event,bot,wait_list
Ai.wait_list.remove(wait)
for e in events:
if e.source == b_id or e.bot_id==b_id:
events.remove(e)
response.append(actions.Cannon(bot_id=b_id,x=pos.x,y=pos.y))
for e in events:
if e.event == 'see': # if see some bots
b = (b for b in bots if e.source == b.bot_id).next()
if b != None:
bots.remove(b)
events.remove(e)
response.append(self.on_see(e,b))
else:
print "I am none"
for bot in bots:
if not bot.alive:
continue
move_pos = random.choice(list(self.get_valid_moves(bot)))
response.append(actions.Move(bot_id=bot.bot_id,
x=move_pos.x,
y=move_pos.y))
return response
def get_far_pos(self,bot,tar_pos):
dis = []
pos_list = list(self.get_valid_moves(bot))
for pos in pos_list:
d = (pos.x - tar_pos.x)**2 + (pos.y - tar_pos.y)**2
dis.append(d)
pos = pos_list[dis.index(max(dis))]
return pos
#todo
def on_see(self,event,bot):
en_pos = event.pos
far_pos = self.get_far_pos(bot,en_pos) # get the farest pos, move
# and shoot next turn
Ai.wait_list.append([bot.bot_id,'cannon',en_pos])
print(Ai.wait_list)
return actions.Move(bot_id=bot.bot_id,x=far_pos.x,y=far_pos.y)
    def on_radar_echo(self):
        # Placeholder event hook: radar-echo handling is not implemented yet.
        return
    def on_detected(self):
        # Placeholder event hook: detection handling is not implemented yet.
        return
    def on_hit(self):
        # Placeholder event hook: hit handling is not implemented yet.
        return
def give_priority(self,events,bots):
pass | 28.977273 | 82 | 0.512941 |
674d528199f5c0fc975fc3e3bb94ed21883bc0b4 | 10,335 | py | Python | screens.py | lutre69/ProjectZ | 09209883df16ce8588cfa2653b25e1fefab6800c | [
"MIT"
] | 1 | 2020-02-04T07:32:58.000Z | 2020-02-04T07:32:58.000Z | screens.py | lutre69/ProjectZ | 09209883df16ce8588cfa2653b25e1fefab6800c | [
"MIT"
] | null | null | null | screens.py | lutre69/ProjectZ | 09209883df16ce8588cfa2653b25e1fefab6800c | [
"MIT"
] | null | null | null | from kivy.uix.screenmanager import Screen
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.properties import ObjectProperty
from popups import ConfirmPopup
from functools import partial
import time
class StartScreen(Screen):
    """Landing screen: choose a class and a skill set, and display
    behaviour/skill summary tables built from the app's data_output."""

    # References injected from the kv file / application at runtime.
    app_obj = ObjectProperty()
    root_obj = ObjectProperty()

    def open_class_popup(self):
        """Open the class-selection popup (choices come from root.class_list)."""
        root = self.root_obj
        args = {'item': root.selected_class, 'item_list': root.class_list,
                'attribute': 'class_', 'callback': self.select_class,
                'title': 'Choisissez une classe', 'size_hint_y': 0.3}
        return root.open_choice_popup(**args)

    def select_class(self, a_class):
        """Popup callback: display the chosen class and store it on the root."""
        self.active_class_label.text = "{}".format(a_class.class_)
        self.root_obj.selected_class = a_class

    def open_skill_set_popup(self):
        """Open the skill-set-selection popup."""
        args = {'item': self.root_obj.selected_skill_set,
                'item_list': self.root_obj.skill_set_list,
                'attribute': 'set_name',
                'callback': self.select_skill_set,
                'title': 'Choisissez un jeu de compétences',
                'size_hint_y': 0.3}
        return self.root_obj.open_choice_popup(**args)

    def select_skill_set(self, a_set):
        """Popup callback: display the chosen skill set and store it on the root."""
        self.active_skill_set_label.text = "{}".format(a_set.set_name)
        self.root_obj.selected_skill_set = a_set

    def display_behaviour(self):
        """Render a table of behaviour incidents, one row per student.

        NOTE(review): data[key] aliases output[key], so the pop() calls
        below mutate the stored data_output entries -- confirm intended.
        """
        self.display_header.clear_widgets()
        self.display_label.clear_widgets()
        output = self.app_obj.data_output
        data = {}
        header = ["prenom",
                  "Bavardage", "Insolence", "Inactivite", "Travail non fait"]
        header_layout = GridLayout(cols=len(header))
        [header_layout.add_widget(Label(text=item,
                                        font_size='12sp')) for item in header]
        # Body grid: height follows its content (for scrolling).
        grid_layout = GridLayout(cols=len(header), size_hint_y=None)
        grid_layout.bind(minimum_height=grid_layout.setter('height'))
        for key in output:
            data[key] = output[key]
            try:
                # Drop bookkeeping fields.  If 'class' is absent the first
                # pop raises, so 'name' is left in place as well.
                data[key].pop('class')
                data[key].pop('name')
            except KeyError:
                pass
            row = [data[key]["surname"]]
            for behaviour in header[1:]:
                try:
                    item = data[key][behaviour]
                    # One line per recorded incident.
                    row.append("\n".join(str(item[i]) for i in item))
                except KeyError:
                    row.extend([" "])
            [grid_layout.add_widget(Label(text=item, size_hint_y=None,
                                          height='40dp',
                                          font_size='12sp')) for item in row]
        # Truncate every header label except the name column to 4 characters.
        for label in header_layout.children:
            if label.text != "prenom":
                label.text = label.text[:4]
        self.display_header.add_widget(header_layout)
        self.display_label.add_widget(grid_layout)

    def display_skills(self):
        """Render a table of skill evaluations, one row per student.

        Same structure and aliasing caveat as display_behaviour, but the
        columns come from root_obj.active_skills.
        """
        self.display_header.clear_widgets()
        self.display_label.clear_widgets()
        output = self.app_obj.data_output
        data = {}
        header = ["prenom"]
        header.extend([skill.title for skill in self.root_obj.active_skills])
        header_layout = GridLayout(cols=len(header))
        [header_layout.add_widget(Label(text=item,
                                        font_size='12sp')) for item in header]
        grid_layout = GridLayout(cols=len(header), size_hint_y=None)
        grid_layout.bind(minimum_height=grid_layout.setter('height'))
        for key in output:
            data[key] = output[key]
            try:
                data[key].pop('class')
                data[key].pop('name')
            except KeyError:
                pass
            row = [data[key]["surname"]]
            for skill in header[1:]:
                try:
                    item = data[key][skill]
                    # All recorded grades for this skill on one line.
                    row.append(", ".join(str(item[i]) for i in item))
                except KeyError:
                    row.extend([" "])
            [grid_layout.add_widget(Label(text=item, size_hint_y=None,
                                          height='40dp',
                                          font_size='12sp')) for item in row]
        for label in header_layout.children:
            if label.text != "prenom":
                label.text = label.text[:4]
        self.display_header.add_widget(header_layout)
        self.display_label.add_widget(grid_layout)
class SkillsScreen(Screen):
    """Screen for grading a student on a skill, with popup confirmation."""

    # References injected from the kv file / application at runtime.
    root_obj = ObjectProperty()
    app_obj = ObjectProperty()
    selected_student = ObjectProperty()
    selected_skill = ObjectProperty()
    student_review = ObjectProperty(GridLayout())
    confirm_popup = ObjectProperty()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Refresh the review panel whenever the student or skill changes.
        self.bind(selected_student=self.display_stud_skills)
        self.bind(selected_skill=self.display_stud_skills)

    def open_student_popup(self):
        """Open the student-selection popup."""
        args = {'item': self.selected_student,
                'item_list': self.root_obj.active_students,
                'attribute': 'surname',
                'callback': self.select_student,
                'title': 'Choisissez un élève',
                'size_hint_y': 1}
        return self.root_obj.open_choice_popup(**args)

    def _on_answer(self, func, instance, answer):
        """Confirmation-popup callback: run func only on a 'yes' answer."""
        self.confirm_popup.dismiss()
        if answer == 'yes':
            return func()
        else:
            return

    def select_student(self, student):
        """Popup callback: display and store the chosen student."""
        self.student_name.text = "{} {}".format(student.surname,
                                                student.name)
        self.selected_student = student

    def select_skill(self, skill):
        """Popup callback: display and store the chosen skill."""
        self.skill_name.text = skill.title
        self.skill_summary.text = skill.summary
        self.selected_skill = skill

    def open_skills_popup(self):
        """Open the skill-selection popup."""
        args = {'item': self.selected_skill,
                'item_list': self.root_obj.active_skills,
                'attribute': 'title',
                'callback': self.select_skill,
                'title': 'Choisissez une compétence',
                'size_hint_y': 0.4}
        return self.root_obj.open_choice_popup(**args)

    def pre_set_student_skill(self, value):
        """Ask for confirmation before recording a grade.

        Silently does nothing if no student/skill is selected yet
        (the attribute accesses then raise AttributeError).
        """
        try:
            skill = self.selected_skill.title
            student = self.selected_student
            content = ConfirmPopup(text="Confirmez la note:\n\n{} {}\n\n"
                                        "{} {}".format(
                                       skill, value, student.surname,
                                       student.name))
            # Bind the popup answer to the actual write, with the grade baked in.
            func = partial(self.set_student_skill, value)
            __on_answer = partial(self._on_answer, func)
            content.bind(on_answer=__on_answer)
            self.confirm_popup = Popup(title="Confirmation",
                                       content=content,
                                       size_hint_y=.4,
                                       auto_dismiss=False)
            self.confirm_popup.open()
        except AttributeError:
            pass

    def set_student_skill(self, value):
        """Append a grade for the selected student/skill and persist it."""
        student = self.selected_student.id_
        output = self.app_obj.data_output
        skill = self.selected_skill.title
        try:
            # Append at the next integer index of the existing grade dict.
            length = len(output[student][skill])
            output[student][skill][length] = value
        except KeyError:
            try:
                output[student][skill] = {0: value}
            except KeyError:
                # NOTE(review): self.students_data is not defined in this
                # class -- this fallback would raise AttributeError; verify.
                output[student] = self.students_data[student]
                output[student][skill] = {0: value}
        output._is_changed = True
        output.store_sync()

    def display_stud_skills(self, *ignore):
        """Refresh the review panel once both a student and a skill are set."""
        try:
            assert self.selected_student and self.selected_skill
            # NOTE(review): placeholder content; widgets accumulate because
            # student_review is never cleared -- confirm intended.
            self.student_review.add_widget(Label(text='caca',
                                                 size_hint_y=.8))
        except AssertionError:
            pass
class BehaviourScreen(Screen):
    """Screen for recording a behaviour incident (sanction) for a student."""

    # References injected from the kv file / application at runtime.
    app_obj = ObjectProperty()
    root_obj = ObjectProperty()
    selected_student = ObjectProperty()
    confirm_popup = ObjectProperty()

    def open_student_popup(self):
        """Open the student-selection popup."""
        args = {'item': self.selected_student,
                'item_list': self.root_obj.active_students,
                'attribute': 'surname',
                'callback': self.select_student,
                'title': 'Choisissez un élève',
                'size_hint_y': 1}
        return self.root_obj.open_choice_popup(**args)

    def select_student(self, student):
        """Popup callback: display and store the chosen student."""
        self.student_name.text = "{} {}".format(student.surname,
                                                student.name)
        self.selected_student = student

    def _on_answer(self, func, instance, answer):
        """Confirmation-popup callback: run func only on a 'yes' answer."""
        self.confirm_popup.dismiss()
        if answer == 'yes':
            return func()
        else:
            return

    def pre_set_student_disobedience(self, value):
        """Ask for confirmation before recording a sanction.

        Silently does nothing if no student is selected yet (the attribute
        accesses then raise AttributeError).
        """
        try:
            student = self.selected_student
            content = ConfirmPopup(text="Confirmez la sanction:\n\n{}\n\n"
                                        "{} {}".format(
                                       value, student.surname,
                                       student.name))
            # Bind the popup answer to the actual write, with the sanction baked in.
            func = partial(self.set_student_disobedience, value)
            __on_answer = partial(self._on_answer, func)
            content.bind(on_answer=__on_answer)
            self.confirm_popup = Popup(title="Confirmation",
                                       content=content,
                                       size_hint_y=.4,
                                       auto_dismiss=False)
            self.confirm_popup.open()
        except AttributeError:
            pass

    def set_student_disobedience(self, value):
        """Append a timestamped incident of the given kind and persist it."""
        # %B is locale-dependent (month name); timestamps are stored as text.
        time_ = time.strftime("%d %B %H:%M:%S")
        student = self.selected_student.id_
        output = self.app_obj.data_output
        try:
            # Append at the next integer index of the existing incident dict.
            length = len(output[student][value])
            output[student][value][length] = time_
        except KeyError:
            try:
                output[student][value] = {0: time_}
            except KeyError:
                # NOTE(review): self.students_data is not defined in this
                # class -- this fallback would raise AttributeError; verify.
                output[student] = self.students_data[student]
                output[student][value] = {0: time_}
        output._is_changed = True
        output.store_sync()
| 39.446565 | 78 | 0.551524 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.