id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
376260 | <filename>newsletter/models.py
from django.db import models
from model_utils.models import TimeStampedModel
class Signup(TimeStampedModel):
    """Newsletter signup record: a subscriber email address.

    Inherits ``created``/``modified`` timestamp fields from TimeStampedModel.
    """
    email = models.EmailField()

    def __str__(self):
        # Represent the signup by its email (admin lists, shell, logs).
        return self.email
| StarcoderdataPython |
6584827 | import socket
from math import sqrt, inf
serversocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
listOfGasStations = []
idDataMsg = 0
class Fuel:
    """One fuel offering: a type label and its price (coerced to int)."""

    def __init__(self, fuelType, fuelPrice):
        self.fuelType = fuelType
        # Prices arrive as strings from the wire protocol; normalize to int.
        self.fuelPrice = int(fuelPrice)

    def __str__(self):
        # "<type> <price>", e.g. "gasolina 5"
        return f"{self.fuelType} {self.fuelPrice}"
class GasStation:
    """A gas station: the fuels it sells plus its coordinates."""

    def __init__(self, fuels, lat, long):
        self.fuels = fuels  # list of fuels sold (Fuel objects)
        self.lat = lat      # latitude (kept as received, typically str)
        self.long = long    # longitude

    def insertFuel(self, fuel):
        """Append one more fuel offering to this station."""
        self.fuels.append(fuel)

    def __str__(self):
        # BUG FIX: the original rebound the name ``str`` to a local string and
        # then called ``str(self.lat)`` -> TypeError ('str' object is not
        # callable), and concatenated the raw Fuel object (no __add__) instead
        # of its string form.  One "<fuel> <lat> <long>" line per fuel.
        lines = ""
        for fuel in self.fuels:
            lines += str(fuel) + " " + str(self.lat) + " " + str(self.long) + "\n"
        return lines

    @staticmethod
    def saveOnDatabase(msg):
        """Append a registration message to the flat-file database.

        ``msg`` layout: [type, fuelType, fuelPrice, lat, long].  Uses the
        module-level ``idDataMsg`` counter as the record id.
        """
        global idDataMsg
        with open('../database/gas_stations.txt', 'a', encoding='utf-8') as database:
            database.write(
                "\nD " +
                str(idDataMsg) + " " +
                str(msg[1]) + " " +
                str(msg[2]) + " " +
                str(msg[3]) + " " +
                str(msg[4])
            )
        idDataMsg += 1

    @staticmethod
    def getLowerGasPrice(fuelType, searchRadius, lat, long):
        """Find the lowest price for ``fuelType`` within ``searchRadius`` of (lat, long).

        Returns:
            (success, response): a bool flag and a human-readable message.
        """
        lowerGasPrice = 10000000
        stationOfLowerlowerGasPriceObject = None
        success = False
        searchRadius = float(searchRadius)
        # Keep only the stations inside the search radius.
        newListOfGasStations = list(filter(
            lambda elem:
            pointsDistance(elem.lat, lat, elem.long, long) <= searchRadius,
            listOfGasStations
        ))
        if len(newListOfGasStations) == 0:
            response = "Nenhum posto dentro do raio requisitado."
        else:
            # Scan each station's fuel list for the requested type, keeping
            # the cheapest match found so far.
            for gs in newListOfGasStations:
                for gsFuelRequired in gs.fuels:
                    if gsFuelRequired.fuelType == fuelType:
                        if gsFuelRequired.fuelPrice < lowerGasPrice:
                            lowerGasPrice = gsFuelRequired.fuelPrice
                            stationOfLowerlowerGasPriceObject = gs
                        break
                success = True
            if stationOfLowerlowerGasPriceObject is None:
                response = "\nNão há nenhum posto com este tipo de combustível."
            else:
                response = "\nResposta da busca: " + \
                    "\n - Tipo de combustível: " + fuelType + \
                    "\n - Raio de busca: " + str(searchRadius) + \
                    "\n - Coordenadas do centro: " + str((lat,long)) + \
                    "\n - Menor preco: " + str(lowerGasPrice) + \
                    "\n - Coordenadas do posto: " + str((stationOfLowerlowerGasPriceObject.lat, stationOfLowerlowerGasPriceObject.long))
        return (success, response)
def pointsDistance(xA, xB, yA, yB):
    """Euclidean distance between (xA, yA) and (xB, yB); inputs may be strings."""
    dx = float(xA) - float(xB)
    dy = float(yA) - float(yB)
    return sqrt(dx ** 2 + dy ** 2)
def initServer(port):
    """Initialize the UDP server and load persisted records into memory.

    Parameters
    ----------
    port : int
        The port the server listens on.
    """
    global serversocket
    # Bind to the local machine's hostname on the requested port.
    host = socket.gethostname()
    serversocket.bind((host, port))
    # Replay every stored record through the normal insertion path so the
    # in-memory station list mirrors the flat-file database.
    with open('../database/gas_stations.txt', 'r') as gasStations:
        for gasStation in gasStations:
            gasStationDetais = gasStation.replace('\n', '').split(' ')
            gasStationDetais.pop(1)  # drop the per-message id field
            insertIntoListOfGasStations(gasStationDetais)
    print("Servidor operante!\n")
def searchGasStation(lat, long):
    """Find a gas station in the database by its coordinates.

    Parameters
    ----------
    lat: double
        Station latitude.
    long: double
        Station longitude.

    Returns
    -------
    int or None
        Index of the station in ``listOfGasStations``, or None if absent.
    """
    for index, station in enumerate(listOfGasStations):
        if station.lat == lat and station.long == long:
            return index
    return None
def insertIntoListOfGasStations(gasStationDetais):
    """Record one fuel offering, creating the station entry if needed.

    Parameters
    ----------
    gasStationDetais : list
        Message fields: [type, fuelType, fuelPrice, lat, long].
    """
    # Reuse an existing station at the same coordinates, if any.
    existingIndex = searchGasStation(
        lat=gasStationDetais[3],
        long=gasStationDetais[4]
    )
    newFuel = Fuel(gasStationDetais[1], gasStationDetais[2])
    if existingIndex is None:
        # Unknown coordinates: register a brand-new station with this fuel.
        listOfGasStations.append(
            GasStation(
                [newFuel],
                lat=gasStationDetais[3],
                long=gasStationDetais[4]
            )
        )
    else:
        # Known station: just add the fuel to its catalogue.
        listOfGasStations[existingIndex].insertFuel(newFuel)
def checkInput(msg):
    """Validate a client message.

    Parameters
    ----------
    msg : list
        Client message split into fields.

    Returns
    -------
    bool
        True iff the message has exactly 5 fields and its type is
        'D' (data registration) or 'P' (price query).
    """
    return len(msg) == 5 and msg[0] in ('D', 'P')
def consoleWarning(msg, success, address):
    """Log one request's outcome to the console.

    Parameters
    ----------
    msg : list
        Client message fields (msg[0] is the message type).
    success : bool
        Whether the operation succeeded.
    address : tuple
        Client (host, port) pair.
    """
    status = "Sucesso" if success else "Fracasso"
    print(
        "Client: " + address[0] + ":" + str(address[1]) +
        "\nTipo de mensagem: " + msg[0] +
        "\nStatus: " + status + "\n"
    )
def main():
    """Entry point: bind the UDP server, then serve requests forever.

    Protocol: space-separated messages whose first field is
    'D' (register a fuel price) or 'P' (query the lowest price).
    """
    print("Bem-vindo ao servidor do Sistema de Preços!")
    port = int(input("Digite a porta a ser escutada pelo servidor para inicializá-lo: "))
    initServer(port)
    while True:
        global listOfGasStations
        global serversocket
        success = False
        msg, client = serversocket.recvfrom(1024)
        msg = msg.decode('utf-8').split(' ')
        if(checkInput(msg)):
            # Registration ('D'): store in memory and append to flat file.
            if msg[0] == 'D':
                print("Mensagem recebida!")
                insertIntoListOfGasStations(msg)
                GasStation.saveOnDatabase(msg)
                success = True
                consoleWarning(msg, success, client)
                serversocket.sendto("Cadastro realizado!".encode('utf-8'), client)
            # Price query ('P'): msg = [P, fuelType, radius, lat, long].
            if msg[0] == 'P':
                print("Mensagem recebida!")
                success, resp = GasStation.getLowerGasPrice(
                    msg[1], msg[2], msg[3], msg[4]
                )
                consoleWarning(msg, success, client)
                serversocket.sendto(resp.encode('utf-8'), client)
        else:
            # Malformed message: report failure to console and client.
            consoleWarning(msg, success, client)
            serversocket.sendto(
                "Server - Erro: Entrada de dados inválida!".encode("utf-8"),
                client
            )
    # Close the socket (NOTE: unreachable — the while-loop above never exits).
    serversocket.close()
if __name__ == "__main__":
main() | StarcoderdataPython |
6584299 | # Generated by Django 3.2.1 on 2021-05-05 15:31
from django.db import migrations, models
import django.utils.timezone
# NOTE: auto-generated Django migration (3.2.1, 2021-05-05). Edit with care —
# the recorded operations must stay consistent with the migration history.
class Migration(migrations.Migration):
    """Creates about_model, contactinfo_model and contacts_model tables and
    alters home_model.id to an explicit AutoField primary key."""

    dependencies = [
        ('PersonalApp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='about_model',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('about_image', models.ImageField(blank=True, upload_to='about/images')),
                ('about_me', models.TextField(blank=True)),
                ('image_1', models.ImageField(blank=True, upload_to='about/photo_gallery')),
                ('image_2', models.ImageField(blank=True, upload_to='about/photo_gallery')),
                ('image_3', models.ImageField(blank=True, upload_to='about/photo_gallery')),
                ('image_4', models.ImageField(blank=True, upload_to='about/photo_gallery')),
                ('image_5', models.ImageField(blank=True, upload_to='about/photo_gallery')),
                ('image_6', models.ImageField(blank=True, upload_to='about/photo_gallery')),
                ('image_7', models.ImageField(blank=True, upload_to='about/photo_gallery')),
                ('image_8', models.ImageField(blank=True, upload_to='about/photo_gallery')),
            ],
        ),
        migrations.CreateModel(
            name='contactinfo_model',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('adress', models.CharField(max_length=250)),
                ('email', models.CharField(max_length=255)),
                ('working_hour', models.CharField(max_length=255)),
                ('phone_nr', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='contacts_model',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=250)),
                ('email', models.CharField(max_length=255)),
                ('subject', models.CharField(max_length=255)),
                ('phone_nr', models.CharField(max_length=255)),
                ('message', models.TextField(blank=True)),
                ('contact_date', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
            ],
        ),
        migrations.AlterField(
            model_name='home_model',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
    ]
| StarcoderdataPython |
5094432 | """
Basic building blocks of nnpackage models. Contains various basic and specialized network layers, layers for
cutoff functions, as well as several auxiliary layers and functions.
"""
from nnpackage.nn.acsf import *
from nnpackage.nn.activations import *
from nnpackage.nn.base import *
from nnpackage.nn.blocks import *
from nnpackage.nn.cfconv import *
from nnpackage.nn.cutoff import *
from nnpackage.nn.initializers import *
from nnpackage.nn.neighbors import *
| StarcoderdataPython |
1621442 | <filename>challenges/array_binary_search/array_binary_search.py<gh_stars>0
def binarySearch(list, int):
    """Return the index of *int* in the ascending-sorted *list*, or -1.

    BUG FIX: despite its name, the previous implementation was an O(n)
    linear scan; this is the classic O(log n) iterative binary search.
    The input list must be sorted ascending (as in the demo call below).
    Parameter names are kept for backward compatibility even though they
    shadow the builtins.
    """
    low, high = 0, len(list) - 1
    while low <= high:
        mid = (low + high) // 2
        if list[mid] == int:
            return mid
        if list[mid] < int:
            low = mid + 1
        else:
            high = mid - 1
    return -1

print(binarySearch([4,8,15,16,23,42], 15))
| StarcoderdataPython |
1687341 | <filename>util/nputils.py<gh_stars>1-10
#!/usr/bin/env python3
import numpy as np
def nan_like(array):
    """Return an array with the shape/dtype of *array*, every element NaN."""
    out = np.empty_like(array)
    out[...] = np.nan
    return out
def no_op() -> None:
    """The no-op function: does nothing and returns None."""
    return None
if __name__ == "__main__":
no_op()
| StarcoderdataPython |
6545141 | # Import necessary packages
from image_text_model.im_text_rnn_model import oasis_evaluation
checkpoint_dir = 'image_text_model/deep_sentiment_model'
scores = oasis_evaluation(checkpoint_dir)
# Save output and parameters to text file in the localhost node, which is where the computation is performed.
#with open('/data/localhost/not-backed-up/ahu/jobname_' + str(slurm_id) + '_' + str(slurm_parameter) + '.txt', 'w') as text_file:
#text_file.write('Parameters: {0} Result: {1}\n'.format(parameter, output)) | StarcoderdataPython |
398295 | <reponame>ganler/LEMON
# -*-coding:UTF-8-*-
"""get prediction for each backend
"""
import sys
import os
import redis
import pickle
import argparse
import configparser
from scripts.tools.utils import DataUtils
from scripts.logger.lemon_logger import Logger
import warnings
main_logger = Logger()
def custom_objects():
    """Return the Keras ``custom_objects`` mapping used when loading mutated models."""

    def no_activation(x):
        # Identity activation.
        return x

    def leakyrelu(x):
        # Imported lazily inside the closure so the currently selected
        # Keras backend is the one used.
        import keras.backend as K
        return K.relu(x, alpha=0.01)

    return {'no_activation': no_activation, 'leakyrelu': leakyrelu}
def _get_prediction(bk, x, y, model_path, batch_size):
    """
    Get prediction of models on different backends.

    Loads the Keras model at ``model_path`` (with the LEMON custom objects),
    predicts on the first ``flags.test_size`` samples of ``x`` and stores the
    pickled result in redis under hash ``prediction_<model_name>``, field ``bk``.
    """
    # NOTE: test_y is sliced but never used here — labels are not needed for
    # prediction; kept for symmetry with the data-loading API.
    test_x, test_y = x[:flags.test_size], y[:flags.test_size]
    predict_model = keras.models.load_model(model_path, custom_objects=custom_objects())
    # predict_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    main_logger.info("INFO:load model and compile done!")
    res = predict_model.predict(test_x, batch_size=batch_size)
    main_logger.info("SUCCESS:Get prediction for {} successfully on {}!".format(mut_model_name, bk))
    """Store prediction result to redis"""
    redis_conn.hset("prediction_{}".format(mut_model_name), bk, pickle.dumps(res))
if __name__ == "__main__":
    """Parser of command args"""
    parse = argparse.ArgumentParser()
    parse.add_argument("--backend", type=str, help="name of backends")
    parse.add_argument("--exp", type=str, help="experiments identifiers")
    parse.add_argument("--test_size", type=int, help="amount of testing image")
    parse.add_argument("--model", type=str, help="path of the model to predict")
    parse.add_argument("--redis_db", type=int)
    parse.add_argument("--config_name", type=str)
    flags, unparsed = parse.parse_known_args(sys.argv[1:])

    """Load Configuration"""
    warnings.filterwarnings("ignore")
    lemon_cfg = configparser.ConfigParser()
    lemon_cfg.read(f"./config/{flags.config_name}")
    pool = redis.ConnectionPool(host=lemon_cfg['redis']['host'], port=lemon_cfg['redis']['port'], db=flags.redis_db)
    redis_conn = redis.Redis(connection_pool=pool)
    parameters = lemon_cfg['parameters']
    gpu_ids = parameters['gpu_ids']
    gpu_list = parameters['gpu_ids'].split(",")

    """Init cuda"""
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_ids
    warnings.filterwarnings("ignore")
    batch_size = 32

    """Switch backend"""
    # KERAS_BACKEND must be set before keras is imported, hence the
    # per-backend import blocks below instead of a single top-level import.
    bk_list = ['tensorflow', 'theano', 'cntk', 'mxnet']
    bk = flags.backend
    os.environ['KERAS_BACKEND'] = bk
    os.environ['PYTHONHASHSEED'] = '0'
    if bk == 'tensorflow':
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'  # only show warnings and errors
        import tensorflow as tf
        main_logger.info(tf.__version__)
        batch_size = 128
        import keras
    if bk == 'theano':
        # Map each visible GPU to its own theano context before import.
        if len(gpu_list) == 2:
            os.environ['THEANO_FLAGS'] = f"device=cuda,contexts=dev{gpu_list[0]}->cuda{gpu_list[0]};dev{gpu_list[1]}->cuda{gpu_list[1]}," \
                                         f"force_device=True,floatX=float32,lib.cnmem=1"
        else:
            os.environ['THEANO_FLAGS'] = f"device=cuda,contexts=dev{gpu_list[0]}->cuda{gpu_list[0]}," \
                                         f"force_device=True,floatX=float32,lib.cnmem=1"
        import theano as th
        import keras
        main_logger.info(th.__version__)
    if bk == "cntk":
        from cntk.device import try_set_default_device, gpu
        try_set_default_device(gpu(int(gpu_list[0])))
        import cntk as ck
        main_logger.info(ck.__version__)
        import keras
    if bk == "mxnet":
        import mxnet as mxnet
        main_logger.info(f"mxnet_version {mxnet.__version__}")
        import keras
        batch_size = 16  # mxnet backend uses a smaller batch to fit memory
    from keras import backend as K
    try:
        """Get model prediction"""
        main_logger.info("INFO:Using {} as backend for states extraction| {} is wanted".format(K.backend(), bk))
        x, y = DataUtils.get_data_by_exp(flags.exp)
        mut_model_name = os.path.split(flags.model)[-1]
        _get_prediction(bk=bk, x=x, y=y, model_path=flags.model, batch_size=batch_size)
    except Exception:
        # Any failure is reported on stderr and signalled to the parent
        # process via a non-zero exit code.
        import traceback
        traceback.print_exc()
        sys.exit(-1)
| StarcoderdataPython |
6523666 |
from keras.layers import Concatenate, Input, Lambda, UpSampling2D
from keras.models import Model
from utils.utils import compose
from nets.attention import cbam_block, eca_block, se_block
from nets.CSPdarknet53_tiny import (DarknetConv2D, DarknetConv2D_BN_Leaky,
darknet_body)
from nets.yolo_training import yolo_loss
attention = [se_block, cbam_block, eca_block]
#---------------------------------------------------#
#   Backbone feature layers -> final detection outputs
#---------------------------------------------------#
def yolo_body(input_shape, anchors_mask, num_classes, phi = 0):
    """Build the tiny-YOLO detection head on a CSPdarknet53-tiny backbone.

    phi selects an optional attention module applied to the backbone
    features and the upsampled branch: 0 = none, 1 = SE, 2 = CBAM, 3 = ECA
    (indexing into the module-level ``attention`` list).
    Returns a Model with two outputs (13x13 and 26x26 scales).
    """
    inputs = Input(input_shape)
    #---------------------------------------------------#
    #   Build the CSPdarknet53_tiny backbone
    #   feat1 shape: 26,26,256
    #   feat2 shape: 13,13,512
    #---------------------------------------------------#
    feat1, feat2 = darknet_body(inputs)
    if phi >= 1 and phi <= 3:
        feat1 = attention[phi - 1](feat1, name='feat1')
        feat2 = attention[phi - 1](feat2, name='feat2')

    # 13,13,512 -> 13,13,256
    P5 = DarknetConv2D_BN_Leaky(256, (1,1))(feat2)
    # 13,13,256 -> 13,13,512 -> 13,13,(anchors*(classes+5))
    P5_output = DarknetConv2D_BN_Leaky(512, (3,3))(P5)
    P5_output = DarknetConv2D(len(anchors_mask[0]) * (num_classes+5), (1,1))(P5_output)

    # 13,13,256 -> 13,13,128 -> 26,26,128
    P5_upsample = compose(DarknetConv2D_BN_Leaky(128, (1,1)), UpSampling2D(2))(P5)
    if phi >= 1 and phi <= 3:
        P5_upsample = attention[phi - 1](P5_upsample, name='P5_upsample')

    # 26,26,256 + 26,26,128 -> 26,26,384
    P4 = Concatenate()([P5_upsample, feat1])

    # 26,26,384 -> 26,26,256 -> 26,26,(anchors*(classes+5))
    P4_output = DarknetConv2D_BN_Leaky(256, (3,3))(P4)
    P4_output = DarknetConv2D(len(anchors_mask[1]) * (num_classes+5), (1,1))(P4_output)
    return Model(inputs, [P5_output, P4_output])
def get_train_model(model_body, input_shape, num_classes, anchors, anchors_mask, label_smoothing):
    """Wrap ``model_body`` with ground-truth inputs and a yolo_loss Lambda.

    Returns a Model whose inputs are [image, *y_true] and whose single
    output is the scalar training loss.
    """
    # One ground-truth tensor per output scale; grid size is the input size
    # divided by the scale's stride (32, 16, 8).
    y_true = [Input(shape = (input_shape[0] // {0:32, 1:16, 2:8}[l], input_shape[1] // {0:32, 1:16, 2:8}[l], \
        len(anchors_mask[l]), num_classes + 5)) for l in range(len(anchors_mask))]
    model_loss = Lambda(
        yolo_loss,
        output_shape = (1, ),
        name = 'yolo_loss',
        arguments = {'input_shape' : input_shape, 'anchors' : anchors, 'anchors_mask' : anchors_mask,
                     'num_classes' : num_classes, 'label_smoothing' : label_smoothing}
    )([*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)
    return model
| StarcoderdataPython |
9688121 | from typing import Any, List, Optional
class Node:
    """Encapsulate the tree of a Python (or JSON) data structure.

    Scalars become leaf nodes; dicts/lists/tuples/sets become internal
    nodes whose children are named with dotted (".key") or indexed ("[i]")
    path suffixes.
    """

    @staticmethod
    def is_scalar(value):
        """Return True iff 'value' should be represented by a leaf node."""
        return not isinstance(value, (dict, list, tuple, set))

    @classmethod
    def build(cls, obj, name="", parent=None, **kwargs):
        """Recursively wrap ``obj`` into a Node tree rooted at this node."""
        if cls.is_scalar(obj):
            return cls(name, type(obj), obj, parent=parent, **kwargs)
        else:
            # The children list is created first and shared with the new node
            # so each child can be built with ``parent=ret`` before being
            # appended to it.
            children = []
            ret = cls(
                name,
                type(obj),
                obj,
                parent=parent,
                _children=children,
                **kwargs,
            )
            if isinstance(obj, dict):
                children.extend(
                    cls.build(v, f"{name}.{k}", parent=ret, key=k)
                    for k, v in obj.items()
                )
            else:
                children.extend(
                    cls.build(v, f"{name}[{i}]", parent=ret)
                    for i, v in enumerate(obj)
                )
            return ret

    def __init__(self, name, kind, value, **kwargs):
        self.name: str = name
        self.kind: type = kind
        self.value: Any = value
        self.parent: "Optional[Node]" = None
        self._children: "Optional[List[Node]]" = None
        # Extra keyword arguments (parent, _children, key, ...) land directly
        # in the instance dict, deliberately overriding the defaults above.
        self.__dict__.update(kwargs)

    def __str__(self):
        # "<name>/<type>/<child count>"; "*" marks a leaf node.
        num_children = "*" if self.is_leaf else len(self._children)
        return f"{self.name}/{self.kind.__name__}/{num_children}"

    def __repr__(self):
        args = [f"{k}={v!r}" for k, v in self.__dict__.items()]
        return f"{self.__class__.__name__}({', '.join(args)})"

    def __eq__(self, other):
        # NOTE: asserts (rather than returning NotImplemented) that the other
        # operand is a Node — comparisons with other types are a bug here.
        assert isinstance(other, Node), repr(other)
        result = self.name == other.name and self.value == other.value
        if result:
            assert self.kind is other.kind, f"{self} != {other}"
        return result

    @property
    def is_leaf(self):
        """Return True iff this is a leaf node (i.e. cannot have any children).

        This is different from an empty container, i.e. an "internal" node
        whose list of children is empty."""
        return self._children is None

    @property
    def children(self):
        """Return this node's children.

        Return an empty list for leaf nodes, as a convenience for callers that
        typically iterated over this methods return value."""
        return [] if self._children is None else self._children

    @property
    def is_child(self):
        return self.parent is not None

    @property
    def is_first_child(self):
        return self.is_child and self is self.parent.children[0]

    @property
    def is_last_child(self):
        return self.is_child and self is self.parent.children[-1]

    @property
    def level(self):
        # Root is level 0; each generation adds one.
        return 0 if self.parent is None else (self.parent.level + 1)

    @property
    def has_key(self):
        # True only for dict values, which build() tags with ``key=...``.
        return hasattr(self, "key")

    def ancestors(self, include_self=False):
        """Yield transitive parents of this node."""
        if include_self:
            yield self
        if self.parent is not None:
            yield from self.parent.ancestors(include_self=True)

    def yield_node(node):
        # NOTE: intentionally not decorated — this plain function is used as
        # the default ``preorder`` visitor for dfwalk below.
        yield node

    def dfwalk(self, preorder=yield_node, postorder=None):
        """Depth-first walk, yields values yielded from visitor function."""
        if preorder is not None:
            yield from preorder(self)
        for child in self.children:
            yield from child.dfwalk(preorder, postorder)
        if postorder is not None:
            yield from postorder(self)
| StarcoderdataPython |
5044518 | import tkinter as tk
import time
BASE_BACKGROUND = '#292929'
HEADER_BASE_COLOR = '#F5F5F5'
HEADER_BASE_COLOR = '#F5F5F5'
HEADER_COLOR_PRIMARY = '#1c1c1c'
HEADER_COLOR_SECONDARY = '#8ec63e'
def Header_Menu_Animation(app):
    """Animate the header frame expanding from the top of *app*.

    The frame starts at 20% relative height and grows in 0.08 steps (one
    step every 40 ms) until it covers the window; the fully-expanded frame
    is returned.

    FIX: the original used ``while True`` with a ``return`` inside the loop
    followed by an unreachable ``break``; the loop condition now expresses
    the termination directly (same iteration sequence, same result).
    """
    Header_Frame = tk.Frame(app, background = HEADER_BASE_COLOR)
    Header_Frame.place(relx=0.0 , rely= 0.0 , relheight= 0.2 ,relwidth= 1 )
    n = 0.0
    while n < 1.0:
        n += 0.08
        time.sleep(0.04)
        Header_Frame.place(relx=0.0 , rely= 0.0 , relheight= n ,relwidth= 1 )
        Header_Frame.update()
    return Header_Frame
9732164 | import torch.nn.functional
import typing as _typing
import dgl
from dgl.nn.pytorch.conv import SAGEConv
from .. import base_encoder, encoder_registry
from ... import _utils
class _SAGE(torch.nn.Module):
    """GraphSAGE stack over DGL's SAGEConv.

    ``forward`` returns the list of per-layer node representations,
    including the (dropout-applied) input features as element 0.
    """

    def __init__(
        self, input_dimension: int,
        dimensions: _typing.Sequence[int],
        act: _typing.Optional[str],
        dropout: _typing.Optional[float],
        agg: str
    ):
        super(_SAGE, self).__init__()
        # DGL's SAGEConv supports exactly these aggregator types.
        if agg not in ("gcn", "pool", "mean", "lstm"):
            raise ValueError("Unsupported aggregator type")
        self.__convolution_layers: torch.nn.ModuleList = torch.nn.ModuleList()
        # Chain layer i's input to layer i-1's output dimension.
        for layer, _dimension in enumerate(dimensions):
            self.__convolution_layers.append(
                SAGEConv(
                    input_dimension if layer == 0 else dimensions[layer - 1],
                    _dimension, agg
                )
            )
        self._act: _typing.Optional[str] = act
        self._dropout: _typing.Optional[float] = dropout

    def forward(self, graph: dgl.DGLGraph, *args, **kwargs):
        """Run all SAGE layers on ``graph.ndata['feat']``; return every layer's output."""
        x: torch.Tensor = graph.ndata['feat']
        x = torch.nn.functional.dropout(x, self._dropout, self.training)
        results: _typing.MutableSequence[torch.Tensor] = [x]
        for _layer in range(len(self.__convolution_layers)):
            x = self.__convolution_layers[_layer](graph, x)
            if _layer < len(self.__convolution_layers) - 1:
                # Activation + dropout on every layer except the last.
                x = _utils.activation.activation_func(x, self._act)
                x = torch.nn.functional.dropout(x, self._dropout, self.training)
            results.append(x)
        return results
@encoder_registry.EncoderUniversalRegistry.register_encoder('sage')
@encoder_registry.EncoderUniversalRegistry.register_encoder('sage_encoder')
class SAGEEncoderMaintainer(base_encoder.AutoHomogeneousEncoderMaintainer):
    """Auto-encoder maintainer wrapping :class:`_SAGE` with an HPO search space."""

    def __init__(
        self,
        input_dimension: _typing.Optional[int] = ...,
        final_dimension: _typing.Optional[int] = ...,
        device: _typing.Union[torch.device, str, int, None] = ...,
        *args, **kwargs
    ):
        super(SAGEEncoderMaintainer, self).__init__(
            input_dimension, final_dimension, device, *args, **kwargs
        )
        self.hyper_parameter_space = [
            {
                "parameterName": "num_layers",
                "type": "DISCRETE",
                "feasiblePoints": "2,3,4",
            },
            {
                "parameterName": "hidden",
                "type": "NUMERICAL_LIST",
                "numericalType": "INTEGER",
                "length": 3,
                "minValue": [8, 8, 8],
                "maxValue": [128, 128, 128],
                "scalingType": "LOG",
                # Keep only num_layers - 1 hidden sizes (the final layer's
                # size comes from final_dimension in _initialize).
                "cutPara": ("num_layers",),
                "cutFunc": lambda x: x[0] - 1,
            },
            {
                "parameterName": "dropout",
                "type": "DOUBLE",
                "maxValue": 0.8,
                "minValue": 0.2,
                "scalingType": "LINEAR",
            },
            {
                "parameterName": "act",
                "type": "CATEGORICAL",
                "feasiblePoints": ["leaky_relu", "relu", "elu", "tanh"],
            },
            {
                # BUG FIX: the previous feasible points ("add"/"max") are
                # rejected by _SAGE, which only accepts the DGL SAGEConv
                # aggregators "gcn"/"pool"/"mean"/"lstm" — sampling them made
                # _initialize crash with ValueError("Unsupported aggregator
                # type").
                "parameterName": "agg",
                "type": "CATEGORICAL",
                "feasiblePoints": ["mean", "gcn", "pool", "lstm"],
            },
        ]
        self.hyper_parameters = {
            "num_layers": 3,
            "hidden": [64, 32],
            "dropout": 0.5,
            "act": "relu",
            "agg": "mean",
        }

    def _initialize(self) -> _typing.Optional[bool]:
        """Build the underlying _SAGE module from the current hyper-parameters."""
        dimensions = list(self.hyper_parameters["hidden"])
        # Append the requested output dimension when one was supplied.
        if (
            self.final_dimension not in (Ellipsis, None) and
            isinstance(self.final_dimension, int) and
            self.final_dimension > 0
        ):
            dimensions.append(self.final_dimension)
        self._encoder = _SAGE(
            self.input_dimension, dimensions,
            self.hyper_parameters["act"],
            self.hyper_parameters["dropout"],
            self.hyper_parameters["agg"]
        ).to(self.device)
        return True
| StarcoderdataPython |
3267807 | #Desenvolva um programa que leia o primeiro termo e a razão de uma PA.
# No final, mostre os 10 primeiros termos dessa progressão.
# Read the first term and the common difference of an arithmetic
# progression (PA) and print its first 10 terms.
cores = {'limpa': '\033[m',
         'bverde': '\033[1;32m',
         'roxo': '\033[35m',
         'bvermelho': '\033[1;31m',
         'pretoebranco': '\033[7:30m'}
print('-=-'*8)
print(cores['pretoebranco']+'_____INICIO_____'+cores['limpa'])
print('-=-'*8)
print('{:=^40}'.format(' PROGRESSAO ARITIMETICA '))  # center the title in '='
pri = int(input('Primeiro termo: '))
raz = int(input('Razão: '))
# BUG FIX: the original iterated range(pri, pri + 9*raz, raz), which prints
# only 9 terms (the exclusive stop drops the 10th) and breaks for raz <= 0.
# Iterating over the term index always yields exactly 10 terms.
for i in range(10):
    print('{}'.format(pri + i * raz), end=' -> ')
print('ACABOU')
print('-=-'*8)
print(cores['pretoebranco']+'______FIM_______'+cores['limpa'])
print(cores['pretoebranco']+'_Code by Rafael_'+cores['limpa'])
print('-=-'*8)
1613539 | import requests
from utility import *
from sql.database import DBOperations
from geolocation.iplocation import IPLocation
import json
from flask_caching import Cache
class Report:
    """
    This is the class where all weather report is created for a user request.
    Attributes:
        request_ip: IP address the client is using to access the api.
        city: city derived from the ip geolocation api
        state: state derived from the ip geolocation api
    """
    def __init__(self, request_ip, city, state):
        """
        Constructor for the Report class. Initializes the attributes specified for the class.
        Parameters:
            request_ip: IP address the client is using to access the api.
            city: city derived from the ip geolocation api
            state: state derived from the ip geolocation api
        """
        self.request_ip = request_ip
        self.city = city
        self.state = state
    """
    For processing third party api data and creating a response object
    """
    def create_response_from_api(self, data, location_id):
        """Parse the third-party API JSON into the response dict and persist it."""
        response = {}
        json_data = json.loads(data)
        # 'today' section -> current conditions, stored under key 'day0'.
        response['city'] = json_data['today']['city']
        response['state'] = json_data['today']['state']
        response['day0'] = {}
        response['day0']['description'] = json_data['today']['description']
        response['day0']['high_temp'] = float(json_data['today']['highTemperature'])
        response['day0']['low_temp'] = float(json_data['today']['lowTemperature'])
        response['day0']['humidity'] = int(json_data['today']['humidity'])
        counter = 1
        # 'daily' list indices 1..3 -> forecast keys 'day1'..'day3'.
        while(counter <= 3):
            response['day'+str(counter)] = {}
            response['day'+str(counter)]['description'] = json_data['daily'][counter]['description']
            response['day'+str(counter)]['high_temp'] = float(json_data['daily'][counter]['highTemperature'])
            response['day'+str(counter)]['low_temp'] = float(json_data['daily'][counter]['lowTemperature'])
            response['day'+str(counter)]['humidity'] = int(json_data['daily'][counter]['humidity'])
            counter = counter + 1
        # Persist the fresh data before returning it to the caller.
        success = self.insert_or_update_db(response, location_id)
        if(success):
            return response
        else:
            return None
    """
    For inserting new location into the database or updating the same depending on
    time period. If time of new call exceeds 24 hrs, data needs to be updated
    """
    def insert_or_update_db(self, response, location_id):
        """Insert or refresh the location's weather rows; return success flag."""
        self.db = DBOperations()
        insert_or_update_db_flag = self.db.insert_or_update_db(response, location_id)
        return insert_or_update_db_flag
    """
    For checking if a location exists in the database. Also checks if data for
    that location is expired
    """
    def check_location(self, city, state):
        """Return (exists_flag, location_id, exception_flag) for (city, state)."""
        self.db = DBOperations()
        check_location_flag, location_id, exception_flag = self.db.check_location(city, state)
        return check_location_flag, location_id, exception_flag
    """
    For weather data from database for a location
    """
    def get_weather_data_from_db(self, location_id):
        """Return the stored weather rows for ``location_id``, or None on DB error."""
        self.db = DBOperations()
        location_data, exception_flag = self.db.get_location_data(location_id)
        if(not exception_flag):
            return location_data
        else:
            return None
    """
    For creating a response object based on data from database
    """
    def create_response_from_db(self, db_data):
        """Convert DB rows into the response dict (one 'dayN' key per row)."""
        response = {}
        area_flag = True
        for row in db_data:
            # City/state come from the first row only; all rows share them.
            if(area_flag == True):
                response['city'] = row['city']
                response['state'] = row['state']
                area_flag = False
            response['day'+str(row['day'])] = {}
            response['day'+str(row['day'])]['description'] = row['description']
            response['day'+str(row['day'])]['high_temp'] = float(round(row['high_temp'], 2))
            response['day'+str(row['day'])]['low_temp'] = float(round(row['low_temp'], 2))
            response['day'+str(row['day'])]['humidity'] = int(row['humidity'])
        return response
    """
    For inserting into logs
    """
    def log_insert(self, request_ip, city, state):
        """Record the request in the log table; return a status message."""
        self.db = DBOperations()
        log_insert_check, log_exception_flag =self.db.insert_into_logs(request_ip, city, state)
        if(not log_exception_flag):
            if(not log_insert_check):
                return "Unable to insert log request"
            else:
                return "Inserted"
        else:
            return "DB connection issue. Unable to insert log request"
    """
    For starting the report process. It goes from checking if location exists to
    fetching data from 3rd party api.
    """
    def process(self):
        """Produce the weather report dict for this request (or an error dict)."""
        # Check if location exists in DB (based on timestamp)
        check_location_flag, location_id, exception_flag = self.check_location(self.city, self.state)
        weather_report = None
        if(not exception_flag):
            if(check_location_flag):
                # Fresh data exists: serve straight from the database.
                location_data = self.get_weather_data_from_db(location_id)
                if(location_data):
                    weather_report = self.create_response_from_db(location_data)
                else:
                    weather_report = {"error": "Unable to find this location"}
            else:
                # Location missing or stale: refetch from the third-party API,
                # forwarding the client IP so the API geolocates correctly.
                response = requests.get(url = API_ENDPOINT, headers = {"x-forwarded-for": self.request_ip})
                if(response):
                    weather_report = self.create_response_from_api(response.text, location_id)
                    if(weather_report is None):
                        weather_report = {"error": "Unable to update or insert new data"}
                else:
                    weather_report = {"error": "Unable to fetch weather data from API"}
        else:
            weather_report = {"error": "DB Connection issue. Try again later"}
        # Logging failures override the report with an error message.
        log_insert_message = self.log_insert(self.request_ip, self.city, self.state)
        if(log_insert_message != "Inserted"):
            weather_report = {"error": log_insert_message}
        return weather_report
1892054 | """
This test will initialize the display using displayio
and draw a solid red background
"""
import board
import displayio
from adafruit_st7735r import ST7735R
spi = board.SPI()
tft_cs = board.D5  # chip-select pin
tft_dc = board.D6  # data/command pin
displayio.release_displays()
display_bus = displayio.FourWire(spi, command=tft_dc, chip_select=tft_cs, reset=board.D9)
display = ST7735R(display_bus, width=128, height=160, bgr=True)

# Make the display context
splash = displayio.Group(max_size=10)
display.show(splash)

# One-color bitmap covering the whole screen, palette entry 0 = red.
color_bitmap = displayio.Bitmap(128, 160, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0xFF0000

bg_sprite = displayio.TileGrid(color_bitmap,
                               pixel_shader=color_palette,
                               x=0, y=0)
splash.append(bg_sprite)

# Idle forever so the display keeps refreshing the splash group.
while True:
    pass
| StarcoderdataPython |
12826307 | <reponame>sethmccauley/cohort4
def divide(divident, divisor):
    """Return the true-division quotient divident / divisor.

    Raises ZeroDivisionError when divisor is zero, like the ``/`` operator.
    (Parameter spelling kept for backward compatibility with keyword callers.)
    """
    quotient = divident / divisor
    return quotient
4952454 | import numpy as np
a = np.array([[1,1],[1.5,4.0]])  # coefficient matrix of the system a @ x = b
b = np.array([2200,5050])        # right-hand side vector
x = np.linalg.inv(a).dot(b)  # solve via explicit inverse (works, but numerically weaker)
print(x)
p = np.linalg.solve(a,b)  # preferred: direct solver, no explicit inverse
print(p)
1677384 | <filename>tensorflow/python/autograph/pyct/cfg.py<gh_stars>1-10
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow graph (CFG) structure for Python AST representation.
The CFG is a digraph with edges representing valid control flow. Each
node is associated with exactly one AST node, but not all AST nodes may have
a corresponding CFG counterpart.
Once built, the CFG itself is immutable, but the values it holds need not be;
they are usually annotated with information extracted by walking the graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from enum import Enum
# pylint:disable=g-bad-import-order
import gast
# pylint:enable=g-bad-import-order
from tensorflow.python.autograph.pyct import compiler
class Node(object):
  """A node in the CFG.

  Although new instances of this class are mutable, the objects that a user
  finds in the CFG are typically not.

  The nodes represent edges in the CFG graph, and maintain pointers to allow
  efficient walking in both forward and reverse order. The following property
  holds for all nodes: "child in node.next" iff "node in child.prev".

  Attributes:
    next: FrozenSet[Node, ...], the nodes that follow this node, in control
      flow order
    prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse
      control flow order
    ast_node: ast.AST, the AST node corresponding to this CFG node
  """

  def __init__(self, next_, prev, ast_node):
    self.next = next_
    self.prev = prev
    self.ast_node = ast_node

  def freeze(self):
    """Make the edge sets immutable once graph construction is complete."""
    self.next = frozenset(self.next)
    # Assumption: All CFG nodes have identical life spans, because the graph
    # owns them. Nodes should never be used outside the context of an existing
    # graph.
    self.prev = weakref.WeakSet(self.prev)

  def __repr__(self):
    if isinstance(self.ast_node, gast.FunctionDef):
      return 'def %s' % self.ast_node.name
    elif isinstance(self.ast_node, gast.withitem):
      # withitem has no standalone source form; render its context expression.
      return compiler.ast_to_source(self.ast_node.context_expr).strip()
    return compiler.ast_to_source(self.ast_node).strip()
class Graph(
    collections.namedtuple(
        'Graph',
        ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
  """A Control Flow Graph.

  The graph is immutable once constructed. Its `index` maps each AST node to
  the CFG node built for it, and can be enumerated in top-down, depth-first
  order. Forward and reverse walks are both supported because nodes keep
  double parent-child links.

  Note: the error nodes are not wired to their corresponding finally guards,
  because these are shared, and wiring them would create a reverse path from
  normal control flow into the error nodes, which we want to avoid.

  The graph also maintains edges corresponding to higher level statements
  like for-else loops. A node is considered successor of a statement if there
  is an edge from a node that is lexically a child of that statement to a node
  that is not. Statement predecessors are analogously defined.

  Attributes:
    entry: Node, the entry node
    exit: FrozenSet[Node, ...], the exit nodes
    error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised
      error (errors propagated from function calls are not accounted)
    index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG
      node
    stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
      nodes to their predecessor CFG nodes
    stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
      nodes to their successor CFG nodes
  """

  def __repr__(self):
    # Render the graph in Graphviz DOT syntax: first all node labels, then
    # all directed edges, keyed by object identity.
    lines = ['digraph CFG {']
    for node in self.index.values():
      lines.append('  %s [label="%s"];' % (id(node), node))
    for node in self.index.values():
      for successor in node.next:
        lines.append('  %s -> %s;' % (id(node), id(successor)))
    return '\n'.join(lines) + '\n}'
class _WalkMode(Enum):
FORWARD = 1
REVERSE = 2
# TODO(mdan): Rename to DataFlowAnalyzer.
# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
class GraphVisitor(object):
"""Base class for a CFG visitors.
This implementation is not thread safe.
The visitor has some facilities to simplify dataflow analyses. In particular,
it allows revisiting the nodes at the decision of the subclass. This can be
used to visit the graph until the state reaches a fixed point.
For more details on dataflow analysis, see
https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf
Note: the literature generally suggests visiting successor nodes only when the
state of the current node changed, regardless of whether that successor has
ever been visited. This implementation visits every successor at least once.
Attributes:
graph: Graph
in_: Dict[Node, Any], stores node-keyed state during a visit
out: Dict[Node, Any], stores node-keyed state during a visit
"""
def __init__(self, graph):
self.graph = graph
self.reset()
def init_state(self, node):
"""State initialization function. Optional to overload.
An in/out state slot will be created for each node in the graph. Subclasses
must overload this to control what that is initialized to.
Args:
node: Node
"""
raise NotImplementedError('Subclasses must implement this.')
# TODO(mdan): Rename to flow?
def visit_node(self, node):
"""Visitor function.
Args:
node: Node
Returns:
bool, whether the node should be revisited; subclasses can visit every
reachable node exactly once by always returning False
"""
raise NotImplementedError('Subclasses must implement this.')
def reset(self):
self.in_ = {
node: self.init_state(node) for node in self.graph.index.values()
}
self.out = {
node: self.init_state(node) for node in self.graph.index.values()
}
def _visit_internal(self, mode):
"""Visits the CFG, depth-first."""
assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
if mode == _WalkMode.FORWARD:
open_ = [self.graph.entry]
elif mode == _WalkMode.REVERSE:
open_ = list(self.graph.exit)
closed = set()
while open_:
node = open_.pop(0)
closed.add(node)
should_revisit = self.visit_node(node)
if mode == _WalkMode.FORWARD:
children = node.next
elif mode == _WalkMode.REVERSE:
children = node.prev
for next_ in children:
if should_revisit or next_ not in closed:
open_.append(next_)
def visit_forward(self):
self._visit_internal(_WalkMode.FORWARD)
def visit_reverse(self):
self._visit_internal(_WalkMode.REVERSE)
class GraphBuilder(object):
  """Builder that constructs a CFG from a given AST.

  This GraphBuilder facilitates constructing the DAG that forms the CFG when
  nodes
  are supplied in lexical order (i.e., top-down, depth first). Under these
  conditions, it supports building patterns found in typical structured
  programs.

  This builder ignores the flow generated by exceptions, which are assumed to
  always be catastrophic and present purely for diagnostic purposes (e.g. to
  print debug information). Statements like raise and try/catch sections are
  allowed and will generate control flow edges, but ordinary statements are
  assumed not to raise exceptions.

  Finally sections are also correctly interleaved between break/continue/return
  nodes and their subsequent statements.

  Important concepts:
    * nodes - nodes refer to CFG nodes; AST nodes are qualified explicitly
    * leaf set - since the graph is constructed gradually, a leaf set maintains
      the CFG nodes that will precede the node that the builder expects to
      receive next; when an ordinary node is added, it is connected to the
      existing leaves and it in turn becomes the new leaf
    * jump nodes - nodes that should generate edges other than what
      ordinary nodes would; these correspond to break, continue and return
      statements
    * sections - logical delimiters for subgraphs that require special
      edges; there are various types of nodes, each admitting various
      types of jump nodes; sections are identified by their corresponding AST
      node
  """

  # TODO(mdan): Perhaps detail this in a markdown doc.
  # TODO(mdan): Add exception support.

  def __init__(self, parent_ast_node):
    self.reset()
    self.parent = parent_ast_node

  def reset(self):
    """Resets the state of this factory."""
    self.head = None
    self.errors = set()
    self.node_index = {}

    # TODO(mdan): Too many primitives. Use classes.
    self.leaves = set()

    # Note: This mechanism requires that nodes are added in lexical order (top
    # to bottom, depth first).
    self.active_stmts = set()
    self.owners = {}  # type: Dict[Node, FrozenSet[Any]]
    self.forward_edges = set()  # type: Tuple[Node, Node] # (from, to)

    self.finally_sections = {}
    # Dict values represent (entry, exits)
    self.finally_section_subgraphs = {
    }  # type: Dict[ast.AST, Tuple[Node, Set[Node]]]
    # Whether the guard section can be reached from the statement that precedes
    # it.
    self.finally_section_has_direct_flow = {}
    # Finally sections that await their first node.
    self.pending_finally_sections = set()

    # Exit jumps keyed by the section they affect.
    self.exits = {}

    # The entry of loop sections, keyed by the section.
    self.section_entry = {}
    # Continue jumps keyed by the section they affect.
    self.continues = {}

    # The entry of conditional sections, keyed by the section.
    self.cond_entry = {}
    # Lists of leaf nodes corresponding to each branch in the section.
    self.cond_leaves = {}

  def _connect_nodes(self, first, second):
    """Connects nodes to signify that control flows from first to second.

    Args:
      first: Union[Set[Node, ...], Node]
      second: Node
    """
    if isinstance(first, Node):
      first.next.add(second)
      second.prev.add(first)
      self.forward_edges.add((first, second))
    else:
      for node in first:
        self._connect_nodes(node, second)

  def _add_new_node(self, ast_node):
    """Grows the graph by adding a CFG node following the current leaves."""
    # Bug fix: this previously read `ast_node is self.node_index`, comparing
    # the AST node against the index dict itself. That can never be true, so
    # the duplicate-add check was effectively disabled; membership in the
    # index is the intended test.
    if ast_node in self.node_index:
      raise ValueError('%s added twice' % ast_node)
    # Assumption: All CFG nodes have identical life spans, because the graph
    # owns them. Nodes should never be used outside the context of an existing
    # graph.
    node = Node(next_=set(), prev=weakref.WeakSet(), ast_node=ast_node)
    self.node_index[ast_node] = node
    self.owners[node] = frozenset(self.active_stmts)

    if self.head is None:
      self.head = node

    for leaf in self.leaves:
      self._connect_nodes(leaf, node)

    # If any finally section awaits its first node, populate it.
    for section_id in self.pending_finally_sections:
      self.finally_section_subgraphs[section_id][0] = node
    self.pending_finally_sections = set()

    return node

  def begin_statement(self, stmt):
    """Marks the beginning of a statement.

    Args:
      stmt: Hashable, a key by which the statement can be identified in
        the CFG's stmt_prev and stmt_next attributes
    """
    self.active_stmts.add(stmt)

  def end_statement(self, stmt):
    """Marks the end of a statement.

    Args:
      stmt: Hashable, a key by which the statement can be identified in
        the CFG's stmt_prev and stmt_next attributes; must match a key
        previously passed to begin_statement.
    """
    self.active_stmts.remove(stmt)

  def add_ordinary_node(self, ast_node):
    """Grows the graph by adding an ordinary CFG node.

    Ordinary nodes are followed by the next node, in lexical order, that is,
    they become the new leaf set.

    Args:
      ast_node: ast.AST

    Returns:
      Node
    """
    node = self._add_new_node(ast_node)
    self.leaves = set((node,))
    return node

  def _add_jump_node(self, ast_node, guards):
    """Grows the graph by adding a jump node.

    Jump nodes are added to the current leaf set, and the leaf set becomes
    empty. If the jump node is the last in a cond section, then it may be added
    back to the leaf set by a separate mechanism.

    Args:
      ast_node: ast.AST
      guards: Tuple[ast.AST, ...], the finally sections active for this node

    Returns:
      Node
    """
    node = self._add_new_node(ast_node)
    self.leaves = set()
    # The guards themselves may not yet be complete, and will be wired later.
    self.finally_sections[node] = guards
    return node

  def _connect_jump_to_finally_sections(self, node):
    """Connects a jump node to the finally sections protecting it."""
    cursor = set((node,))
    for guard_section_id in self.finally_sections[node]:
      guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id]
      self._connect_nodes(cursor, guard_begin)
      cursor = guard_ends
    del self.finally_sections[node]
    # TODO(mdan): Should garbage-collect finally_section_subgraphs.
    return cursor

  def add_exit_node(self, ast_node, section_id, guards):
    """Grows the graph by adding an exit node.

    This node becomes an exit for the current section.

    Args:
      ast_node: ast.AST
      section_id: Hashable, the node for which ast_node should be considered
        to be an exit node
      guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
    """
    node = self._add_jump_node(ast_node, guards)
    self.exits[section_id].add(node)

  def add_continue_node(self, ast_node, section_id, guards):
    """Grows the graph by adding a reentry node.

    This node causes control flow to go back to the loop section's entry.

    Args:
      ast_node: ast.AST
      section_id: Hashable, the node for which ast_node should be considered
        to be an exit node
      guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
    """
    node = self._add_jump_node(ast_node, guards)
    self.continues[section_id].add(node)

  def add_error_node(self, ast_node, guards):
    """Grows the graph by adding an error node.

    This node becomes an exit for the entire graph.

    Args:
      ast_node: ast.AST
      guards: Tuple[ast.AST, ...], the finally sections that guard ast_node
    """
    node = self._add_jump_node(ast_node, guards)
    self.errors.add(node)
    self.leaves = set()

  def enter_section(self, section_id):
    """Enters a regular section.

    Regular sections admit exit jumps, which end the section.

    Args:
      section_id: Hashable, the same node that will be used in calls to the
        ast_node arg passed to add_exit_node
    """
    assert section_id not in self.exits
    self.exits[section_id] = set()

  def exit_section(self, section_id):
    """Exits a regular section."""

    # Exits are jump nodes, which may be protected.
    for exit_ in self.exits[section_id]:
      self.leaves |= self._connect_jump_to_finally_sections(exit_)

    del self.exits[section_id]

  def enter_loop_section(self, section_id, entry_node):
    """Enters a loop section.

    Loop sections define an entry node. The end of the section always flows back
    to the entry node. These admit continue jump nodes which also flow to the
    entry node.

    Args:
      section_id: Hashable, the same node that will be used in calls to the
        ast_node arg passed to add_continue_node
      entry_node: ast.AST, the entry node into the loop (e.g. the test node
        for while loops)
    """
    assert section_id not in self.section_entry
    assert section_id not in self.continues
    self.continues[section_id] = set()
    node = self.add_ordinary_node(entry_node)
    self.section_entry[section_id] = node

  def exit_loop_section(self, section_id):
    """Exits a loop section."""
    self._connect_nodes(self.leaves, self.section_entry[section_id])

    # continues are jump nodes, which may be protected.
    for reentry in self.continues[section_id]:
      guard_ends = self._connect_jump_to_finally_sections(reentry)
      self._connect_nodes(guard_ends, self.section_entry[section_id])

    # Loop nodes always loop back.
    self.leaves = set((self.section_entry[section_id],))

    del self.continues[section_id]
    del self.section_entry[section_id]

  def enter_cond_section(self, section_id):
    """Enters a conditional section.

    Conditional sections define an entry node, and one or more branches.

    Args:
      section_id: Hashable, the same node that will be used in calls to the
        section_id arg passed to new_cond_branch
    """
    assert section_id not in self.cond_entry
    assert section_id not in self.cond_leaves
    self.cond_leaves[section_id] = []

  def new_cond_branch(self, section_id):
    """Begins a new branch in a cond section."""
    assert section_id in self.cond_leaves

    if section_id in self.cond_entry:
      # Subsequent splits move back to the split point, and memorize the
      # current leaves.
      self.cond_leaves[section_id].append(self.leaves)
      self.leaves = self.cond_entry[section_id]
    else:
      # If this is the first time we split a section, just remember the split
      # point.
      self.cond_entry[section_id] = self.leaves

  def exit_cond_section(self, section_id):
    """Exits a conditional section."""
    for split in self.cond_leaves[section_id]:
      self.leaves |= split
    del self.cond_entry[section_id]
    del self.cond_leaves[section_id]

  def enter_finally_section(self, section_id):
    """Enters a finally section."""
    # TODO(mdan): This, not the caller, should track the active sections.
    self.finally_section_subgraphs[section_id] = [None, None]
    if self.leaves:
      self.finally_section_has_direct_flow[section_id] = True
    else:
      self.finally_section_has_direct_flow[section_id] = False
    self.pending_finally_sections.add(section_id)

  def exit_finally_section(self, section_id):
    """Exits a finally section."""
    assert section_id not in self.pending_finally_sections, 'Empty finally?'
    self.finally_section_subgraphs[section_id][1] = self.leaves
    # If the guard can only be reached by a jump, then it will not flow
    # into the statement that follows it.
    if not self.finally_section_has_direct_flow[section_id]:
      self.leaves = set()
    del self.finally_section_has_direct_flow[section_id]

  def build(self):
    """Returns the CFG accumulated so far and resets the builder.

    Returns:
      Graph
    """
    # Freeze the nodes.
    for node in self.node_index.values():
      node.freeze()

    # Build the statement edges.
    stmt_next = {}
    stmt_prev = {}
    for node, _ in self.forward_edges:
      for stmt in self.owners[node]:
        if stmt not in stmt_next:
          stmt_next[stmt] = set()
        if stmt not in stmt_prev:
          stmt_prev[stmt] = set()
    for first, second in self.forward_edges:
      stmts_exited = self.owners[first] - self.owners[second]
      for stmt in stmts_exited:
        stmt_next[stmt].add(second)
      stmts_entered = self.owners[second] - self.owners[first]
      for stmt in stmts_entered:
        stmt_prev[stmt].add(first)
    for stmt in stmt_next:
      stmt_next[stmt] = frozenset(stmt_next[stmt])
    for stmt in stmt_prev:
      stmt_prev[stmt] = frozenset(stmt_prev[stmt])

    # Construct the final graph object.
    result = Graph(
        entry=self.head,
        exit=self.leaves,
        error=self.errors,
        index=self.node_index,
        stmt_prev=stmt_prev,
        stmt_next=stmt_next)

    # Reset the state.
    self.reset()

    return result
class AstToCfg(gast.NodeVisitor):
  """Converts an AST to CFGs.

  A separate CFG will be constructed for each function.
  """

  def __init__(self):
    super(AstToCfg, self).__init__()

    # Builders for enclosing functions; the innermost one is self.builder.
    self.builder_stack = []
    self.builder = None
    # Maps each FunctionDef AST node to the CFG built for its body.
    self.cfgs = {}

    # Stack of AST nodes (e.g. FunctionDef, While, For, Try) that lexically
    # enclose the node currently being visited.
    self.lexical_scopes = []

  def _enter_lexical_scope(self, node):
    """Pushes node onto the lexical scope stack."""
    self.lexical_scopes.append(node)

  def _exit_lexical_scope(self, node):
    """Pops the lexical scope stack; node must be the innermost scope."""
    leaving_node = self.lexical_scopes.pop()
    assert node == leaving_node

  def _get_enclosing_scopes(self, include, stop_at):
    """Collects enclosing scopes of the given types.

    Walks the lexical scope stack from the innermost scope outward, gathering
    scopes that are instances of `include`, and stopping at the first scope
    that is an instance of `stop_at`.

    Args:
      include: Tuple[type, ...], scope node types to collect along the way
      stop_at: Tuple[type, ...], scope node types that terminate the walk

    Returns:
      Tuple[Optional[ast.AST], List[ast.AST]]: the scope at which the walk
      stopped (None if no `stop_at` scope encloses the current node), and the
      collected `include` scopes, innermost first.
    """
    included = []
    for node in reversed(self.lexical_scopes):
      if isinstance(node, include):
        included.append(node)
      if isinstance(node, stop_at):
        return node, included
    return None, included

  def _process_basic_statement(self, node):
    """Adds an ordinary (straight-line) CFG node for the given statement."""
    self.generic_visit(node)
    self.builder.add_ordinary_node(node)

  def _process_exit_statement(self, node, *exits_nodes_of_type):
    """Adds an exit node (e.g. return, break) for the innermost matching scope.

    The enclosing Try scopes between the statement and its target scope become
    the finally guards of the jump.
    """
    # Note: this is safe because we process functions separately.
    try_node, guards = self._get_enclosing_scopes(
        include=(gast.Try,),
        stop_at=tuple(exits_nodes_of_type),
    )
    if try_node is None:
      raise ValueError(
          '%s that is not enclosed by any of %s' % (node, exits_nodes_of_type))
    # Note: despite the name, try_node here is the scope being exited (e.g. a
    # FunctionDef or loop), as returned by the stop_at walk above.
    self.builder.add_exit_node(node, try_node, guards)

  def _process_continue_statement(self, node, *loops_to_nodes_of_type):
    """Adds a continue node targeting the innermost matching loop scope."""
    # Note: this is safe because we process functions separately.
    try_node, guards = self._get_enclosing_scopes(
        include=(gast.Try,),
        stop_at=tuple(loops_to_nodes_of_type),
    )
    if try_node is None:
      raise ValueError('%s that is not enclosed by any of %s' %
                       (node, loops_to_nodes_of_type))
    self.builder.add_continue_node(node, try_node, guards)

  def visit_FunctionDef(self, node):
    # We also keep the FunctionDef node in the CFG. This allows us to determine
    # things like reaching definitions via closure. Note that the function body
    # will be stored in a separate graph, because function definitions are not
    # the same as function calls.
    if self.builder is not None:
      self.builder.add_ordinary_node(node)

    # Each function gets its own builder; the enclosing one is restored after
    # the body has been processed.
    self.builder_stack.append(self.builder)
    self.builder = GraphBuilder(node)

    self._enter_lexical_scope(node)
    self.builder.enter_section(node)

    self._process_basic_statement(node.args)
    for stmt in node.body:
      self.visit(stmt)

    self.builder.exit_section(node)
    self._exit_lexical_scope(node)

    self.cfgs[node] = self.builder.build()

    self.builder = self.builder_stack.pop()

  def visit_Lambda(self, node):
    # TODO(mdan): Treat like FunctionDef? That would be a separate CFG.
    raise NotImplementedError()

  def visit_Return(self, node):
    # Return exits the innermost enclosing function.
    self._process_exit_statement(node, gast.FunctionDef)

  # The following statements have straight-line control flow and map to a
  # single ordinary CFG node each.

  def visit_Expr(self, node):
    self._process_basic_statement(node)

  def visit_Assign(self, node):
    self._process_basic_statement(node)

  def visit_AnnAssign(self, node):
    self._process_basic_statement(node)

  def visit_AugAssign(self, node):
    self._process_basic_statement(node)

  def visit_Print(self, node):
    self._process_basic_statement(node)

  def visit_Raise(self, node):
    # Raise creates an error node, guarded by any enclosing finally sections
    # up to the innermost function.
    try_node, guards = self._get_enclosing_scopes(
        include=(gast.Try,),
        stop_at=(gast.FunctionDef,),
    )
    if try_node is None:
      raise ValueError('%s that is not enclosed by any FunctionDef' % node)
    self.builder.add_error_node(node, guards)

  def visit_Assert(self, node):
    # Ignoring the effect of exceptions.
    self._process_basic_statement(node)

  def visit_Delete(self, node):
    self._process_basic_statement(node)

  def visit_If(self, node):
    # No need to track ifs as lexical scopes, for now.
    # Lexical scopes are generally tracked in order to be able to resolve the
    # targets of jump statements like break/continue/etc. Since there is no
    # statement that can interrupt a conditional, we don't need to track their
    # lexical scope. That may change in the future.
    self.builder.begin_statement(node)

    # The test node is the split point; each of body/orelse forms a branch.
    self.builder.enter_cond_section(node)
    self._process_basic_statement(node.test)

    self.builder.new_cond_branch(node)
    for stmt in node.body:
      self.visit(stmt)

    self.builder.new_cond_branch(node)
    for stmt in node.orelse:
      self.visit(stmt)

    self.builder.exit_cond_section(node)
    self.builder.end_statement(node)

  def visit_While(self, node):
    self.builder.begin_statement(node)
    self._enter_lexical_scope(node)

    self.builder.enter_section(node)

    # The loop's test node is its entry; the end of the body flows back to it.
    self.builder.enter_loop_section(node, node.test)
    for stmt in node.body:
      self.visit(stmt)
    self.builder.exit_loop_section(node)

    # Note: although the orelse is technically part of the loop node,
    # the statements inside it don't affect the loop itself. For example, a
    # break in the loop's orelse will not affect the loop itself.
    self._exit_lexical_scope(node)

    for stmt in node.orelse:
      self.visit(stmt)

    self.builder.exit_section(node)
    self.builder.end_statement(node)

  def visit_For(self, node):
    self.builder.begin_statement(node)
    self._enter_lexical_scope(node)

    self.builder.enter_section(node)

    # TODO(mdan): Strictly speaking, this should be node.target + node.iter.
    # A blind dataflow analysis would have to process both node.target and
    # node.iter to properly process read and write access.
    self.builder.enter_loop_section(node, node.iter)
    for stmt in node.body:
      self.visit(stmt)
    self.builder.exit_loop_section(node)

    # Note: although the orelse is technically part of the loop node,
    # they don't count as loop bodies. For example, a break in the loop's
    # orelse will affect the parent loop, not the current one.
    self._exit_lexical_scope(node)

    for stmt in node.orelse:
      self.visit(stmt)

    self.builder.exit_section(node)
    self.builder.end_statement(node)

  def visit_Break(self, node):
    # Break exits the innermost enclosing loop.
    self._process_exit_statement(node, gast.While, gast.For)

  def visit_Continue(self, node):
    # Continue re-enters the innermost enclosing loop.
    self._process_continue_statement(node, gast.While, gast.For)

  def visit_Try(self, node):
    self._enter_lexical_scope(node)

    for stmt in node.body:
      self.visit(stmt)
    # Unlike loops, the orelse is a simple continuation of the body.
    for stmt in node.orelse:
      self.visit(stmt)

    if node.handlers:
      # TODO(mdan): Should we still support bare try/except? Might be confusing.
      raise NotImplementedError('exceptions are not yet supported')

    self._exit_lexical_scope(node)

    # The finally body forms a guard section that jump statements inside the
    # try must pass through.
    self.builder.enter_finally_section(node)
    for stmt in node.finalbody:
      self.visit(stmt)
    self.builder.exit_finally_section(node)

  def visit_With(self, node):
    # TODO(mdan): Mark the context manager's exit call as exit guard.
    # Each withitem is treated as a basic statement; the body continues the
    # normal flow.
    for item in node.items:
      self._process_basic_statement(item)
    for stmt in node.body:
      self.visit(stmt)
def build(node):
  """Builds CFGs for the functions under `node`.

  Returns:
    Dict mapping each FunctionDef AST node to the Graph built for it.
  """
  cfg_visitor = AstToCfg()
  cfg_visitor.visit(node)
  return cfg_visitor.cfgs
| StarcoderdataPython |
134378 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities that are useful for Mephisto-related scripts.
"""
from mephisto.abstractions.databases.local_database import LocalMephistoDB
from mephisto.abstractions.databases.local_singleton_database import MephistoSingletonDB
from mephisto.operations.utils import get_mock_requester, get_root_data_dir
from omegaconf import DictConfig, OmegaConf
import argparse
from typing import Tuple, Dict, Any, TYPE_CHECKING
import os
if TYPE_CHECKING:
from mephisto.abstractions.database import MephistoDB
def load_db_and_process_config(
    cfg: DictConfig, print_config=False
) -> Tuple["MephistoDB", DictConfig]:
    """
    Load the MephistoDB described by a Hydra DictConfig (built from a
    RunScriptConfig), validate the config against the database contents, and
    return both the database and the validated config.

    When `print_config` is set, the validated configuration is echoed as YAML
    before returning.
    """
    database = get_db_from_config(cfg)
    processed_config = augment_config_from_db(cfg, database)
    if print_config:
        print(OmegaConf.to_yaml(processed_config))
    return database, processed_config
def get_db_from_config(cfg: DictConfig) -> "MephistoDB":
    """
    Get a MephistoDB from the given configuration. As of now
    this defaults to a LocalMephistoDB
    """
    configured_path = cfg.mephisto.get("datapath", None)
    if configured_path is None:
        configured_path = get_root_data_dir()
    database_file = os.path.join(configured_path, "database.db")

    db_type = cfg.mephisto.database._database_type
    if db_type == "singleton":
        return MephistoSingletonDB(database_path=database_file)
    if db_type == "local":
        return LocalMephistoDB(database_path=database_file)
    raise AssertionError(f"Provided database_type {db_type} is not valid")
def augment_config_from_db(script_cfg: DictConfig, db: "MephistoDB") -> DictConfig:
    """
    Check the database for validity of the incoming MephistoConfig, ensure
    that the config has all the necessary fields set.

    Resolves the requester to use (falling back to a mock requester when
    nothing is configured), confirms it exists in the database, and prompts
    the user interactively before launching live on paid providers. The
    resolved requester_name and provider_type are written back into the
    config, and the (possibly modified) script_cfg is returned.
    """
    cfg = script_cfg.mephisto
    requester_name = cfg.provider.get("requester_name", None)
    provider_type = cfg.provider.get("_provider_type", None)
    architect_type = cfg.architect.get("_architect_type", None)
    if requester_name is None:
        # No requester configured: infer one from the provider type.
        if provider_type is None:
            print("No requester specified, defaulting to mock")
            provider_type = "mock"
        if provider_type == "mock":
            req = get_mock_requester(db)
            requester_name = req.requester_name
        else:
            # Look up registered requesters for this provider; fail fast with
            # registration instructions when none exist, and prefer the most
            # recently registered one when there are several.
            reqs = db.find_requesters(provider_type=provider_type)
            # TODO (#93) proper logging
            if len(reqs) == 0:
                print(
                    f"No requesters found for provider type {provider_type}, please "
                    f"register one. You can register with `mephisto register {provider_type}`, "
                    f"or `python mephisto/client/cli.py register {provider_type}` if you haven't "
                    "installed Mephisto using poetry."
                )
                exit(1)
            elif len(reqs) == 1:
                req = reqs[0]
                requester_name = req.requester_name
                print(
                    f"Found one `{provider_type}` requester to launch with: {requester_name}"
                )
            else:
                req = reqs[-1]
                requester_name = req.requester_name
                print(
                    f"Found many `{provider_type}` requesters to launch with, "
                    f"choosing the most recent: {requester_name}"
                )
    else:
        # Ensure provided requester exists
        reqs = db.find_requesters(requester_name=requester_name)
        if len(reqs) == 0:
            print(
                f"No requesters found under name {requester_name}, "
                "have you registered with `mephisto register`?"
            )
            exit(1)
        provider_type = reqs[0].provider_type

    # Interactive confirmation before spending real money / using a local
    # architect with a live provider.
    if provider_type in ["mturk"]:
        input(
            f"This task is going to launch live on {provider_type}, press enter to continue: "
        )
    if provider_type in ["mturk_sandbox", "mturk"] and architect_type not in [
        "heroku",
        "ec2",
    ]:
        input(
            f"This task is going to launch live on {provider_type}, but your "
            f"provided architect is {architect_type}, are you sure you "
            "want to do this? : "
        )

    # Persist the resolved values back into the config for downstream use.
    cfg.provider.requester_name = requester_name
    cfg.provider._provider_type = provider_type
    return script_cfg
| StarcoderdataPython |
class Types:
    """Registry of supported value types, aliased to Python built-ins.

    The aliases can be used both for isinstance checks and as conversion
    callables (e.g. Types.INTEGER("42")).
    """

    # Alias for the built-in str type.
    STRING = str
    # Alias for the built-in int type.
    INTEGER = int
    # Alias for the built-in float type.
    FLOAT = float
4911750 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A module that implements the Trainer class, the main class responsible for
the process of training a neural network.
"""
import sys
import logging
from time import time
import h5py
import numpy
import theano
from theanolm.backend import IncompatibleStateError
from theanolm.parsing import ShufflingBatchIterator, LinearBatchIterator
from theanolm.training.stoppers import create_stopper
class Trainer(object):
"""Training Process
Saves a history of validation costs and decreases learning rate when the
cost does not decrease anymore.
"""
    def __init__(self, training_options, vocabulary, training_files, sampling):
        """Creates the optimizer and initializes the training process.

        Creates empty member variables for the perplexities list and the
        training state at the validation point. Training state is saved at only
        one validation point at a time, so validation interval is at least the
        the number of samples used per validation.

        :type training_options: dict
        :param training_options: a dictionary of training options

        :type vocabulary: Vocabulary
        :param vocabulary: vocabulary that provides mapping between words and
                           word IDs

        :type training_files: list of file objects
        :param training_files: list of files to be used as training data

        :type sampling: list of floats
        :param sampling: specifies a fraction for each training file, how much
                         to sample on each epoch
        """
        self._vocabulary = vocabulary

        # Count mini-batches by making one full pass over the training data
        # with a non-shuffling iterator.
        print("Computing the number of mini-batches in training data.")
        linear_iter = LinearBatchIterator(
            training_files,
            vocabulary,
            batch_size=training_options['batch_size'],
            max_sequence_length=training_options['sequence_length'],
            map_oos_to_unk=True)
        sys.stdout.flush()
        self._updates_per_epoch = 0
        for _, _, _ in linear_iter:
            self._updates_per_epoch += 1
        if self._updates_per_epoch < 1:
            raise ValueError("Training data does not contain any sentences.")
        logging.debug("One epoch of training data contains %d mini-batch "
                      "updates.",
                      self._updates_per_epoch)
        self.class_prior_probs = vocabulary.get_class_probs()
        logging.debug("Class unigram log probabilities are in the range [%f, "
                      "%f].",
                      numpy.log(self.class_prior_probs.min()),
                      numpy.log(self.class_prior_probs.max()))

        # The actual training pass uses a shuffling iterator, with per-file
        # sampling fractions.
        self._training_iter = ShufflingBatchIterator(
            training_files,
            sampling,
            vocabulary,
            batch_size=training_options['batch_size'],
            max_sequence_length=training_options['sequence_length'],
            map_oos_to_unk=True)

        self._stopper = create_stopper(training_options, self)
        self._options = training_options

        # iterator to cross-validation data, or None for no cross-validation
        self._validation_iter = None
        # a text scorer for performing cross-validation
        self._scorer = None
        # number of perplexity samples per validation
        self._samples_per_validation = 7
        # function for combining validation samples
        # NOTE(review): set_validation() assigns self._statistics_function
        # (plural), and this singular attribute is not read anywhere visible
        # here, so this median default may be dead -- confirm which name the
        # validation code actually uses.
        self._statistic_function = lambda x: numpy.median(numpy.asarray(x))
        # the stored validation samples
        self._local_perplexities = []
        # the state at the center of validation samples
        self._validation_state = None
        # number of mini-batch updates between log messages
        self._log_update_interval = 0
        # the network to be trained
        self._network = None
        # the optimization function
        self._optimizer = None
        # current candidate for the minimum validation cost state
        self._candidate_state = None
        # index to the cost history that corresponds to the current candidate
        # state
        self._candidate_index = None
        # current training epoch
        self.epoch_number = 0
        # number of mini-batch updates performed in this epoch
        self.update_number = 0
        # total number of mini-batch updates performed (after restart)
        self._total_updates = 0
        # validation set cost history
        self._cost_history = None
        # function for averaging cross-validation measurements
        self._statistics_function = None
        # duration of the last mini-batch update
        self._update_duration = None
    def set_validation(self, validation_iter, scorer,
                       samples_per_validation=None, statistics_function=None):
        """Sets cross-validation iterator and parameters.

        :type validation_iter: BatchIterator
        :param validation_iter: an iterator for computing validation set
                                perplexity

        :type scorer: TextScorer
        :param scorer: a text scorer for computing validation set perplexity

        :type samples_per_validation: int
        :param samples_per_validation: number of perplexity samples to compute
                                       per cross-validation

        :type statistics_function: Python function
        :param statistics_function: a function to be performed on a list of
            consecutive perplexity measurements to compute the validation cost
            (median by default)
        """
        self._validation_iter = validation_iter
        self._scorer = scorer
        # Only override the constructor defaults when the caller supplied
        # explicit values.
        # NOTE(review): this assigns self._statistics_function (plural), while
        # __init__ sets a default under the singular name
        # self._statistic_function -- confirm which one the validation code
        # reads.
        if samples_per_validation is not None:
            self._samples_per_validation = samples_per_validation
        if statistics_function is not None:
            self._statistics_function = statistics_function
    def set_logging(self, log_interval):
        """Sets logging parameters.

        :type log_interval: int
        :param log_interval: number of mini-batch updates between log messages
        """
        self._log_update_interval = log_interval
def initialize(self, network, state, optimizer, load_and_train=False):
    """Sets the network and the HDF5 file that stores the network state,
    optimizer, and validation scorer and iterator.

    If the HDF5 file contains a network state, initializes the network with
    that state.

    :type network: Network
    :param network: the network, which will be used to retrieve state when
                    saving

    :type state: h5py.File
    :param state: HDF5 file where initial training state will be possibly
                  read from, and candidate states will be saved to

    :type optimizer: BasicOptimizer
    :param optimizer: one of the optimizer implementations

    :type load_and_train: bool
    :param load_and_train: if True and the file contains a training state,
        restores only the network weights and restarts the training
        counters (epoch, update number, cost history) from scratch
    """

    self._network = network
    self._optimizer = optimizer
    self._candidate_state = state
    if 'trainer' in self._candidate_state and load_and_train:
        # Partial restore: reuse saved weights, begin a fresh training run.
        print("Restoring initial network state from {} partially.".format(
            self._candidate_state.filename))
        sys.stdout.flush()
        self._reset_partial_state()
        self._candidate_index = None
        self.epoch_number = 1
        self.update_number = 0
        self._cost_history = numpy.asarray([], dtype=theano.config.floatX)
    elif 'trainer' in self._candidate_state:
        # Full restore: continue training exactly where the file left off.
        print("Restoring initial network state from {}.".format(
            self._candidate_state.filename))
        sys.stdout.flush()
        self._reset_state()
    else:
        # Fresh start: the file contains no saved training state.
        self._candidate_index = None
        self.epoch_number = 1
        self.update_number = 0
        self._cost_history = numpy.asarray([], dtype=theano.config.floatX)
    # Total update counter always restarts, even after a full restore.
    self._total_updates = 0
def train(self):
    """Trains a neural network.

    If cross-validation has been configured using ``set_validation()``,
    computes the validation set perplexity as many times per epoch as
    specified by the _validation_frequency_ option and saves the model when
    the perplexity improves. Otherwise saves the model after each epoch.

    :raises RuntimeError: if ``initialize()`` has not been called first
    """

    if (self._network is None) or (self._optimizer is None) or \
       (self._candidate_state is None):
        raise RuntimeError("Trainer has not been initialized before "
                           "calling train().")

    start_time = time()
    while self._stopper.start_new_epoch():
        epoch_start_time = time()
        for word_ids, file_ids, mask in self._training_iter:
            self.update_number += 1
            self._total_updates += 1
            # Map word IDs to class IDs before the parameter update.
            class_ids = self._vocabulary.word_id_to_class_id[word_ids]
            update_start_time = time()
            self._optimizer.update_minibatch(word_ids, class_ids, file_ids, mask)
            self._update_duration = time() - update_start_time
            if (self._log_update_interval >= 1) and \
               (self._total_updates % self._log_update_interval == 0):
                self._log_update()
            # May compute validation perplexity and save a new candidate.
            self._validate()
            if not self._stopper.start_new_minibatch():
                break
        if self._validation_iter is None:
            # No validation configured: save the state after every epoch.
            self._set_candidate_state()

        epoch_duration = time() - epoch_start_time
        epoch_minutes = epoch_duration / 60
        epoch_time_h, epoch_time_m = divmod(epoch_minutes, 60)
        message = "Finished training epoch {} in {:.0f} hours {:.1f} minutes." \
                  .format(self.epoch_number, epoch_time_h, epoch_time_m)
        best_cost = self.candidate_cost()
        if best_cost is not None:
            message += " Best validation perplexity {:.2f}.".format(
                best_cost)
        print(message)

        self.epoch_number += 1
        self.update_number = 0

    duration = time() - start_time
    minutes = duration / 60
    time_h, time_m = divmod(minutes, 60)
    print("Training finished in {:.0f} hours {:.1f} minutes." \
          .format(time_h, time_m))
def get_state(self, state):
    """Pulls parameter values from Theano shared variables and updates a
    HDF5 file with all the network and training state variables.

    For consistency, all the parameter values are returned as numpy types,
    since state read from a model file also contains numpy types. This also
    ensures the cost history will be copied into the returned dictionary.

    :type state: h5py.File
    :param state: HDF5 file for storing the current state
    """

    h5_trainer = state.require_group('trainer')
    h5_trainer.attrs['epoch_number'] = self.epoch_number
    h5_trainer.attrs['update_number'] = self.update_number
    if 'cost_history' in h5_trainer:
        # Resize the existing dataset so it matches the current history.
        h5_trainer['cost_history'].resize(self._cost_history.shape)
        h5_trainer['cost_history'][:] = self._cost_history
    else:
        # maxshape=(None,) keeps the dataset resizable for later epochs.
        h5_trainer.create_dataset(
            'cost_history', data=self._cost_history, maxshape=(None,),
            chunks=(1000,))

    if self._network is not None:
        self._network.get_state(state)
    self._training_iter.get_state(state)
    if self._optimizer is not None:
        self._optimizer.get_state(state)
def _reset_state(self):
    """Resets the values of Theano shared variables to the current candidate
    state.

    Sets the candidate state index to point to the last element in the
    loaded cost history, i.e. the restored state is assumed to be the
    minimum-cost state found so far.

    Requires that ``self._candidate_state`` contains values for all the
    training parameters.

    :raises IncompatibleStateError: if required attributes are missing from
        the stored training state
    """

    self._network.set_state(self._candidate_state)

    if 'trainer' not in self._candidate_state:
        raise IncompatibleStateError("Training state is missing.")
    h5_trainer = self._candidate_state['trainer']

    if 'epoch_number' not in h5_trainer.attrs:
        raise IncompatibleStateError("Current epoch number is missing from "
                                     "training state.")
    self.epoch_number = int(h5_trainer.attrs['epoch_number'])

    if 'update_number' not in h5_trainer.attrs:
        raise IncompatibleStateError("Current update number is missing "
                                     "from training state.")
    self.update_number = int(h5_trainer.attrs['update_number'])

    logging.info("[%d] (%.2f %%) of epoch %d",
                 self.update_number,
                 self.update_number / self._updates_per_epoch * 100,
                 self.epoch_number)

    if 'cost_history' in h5_trainer:
        # NOTE(review): Dataset.value is deprecated in h5py >= 2.9; the
        # modern equivalent is h5_trainer['cost_history'][()].
        self._cost_history = h5_trainer['cost_history'].value
        if self._cost_history.size == 0:
            print("Validation set cost history is empty in the training state.")
            self._candidate_index = None
        else:
            # The last restored measurement is the candidate minimum.
            self._candidate_index = self._cost_history.size - 1
        self._log_validation()
    else:
        print("Warning: Validation set cost history is missing from "
              "training state. Initializing to empty cost history.")
        self._cost_history = numpy.asarray([], dtype=theano.config.floatX)
        self._candidate_index = None

    self._training_iter.set_state(self._candidate_state)
    self._optimizer.set_state(self._candidate_state)
def _reset_partial_state(self):
    """Resets the network's Theano shared variables to the current candidate
    state, without restoring the training counters or optimizer state.

    Used by ``initialize()`` with ``load_and_train=True`` to continue from
    saved network weights while restarting the training bookkeeping.

    :raises IncompatibleStateError: if the state contains no trainer group
    """

    self._network.set_state(self._candidate_state)

    if 'trainer' not in self._candidate_state:
        raise IncompatibleStateError("Training state is missing.")
    # NOTE(review): h5_trainer is currently unused and the optimizer
    # restore below is deliberately disabled — confirm whether restoring
    # the optimizer state should be re-enabled.
    h5_trainer = self._candidate_state['trainer']
    #self._optimizer.set_state(self._candidate_state)
def num_validations(self):
    """Returns how many times the validation set cost has been computed.

    :rtype: int
    :returns: size of the validation cost history
    """

    history = self._cost_history
    return history.size
def validations_since_candidate(self):
    """Returns how many validations have happened after the current
    candidate for the optimal state was obtained.

    :rtype: int
    :returns: number of validations since the current candidate state (0
              means the current candidate is the last validation)

    :raises RuntimeError: if no validation has been performed yet
    """

    history_size = self._cost_history.size
    if not history_size:
        raise RuntimeError("BasicTrainer.validations_since_candidate() "
                           "called with empty cost history.")
    return history_size - 1 - self._candidate_index
def candidate_cost(self):
    """Returns the validation set cost of the current minimum-cost
    candidate state.

    :rtype: float
    :returns: current candidate state cost, or None if the candidate state
              has not been set yet
    """

    index = self._candidate_index
    return None if index is None else self._cost_history[index]
def _decrease_learning_rate(self):
    """Called when the validation set cost stops decreasing.

    Restores the best-so-far (candidate) state, halves the learning rate,
    and notifies the stopper that improvement has ceased.
    """

    # Current learning rate might be smaller than the one stored in the
    # state, so set the new value after restoring optimizer to the old
    # state.
    old_value = self._optimizer.learning_rate
    new_value = old_value / 2
    self._reset_state()
    self._stopper.improvement_ceased()
    self._optimizer.learning_rate = new_value

    print("Model performance stopped improving. Decreasing learning rate "
          "from {} to {} and resetting state to {:.0f} % of epoch {}."
          .format(old_value,
                  new_value,
                  self.update_number / self._updates_per_epoch * 100,
                  self.epoch_number))
def _has_improved(self):
    """Tests whether the latest validation set cost is significantly better
    than the cost of the current candidate state.

    TODO: Implement a test for statistical significance.

    :rtype: bool
    :returns: True if validation set cost decreased enough, or there was no
              previous candidate state; False otherwise

    :raises RuntimeError: if no validation has been performed yet
    """

    if self._cost_history.size == 0:
        raise RuntimeError("BasicTrainer._has_improved() called with empty "
                           "cost history.")

    # With no candidate yet, any measurement counts as an improvement.
    if self._candidate_index is None:
        return True

    latest_cost = self._cost_history[-1]
    return latest_cost < 0.999 * self.candidate_cost()
def _log_update(self):
    """Logs information about the previous mini-batch update.

    Logs the update number, position within the epoch, current learning
    rate, and the duration of the update in milliseconds.

    Bug fixed: ``self._update_duration`` is measured in seconds, so it must
    be multiplied by 1000 (not 100) to report milliseconds as the message
    claims.
    """

    logging.info("[%d] (%.1f %%) of epoch %d -- lr = %.1g, "
                 "duration = %.1f ms",
                 self.update_number,
                 self.update_number / self._updates_per_epoch * 100,
                 self.epoch_number,
                 self._optimizer.learning_rate,
                 self._update_duration * 1000)
def _log_validation(self):
    """Writes the tail of the validation set cost history to the debug log,
    bracketing the current minimum-cost candidate.
    """

    formatted = [format(cost, '.1f') for cost in self._cost_history]
    candidate = self._candidate_index
    if candidate is not None:
        formatted[candidate] = '[{}]'.format(formatted[candidate])
    # Only the last 20 measurements are shown.
    logging.debug("[%d] Validation set cost history: %s",
                  self.update_number,
                  ' '.join(formatted[-20:]))
def _set_candidate_state(self, state=None):
    """Sets neural network and training state as the candidate for the
    minimum validation cost state, and writes to disk.

    :type state: h5py.File
    :param state: if a HDF5 file is given, reads the state from this file,
                  instead of the current state
    """

    if state is None:
        # Snapshot the live training state directly into the candidate file.
        self.get_state(self._candidate_state)
    else:
        # Copy the given state file into the candidate file wholesale,
        # replacing any groups that already exist there.
        state.flush()
        for name in state:
            if name in self._candidate_state:
                del self._candidate_state[name]
            self._candidate_state.copy(state[name], name, expand_refs=True)
        for name in state.attrs:
            self._candidate_state.attrs[name] = state.attrs[name]

    if self._cost_history.size == 0:
        self._candidate_index = None
    else:
        # The candidate corresponds to the latest validation measurement.
        self._candidate_index = self._cost_history.size - 1

    self._candidate_state.flush()
    logging.info("New candidate for optimal state saved to %s.",
                 self._candidate_state.filename)
def _validate(self):
    """If at or just before the actual validation point, computes perplexity
    and adds it to the list of samples. At the actual validation point we
    have ``self._samples_per_validation`` values and combine them using
    ``self._statistics_function``. If the model performance has improved,
    the state at the center of the validation samples will be saved using
    ``self._set_candidate_state()``.

    Bug fixed: the attribute holding the averaging function is named
    ``_statistics_function`` (set in the constructor and in
    ``set_validation()``); the old name ``_statistic_function`` raised
    AttributeError at the final sampling point.
    """

    if self._validation_iter is None:
        return  # Validation has not been configured.

    if not self._is_scheduled(self._options['validation_frequency'],
                              self._samples_per_validation - 1):
        return  # We don't have to validate now.

    perplexity = self._scorer.compute_perplexity(self._validation_iter)
    self._local_perplexities.append(perplexity)
    if len(self._local_perplexities) == 1:
        logging.debug("[%d] First validation sample, perplexity %.2f.",
                      self.update_number,
                      perplexity)

    # The rest of the function will be executed only at and after the center
    # of sampling points.
    if not self._is_scheduled(self._options['validation_frequency'],
                              self._samples_per_validation // 2):
        return

    # The first sampling point within samples_per_validation / 2 of the
    # actual validation point is the center of the sampling points. This
    # will be saved in case the model performance has improved.
    if self._validation_state is None:
        logging.debug("[%d] Center of validation, perplexity %.2f.",
                      self.update_number,
                      perplexity)
        # In-memory HDF5 file; never written to disk (backing_store=False).
        self._validation_state = h5py.File(
            name='validation-state', driver='core', backing_store=False)
        self.get_state(self._validation_state)

    # The rest of the function will be executed only at the final sampling
    # point.
    if not self._is_scheduled(self._options['validation_frequency']):
        return

    logging.debug("[%d] Last validation sample, perplexity %.2f.",
                  self.update_number,
                  perplexity)

    if len(self._local_perplexities) < self._samples_per_validation:
        # After restoring a previous validation state, which is at the
        # center of the sampling points, the trainer will collect again half
        # of the samples. Don't take that as a validation.
        logging.debug("[%d] Only %d samples collected. Ignoring this "
                      "validation.",
                      self.update_number,
                      len(self._local_perplexities))
        self._local_perplexities = []
        self._validation_state.close()
        self._validation_state = None
        return

    # Fix: correct attribute name (median by default, see constructor).
    statistic = self._statistics_function(self._local_perplexities)
    self._cost_history = numpy.append(self._cost_history, statistic)
    if self._has_improved():
        # Take the state at the actual validation point and replace the cost
        # history with the current cost history that also includes this
        # latest statistic.
        h5_cost_history = self._validation_state['trainer/cost_history']
        h5_cost_history.resize(self._cost_history.shape)
        h5_cost_history[:] = self._cost_history
        self._set_candidate_state(self._validation_state)

    self._log_validation()

    if (self._options['patience'] >= 0) and \
       (self.validations_since_candidate() > self._options['patience']):
        # Too many validations without finding a new candidate state.
        # If any validations have been done, the best state has been found
        # and saved. If training has been started from previous state,
        # _candidate_state has been set to the initial state.
        assert self._candidate_state is not None
        self._decrease_learning_rate()

    self._local_perplexities = []
    self._validation_state.close()
    self._validation_state = None
def _is_scheduled(self, frequency, within=0):
    """Checks whether an event with the given per-epoch frequency is due
    now, or within the given number of future updates.

    For example, updates_per_epoch=9, frequency=2:

    update_number:  1   2   3  4  [5]  6   7   8  [9] 10  11  12
    * frequency:    2   4   6  8  10  12  14  16  18  20  22  24
    modulo:         2   4   6  8   1   3   5   7   0   2   4   6
    within:         4   3   2  1   0   3   2   1   0   4   3   2
    * frequency:    8   6   4  2   0   6   4   2   0   8   6   4

    :type frequency: int
    :param frequency: how many times per epoch the event should be
                      performed

    :type within: int
    :param within: if zero, returns True if the event should be performed
                   now; otherwise returns True if the event should be
                   performed within this many updates in the future

    :rtype: bool
    :returns: whether the operation is scheduled to be performed
    """

    scaled_position = self.update_number * frequency
    modulo = scaled_position % self._updates_per_epoch
    if modulo < frequency:
        return True
    return self._updates_per_epoch - modulo <= within * frequency
| StarcoderdataPython |
class Fib:
    """Iterator that yields the first *nn* Fibonacci numbers: 1, 1, 2, 3, 5, ..."""

    def __init__(self, nn):
        print("inicjujemy")
        self.__limit = nn     # how many terms to produce in total
        self.__emitted = 0    # how many terms have been produced so far
        self.__prev = 1       # second-to-last term
        self.__curr = 1       # last term

    def __iter__(self):
        print('iter')
        return self

    def __next__(self):
        print('next')
        self.__emitted += 1
        if self.__emitted > self.__limit:
            raise StopIteration
        # The first two terms of the sequence are both 1.
        if self.__emitted <= 2:
            return 1
        value = self.__prev + self.__curr
        self.__prev, self.__curr = self.__curr, value
        return value
# Demonstration: print the first ten Fibonacci numbers.
for value in Fib(10):
    print(value)
8049106 | """Serializers for venues"""
from rest_framework import serializers
from django_countries.serializers import CountryFieldMixin
from .fields import TimeZoneField
from . import models
class VenueSerializer(CountryFieldMixin, serializers.ModelSerializer):
    """Serializer exposing every field of the Venue model.

    CountryFieldMixin handles (de)serialization of the django-countries
    country field.
    """

    # Optional time zone; when provided it must not be blank or null.
    time_zone = TimeZoneField(
        required=False, allow_blank=False, allow_null=False,
    )
    # Latitude in degrees, constrained to the valid [-90, 90] range.
    latitude = serializers.DecimalField(
        max_digits=10, decimal_places=8, min_value=-90, max_value=90,
        required=False, allow_null=True,
    )
    # Longitude in degrees, constrained to the valid [-180, 180] range.
    longitude = serializers.DecimalField(
        max_digits=11, decimal_places=8, min_value=-180, max_value=180,
        required=False, allow_null=True,
    )

    class Meta:
        model = models.Venue
        fields = '__all__'
class VenueBySlugSerializer(VenueSerializer):
    """Variant of VenueSerializer intended for slug-based lookup.

    NOTE(review): in DRF, ``lookup_field`` is normally a view attribute,
    not a serializer ``Meta`` option — confirm this setting has the
    intended effect with the views that use this serializer.
    """

    class Meta(VenueSerializer.Meta):
        lookup_field = 'slug'
| StarcoderdataPython |
3261810 | # To split the given images in dataset into respective Dya & Night Instances.
import numpy as np
from PIL import Image
import glob
# Split each side-by-side training image into its two halves and save them
# to day/ and night/ with a sequential numeric file name.
#
# Improvements: removed unused constants (BUFFER_SIZE, BATCH_SIZE,
# IMG_WIDTH, IMG_HEIGHT), used Image.open as a context manager so the
# underlying file handle is closed, and built paths with f-strings.
counter = 1
for filename in glob.glob("training/*.jpg"):
    with Image.open(filename) as im:
        width, height = im.size
        print(width, height)
        im_arr = np.array(im)

    half_width = width // 2
    # Right half -> "day", left half -> "night" (same split as before).
    day_half = im_arr[:, half_width:]
    night_half = im_arr[:, :half_width]

    Image.fromarray(day_half).save(f"day/{counter}.png", 'PNG')
    Image.fromarray(night_half).save(f"night/{counter}.png", 'PNG')
    counter += 1
9686266 | import scrapeconfig as cng
import pandas as pd
from get_osebx_html_files import get_htmlfile
from get_yahoo_data import get_keystats
from datawrangle import merge_bors_and_yahoo_dfs
from borsscraper import SCRAPE_OSLOBORS_TITLE
if __name__ == '__main__':
    # Step 1: download the Oslo Bors quotes and returns pages as HTML.
    get_htmlfile(url=cng.BORS_QUOTES_URL, targetfile=cng.QUOTES_TARGET_FILE, wait_target_class=cng.QUOTES_WAIT_TARGET_CLASS)
    get_htmlfile(url=cng.BORS_RETURNS_URL, targetfile=cng.RETURNS_TARGET_FILE, wait_target_class=cng.RETURNS_WAIT_TARGET_CLASS)

    # Step 2: scrape the downloaded HTML files into a DataFrame and save it.
    print('Scraping HTML files')
    df = SCRAPE_OSLOBORS_TITLE(cng.QUOTES_TARGET_FILE, cng.RETURNS_TARGET_FILE, verbose=False)
    df.to_csv(cng.BORS_CSV_NAME, index=False)

    # Step 3: obtain key statistics from YahooFinancials for the scraped
    # tickers. Requires the CSV written in step 2. May take some time.
    print('Obtaining key statistics from Yahoo Financials')
    tickers = pd.read_csv(cng.BORS_CSV_NAME).ticker
    df = get_keystats(tickers)
    df.to_csv(cng.YAHOO_CSV_NAME, index=False)

    # Step 4: merge the Oslo Bors and Yahoo data into the final dataset.
    print('Compiling data')
    merge_bors_and_yahoo_dfs(cng.BORS_CSV_NAME, cng.YAHOO_CSV_NAME, cng.FINALDATASET_FILENAME)

    print('Done!')
| StarcoderdataPython |
5112942 | import load_data as ld
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
# Input dimensions of one sample: (1025, 44, 2).
# NOTE(review): presumably (frequency bins, time frames, channels) from the
# spectrogram computed in load_data — confirm there.
input_shape = (1025, 44, 2)

# Load the train/test datasets and the number of target classes, and shuffle.
(train_dataset, test_dataset, num_categories) = ld.prepare_tensorflow_datasets()
(train_dataset, test_dataset) = ld.shuffle(train_dataset, test_dataset)

# CNN: five frequency-axis (3, 1) conv/pool pairs, four time-axis (1, 3)
# conv/pool pairs, one joint (3, 3) conv/pool, then a dense classifier head.
model = tf.keras.Sequential([
    Conv2D(32, (3, 1),  # 0
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((2, 1)),
    Conv2D(32, (3, 1),  # 1
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((2, 1)),
    Conv2D(32, (3, 1),  # 2
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((2, 1)),
    Conv2D(32, (3, 1),  # 3
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((2, 1)),
    Conv2D(32, (3, 1),  # 4
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((2, 1)),
    Conv2D(32, (1, 3),  # 5
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((1, 2)),
    Conv2D(32, (1, 3),  # 6
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((1, 2)),
    Conv2D(32, (1, 3),  # 7
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((1, 2)),
    Conv2D(32, (1, 3),  # 8
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((1, 2)),
    Conv2D(64, (3, 3),  # 9
           activation='relu',
           padding='same',
           input_shape=input_shape),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(64, activation='relu'),
    Dropout(.1),
    Dense(num_categories, activation='softmax')
])

# Integer labels, hence the sparse categorical loss/metric variants.
model.compile(optimizer=tf.keras.optimizers.RMSprop(),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

model.summary()

history = model.fit(train_dataset,
                    epochs=4
                    )

# Plot the training curves.
# NOTE(review): the y-axis label reads 'Accuracy' although loss is plotted
# on the same axis too.
plt.plot(history.history['sparse_categorical_accuracy'], label='accuracy')
plt.plot(history.history['loss'], label='loss')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()

## testing
print("Testing:")
test_loss, test_acc = model.evaluate(test_dataset,
                                     verbose=2)
print(test_loss, test_acc)

# Persist the trained model in HDF5 format.
model.save("song_model.h5")
| StarcoderdataPython |
9674170 | from .graph_export import export_scenegraph, export_subtree
from .graph_import import import_scenegraph, import_subtree
| StarcoderdataPython |
3438490 | <reponame>Napchat/mineapp
from flask import Blueprint, render_template, abort, g, redirect, url_for, session, request
from jinja2 import TemplateNotFound
from mineapp.forms import NameForm
blue1 = Blueprint('blue1', __name__, template_folder='templates', static_folder='static')
@blue1.before_app_first_request
def before_app_first_request():
    """Mark the session as logged-out before the first request is served."""
    session['logged_in'] = False
@blue1.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form; on a valid POST, store the name in the
    session, mark the user as logged in, and redirect to the index page.
    """
    try:
        form = NameForm()
        submitted = request.method == 'POST' and form.validate_on_submit()
        if submitted:
            session['name'] = form.name.data
            session['logged_in'] = True
            return redirect(url_for('blue1.index'))
        return render_template('blue1/login.html',
                               form=form,
                               name=session.get('name'))
    except TemplateNotFound:
        abort(404)
@blue1.route('/')
@blue1.route('/index')
def index():
    """Show the index page, or bounce to the login page when not logged in."""
    try:
        logged_in = session.get('logged_in')
        if not logged_in:
            return redirect(url_for('blue1.login'))
        return render_template('blue1/index.html', name=session.get('name'))
    except TemplateNotFound:
        abort(404)
373169 | <gh_stars>0
def featureNormalize(X):
    """Standardise *X* column-wise: subtract each column's mean and divide
    by its standard deviation.

    Bug fixed: the original discarded the result of ``np.asarray(X)``, so
    passing a plain Python list crashed in ``np.ndarray.mean``. The input
    is now converted to an ndarray before use.

    :param X: 2-D array-like of samples (rows) by features (columns)
    :returns: ndarray of the same shape with zero-mean, unit-std columns
    """
    import numpy as np
    X = np.asarray(X)
    # Column means of the raw data.
    mu = X.mean(axis=0)
    X_norm = X - mu
    # Column standard deviations of the centred data.
    sigma = X_norm.std(axis=0)
    X_norm = X_norm / sigma
    print('the mean is', mu)
    print('and sigma is', sigma)
    return X_norm
| StarcoderdataPython |
11387825 | <reponame>MartinoMensio/allennlp<gh_stars>10-100
# pylint: disable=no-self-use,invalid-name
import numpy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token
from allennlp.data.fields import TextField, IndexField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestIndexField(AllenNlpTestCase):
    """Unit tests for IndexField: tensor conversion, input validation,
    empty-field behaviour, and printability."""

    def setUp(self):
        super(TestIndexField, self).setUp()
        # A five-token sentence that the index fields below point into.
        self.text = TextField([Token(t) for t in ["here", "is", "a", "sentence", "."]],
                              {"words": SingleIdTokenIndexer("words")})

    def test_as_tensor_converts_field_correctly(self):
        # The field serialises to a single-element tensor holding the index.
        index_field = IndexField(4, self.text)
        tensor = index_field.as_tensor(index_field.get_padding_lengths()).detach().cpu().numpy()
        numpy.testing.assert_array_equal(tensor, numpy.array([4]))

    def test_index_field_raises_on_incorrect_label_type(self):
        # Non-integer indices are rejected at construction time.
        with pytest.raises(ConfigurationError):
            _ = IndexField("hello", self.text)

    def test_index_field_empty_field_works(self):
        # empty_field() uses -1 as the sentinel/padding index.
        index_field = IndexField(4, self.text)
        empty_index = index_field.empty_field()
        assert empty_index.sequence_index == -1

    def test_printing_doesnt_crash(self):
        print(self.text)
| StarcoderdataPython |
6471640 | '''tests for the registry module'''
import pytest
from lima import exc, schema, registry
@pytest.fixture
def reg():
    """Provide a fresh, empty lima Registry for each test."""
    return registry.Registry()
# mock Schema class to register later on; note it shares its qualified
# name ("Schema") with lima.schema.Schema, which the ambiguity tests rely on
class Schema:
    pass
def test_register1(reg):
    '''Test if mock Schema class can be registered without raising.'''
    # Schema is module-level, so it is not rejected as a local class.
    reg.register(Schema)
def test_get1(reg):
    '''Test if registered classes can be retrieved again (1).'''
    reg.register(Schema)
    # Both the bare qualified name and the full dotted path must resolve.
    assert reg.get('Schema') is Schema  # via qualname
    assert reg.get(__name__ + '.Schema') is Schema  # via fullname
def test_register2(reg):
    '''Test if real Schema exists alongside mock one without raising.'''
    reg.register(Schema)
    reg.register(schema.Schema)
def test_get2(reg):
    '''Test if registered classes can be retrieved again (2).'''
    reg.register(Schema)
    reg.register(schema.Schema)

    # try to get via full names
    # (note that lima.Schema's full name is lima.schema.Schema)
    assert reg.get(__name__ + '.Schema') is Schema  # mock Schema
    assert reg.get('lima.schema.Schema') is schema.Schema  # real Schema
def test_class_not_found_error(reg):
    '''Test if registry throws an error when not finding anything.'''
    with pytest.raises(exc.ClassNotFoundError):
        reg.get('NonExistentClass')
def test_ambiguous_class_name_error(reg):
    '''Test if registry throws an error when finding >1 classes w/qualname.'''
    # Two distinct classes share the qualname "Schema"; a bare-name lookup
    # cannot disambiguate them.
    reg.register(Schema)
    reg.register(schema.Schema)
    with pytest.raises(exc.AmbiguousClassNameError):
        reg.get('Schema')
def test_register_local_class_error(reg):
    '''Test if registry rejects classes defined in local namespaces.'''
    class LocallyDefinedClass:
        pass

    with pytest.raises(exc.RegisterLocalClassError):
        reg.register(LocallyDefinedClass)
    # A rejected class must also not be retrievable afterwards.
    with pytest.raises(exc.ClassNotFoundError):
        reg.get('LocallyDefinedClass')
| StarcoderdataPython |
1763616 | import unittest
from pyportfolio.models import TradeList
from pyportfolio.trades.load import load
from pyportfolio.utils.testing import get_data_path
class TestImport(unittest.TestCase):
    """Smoke tests for loading trade lists from CSV and Excel files."""

    def test_csv(self):
        """Loading the sample CSV yields a TradeList instance."""
        trade_list = load.load_csv(get_data_path() + 'test.csv')
        self.assertIsInstance(trade_list, TradeList)

    def test_excel(self):
        """Loading the sample Excel workbook yields a TradeList that can
        be written back out as CSV."""
        trade_list = load.load_excel(get_data_path() + 'test.xlsx')
        self.assertIsInstance(trade_list, TradeList)
        trade_list.to_csv(get_data_path() + 'out.csv')
| StarcoderdataPython |
8010427 | from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from mainwindow import Ui_MainWindow
from aboutdialog import Ui_aboutDialog
from rpg_tools.PyDiceroll import roll
import sys
die_types = ['D4', 'D6', 'D8', 'D10', 'D12', 'D20', 'D30', 'D66', 'D100']
class aboutDialog(QDialog, Ui_aboutDialog):
    """"About" dialog that stays on top and closes via its OK button."""

    def __init__(self):
        super().__init__()
        # Drawer-style window kept above the main window.
        self.setWindowFlags(Qt.Drawer | Qt.WindowStaysOnTopHint)
        self.setupUi(self)
        self.aboutOKButton.clicked.connect(self.acceptOKButtonClicked)

    def acceptOKButtonClicked(self):
        """Close the dialog when OK is clicked."""
        self.close()
class DiceWindow(QMainWindow, Ui_MainWindow):
    """Main window of the dice-rolling application.

    Wires the Qt Designer widgets to ``rpg_tools.PyDiceroll.roll()`` and
    keeps the count/modifier inputs consistent with the selected die type.
    Die-type combo indices map to the module-level ``die_types`` list:
    0-4 = D4..D12, 5 = D20, 6 = D30, 7 = D66, 8 = D100.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)

        self.diceCount.valueChanged.connect(self.diceCount_changed)
        # Populate the die-type combo box from the module-level list.
        for i in range(len(die_types)):
            self.diceType.addItem(die_types[i])
        # Default die is D6 (index 1).
        self.dice_type = 'D6'
        self.diceType.setCurrentIndex(1)
        self.diceType.currentIndexChanged.connect(self.diceType_changed)
        self.diceDM.valueChanged.connect(self.diceDM_changed)
        self.rollButton.clicked.connect(self.rollButton_clicked)
        self.actionRoll_Dice.triggered.connect(self.rollButton_clicked)
        self.clearButton.clicked.connect(self.clearButton_clicked)
        self.actionClear_All.triggered.connect(self.clearButton_clicked)
        self.actionAbout_Dice_Roll.triggered.connect(self.actionAbout_triggered)
        self.popAboutDialog = aboutDialog()
        self.quitButton.clicked.connect(self.quitButton_clicked)
        self.actionQuit.triggered.connect(self.quitButton_clicked)
        self.rollInput.returnPressed.connect(self.manual_roll)
        # String argument passed to roll(), e.g. "3D6+1".
        self.dice_to_roll = ''

    def diceCount_changed(self):
        '''
        Clear die modifier and last roll result
        '''
        self.diceDM.setValue(0)
        self.diceRoll.setText('')
        self.rollInput.clear()

    def diceType_changed(self):
        '''
        Enable/disable the dice count and die modifier fields
        depending on the dice type chosen.
        And clear fields as needed.
        '''
        self.dice_type = die_types[self.diceType.currentIndex()]
        # D4..D12: variable count, modifier allowed.
        if self.diceType.currentIndex() <= 4:
            self.countLabel.setEnabled(1)
            self.diceCount.setEnabled(1)
            self.dmLabel.setEnabled(1)
            self.diceDM.setEnabled(1)
        # D20, D30 and D100: always a single die, modifier allowed.
        if self.diceType.currentIndex() >= 5 and self.diceType.currentIndex() <= 6 or self.diceType.currentIndex() >= 8:
            self.diceCount.setValue(1)
            self.countLabel.setEnabled(0)
            self.diceCount.setEnabled(0)
            self.dmLabel.setEnabled(1)
            self.diceDM.setEnabled(1)
        # D66: single die, no modifier.
        if self.diceType.currentIndex() == 7:
            self.diceCount.setValue(1)
            self.countLabel.setEnabled(0)
            self.diceCount.setEnabled(0)
            self.dmLabel.setEnabled(0)
            self.diceDM.setEnabled(0)
        self.diceDM.setValue(0)
        self.diceRoll.setText('')
        self.rollInput.clear()

    def diceDM_changed(self):
        '''
        Clear last roll result if die modifier is changed
        '''
        self.diceRoll.setText('')
        self.rollInput.clear()

    def rollButton_clicked(self):
        '''
        Roll button was clicked.
        Construct the string argument needed for roll().
        '''
        # A negative modifier already carries its own '-' sign.
        if self.diceDM.value() >= 0:
            math_op = '+'
        else:
            math_op = ''
        # Only D4..D12 (indices 0-4) take a dice count prefix.
        if self.diceType.currentIndex() > 4:
            self.dice_to_roll = ''
        else:
            self.dice_to_roll = str(self.diceCount.value())
        self.dice_to_roll += self.dice_type
        # D66 (index 7) never takes a modifier suffix.
        if self.diceType.currentIndex() != 7:
            self.dice_to_roll += math_op + str(self.diceDM.value())
        self.diceRoll.setText(str(roll(self.dice_to_roll)))
        self.rollBrowser.append(self.dice_to_roll + ' = ' + self.diceRoll.text())
        self.rollInput.clear()

    def manual_roll(self):
        '''
        A roll was inputed manually
        '''
        dice_entered = self.rollInput.text()
        roll_returned = roll(dice_entered)
        # Was the roll a valid one? roll() returns -9999 on invalid input.
        if roll_returned == -9999:
            returned_line = dice_entered + ' = ' + '<span style=" color:#ff0000;">' + str(roll_returned) + '</span>'
        else:
            returned_line = dice_entered + ' = ' + str(roll_returned)
        # Display the roll result inside the text browser
        self.rollBrowser.append(returned_line)

    def clearButton_clicked(self):
        '''
        Clear/reset all fields
        '''
        self.diceCount.setValue(1)
        self.diceDM.setValue(0)
        self.diceRoll.setText('')
        self.rollInput.clear()
        self.rollBrowser.clear()

    def actionAbout_triggered(self):
        '''
        Display the About window
        '''
        self.popAboutDialog.show()

    def quitButton_clicked(self):
        '''
        Exit this app
        '''
        self.close()

    def activate(self, reason):
        """Toggle main-window visibility when the systray icon is clicked."""
        if reason == QSystemTrayIcon.Trigger:  # systray icon clicked.
            if self.isVisible():
                self.hide()
            else:
                self.show()

    def display_app(self, reason):
        """Show the main window (systray menu action)."""
        self.show()

    def hide_app(self, reason):
        """Hide the main window (systray menu action)."""
        self.hide()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Keep running in the system tray when the main window is closed.
    app.setQuitOnLastWindowClosed(False)

    # Use print(QStyleFactory.keys()) to find a setStyle you like, instead of 'Fusion'
    app.setStyle('Fusion')

    # Application-wide dark colour palette.
    darkPalette = QPalette()
    darkPalette.setColor(QPalette.Window, QColor(53, 53, 53))
    darkPalette.setColor(QPalette.WindowText, Qt.white)
    darkPalette.setColor(QPalette.Disabled, QPalette.WindowText, QColor(127, 127, 127))
    darkPalette.setColor(QPalette.Base, QColor(42, 42, 42))
    darkPalette.setColor(QPalette.AlternateBase, QColor(66, 66, 66))
    darkPalette.setColor(QPalette.ToolTipBase, Qt.white)
    darkPalette.setColor(QPalette.ToolTipText, Qt.white)
    darkPalette.setColor(QPalette.Text, Qt.white)
    darkPalette.setColor(QPalette.Disabled, QPalette.Text, QColor(127, 127, 127))
    darkPalette.setColor(QPalette.Dark, QColor(35, 35, 35))
    darkPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))
    darkPalette.setColor(QPalette.Button, QColor(53, 53, 53))
    darkPalette.setColor(QPalette.ButtonText, Qt.white)
    darkPalette.setColor(QPalette.Disabled, QPalette.ButtonText, QColor(127, 127, 127))
    darkPalette.setColor(QPalette.BrightText, Qt.red)
    darkPalette.setColor(QPalette.Link, QColor(42, 130, 218))
    darkPalette.setColor(QPalette.Highlight, QColor(42, 130, 218))
    darkPalette.setColor(QPalette.Disabled, QPalette.Highlight, QColor(80, 80, 80))
    darkPalette.setColor(QPalette.HighlightedText, Qt.white)
    darkPalette.setColor(QPalette.Disabled, QPalette.HighlightedText, QColor(127, 127, 127))

    MainApp = DiceWindow()
    MainApp.show()
    app.setPalette(darkPalette)

    # Create the systray icon (die icon from the compiled Qt resources).
    icon = QIcon(":/icons/die")

    # Create the systray
    tray = QSystemTrayIcon()
    tray.setIcon(icon)
    tray.setVisible(True)

    # Create the systray menu
    menu = QMenu()
    showApp = QAction("Show App")
    showApp.triggered.connect(MainApp.display_app)
    menu.addAction(showApp)
    hideApp = QAction("Hide App")
    hideApp.triggered.connect(MainApp.hide_app)
    menu.addAction(hideApp)
    # NOTE(review): 'quit' shadows the builtin of the same name; harmless
    # here but worth renaming.
    quit = QAction("Quit")
    quit.triggered.connect(app.quit)
    menu.addAction(quit)
    tray.setToolTip("Dice Roll")

    # Add the menu to the tray
    tray.setContextMenu(menu)
    # Clicking the tray icon toggles the main window.
    tray.activated.connect(MainApp.activate)

    app.exec_()
| StarcoderdataPython |
1831897 | <reponame>geoffjay/shrt
from django.shortcuts import get_object_or_404, redirect, render
from django.views import View

from shrt.url.models import Url
class UrlView(View):
    """Redirect view.

    Handles a shortened URL by resolving its tag and redirecting to the
    original URL.
    """

    def get(self, request, tag):
        """Look up *tag* and redirect to the stored original URL.

        Improvement: an unknown tag now raises Http404 (a 404 response)
        via ``get_object_or_404`` instead of letting ``Url.DoesNotExist``
        propagate as a 500 error.
        """
        url = get_object_or_404(Url, tag=tag)
        return redirect(url.original)
| StarcoderdataPython |
8160429 | from typing import Any, ClassVar, Dict, Type
import dataclasses
from bidict import bidict
def _compile_node_value(value: Any, **compile_options) -> Any:
if isinstance(value, Node):
return value.compile(**compile_options)
elif isinstance(value, list):
return [_compile_node_value(item, **compile_options) for item in value]
elif isinstance(value, dict):
return {k: _compile_node_value(v, **compile_options) for k, v in value.items()}
else:
return value
def _parse_node_dict(d: Dict, **fields) -> "Node":
state_types = Node._NODE_CLASSES
if "Type" in d:
state_cls = state_types[d["Type"]]
elif "type" in fields:
assert isinstance(fields["type"], str)
state_cls = state_types[fields["type"]]
else:
state_cls = Node
for attr_name, sl_name in state_cls._FIELDS.items():
if sl_name in d:
fields[attr_name] = d[sl_name]
state_cls.parse_dict(d, fields)
try:
return state_cls(**fields)
except TypeError as e:
raise TypeError(f"Failed to instantiate {state_cls} because of: {e!r}")
@dataclasses.dataclass
class Node:
"""
Base class for all nodes in the state machine object tree.
"""
_FIELDS: ClassVar[bidict] = bidict()
_OUR_FIELDS: ClassVar[bidict] = bidict()
_NODE_CLASSES: ClassVar[Dict[str, Type]] = {}
type: str = "Node"
def __init_subclass__(cls, **kwargs):
Node._NODE_CLASSES[cls.__name__] = cls
@classmethod
def name_from_sl(cls, name):
"""
Translate a field name from States Language.
"""
if name in cls._FIELDS.inv:
return cls._FIELDS.inv[name]
elif name in cls._OUR_FIELDS.inv:
return cls._OUR_FIELDS.inv[name]
raise KeyError(name)
@classmethod
def name_to_sl(cls, name):
"""
Translate an attribute name to States Language.
"""
return cls._FIELDS[name]
@classmethod
def parse(cls, raw: Any, **fields) -> "Node":
if isinstance(raw, Node):
# Do not recreate Node instance if it is being parsed without any changes
if not fields:
return raw
else:
return cls.parse(raw.compile(), **fields)
if isinstance(raw, list):
raise TypeError()
# TODO Add an explicit test for this
fields.setdefault("type", cls.__name__)
# TODO None of the below belongs to Node class! Move to State.
field_names = [f.name for f in dataclasses.fields(cls)]
if isinstance(raw, dict):
if "name" in field_names:
if "Name" in raw:
fields.setdefault("name", raw["Name"])
elif "Resource" in raw:
fields.setdefault("name", raw["Resource"])
elif "Comment" in raw:
fields.setdefault("name", raw["Comment"])
return _parse_node_dict(raw, **fields)
if "name" in field_names:
fields.setdefault("name", str(raw))
if "obj" in field_names:
fields.setdefault("obj", raw)
# TODO Create instance of the specified type!
instance = cls(**fields)
return instance
def compile(self, **compile_options) -> Dict:
c = {}
for f in self._FIELDS.keys():
value = getattr(self, f, None)
if value is not None:
c[self._FIELDS[f]] = _compile_node_value(value, **compile_options)
self.compile_dict(c)
return c
@classmethod
def parse_dict(cls, d: Dict, fields: Dict) -> None:
"""
A hook for custom Node classes.
``fields`` to be modified in place with the values parsed from ``d``.
DO NOT call super().
"""
pass
def compile_dict(self, c: Dict) -> None:
"""
A hook for custom Node class to add its own compile logic.
DO NOT call super().
The dictionary should be modified in place.
This is called before applying external handlers (state_visitor).
"""
pass
| StarcoderdataPython |
11347313 | <filename>test/connector/test_google_cloud_connector.py
import unittest
import os
import json
from datetime import datetime, timedelta
from spaceone.tester import TestCase
from spaceone.core.unittest.runner import RichTestRunner
from spaceone.core import config
from spaceone.core.unittest.result import print_data
from spaceone.core.transaction import Transaction
from spaceone.monitoring.connector.google_cloud_connector import GoogleCloudConnector
from spaceone.monitoring.manager.google_cloud_manager import GoogleCloudManager
from pprint import pprint
GOOGLE_APPLICATION_CREDENTIALS_PATH = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None)
if GOOGLE_APPLICATION_CREDENTIALS_PATH is None:
print("""
##################################################
# ERROR
#
# Configure your GCP credential first for test
# https://console.cloud.google.com/apis/credentials
##################################################
example)
export GOOGLE_APPLICATION_CREDENTIALS="<PATH>"
""")
exit
def _get_credentials():
with open(GOOGLE_APPLICATION_CREDENTIALS_PATH) as json_file:
json_data = json.load(json_file)
return json_data
class TestGoogleCloudStackDriverConnector(TestCase):
@classmethod
def setUpClass(cls):
config.init_conf(package='spaceone.monitoring')
cls.secret_data = _get_credentials() if _get_credentials() is not None else {}
cls.gcp_connector = GoogleCloudConnector(Transaction(), {})
super().setUpClass()
@classmethod
def tearDownClass(cls) -> None:
super().tearDownClass()
def test_get_connect_with_google_service_key(self):
options = {}
secret_data = self.secret_data
self.gcp_connector.set_connect({}, options, secret_data)
def test_list_metrics(self):
options = {}
secret_data = self.secret_data
self.gcp_connector.set_connect({}, options, secret_data)
resource = {
'type': 'gce_instance',
'filters': [{
'key': 'metric.labels.instance_name',
'value': 'stackdriver-jhsong-01'
}]
}
metrics_info = self.gcp_connector.list_metrics(resource)
print_data(metrics_info, 'test_list_metrics')
def test_get_metric_data(self):
end = datetime.utcnow()
start = end - timedelta(minutes=60)
options = {}
secret_data = self.secret_data
self.gcp_connector.set_connect({}, options, secret_data)
options = {'metric': 'compute.googleapis.com/instance/cpu/utilization',
'resource': {
'type': 'gce_instance',
'filters': [{
'key': 'resource.labels.instance_id',
'value': '1873022307818018997'
}]
},
'aligner': 'ALIGN_SUM',
'start': start,
'end': end,
'interval': '360s'
}
metrics_info = self.gcp_connector.get_metric_data(
options.get('resource'),
options.get('metric'),
options.get('start'),
options.get('end'),
options.get('interval'),
options.get('aligner'),
)
print_data(metrics_info, 'test_list_metrics')
def test_all_metric_data(self):
options = {}
secret_data = self.secret_data
self.gcp_connector.set_connect({}, options, secret_data)
resource = {
'type': 'gce_instance',
'filters': [{
'key': 'metric.labels.instance_name',
'value': 'stackdriver-jhsong-01'
}]
}
metrics_info = self.gcp_connector.list_metrics(resource)
end = datetime.utcnow()
start = end - timedelta(days=4)
gcp_mgr = GoogleCloudManager()
period = gcp_mgr._make_period_from_time_range(start, end)
stat = gcp_mgr._convert_stat('SUM')
for metric_info in metrics_info.get('metrics', []):
metric_data_info = self.gcp_connector.get_metric_data(
resource,
metric_info.get('key', ''),
start,
end,
period,
stat,
)
print_data(metric_data_info, f'test_all_metric_data.{metric_info.get("type")}')
if __name__ == "__main__":
unittest.main(testRunner=RichTestRunner)
| StarcoderdataPython |
8081316 | <reponame>ZhangHCFJEA/bbp
#!/usr/bin/env python
"""
CB 08 NGA model
"""
from utils import *
class CB08_nga():
"""
Class of NGA model of Campbell and Bozorgnia 2008
"""
def __init__(self):
"""
Model initialization
"""
# ============
# NGA models (parameters and coefficients)
# ============
# 0. period independent parameters
self.c = 1.88
self.n = 1.18
# 1. List of periods with defined coefficients (PGA is -1; PGV is -2)
self.periods = [0.01, 0.02, 0.03, 0.05, 0.075, 0.10, 0.15, 0.20, 0.25, 0.30, 0.40,
0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 7.5, 10.0, -1.0, -2.0 ]
# ===============================
# period-dependent coefficients
# ===============================
c0s = [ -1.715, -1.68, -1.552, -1.209, -0.657, -0.314, -0.133, -0.486, -0.89,
-1.171, -1.466, -2.569, -4.844, -6.406, -8.692, -9.701, -10.556, -11.212,
-11.684, -12.505, -13.087, -1.715, 0.954]
c1s = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.656, 0.972,
1.196, 1.513, 1.6, 1.6, 1.6, 1.6, 1.6, 1.6, 0.5, 0.696]
c2s = [-0.53, -0.53, -0.53, -0.53, -0.53, -0.53, -0.53, -0.446, -0.362, -0.294,
-0.186, -0.304, -0.578, -0.772, -1.046, -0.978, -0.638, -0.316, -0.07,
-0.07, -0.07, -0.53, -0.309]
c3s = [-0.262, -0.262, -0.262, -0.267, -0.302, -0.324, -0.339, -0.398, -0.458,
-0.511, -0.592, -0.536, -0.406, -0.314, -0.185, -0.236, -0.491, -0.77,
-0.986, -0.656, -0.422, -0.262, -0.019]
c4s = [-2.118, -2.123, -2.145, -2.199, -2.277, -2.318, -2.309, -2.22, -2.146,
-2.095, -2.066, -2.041, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2.118, -2.016]
c5s = [0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17,
0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17, 0.17]
c6s = [5.6, 5.6, 5.6, 5.74, 7.09, 8.05, 8.79, 7.6, 6.58, 6.04, 5.3, 4.73, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5.6, 4]
c7s = [0.28, 0.28, 0.28, 0.28, 0.28, 0.28, 0.28, 0.28, 0.28, 0.28, 0.28, 0.28,
0.28, 0.255, 0.161, 0.094, 0, 0, 0, 0, 0, 0.28, 0.245]
c8s = [-0.12, -0.12, -0.12, -0.12, -0.12, -0.099, -0.048, -0.012, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, -0.12, 0]
c9s = [0.49, 0.49, 0.49, 0.49, 0.49, 0.49, 0.49, 0.49, 0.49, 0.49, 0.49, 0.49,
0.49, 0.49, 0.49, 0.371, 0.154, 0, 0, 0, 0, 0.49, 0.358]
c10s = [1.058, 1.102, 1.174, 1.272, 1.438, 1.604, 1.928, 2.194, 2.351, 2.46,
2.587, 2.544, 2.133, 1.571, 0.406, -0.456, -0.82, -0.82, -0.82, -0.82,
-0.82, 1.058, 1.694]
c11s = [0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04,
0.077, 0.15, 0.253, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.04, 0.092]
c12s = [0.61, 0.61, 0.61, 0.61, 0.61, 0.61, 0.61, 0.61, 0.7, 0.75, 0.85, 0.883,
1, 1, 1, 1, 1, 1, 1, 1, 1, 0.61, 1]
k1s = [865, 865, 908, 1054, 1086, 1032, 878, 748, 654, 587, 503, 457, 410, 400,
400, 400, 400, 400, 400, 400, 400, 865, 400]
k2s = [-1.186, -1.219, -1.273, -1.346, -1.471, -1.624, -1.931, -2.188, -2.381,
-2.518, -2.657, -2.669, -2.401, -1.955, -1.025, -0.299, 0, 0, 0, 0, 0,
-1.186, -1.955]
k3s = [1.839, 1.84, 1.841, 1.843, 1.845, 1.847, 1.852, 1.856, 1.861, 1.865, 1.874,
1.883, 1.906, 1.929, 1.974, 2.019, 2.11, 2.2, 2.291, 2.517, 2.744, 1.839, 1.929]
# aleatory uncertainty models
# page 149 of CB 08 ES paper
self.sigma_lnY = [0.478, 0.480, 0.489, 0.510, 0.520, 0.531, 0.532, 0.534, 0.534,
0.544, 0.541, 0.550, 0.568, 0.568, 0.564, 0.571, 0.558, 0.576,
0.601, 0.628, 0.667, 0.478, 0.484, 0.667]
# intra-event residual standard deviation
self.tau_lnY = [0.219, 0.219, 0.235, 0.258, 0.292, 0.286, 0.280, 0.249, 0.240,
0.215, 0.217, 0.214, 0.227, 0.255, 0.296, 0.296, 0.326, 0.297,
0.359, 0.428, 0.485, 0.219, 0.203 ]
self.sigma_C = [0.166, 0.166, 0.165, 0.162, 0.158, 0.170, 0.180, 0.186, 0.191,
0.198, 0.206, 0.208, 0.221, 0.225, 0.222, 0.226, 0.229, 0.237,
0.237, 0.271, 0.290, 0.166, 0.190 ]
self.rho = [ 1.000, 0.999, 0.989, 0.963, 0.922, 0.898, 0.890, 0.871, 0.852,
0.831, 0.785, 0.735, 0.628, 0.534, 0.411, 0.331, 0.289, 0.261,
0.200, 0.174, 0.174, 1.000, 0.691 ]
# Old Coefs (period match)
self.Coefs = {}
for i in xrange(len(self.periods)):
T1 = self.periods[i]
Tkey = GetKey(T1)
self.Coefs[Tkey] = {}
self.Coefs[Tkey]['c0'] = c0s[i]
self.Coefs[Tkey]['c1'] = c1s[i]
self.Coefs[Tkey]['c2'] = c2s[i]
self.Coefs[Tkey]['c3'] = c3s[i]
self.Coefs[Tkey]['c4'] = c4s[i]
self.Coefs[Tkey]['c5'] = c5s[i]
self.Coefs[Tkey]['c6'] = c6s[i]
self.Coefs[Tkey]['c7'] = c7s[i]
self.Coefs[Tkey]['c8'] = c8s[i]
self.Coefs[Tkey]['c9'] = c9s[i]
self.Coefs[Tkey]['c10'] = c10s[i]
self.Coefs[Tkey]['c11'] = c11s[i]
self.Coefs[Tkey]['c12'] = c12s[i]
self.Coefs[Tkey]['k1'] = k1s[i]
self.Coefs[Tkey]['k2'] = k2s[i]
self.Coefs[Tkey]['k3'] = k3s[i]
self.CoefKeys = self.Coefs[self.Coefs.keys()[0]].keys()
# Call to get the SA value
def __call__(self,M,Rjb,Vs30,T, rake, Ftype=None, \
Rrup=None,dip=None,Ztor=None,Z25=None, \
W=None,Zhypo=None,azimuth=None,Fhw=0,\
Z10=None,Z15=None, Arb=0, \
CoefTerms={'terms':(1,1,1,1,1,1),'NewCoefs':None}):
"""
Call the class to compute median ground-motion intensity
You have to call the function here to make the class rich
"""
# Those inputs have to be specified
self.M = M # moment magnitude
self.Rjb = float(Rjb) # Joyner-Boore distance (km)
self.Vs30 = float(Vs30) # time-averaged shear wave velocity over 30m subsurface depth (m/s)
self.T = T # select period (sec)
self.rake = rake # rake could be None then you have to give the W and dip
terms = CoefTerms['terms']
NewCoefs = CoefTerms['NewCoefs']
# check inputs
if T in self.periods:
self.T = T
else:
print 'T is not in periods list, try to interpolate'
raise ValueError
if self.M == None or self.M < 0:
print 'Moment magnitude must be a postive number'
raise ValueError
if self.Rjb == None or self.Rjb < 0:
print 'Joyner-Boore distance must be a non-negative number'
raise ValueError
if self.Vs30 == None or self.Vs30 < 0:
print 'Vs30 must be a positive number'
raise ValueError
# Determine the Fault-related parameters (if necessary)
if Ftype != None:
self.Fnm = 1*(Ftype == 'NM')
self.Frv = 1*(Ftype == 'RV')
else:
if rake == None or rake < -180 or rake > 180.:
print 'rake angle should be within [-180,180]'
raise ValueError
else:
self.Frv, self.Fnm = rake2ftype_CB( self.rake )
if W == None:
if self.rake == None:
print 'you should give either the fault width W or the rake angle'
raise ValueError
else:
W = calc_W(self.M,self.rake)
else:
self.W = W
if dip == None:
if self.rake == None:
print 'you should give either the fault dip angle or the rake angle'
raise ValueError
else:
self.dip = calc_dip( self.rake )
else:
self.dip = dip
if Ztor == None:
if Zhypo == None:
if self.rake == None:
print 'you should give either the Ztor or the rake angle'
raise ValueError
else:
Zhypo = calc_Zhypo( self.M, self.rake )
self.Ztor = calc_Ztor( W, self.dip, Zhypo )
else:
self.Ztor = Ztor
# Determine Site-Source related parameters (if necessary)
if Rrup == None:
if azimuth == None:
if Fhw != None:
if Fhw == 1:
azimuth = 50 # hanging wall site
else:
azimuth = -50. # footwall site
else:
azimuth = -50.
if self.Rjb == 0:
Fhw = 1
azimuth = 90
Rx = calc_Rx( self.Rjb,self.Ztor, W, self.dip, azimuth, Rrup=Rrup )
self.Rrup = calc_Rrup( Rx, self.Ztor, W, self.dip, azimuth, Rjb = self.Rjb )
else:
self.Rrup = Rrup
# Determine Site-Specific parameters
if Z25 == None:
self.Z25 = calc_Z25(self.Vs30,Z1model='CY')
else:
self.Z25 = Z25 # input Z25 should be in km
# update coeficient (use updated coefficients)
if NewCoefs != None:
NewCoefKeys = NewCoefs.keys()
Tkey = GetKey(self.T)
for key in NewCoefKeys:
self.Coefs[Tkey][key] = NewCoefs[key]
# Compute IM and Standard deviation
IM = self.compute_im(terms=terms)
sigma, tau, sigmaT, sigmaArb = self.sd_calc()
if Arb == 0:
return IM, sigmaT, tau, sigma
else:
return IM, sigmaArb, tau, sigma
# ============================
# Function used in this class
# ============================
def moment_function(self,M=None,Tother=None):
"""
Moment term
"""
if Tother != None:
Ti = GetKey( Tother )
else:
Ti = GetKey( self.T )
c0 = self.Coefs[Ti]['c0']
c1 = self.Coefs[Ti]['c1']
c2 = self.Coefs[Ti]['c2']
c3 = self.Coefs[Ti]['c3']
if M != None:
self.M = M
if self.M <= 5.5:
return c0 + c1 * self.M
elif 5.5<self.M<=6.5:
return c0 + c1 * self.M + c2 * (self.M-5.5)
else:
return c0 + c1 * self.M + c2 * (self.M-5.5) + c3*(self.M-6.5)
def distance_function(self,M=None,Rrup=None,Tother=None):
"""
Distance term
"""
if Tother != None:
Ti = GetKey( Tother )
else:
Ti = GetKey( self.T )
if M != None:
self.M = M
if Rrup != None:
self.Rrup = Rrup
c4 = self.Coefs[Ti]['c4']
c5 = self.Coefs[Ti]['c5']
c6 = self.Coefs[Ti]['c6']
Rtmp = np.sqrt( self.Rrup**2 + c6**2)
return (c4+c5*self.M)*np.log(Rtmp)
def fault_function(self,Tother=None):
"""
Fault mechanism term
or style of the fault
"""
if Tother != None:
Ti = GetKey( Tother )
else:
Ti = GetKey( self.T )
c7 = self.Coefs[Ti]['c7']
c8 = self.Coefs[Ti]['c8']
if self.Ztor < 1:
f_fltz = self.Ztor
else:
f_fltz = 1
return c7*self.Frv*f_fltz+c8*self.Fnm
def hw_function(self,Tother=None):
"""
Hanging Wall term
"""
if Tother != None:
Ti = GetKey( Tother )
else:
Ti = GetKey( self.T )
c9 = self.Coefs[Ti]['c9']
if self.Rjb == 0:
f_hngr = 1
elif self.Rjb > 0 and self.Ztor < 1:
f_hngr = (max(self.Rrup,np.sqrt( self.Rjb**2+1 ))-self.Rjb) / max(self.Rrup,np.sqrt(self.Rjb**2+1))
elif self.Rjb >0 and self.Ztor >= 1:
f_hngr = (self.Rrup-self.Rjb)/self.Rrup
else:
print 'Rjb should be larger or equal to 0'
raise ValueError
if self.M <= 6.0:
f_hngm = 0
elif 6.0<self.M<6.5:
f_hngm = 2*(self.M-6.0)
else:
f_hngm = 1
if self.Ztor >= 20:
f_hngz = 0
elif 0 <= self.Ztor < 20:
f_hngz = (20-self.Ztor)/20
else:
#print 'Ztor is less than 0' # R code cannot handle this
f_hngz = 0
if self.dip <= 70:
f_hngd = 1
else:
f_hngd = (90-self.dip)/20
return c9*f_hngr*f_hngm*f_hngz*f_hngd
def basin_function(self,Tother=None,Z25=None):
"""
Basin-effect term
"""
if Tother != None:
Ti = GetKey( Tother )
else:
Ti = GetKey( self.T )
if Z25 != None:
self.Z25 = Z25
c11 = self.Coefs[Ti]['c11']
c12 = self.Coefs[Ti]['c12']
k3 = self.Coefs[Ti]['k3']
if self.Z25 < 1:
return c11 * (self.Z25-1)
elif 1 <= self.Z25 <= 3:
return 0
else:
return c12 * k3*np.exp(-0.75)*(1-np.exp(-0.25*(self.Z25-3)))
def A1100_calc(self):
Tother = -1.0
A1100 = np.exp( self.moment_function(Tother)+
self.distance_function(Tother)+
self.fault_function(Tother)+
self.hw_function(Tother)+
self.basin_function(Tother=Tother)+
self.site_function(A1100=0,Vs30=1100.,Tother = Tother) )
return A1100
def site_function(self,A1100=None,Vs30=None,Tother=None):
"""
Shallow site effect term
Be careful to the input variables (they are keys, not arguments)
"""
# PGA at reference rock that has Vs30 = 1100 (unit: m/s)
if A1100 == None:
A1100 = self.A1100_calc()
if Vs30 == None:
Vs30 = self.Vs30
if Tother != None:
Ti = GetKey(Tother)
else:
Ti = GetKey(self.T)
c10 = self.Coefs[Ti]['c10']
k1 = self.Coefs[Ti]['k1']
k2 = self.Coefs[Ti]['k2']
if Vs30 < k1:
return c10 * np.log(Vs30/k1) + k2*(np.log(A1100+self.c*(Vs30/k1)**self.n)-np.log(A1100+self.c))
elif k1 <= Vs30 < 1100.:
return (c10+k2*self.n)*np.log(Vs30 /k1)
else:
return (c10+k2*self.n)*np.log(1100./k1)
# Final function to compute Sa, PGA, PGV
def compute_im(self,terms=(1,1,1,1,1,1)):
"""
Compute IM based on functional form of CB08 model
"""
IM = np.exp(terms[0]*self.moment_function()+
terms[3]*self.distance_function()+
terms[1]*self.fault_function()+
terms[2]*self.hw_function()+
terms[5]*self.site_function()+
terms[4]*self.basin_function())
if self.T <= 0.25: # and self.T != -1.0:
Tother=-1.0
# This is PGA itself
IM1 = np.exp(terms[0]*self.moment_function(Tother)+
terms[3]*self.distance_function(Tother)+
terms[1]*self.fault_function(Tother)+
terms[2]*self.hw_function(Tother)+
terms[5]*self.site_function(Tother=Tother)+
terms[4]*self.basin_function(Tother=Tother))
if IM < IM1:
# This is for SA (not for PGA, since PGA is computed above)
IM = IM1
return IM
# function used to compute standard deviation terms
def alpha_calc( self, Vs30=None, Tother=None ):
if Vs30 == None:
Vs30 = self.Vs30
if Tother == None:
Ti = GetKey( self.T )
else:
Ti = GetKey( Tother )
k1 = self.Coefs[Ti]['k1']
k2 = self.Coefs[Ti]['k2']
A1100 = self.A1100_calc()
# compute alpha
if Vs30 < k1:
alpha = k2 * A1100 * (1./(A1100+self.c*(Vs30/k1)**self.n)-1./(A1100+self.c))
else:
alpha = 0
return alpha
def sigma_calc( self, Vs30=None, Tother=None ):
"""
Intra-event residual standard deviation
"""
sigma_lnAF = 0.3
sigma_lnYb = np.sqrt(self.sigma_lnY[(np.array(self.periods)==self.T).nonzero()[0]]**2-sigma_lnAF**2)
sigma_lnAb = np.sqrt(self.sigma_lnY[(np.array(self.periods)==-1.0).nonzero()[0]]**2-sigma_lnAF**2) # PGA
alpha = self.alpha_calc()
sigma = np.sqrt(sigma_lnYb**2+sigma_lnAF**2 + alpha**2*sigma_lnAb**2 + \
2*alpha*self.rho[(np.array(self.periods)==self.T).nonzero()[0]]*sigma_lnYb*sigma_lnAb ) # Eqn (15) CB08 ES
return sigma
def sd_calc(self):
# compute SD at interested period self.T
indT = (np.array(self.periods)==self.T).nonzero()[0]
tau = self.tau_lnY[indT]
sigma = self.sigma_calc()
sigmaT = np.sqrt( sigma**2 + tau**2 )
sigmaArb = np.sqrt( sigmaT**2 + self.sigma_C[indT]**2 )
# standard deviations are in logarithm scale !!!
return (sigma, tau, sigmaT, sigmaArb)
def CB08nga_test(T, CoefTerms):
"""
Test CB nga model
"""
M = 4.0
Vs30 = 748.0,1200.,345.,
Vs30 = 760.
#Vs30 = c(748.0,1200.,345.,160.)
Z25 = None
Ztor = 3
dip = 90
Rjb = np.arange(1,200,5)
Rrup = Rjb
Ftype = 'SS'
rake = 0
Arb = 0
W = 10.
# How to use it
CBnga = CB08_nga()
kwds = {'Ftype':Ftype,'Z25':Z25,'Rrup':Rrup,'W':W,'Ztor':Ztor,'dip':dip,'Arb':Arb,'CoefTerms':CoefTerms}
values = mapfunc( CBnga, M, Rjb, Vs30, T, rake,**kwds )
for i in xrange( len(values) ):
print Rrup[i], values[i]
return CBnga
if __name__ == '__main__':
T = 2.0; NewCoefs = {'c1':1.6,'c2':-0.978}
T = 2.0; NewCoefs = {'c1':1.7,'c2':-0.648}
T = 2.0; NewCoefs = None
CoefTerms = {'terms':(1,1,1,1,1,1),'NewCoefs':NewCoefs}
Ts = [2.0, 3.0, 4.0, 5.0, 7.5, 10.0]
Ts = [0.3]
for T in Ts:
print 'CB SA at %s'%('%3.2f'%T)
CBnga = CB08nga_test(T,CoefTerms)
T = -1.0
print 'CB PGA'
CBnga = CB08nga_test(T,CoefTerms)
| StarcoderdataPython |
3249572 | <reponame>coxmediagroup/nodemeister
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field groups on 'Group'
db.delete_table('enc_group_groups')
# Adding M2M table for field parents on 'Group'
db.create_table(u'enc_group_parents', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_group', models.ForeignKey(orm[u'enc.group'], null=False)),
('to_group', models.ForeignKey(orm[u'enc.group'], null=False))
))
db.create_unique(u'enc_group_parents', ['from_group_id', 'to_group_id'])
def backwards(self, orm):
# Adding M2M table for field groups on 'Group'
db.create_table(u'enc_group_groups', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_group', models.ForeignKey(orm[u'enc.group'], null=False)),
('to_group', models.ForeignKey(orm[u'enc.group'], null=False))
))
db.create_unique(u'enc_group_groups', ['from_group_id', 'to_group_id'])
# Removing M2M table for field parents on 'Group'
db.delete_table('enc_group_parents')
models = {
u'enc.classexclusion': {
'Meta': {'unique_together': "(('node', 'exclusion'),)", 'object_name': 'ClassExclusion'},
'exclusion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'excluded_classes'", 'to': u"orm['enc.Node']"})
},
u'enc.group': {
'Meta': {'object_name': 'Group'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups'", 'blank': 'True', 'to': u"orm['enc.Group']"})
},
u'enc.groupclass': {
'Meta': {'unique_together': "(('group', 'classname'),)", 'object_name': 'GroupClass'},
'classname': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'classparams': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes'", 'to': u"orm['enc.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'enc.groupparameter': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupParameter'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameters'", 'to': u"orm['enc.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
u'enc.node': {
'Meta': {'object_name': 'Node'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'excluded_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excluded_nodes'", 'blank': 'True', 'to': u"orm['enc.Group']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'nodes'", 'blank': 'True', 'to': u"orm['enc.Group']"}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'enc.nodeclass': {
'Meta': {'unique_together': "(('node', 'classname'),)", 'object_name': 'NodeClass'},
'classname': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'classparams': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes'", 'to': u"orm['enc.Node']"})
},
u'enc.nodeparameter': {
'Meta': {'unique_together': "(('node', 'key'),)", 'object_name': 'NodeParameter'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameters'", 'to': u"orm['enc.Node']"}),
'value': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
u'enc.paramexclusion': {
'Meta': {'unique_together': "(('node', 'exclusion'),)", 'object_name': 'ParamExclusion'},
'exclusion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'excluded_params'", 'to': u"orm['enc.Node']"})
}
}
complete_apps = ['enc'] | StarcoderdataPython |
1762442 | # third-party imports
import numpy as np
import logging
# OBSERVATION PREPROCESSING ==================================
def obs_preprocessor_tm_act_in_obs(obs):
"""
This takes the output of gym as input
Therefore the output of the memory must be the same as gym
"""
obs = (obs[0], obs[1], obs[2], obs[3], *obs[4:]) # >= 1 action
# logging.debug(f" (not same as old): preprocessed obs:{obs}")
return obs
def obs_preprocessor_tm_lidar_act_in_obs(obs):
"""
This takes the output of gym as input
Therefore the output of the memory must be the same as gym
"""
obs = (obs[0], np.ndarray.flatten(obs[1]), *obs[2:]) # >= 1 action
# logging.debug(f" (not same as old): preprocessed obs:{obs}")
return obs
def obs_preprocessor_cognifly(obs):
"""
This takes the output of gym as input
Therefore the output of the memory must be the same as gym
"""
return obs
# SAMPLE PREPROCESSING =======================================
# these can be called when sampling from the replay memory, on the whole sample after observation preprocesing
# this is useful in particular for data augmentation
# be careful whatever you do here is consistent, because consistency after this will NOT be checked by CRC
def sample_preprocessor_tm_lidar_act_in_obs(last_obs, act, rew, new_obs, done):
return last_obs, act, rew, new_obs, done
| StarcoderdataPython |
111350 | <reponame>antopen/alipay-sdk-python-all<filename>alipay/aop/api/response/AlipayCommerceLogisticsWaybillIstddetailQueryResponse.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceLogisticsWaybillIstddetailQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceLogisticsWaybillIstddetailQueryResponse, self).__init__()
self._reach_duration = None
self._rider_lat = None
self._rider_lng = None
self._rider_mobile_no = None
self._rider_name = None
self._status = None
@property
def reach_duration(self):
return self._reach_duration
@reach_duration.setter
def reach_duration(self, value):
self._reach_duration = value
@property
def rider_lat(self):
return self._rider_lat
@rider_lat.setter
def rider_lat(self, value):
self._rider_lat = value
@property
def rider_lng(self):
return self._rider_lng
@rider_lng.setter
def rider_lng(self, value):
self._rider_lng = value
@property
def rider_mobile_no(self):
return self._rider_mobile_no
@rider_mobile_no.setter
def rider_mobile_no(self, value):
self._rider_mobile_no = value
@property
def rider_name(self):
return self._rider_name
@rider_name.setter
def rider_name(self, value):
self._rider_name = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceLogisticsWaybillIstddetailQueryResponse, self).parse_response_content(response_content)
if 'reach_duration' in response:
self.reach_duration = response['reach_duration']
if 'rider_lat' in response:
self.rider_lat = response['rider_lat']
if 'rider_lng' in response:
self.rider_lng = response['rider_lng']
if 'rider_mobile_no' in response:
self.rider_mobile_no = response['rider_mobile_no']
if 'rider_name' in response:
self.rider_name = response['rider_name']
if 'status' in response:
self.status = response['status']
| StarcoderdataPython |
4811091 | <reponame>reberhardt7/sofa
import logging
log = logging.getLogger(__name__)
# TODO: ResourceCreated, ResourceUpdated, and ResourceDeleted can be combined
# into one class: ResourceResponse
class ResourceCreated(object):
"""
Usage: return ResourceCreated()
"""
def __init__(self, resource_id, message='Resource created.', data="null"):
self.status_code = 201
self.resource_id = resource_id
self.message = message
self.data = data
def __json__(self, request):
request.response.status_int = self.status_code
request.response.status = "201 Created"
return {'statusCode': self.status_code,
'resourceID': self.resource_id,
'message': self.message}
class ResourceUpdated(object):
"""
Usage: return ResourceUpdated()
"""
def __init__(self, message='Resource updated.'):
self.status_code = 200
self.message = message
def __json__(self, request):
request.response.status_int = 200
request.response.status = "200 OK"
return {'statusCode': 200,
'errorID': 'resource_updated',
'message': self.message}
class ResourceDeleted(object):
"""
Usage: return ResourceDeleted()
"""
def __init__(self, message='Resource deleted.'):
self.status_code = 200
self.message = message
def __json__(self, request):
request.response.status_int = 200
request.response.status = "200 OK"
return {'statusCode': 200,
'errorID': 'resource_deleted',
'message': self.message}
class ResourceException(Exception):
"""
Usage: raise ResourceException(status_code, 'error_id', 'message') in views,
return ResourceException(status_code, 'error_id', 'message') in models
"""
def __init__(self, status_code, error_id, message):
log.debug("ResourceException({}, {}): {}".format(status_code, error_id, message))
if status_code not in (304, 400, 401, 403, 404, 422, 500):
raise ValueError("%s is not a valid status code" % status_code)
self.status_code = status_code
self.error_id = error_id
self.message = message
| StarcoderdataPython |
11236925 | import urllib
def get_sentence_from_source(source):
# The next two lines are absolutely the most horrible lines I've written in five years!
direct = [{**t, "direct": True} for t in [{k: sentence[k] for k in ('text', 'id', 'lang')} for sentence in source[1]]]
indirect = [{**t, "direct": False} for t in [{k: sentence[k] for k in ('text', 'id', 'lang')} for sentence in source[2]]]
return {
"translations": direct + indirect,
"text": source[0]['text'],
"lang": source[0]['lang'],
"id": source[0]['id'],
}
def build_search_url_from_request_data(request_data):
    """Translate API request parameters into a Tatoeba advanced-search URL.

    Returns ``None`` when no search text was supplied; otherwise a fully
    percent-quoted URL whose parameter defaults mirror Tatoeba's own form.
    """
    search_text = request_data.get("text")
    if search_text is None:
        return None

    # Plain key -> default pairs, in the order they appear in the URL.
    simple_defaults = (
        ("from", "und"), ("to", "und"), ("page", "1"), ("user", ""),
        ("orphans", "no"), ("unapproved", "no"), ("has_audio", ""),
        ("tags", ""), ("list", ""), ("native", ""), ("trans_filter", "limit"),
    )
    params = {key: request_data.get(key, default) for key, default in simple_defaults}
    # trans_to falls back to the main target language when unset or empty.
    params["trans_to"] = request_data.get("trans_to") or request_data.get("to", "und")
    for key in ("trans_link", "trans_user", "trans_orphan",
                "trans_unapproved", "trans_has_audio"):
        params[key] = request_data.get(key, "")
    params["sort"] = request_data.get("sort", "relevance")
    params["sort_reverse"] = request_data.get("sort_reverse", "")

    base = "https://www.tatoeba.org/eng/sentences/search?query=" + urllib.parse.quote(search_text)
    query_tail = "".join(
        "&{}={}".format(key, urllib.parse.quote(value)) for key, value in params.items()
    )
    return base + query_tail
| StarcoderdataPython |
219844 | # read numbers
# Advent of Code 2020, day 1: find entries in nums.txt summing to 2020.
from itertools import combinations


def read_numbers(path="nums.txt"):
    """Return the integers listed one per line in *path*."""
    with open(path, "r") as f:
        return [int(line) for line in f]


def find_pairs(numbers, target=2020):
    """Return every pair (in input order) whose sum equals *target*."""
    return [pair for pair in combinations(numbers, 2) if sum(pair) == target]


def find_triples(numbers, target=2020):
    """Return every triple (in input order) whose sum equals *target*."""
    return [triple for triple in combinations(numbers, 3) if sum(triple) == target]


if __name__ == "__main__":
    # Guarded so importing this module does not touch the filesystem.
    numbers = read_numbers()
    print(numbers)
    # part 1 soln
    for a, b in find_pairs(numbers):
        print("YAY: {} {}".format(a, b))
    # part 2 soln
    for a, b, c in find_triples(numbers):
        print("YAY: {} {} {}".format(a, b, c))
| StarcoderdataPython |
8033908 | <filename>olea/core/blueprints/pit/query.py
from flask import g
from models import Pit, Role
from core.auth import check_opt_duck, check_scopes
from core.base import single_query
from core.errors import AccessDenied
class PitQuery():
    """Read-only query helpers for Pit records with per-scope access checks."""

    @staticmethod
    def single(id_):
        """Fetch one Pit by id (or pass an object through).

        Access is granted when the pit belongs to the current pink
        (g.pink_id) or is in one of the publicly visible statuses.
        """
        # NOTE(review): the keyword is spelled "condiction" — it must match
        # single_query's parameter name exactly; confirm before renaming.
        return single_query(model=Pit,
                            id_or_obj=id_,
                            condiction=lambda obj: obj.pink_id == g.pink_id or obj.status in
                            {Pit.S.working, Pit.S.past_due, Pit.S.delayed, Pit.S.auditing})

    @classmethod
    def checks(cls, deps):
        """List pits awaiting audit within the given departments.

        Raises AccessDenied when the caller lacks scopes for *deps*.
        """
        if not check_scopes(deps):
            raise AccessDenied(cls_=Pit)
        pits = Pit.query.join(Role). \
            filter(Pit.status == Pit.S.auditing). \
            filter(Role.dep.in_(deps)).all()
        return pits

    @classmethod
    def in_dep(cls, dep, status):
        """List pits of one department filtered by a set of statuses.

        Raises AccessDenied when the caller lacks scope for *dep*.
        """
        if not check_scopes(dep):
            raise AccessDenied(cls_=Pit)
        pits = Pit.query.join(Role). \
            filter(Role.dep == dep). \
            filter(Pit.status.in_(status)).all()
        return pits

    @classmethod
    def search(cls, deps, status_set, pink_id=''):
        """Search pits by department set, status set and/or owner pink id.

        A caller may always search their own pits (pink_id == g.pink_id);
        searching other pits requires opt-duck rights on *deps*.
        Empty filters are skipped entirely.
        """
        if (not pink_id or pink_id != g.pink_id) and not check_opt_duck(scopes=deps):
            raise AccessDenied(cls_=Pit)
        query = Pit.query.join(Role)
        if deps:
            query = query.filter(Role.dep.in_(deps))
        if status_set:
            query = query.filter(Pit.status.in_(status_set))
        if pink_id:
            query = query.filter(Pit.pink_id == pink_id)
        return query.all()
| StarcoderdataPython |
173673 | <reponame>dizcza/entropy-estimators<gh_stars>1-10
from .NPEET.npeet.entropy_estimators import mi as npeet_mi
from .NPEET.npeet.entropy_estimators import entropy as npeet_entropy
from .NPEET.npeet.entropy_estimators import entropyd as discrete_entropy
from .NPEET.npeet.entropy_estimators import midd as discrete_mi
from .gcmi.python.gcmi import gcmi_cc as gcmi_mi
from .gcmi.python.gcmi import ent_g as gcmi_entropy
from .mine import mine_mi
from ._micd import micd
| StarcoderdataPython |
11251134 | <gh_stars>0
from flask import Blueprint, render_template, request, redirect, url_for, Response
from app import mongo
from bson import ObjectId
import json
mod_main = Blueprint('main', __name__)
@mod_main.route('/', methods=['GET', 'POST'])
def index():
    """Render the report-list landing page."""
    # NOTE(review): POST is declared in the route but has no handler — the
    # view returns None (HTTP 500) on POST; confirm whether POST belongs here.
    if request.method == 'GET':
        reports = mongo.db.reports.find()
        return render_template('mod_main/index.html', reports=reports)
@mod_main.route('/add_audit_form', methods=['GET', 'POST'])
def add_audit_form():
    """Show the add-audit form (GET) or create a report from it (POST)."""
    form = AddAuditForm()
    if request.method == 'GET':
        audits = mongo.db.audits.find()
        return render_template('mod_main/add_audit_form.html', audits=audits, form=form)
    elif request.method == 'POST':
        # Bug fix: the original used the Python 2 print *statement*
        # (`print request.form`), a SyntaxError under Python 3.
        print(request.form)
        # The dict keys must exactly match the form's input names.
        mongo.db.reports.insert({
            "audit_title": request.form['audit_title'],
            "audit_ref_num": request.form['audit_ref_num'],
            "audit_date": request.form['audit_date']
        })
        return redirect(url_for('main.audit_list'))
@mod_main.route('/remove/audit', methods=['POST'])
def remove_audit():
    """Delete one report by the id posted in the form body; confirm as JSON."""
    if request.method == 'POST':
        audit_id = request.form['id']
        # Bug fix: the original passed the undefined name `report_id` to
        # ObjectId(), raising NameError on every call.
        mongo.db.reports.remove({"_id": ObjectId(audit_id)})
        return Response(json.dumps({"removed": True}), mimetype='application/json')
@mod_main.route('/remove/<string:report_id>', methods=['GET'])
def remove(report_id):
    """Delete the report whose id appears in the URL; confirm as JSON."""
    if request.method == 'GET':
        mongo.db.reports.remove({"_id": ObjectId(report_id)})
        payload = json.dumps({"removed": True})
        return Response(payload, mimetype='application/json')
@mod_main.route('/add-people', methods=['GET', 'POST'])
def add_people():
    """Placeholder endpoint for adding people.

    TODO: implement the POST handler, including the failure and
    success branches.
    """
    return "JSON RESULT"
215927 | import os
import json
from io import BytesIO
from localstack.utils import testutil
from localstack.utils.common import *
from localstack.utils.aws import aws_stack
from localstack.services.awslambda import lambda_api
from localstack.services.awslambda.lambda_api import (LAMBDA_RUNTIME_NODEJS,
LAMBDA_RUNTIME_PYTHON27, LAMBDA_RUNTIME_JAVA8, use_docker)
# Resolve fixture paths relative to this test module so tests work from any CWD.
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
# Sample Lambda handlers deployed by the tests below, one per runtime.
TEST_LAMBDA_PYTHON = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.py')
TEST_LAMBDA_NODEJS = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.js')
# The Java test jar is produced by the localstack Java build; downloaded on
# demand from Maven Central when the local build artifact is absent.
TEST_LAMBDA_JAVA = os.path.join(LOCALSTACK_ROOT_FOLDER, 'localstack', 'ext', 'java', 'target',
    'localstack-utils-tests.jar')
TEST_LAMBDA_NAME_PY = 'test_lambda_py'
TEST_LAMBDA_NAME_JS = 'test_lambda_js'
TEST_LAMBDA_NAME_JAVA = 'test_lambda_java'
TEST_LAMBDA_JAR_URL = ('https://repo.maven.apache.org/maven2/cloud/localstack/' +
    'localstack-utils/0.1.1/localstack-utils-0.1.1-tests.jar')
# Extra packages bundled into the Python Lambda deployment archive.
TEST_LAMBDA_LIBS = ['localstack', 'requests', 'psutil']
def test_upload_lambda_from_s3():
    """Deploy a Lambda whose code lives in S3 and assert it echoes its payload."""
    s3_client = aws_stack.connect_to_service('s3')
    lambda_client = aws_stack.connect_to_service('lambda')
    # Unique function name per run to avoid clashes between test invocations.
    lambda_name = 'test_lambda_%s' % short_uid()
    bucket_name = 'test_bucket_lambda'
    bucket_key = 'test_lambda.zip'
    # upload zip file to S3
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    s3_client.create_bucket(Bucket=bucket_name)
    s3_client.upload_fileobj(BytesIO(zip_file), bucket_name, bucket_key)
    # create lambda function referencing the S3 object instead of inline code
    lambda_client.create_function(
        FunctionName=lambda_name, Handler='handler.handler',
        Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1',
        Code={
            'S3Bucket': bucket_name,
            'S3Key': bucket_key
        }
    )
    # invoke lambda function; the integration handler echoes the JSON payload
    data_before = b'{"foo": "bar"}'
    result = lambda_client.invoke(FunctionName=lambda_name, Payload=data_before)
    data_after = result['Payload'].read()
    assert json.loads(to_str(data_before)) == json.loads(to_str(data_after))
def test_lambda_runtimes():
    """Deploy and invoke one Lambda per supported runtime (Python, Java, Node.js)."""
    lambda_client = aws_stack.connect_to_service('lambda')
    # deploy and invoke lambda - Python
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    response = testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_PY,
        zip_file=zip_file, runtime=LAMBDA_RUNTIME_PYTHON27)
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'
    # deploy and invoke lambda - Java
    if not os.path.exists(TEST_LAMBDA_JAVA):
        # Fetch the prebuilt test jar from Maven Central when no local build exists.
        mkdir(os.path.dirname(TEST_LAMBDA_JAVA))
        download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA)
    zip_file = testutil.create_zip_file(TEST_LAMBDA_JAVA, get_content=True)
    response = testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JAVA, zip_file=zip_file,
        runtime=LAMBDA_RUNTIME_JAVA8, handler='cloud.localstack.sample.LambdaHandler')
    result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA, Payload=b'{}')
    assert result['StatusCode'] == 200
    result_data = result['Payload'].read()
    assert to_str(result_data).strip() == '{}'
    # Node.js requires the Docker executor, so it is exercised conditionally.
    if use_docker():
        # deploy and invoke lambda - Node.js
        zip_file = testutil.create_zip_file(TEST_LAMBDA_NODEJS, get_content=True)
        testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_JS,
            zip_file=zip_file, handler='lambda_integration.handler', runtime=LAMBDA_RUNTIME_NODEJS)
        result = lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JS, Payload=b'{}')
        assert result['StatusCode'] == 200
        result_data = result['Payload'].read()
        assert to_str(result_data).strip() == '{}'
| StarcoderdataPython |
3476373 | <gh_stars>0
# Copyright (c) 2021 <NAME>. All Rights Reserved.
"""Emmental parsing args unit tests."""
import logging
import shutil
from emmental import Meta, init
from emmental.utils.parse_args import parse_args, parse_args_to_config
logger = logging.getLogger(__name__)
def test_parse_args(caplog):
    """Unit test of parsing args.

    Parses a handful of CLI overrides and compares the resulting config
    against a fully spelled-out expected dict, so that any change to a
    default in parse_args is caught here.
    """
    caplog.set_level(logging.INFO)
    parser = parse_args()
    args = parser.parse_args(
        [
            "--seed",
            "0",
            "--checkpoint_all",
            "True",
            "--reset_state",
            "True",
            "--gradient_accumulation_steps",
            "3",
        ]
    )
    assert args.seed == 0
    config = parse_args_to_config(args)
    # Expected config: defaults everywhere except the four overrides above.
    assert config == {
        "meta_config": {
            "seed": 0,
            "verbose": True,
            "log_path": "logs",
            "use_exact_log_path": False,
        },
        "data_config": {"min_data_len": 0, "max_data_len": 0},
        "model_config": {
            "model_path": None,
            "device": 0,
            "dataparallel": True,
            "distributed_backend": "nccl",
        },
        "learner_config": {
            "optimizer_path": None,
            "scheduler_path": None,
            "fp16": False,
            "fp16_opt_level": "O1",
            "local_rank": -1,
            "epochs_learned": 0,
            "n_epochs": 1,
            "steps_learned": 0,
            "n_steps": None,
            "train_split": ["train"],
            "valid_split": ["valid"],
            "test_split": ["test"],
            "ignore_index": None,
            "online_eval": False,
            "optimizer_config": {
                "optimizer": "adam",
                "lr": 0.001,
                "l2": 0.0,
                "grad_clip": None,
                "gradient_accumulation_steps": 3,
                "asgd_config": {"lambd": 0.0001, "alpha": 0.75, "t0": 1000000.0},
                "adadelta_config": {"rho": 0.9, "eps": 1e-06},
                "adagrad_config": {
                    "lr_decay": 0,
                    "initial_accumulator_value": 0,
                    "eps": 1e-10,
                },
                "adam_config": {"betas": (0.9, 0.999), "amsgrad": False, "eps": 1e-08},
                "adamw_config": {"betas": (0.9, 0.999), "amsgrad": False, "eps": 1e-08},
                "adamax_config": {"betas": (0.9, 0.999), "eps": 1e-08},
                "lbfgs_config": {
                    "max_iter": 20,
                    "max_eval": None,
                    "tolerance_grad": 1e-07,
                    "tolerance_change": 1e-09,
                    "history_size": 100,
                    "line_search_fn": None,
                },
                "rms_prop_config": {
                    "alpha": 0.99,
                    "eps": 1e-08,
                    "momentum": 0,
                    "centered": False,
                },
                "r_prop_config": {"etas": (0.5, 1.2), "step_sizes": (1e-06, 50)},
                "sgd_config": {"momentum": 0, "dampening": 0, "nesterov": False},
                "sparse_adam_config": {"betas": (0.9, 0.999), "eps": 1e-08},
                "bert_adam_config": {"betas": (0.9, 0.999), "eps": 1e-08},
            },
            "lr_scheduler_config": {
                "lr_scheduler": None,
                "lr_scheduler_step_unit": "batch",
                "lr_scheduler_step_freq": 1,
                "warmup_steps": None,
                "warmup_unit": "batch",
                "warmup_percentage": None,
                "min_lr": 0.0,
                "reset_state": True,
                "exponential_config": {"gamma": 0.9},
                "plateau_config": {
                    "metric": "model/train/all/loss",
                    "mode": "min",
                    "factor": 0.1,
                    "patience": 10,
                    "threshold": 0.0001,
                    "threshold_mode": "rel",
                    "cooldown": 0,
                    "eps": 1e-08,
                },
                "step_config": {"step_size": 1, "gamma": 0.1, "last_epoch": -1},
                "multi_step_config": {
                    "milestones": [1000],
                    "gamma": 0.1,
                    "last_epoch": -1,
                },
                "cyclic_config": {
                    "base_lr": 0.001,
                    "base_momentum": 0.8,
                    "cycle_momentum": True,
                    "gamma": 1.0,
                    "last_epoch": -1,
                    "max_lr": 0.1,
                    "max_momentum": 0.9,
                    "mode": "triangular",
                    "scale_fn": None,
                    "scale_mode": "cycle",
                    "step_size_down": None,
                    "step_size_up": 2000,
                },
                "one_cycle_config": {
                    "anneal_strategy": "cos",
                    "base_momentum": 0.85,
                    "cycle_momentum": True,
                    "div_factor": 25,
                    "final_div_factor": 10000.0,
                    "last_epoch": -1,
                    "max_lr": 0.1,
                    "max_momentum": 0.95,
                    "pct_start": 0.3,
                },
                "cosine_annealing_config": {"last_epoch": -1},
            },
            "task_scheduler_config": {
                "task_scheduler": "round_robin",
                "sequential_scheduler_config": {"fillup": False},
                "round_robin_scheduler_config": {"fillup": False},
                "mixed_scheduler_config": {"fillup": False},
            },
        },
        "logging_config": {
            "counter_unit": "epoch",
            "evaluation_freq": 1,
            "writer_config": {"writer": "tensorboard", "verbose": True},
            "checkpointing": False,
            "checkpointer_config": {
                "checkpoint_path": None,
                "checkpoint_freq": 1,
                "checkpoint_metric": {"model/train/all/loss": "min"},
                "checkpoint_task_metrics": None,
                "checkpoint_runway": 0,
                "checkpoint_all": True,
                "clear_intermediate_checkpoints": True,
                "clear_all_checkpoints": False,
            },
        },
    }
    # Test default and default args are the same
    dirpath = "temp_parse_args"
    Meta.reset()
    init(dirpath)
    parser = parse_args()
    args = parser.parse_args([])
    config1 = parse_args_to_config(args)
    config2 = Meta.config
    # These two keys exist only in Meta.config, never in a parsed-args config,
    # so drop them before comparing.
    del config2["learner_config"]["global_evaluation_metric_dict"]
    del config2["learner_config"]["optimizer_config"]["parameters"]
    assert config1 == config2
    shutil.rmtree(dirpath)
def test_checkpoint_metric(caplog):
    """Unit test of parsing checkpoint metric.

    Initializes Meta with a non-default checkpoint_metric and verifies the
    override lands in the full config while every other value keeps its
    default.
    """
    caplog.set_level(logging.INFO)
    # Test different checkpoint_metric
    dirpath = "temp_parse_args"
    Meta.reset()
    init(
        log_dir=dirpath,
        config={
            "logging_config": {
                "checkpointer_config": {
                    "checkpoint_metric": {"model/valid/all/accuracy": "max"}
                }
            }
        },
    )
    # Full expected config: defaults everywhere except checkpoint_metric.
    assert Meta.config == {
        "meta_config": {
            "seed": None,
            "verbose": True,
            "log_path": "logs",
            "use_exact_log_path": False,
        },
        "data_config": {"min_data_len": 0, "max_data_len": 0},
        "model_config": {
            "model_path": None,
            "device": 0,
            "dataparallel": True,
            "distributed_backend": "nccl",
        },
        "learner_config": {
            "optimizer_path": None,
            "scheduler_path": None,
            "fp16": False,
            "fp16_opt_level": "O1",
            "local_rank": -1,
            "epochs_learned": 0,
            "n_epochs": 1,
            "steps_learned": 0,
            "n_steps": None,
            "train_split": ["train"],
            "valid_split": ["valid"],
            "test_split": ["test"],
            "ignore_index": None,
            "online_eval": False,
            "global_evaluation_metric_dict": None,
            "optimizer_config": {
                "optimizer": "adam",
                "parameters": None,
                "lr": 0.001,
                "l2": 0.0,
                "grad_clip": None,
                "gradient_accumulation_steps": 1,
                "asgd_config": {"lambd": 0.0001, "alpha": 0.75, "t0": 1000000.0},
                "adadelta_config": {"rho": 0.9, "eps": 1e-06},
                "adagrad_config": {
                    "lr_decay": 0,
                    "initial_accumulator_value": 0,
                    "eps": 1e-10,
                },
                "adam_config": {"betas": (0.9, 0.999), "amsgrad": False, "eps": 1e-08},
                "adamw_config": {"betas": (0.9, 0.999), "amsgrad": False, "eps": 1e-08},
                "adamax_config": {"betas": (0.9, 0.999), "eps": 1e-08},
                "lbfgs_config": {
                    "max_iter": 20,
                    "max_eval": None,
                    "tolerance_grad": 1e-07,
                    "tolerance_change": 1e-09,
                    "history_size": 100,
                    "line_search_fn": None,
                },
                "rms_prop_config": {
                    "alpha": 0.99,
                    "eps": 1e-08,
                    "momentum": 0,
                    "centered": False,
                },
                "r_prop_config": {"etas": (0.5, 1.2), "step_sizes": (1e-06, 50)},
                "sgd_config": {"momentum": 0, "dampening": 0, "nesterov": False},
                "sparse_adam_config": {"betas": (0.9, 0.999), "eps": 1e-08},
                "bert_adam_config": {"betas": (0.9, 0.999), "eps": 1e-08},
            },
            "lr_scheduler_config": {
                "lr_scheduler": None,
                "lr_scheduler_step_unit": "batch",
                "lr_scheduler_step_freq": 1,
                "warmup_steps": None,
                "warmup_unit": "batch",
                "warmup_percentage": None,
                "min_lr": 0.0,
                "reset_state": False,
                "exponential_config": {"gamma": 0.9},
                "plateau_config": {
                    "metric": "model/train/all/loss",
                    "mode": "min",
                    "factor": 0.1,
                    "patience": 10,
                    "threshold": 0.0001,
                    "threshold_mode": "rel",
                    "cooldown": 0,
                    "eps": 1e-08,
                },
                "step_config": {"step_size": 1, "gamma": 0.1, "last_epoch": -1},
                "multi_step_config": {
                    "milestones": [1000],
                    "gamma": 0.1,
                    "last_epoch": -1,
                },
                "cyclic_config": {
                    "base_lr": 0.001,
                    "base_momentum": 0.8,
                    "cycle_momentum": True,
                    "gamma": 1.0,
                    "last_epoch": -1,
                    "max_lr": 0.1,
                    "max_momentum": 0.9,
                    "mode": "triangular",
                    "scale_fn": None,
                    "scale_mode": "cycle",
                    "step_size_down": None,
                    "step_size_up": 2000,
                },
                "one_cycle_config": {
                    "anneal_strategy": "cos",
                    "base_momentum": 0.85,
                    "cycle_momentum": True,
                    "div_factor": 25.0,
                    "final_div_factor": 10000.0,
                    "last_epoch": -1,
                    "max_lr": 0.1,
                    "max_momentum": 0.95,
                    "pct_start": 0.3,
                },
                "cosine_annealing_config": {"last_epoch": -1},
            },
            "task_scheduler_config": {
                "task_scheduler": "round_robin",
                "sequential_scheduler_config": {"fillup": False},
                "round_robin_scheduler_config": {"fillup": False},
                "mixed_scheduler_config": {"fillup": False},
            },
        },
        "logging_config": {
            "counter_unit": "epoch",
            "evaluation_freq": 1,
            "writer_config": {"writer": "tensorboard", "verbose": True},
            "checkpointing": False,
            "checkpointer_config": {
                "checkpoint_path": None,
                "checkpoint_freq": 1,
                "checkpoint_metric": {"model/valid/all/accuracy": "max"},
                "checkpoint_task_metrics": None,
                "checkpoint_runway": 0,
                "checkpoint_all": False,
                "clear_intermediate_checkpoints": True,
                "clear_all_checkpoints": False,
            },
        },
    }
    shutil.rmtree(dirpath)
| StarcoderdataPython |
381457 | import numpy as np
import pandas as pd
import nltk
nltk.download('punkt') # one time execution
import re
# TextRank extractive summarization over the tennis-articles dataset.
df = pd.read_csv("tennis_articles_v4.csv")

from nltk.tokenize import sent_tokenize

# Split every article into sentences, then flatten into one list.
sentences = []
for s in df['article_text']:
    sentences.append(sent_tokenize(s))
sentences = [y for x in sentences for y in x]  # flatten list

# Load pre-trained 100-d GloVe word vectors exactly once (the original
# script parsed this large file twice, doubling startup time).
# Download and extract in the same directory from
# http://nlp.stanford.edu/data/glove.6B.zip
word_embeddings = {}
with open('glove.6B.100d.txt', encoding='utf-8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        word_embeddings[word] = coefs

# remove punctuations, numbers and special characters
# NOTE(review): newer pandas requires regex=True here — confirm the pinned
# pandas version before changing.
clean_sentences = pd.Series(sentences).str.replace("[^a-zA-Z]", " ")
# make alphabets lowercase
clean_sentences = [s.lower() for s in clean_sentences]

nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = stopwords.words('english')

# function to remove stopwords
def remove_stopwords(sen):
    """Join the tokens of *sen* back into a string, dropping stop words."""
    sen_new = " ".join([i for i in sen if i not in stop_words])
    return sen_new

# remove stopwords from the sentences
clean_sentences = [remove_stopwords(r.split()) for r in clean_sentences]

# Sentence vector = mean of its word vectors (zeros for unknown words);
# the +0.001 guards against division by zero for empty sentences.
sentence_vectors = []
for i in clean_sentences:
    if len(i) != 0:
        v = sum([word_embeddings.get(w, np.zeros((100,))) for w in i.split()]) / (len(i.split()) + 0.001)
    else:
        v = np.zeros((100,))
    sentence_vectors.append(v)

# Pairwise cosine-similarity matrix between sentence vectors.
sim_mat = np.zeros([len(sentences), len(sentences)])

from sklearn.metrics.pairwise import cosine_similarity

for i in range(len(sentences)):
    for j in range(len(sentences)):
        if i != j:
            sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1, 100),
                                              sentence_vectors[j].reshape(1, 100))[0, 0]

import networkx as nx

# TextRank: run PageRank over the sentence-similarity graph.
nx_graph = nx.from_numpy_array(sim_mat)
scores = nx.pagerank(nx_graph)

ranked_sentences = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)

# Extract top 10 sentences as the summary
for i in range(10):
    print(ranked_sentences[i][1])
| StarcoderdataPython |
9641410 | <reponame>amihaita/GeekTraine
def compute_monthly_payment(loan_amount, periodic_rate, number_of_payments):
    """Return the fixed payment for a fully amortized loan.

    Standard amortization formula: P = L*r*(1+r)**n / ((1+r)**n - 1), where
    r is the interest rate *per payment period* as a decimal (e.g. 0.005
    for 0.5% per month) and n is the number of payments.  A zero rate
    degenerates to simple division of the principal.
    """
    if number_of_payments <= 0:
        raise ValueError("number_of_payments must be positive")
    if periodic_rate == 0:
        return loan_amount / number_of_payments
    growth = (1 + periodic_rate) ** number_of_payments
    return loan_amount * periodic_rate * growth / (growth - 1)


if __name__ == "__main__":
    # Ask the user for the values needed to calculate the monthly payments.
    loan_amount = float(input("How much money will you borrow? "))
    # NOTE(review): the prompt does not say whether the rate is annual or
    # monthly; the formula needs the per-month decimal rate — confirm.
    interest_rate = float(input("What is the interest rate on the loan? "))
    loan_duration_in_years = float(input("How many years will it take you to pay off the loan? "))

    # Payments are monthly, so the payment count is years * 12.
    number_of_payments = loan_duration_in_years * 12

    # Bug fix: the original multiplied by numberOfPayments where the
    # amortization formula requires (1 + rate) raised to that power,
    # producing wildly wrong payments.
    monthly_payment = compute_monthly_payment(loan_amount, interest_rate, number_of_payments)

    # Provide the result to the user.
    print("Your monthly payment will be " + str(monthly_payment))
    # Extra credit: formatted to two decimal places.
    print("Your monthly payment will be $%.2f" % monthly_payment)
| StarcoderdataPython |
4882944 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 01:50:30 2018
@author: msi-pc
"""
import requests
import json
# NOTE(review): the search endpoint, index id, api-version and query term
# ("bottle") are all hard-coded; Azure Cognitive Search normally also needs
# an api-key header — confirm whether this anonymous GET is intended.
url="https://no13-asrbbe6isgzxtim.search.windows.net/indexes/c5a268aa-e90c-41f0-8a37-967f765b3623/docs?api-version=2017-11-11&search=bottle"
response=requests.get(url)
# Prints only the Response object (status line), not the JSON body.
print(response)
3468866 | import logging
import os
import pickle
import threading
from collections import defaultdict
from copy import deepcopy
import yaml
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.error import Unauthorized, ChatMigrated
from telegram.ext import Updater, CommandHandler, PicklePersistence, CallbackQueryHandler, MessageHandler, Filters
from QQuizGame import schedule
from QQuizGame.QuizKernel import QuizKernel
from QQuizGame.ReadWrite import ReadWrite
from QQuizGame.Types import AnswerCorrectness
from QQuizGame.logging_setup import setup_logger
class GameConfig:
    """Typed view over the bot's YAML configuration file."""

    def __init__(self, config):
        # BaseLoader yields raw strings only; numeric fields are cast below.
        with open(config, 'r') as handle:
            raw = yaml.load(handle, Loader=yaml.BaseLoader)
        self.games_db_path = raw['games_db_path']
        self.default_game = raw['default_game']
        self.logger_path = raw['logger_path']
        self.token = raw['token']  # TODO: add encryption
        self.user_db_path = raw['user_db_path']
        self.no_spoilers_default = bool(int(raw['no_spoilers_default']))
        self.admin_id = int(raw['admin_id'])
        # Game-of-the-day is optional; fall back to inert defaults when absent.
        if 'game_of_the_day' in raw:
            self.game_of_the_day = raw['game_of_the_day']
            self.game_of_the_day_time = raw['game_of_the_day_time']
            self.game_of_the_day_db_path = raw['game_of_the_day_db_path']
        else:
            self.game_of_the_day = None
            self.game_of_the_day_time = "12:00"
            self.game_of_the_day_db_path = ''
class Game:
__name__ = "Game"
__version__ = 0.4
def __init__(self, config_path: str):
self.config = GameConfig(config_path)
puzzles_db = PicklePersistence(filename=self.config.user_db_path)
self.updater = Updater(self.config.token, use_context=True, persistence=puzzles_db)
self.init_dispatcher(self.updater.dispatcher)
self.logger = setup_logger(__name__,
self.config.logger_path,
logging.INFO)
self.game_of_day = None
if self.config.game_of_the_day:
path_dir = os.path.join(self.config.games_db_path, self.config.game_of_the_day, 'master')
last_lev, message_buff = 0, []
if os.path.exists(self.config.game_of_the_day_db_path):
last_lev, message_buff = pickle.load(open(self.config.game_of_the_day_db_path, 'rb'))
self.game_of_day = QuizKernel(path_dir, last_lev)
self.__schedule_gotd()
self.gotd_prev_message = message_buff
self.input_event = self.__send_all_from_input()
self.admin_text = ''
def start_polling(self, demon=False):
self.updater.start_polling()
if not demon:
self.updater.idle()
def stop_polling(self):
if hasattr(self, 'shed_event'):
self.shed_event.set()
if self.config.game_of_the_day:
pickle.dump([self.game_of_day.last_question_num, self.gotd_prev_message],
open(self.config.game_of_the_day_db_path, 'wb'))
self.input_event.set()
self.updater.stop()
    def __get_chat_meta(self, update, context):
        """Return the per-user (private chat) or per-chat metadata dict,
        filling in any missing keys and migrating stale QuizKernel pickles.

        NOTE(review): defaults are only filled when the dict is already
        non-empty; a brand-new (empty) dict is initialized by __start —
        confirm that every other handler goes through __check_meta first.
        """
        if update.effective_message.chat.type == 'private':
            metadata = context.user_data
        else:
            metadata = context.chat_data
        if metadata:
            if 'game_type' not in metadata.keys():
                metadata['game_type'] = self.config.default_game
            if 'quiz' not in metadata.keys():
                metadata['quiz'] = {}
                path_dir = os.path.join(self.config.games_db_path, metadata['game_type'], 'master')
                # NOTE(review): this constructor form (path, bot, chat_id)
                # differs from the (path, last_question) form used in
                # __init__/__start — confirm QuizKernel supports both.
                metadata['quiz'][metadata['game_type']] = QuizKernel(path_dir,
                                                                     context.bot,
                                                                     update.effective_message.chat_id)
            if 'no_spoiler' not in metadata.keys():
                metadata['no_spoiler'] = self.config.no_spoilers_default
            if 'message_stack' not in metadata.keys():
                metadata['message_stack'] = []
            if 'game_of_day' not in metadata.keys():
                metadata['game_of_day'] = True
            if 'answer_from_text' not in metadata.keys():
                metadata['answer_from_text'] = True
            if 'version' not in metadata.keys():
                # Pre-versioning metadata: stamp it and rebuild the kernel.
                metadata['version'] = self.__version__
                old_data = self.__get_game_meta(metadata['quiz'][metadata['game_type']])
                metadata['quiz'][metadata['game_type']] = QuizKernel(*old_data)
            if metadata['version'] != self.__version__:
                # Bot version changed since this chat last played: re-create
                # the kernel from its serialized form.
                metadata['version'] = self.__version__
                old_data = self.__get_game_meta(metadata['quiz'][metadata['game_type']])
                metadata['quiz'][metadata['game_type']] = QuizKernel(*old_data)
        return metadata
@staticmethod
def __check_meta(metadata, update):
if not metadata:
update.effective_message.reply_text("Видимо что-то сломалось. Введите /start, чтобы начать")
return metadata
def __get_game_meta(self, game_metadata):
try:
old_data = game_metadata.serialize_to_db()
except:
old_data = game_metadata.working_dir, game_metadata.last_question_num
return old_data
    def __start(self, update, context):
        """Handle /start: initialize metadata for new chats and send the
        welcome text, or re-send the current question for returning chats."""
        metadata = self.__get_chat_meta(update, context)
        chat_id = update.effective_message.chat_id
        if not metadata:
            # First contact: context.user_data/chat_data is an empty (falsy)
            # dict that we can populate in place.
            metadata['game_type'] = self.config.default_game
            metadata['quiz'] = defaultdict(QuizKernel)
            path_dir = os.path.join(self.config.games_db_path, metadata['game_type'], 'master')
            metadata['quiz'][metadata['game_type']] = QuizKernel(path_dir, last_question=0)
            metadata['quiz_data'] = (path_dir, 0)
            # no-spoiler mode defaults on for group chats, off for private chats.
            metadata['no_spoiler'] = self.config.no_spoilers_default \
                if update.effective_message.chat.type != 'private' else False
            metadata['message_stack'] = []
            metadata['game_of_day'] = True
            metadata['answer_from_text'] = True
            metadata['version'] = self.__version__
            reply_text = (" Привет! Добро пожаловать в игру!\n"
                          "\n"
                          '/answer [ans] - Дать ответ на вопрос (/+tab ответ)\n'
                          '/hint - Вызвать подсказку\n'
                          '/repeat - Повторить последний вопрос\n'
                          '/getanswer - Получить ответ\n'
                          '/setlevel - Выбрать уровень\n'
                          '/settings - Настройки игры (режим и no spoilers)\n'
                          '/start - Начать игру\n'
                          '/help - Вызвать подробную инструкцию\n'
                          '/credits - Авторам\n'
                          '/reset - Сброс прогресса игры \n'
                          "\n"
                          " Удачи!\n")
            metadata['message_stack'].append(
                context.bot.sendMessage(chat_id=chat_id, text=reply_text))
            self.__set_game(update, context)
            self.logger.info('New user added %s', update.effective_user)
        else:
            # Odd-looking but deliberate: re-create the kernel from its
            # serialized form in case the game engine was updated.
            old_data = self.__get_game_meta(metadata['quiz'][metadata['game_type']])
            metadata['quiz'][metadata['game_type']] = QuizKernel(*old_data)
            question, path = metadata['quiz'][metadata['game_type']].get_new_question()
            metadata['message_stack'] += ReadWrite.send(question, context.bot, chat_id, path)
def __question(self, update, context):
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
chat_id = update.effective_message.chat_id
metadata['message_stack'].append(update.effective_message)
question, path = metadata['quiz'][metadata['game_type']].get_new_question()
metadata['message_stack'] += ReadWrite.send(question, context.bot, chat_id, path)
def __hint(self, update, context):
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
chat_id = update.effective_message.chat_id
help_reply = metadata['quiz'][metadata['game_type']].get_hint()
metadata['message_stack'].append(update.effective_message)
metadata['message_stack'].append(context.bot.sendMessage(chat_id=chat_id, text=help_reply))
    def __answer(self, update, context):
        """Handle an answer attempt (via /answer or plain text): check it
        against the current question and advance the game on success."""
        metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
        if not metadata:
            return
        chat_id = update.effective_message.chat_id
        metadata['message_stack'].append(update.effective_message)
        # "/answer foo bar" -> "foo bar"; plain text is taken verbatim.
        if update.effective_message.text.startswith('/'):
            answer = ' '.join(context.args).lower()
        else:
            answer = update.effective_message.text
        # Chats may disable answering via plain (non-command) text.
        if not metadata['answer_from_text']:
            return
        if not answer:
            metadata['message_stack'].append(
                update.effective_message.reply_text(text="Укажи ответ аргументом после команды /answer, например: "
                                                         "/answer 1984.\nЛайфхак: чтобы каждый раз не печатать слово "
                                                         "answer, можно воспользоваться комбинацией /+tab ответ"))
            return
        self.logger.info('User %s answered %s in game %s on question %s',
                         update.effective_user,
                         answer,
                         metadata['game_type'],
                         metadata['quiz'][metadata['game_type']].last_question_num
                         )
        # check_answer returns AnswerCorrectness.CORRECT or a str reply
        # (wrong/partial); anything else is logged as unexpected below.
        correctness = metadata['quiz'][metadata['game_type']].check_answer(answer)
        if correctness == AnswerCorrectness.CORRECT:
            self.logger.info('User %s solved puzzle %s from %s',
                             update.effective_user,
                             metadata['quiz'][metadata['game_type']].last_question_num, metadata['game_type'])
            if metadata['no_spoiler']:
                # Delete every tracked message so the solution leaves no trace.
                for msg in metadata['message_stack']:
                    # NOTE(review): bare except deliberately left as-is here
                    # (best-effort delete), but it also hides unrelated errors.
                    try:
                        context.bot.deleteMessage(msg.chat_id, msg.message_id)
                    except:
                        self.logger.warning('No message "%s"', msg)
                metadata['message_stack'].clear()
            metadata['quiz'][metadata['game_type']].next()
            question, path = metadata['quiz'][metadata['game_type']].get_new_question()
            metadata['message_stack'] += ReadWrite.send(question, context.bot, chat_id, path)
        elif type(correctness) == str:
            metadata['message_stack'].append(
                update.effective_message.reply_text(text=correctness))
        else:
            self.logger.warning('Wrong answer type "%s"', correctness)
def __get_answer(self, update, context):
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
chat_id = update.effective_message.chat_id
context.bot.sendMessage(text=metadata['quiz'][metadata['game_type']].get_answer(), chat_id=chat_id)
def __error(self, update, context):
"""Log Errors caused by Updates."""
self.logger.warning('Update "%s" caused error "%s"', update, context.error)
def __reset(self, update, context):
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
update.effective_message.reply_text(self.__reset_text(),
reply_markup=self.__reset_markup())
@staticmethod
def __reset_text():
return "Точно? Все сохранения в игре удалятся."
@staticmethod
def __reset_markup():
keyboard = [[InlineKeyboardButton("Точно", callback_data='reset-1'),
InlineKeyboardButton("Нет", callback_data='reset-0')]]
reply_markup = InlineKeyboardMarkup(keyboard)
return reply_markup
def __reset_button(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
chat_id = update.effective_message.chat_id
if not metadata:
return
button = bool(int(query.data.split('-')[-1]))
if bool(button):
update.effective_message.delete()
metadata['quiz'][metadata['game_type']].reset()
question, path = metadata['quiz'][metadata['game_type']].get_new_question()
metadata['message_stack'] += ReadWrite.send(question, context.bot, chat_id, path)
self.logger.info('User %s reset %s',
update.effective_user,
metadata['game_type'])
else:
update.effective_message.delete()
def __set_game(self, update, context):
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
chat_id = update.effective_message.chat_id
if not metadata:
return
reply_markup = ReadWrite.parse_game_folders_markup(self.config.games_db_path)
context.bot.sendMessage(text=self.__settings_game_text(metadata['game_type'], False),
chat_id=chat_id,
reply_markup=reply_markup)
def __settings(self, update, context):
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
update.effective_message.reply_text(self.__settings_main_text(),
reply_markup=self.__settings_main_markup())
def __settings_main(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
query.edit_message_text(text=self.__settings_main_text(),
reply_markup=self.__settings_main_markup())
@staticmethod
def __settings_main_text():
return 'Выбери нужную настройку'
@staticmethod
def __settings_main_markup():
keyboard = [[InlineKeyboardButton("Игры", callback_data='m1-game_type'),
InlineKeyboardButton("No spoilers", callback_data='m2-no_spoiler_mode')],
[InlineKeyboardButton("Загадка дня", callback_data='m3-gotd'),
InlineKeyboardButton("Быстрый ответ", callback_data='m4-afm')],
[InlineKeyboardButton("Done", callback_data='done')]
]
reply_markup = InlineKeyboardMarkup(keyboard)
return reply_markup
# Game mode settings
def __settings_game(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
reply_markup = ReadWrite.parse_game_folders_markup(self.config.games_db_path)
query.edit_message_text(text=self.__settings_game_text(metadata['game_type']),
reply_markup=reply_markup)
@staticmethod
def __settings_game_text(status, with_current=True):
if with_current:
return "Доступные игры " + " (сейчас " + str(status) + ")"
else:
return "Доступные игры"
def __settings_game_button(self, update, context):
    """Switch the chat to the game selected in the game menu.

    Re-creates the QuizKernel from saved state for a game the chat has
    played before, otherwise starts a fresh kernel at level 0, then sends
    the (new) current question.
    """
    query = update.callback_query
    metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
    chat_id = update.effective_message.chat_id
    if not metadata:
        return
    button = query.data.split('-')[-1]
    # Membership test directly on the dict ('.keys()' is redundant), and
    # the game_type assignment is hoisted out of the duplicated branches.
    already_played = button in metadata['quiz']
    metadata['game_type'] = button
    if already_played:
        # Rebuild the kernel from this game's persisted progress.
        old_data = self.__get_game_meta(metadata['quiz'][metadata['game_type']])
        metadata['quiz'][metadata['game_type']] = QuizKernel(*old_data)
    else:
        # First time this chat plays the game: start from question 0.
        path_dir = os.path.join(self.config.games_db_path, metadata['game_type'], 'master')
        metadata['quiz'][metadata['game_type']] = QuizKernel(path_dir,
                                                             0,
                                                             context.bot,
                                                             update.effective_message.chat_id)
    self.logger.info('User %s set new game type %s',
                     update.effective_user,
                     metadata['game_type'])
    question, path = metadata['quiz'][metadata['game_type']].get_new_question()
    metadata['message_stack'] += ReadWrite.send(question, context.bot, chat_id, path)
    query.answer(text='Теперь играем в ' + button)
    update.effective_message.delete()
# Disappearing mode settings
def __settings_spoiler(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
query.edit_message_text(text=self.__settings_spoiler_text(metadata['no_spoiler']),
reply_markup=self.__settings_spoiler_markup())
@staticmethod
def __settings_spoiler_text(status):
return "При включенном режиме no spoilers будут удаляться все старые вопросы и ответы, но работает он только " \
"в групповых чатах " + " (сейчас " + str(status) + ")"
@staticmethod
def __settings_spoiler_markup():
keyboard = [[InlineKeyboardButton("Вкл", callback_data='m2_1-1'),
InlineKeyboardButton("Выкл", callback_data='m2_1-0')],
[InlineKeyboardButton("Главное меню", callback_data='main')]
]
reply_markup = InlineKeyboardMarkup(keyboard)
return reply_markup
def __settings_spoiler_button(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
button = bool(int(query.data.split('-')[-1]))
metadata['no_spoiler'] = button
query.answer(text="Режим no spoilers включен" if button else "Режим no spoilers выключен")
query.edit_message_text(
text=self.__settings_main_text(),
reply_markup=self.__settings_main_markup()
)
self.logger.info('User %s set spoiler mode to %s',
update.effective_user,
button)
# Game of the day settings
def __settings_gotd(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
query.edit_message_text(text=self.__settings_gotd_text(metadata['game_of_day']),
reply_markup=self.__settings_gotd_markup())
@staticmethod
def __settings_gotd_text(status):
return "При включенном режиме загадки дня, каждый день в чат будет приходить новый вопрос" + \
" (сейчас " + str(status) + ")"
@staticmethod
def __settings_gotd_markup():
keyboard = [[InlineKeyboardButton("Вкл", callback_data='m3_1-1'),
InlineKeyboardButton("Выкл", callback_data='m3_1-0')],
[InlineKeyboardButton("Главное меню", callback_data='main')]
]
reply_markup = InlineKeyboardMarkup(keyboard)
return reply_markup
def __settings_gotd_button(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
button = bool(int(query.data.split('-')[-1]))
metadata['game_of_day'] = button
query.answer(text="Режим загадки дня включен" if button else "Режим загадки дня выключен")
query.edit_message_text(
text=self.__settings_main_text(),
reply_markup=self.__settings_main_markup()
)
self.logger.info('User %s set game of the day to %s',
update.effective_user,
button)
# Answer message settings
def __settings_answer_message(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
query.edit_message_text(text=self.__settings_answer_message_text(metadata['answer_from_text']),
reply_markup=self.__settings_answer_message_markup())
@staticmethod
def __settings_answer_message_text(status):
return "При включенном режиме, ответы будут приниматься через обычные текстовые сообщения." \
"Пожалуйста учти, что бот логгирует все ответы на задания, чтобы улучшать ход игры," \
"поэтому во включенном состоянии будут логироваться все сообщения в этом чате. " + \
" (сейчас " + str(status) + ")"
@staticmethod
def __settings_answer_message_markup():
keyboard = [[InlineKeyboardButton("Вкл", callback_data='m4_1-1'),
InlineKeyboardButton("Выкл", callback_data='m4_1-0')],
[InlineKeyboardButton("Главное меню", callback_data='main')]
]
reply_markup = InlineKeyboardMarkup(keyboard)
return reply_markup
def __settings_answer_message_button(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
if not metadata:
return
button = bool(int(query.data.split('-')[-1]))
metadata['answer_from_text'] = button
query.answer(text="Ответы будут приниматься из сообщений" if button else "Ответ только после команды /answer")
query.edit_message_text(
text=self.__settings_main_text(),
reply_markup=self.__settings_main_markup()
)
self.logger.info('User %s set answer message mode to %s',
update.effective_user,
button)
@staticmethod
def __settings_done(update, context):
query = update.callback_query
query.answer(text='Done')
update.effective_message.delete()
@staticmethod
def __levels_markup(game):
    """Build an inline keyboard listing the game's levels.

    Returns None when the game exposes no level list.  Each level entry is
    a (number, raw_name) pair; buttons are labelled "N. pretty name" and
    carry the raw name in callback_data for later lookup.
    """
    levels = game.get_all_levels()
    if not levels:
        return None
    keyboard = [[]]
    for level in levels:  # enumerate index of the original was unused
        # Start a new row once the current one holds a button
        # (one level button per row).
        if len(keyboard[-1]) == 1:
            keyboard.append([])
        num, raw_name = level[0], level[1]
        # Display label: 1-based number and underscores shown as spaces.
        label = str(int(num) + 1) + '. ' + " ".join(raw_name.split('_'))
        keyboard[-1].append(
            InlineKeyboardButton(label,
                                 callback_data='game_level-' + num + "-@" + raw_name))
    keyboard.append([InlineKeyboardButton("Done", callback_data='done')])
    reply_markup = InlineKeyboardMarkup(keyboard)
    return reply_markup
def __set_level(self, update, context):  # TODO: add an exit button
    """Show the level-selection keyboard for the chat's active game."""
    metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
    # Guard against uninitialised chats, as every sibling handler does;
    # without it a chat that never ran /start crashes on metadata['quiz'].
    if not metadata:
        return
    levels_markup = self.__levels_markup(metadata['quiz'][metadata['game_type']])
    if levels_markup:
        update.effective_message.reply_text('Выберите уровень',
                                            reply_markup=levels_markup)
    else:
        update.effective_message.reply_text("Выбор уровня невозможен в этом режиме игры")
def __levels_button(self, update, context):
query = update.callback_query
metadata = self.__check_meta(self.__get_chat_meta(update, context), update)
chat_id = update.effective_message.chat_id
if not metadata:
return
button = '-'.join(query.data.split('-')[1:])
metadata['quiz'][metadata['game_type']].set_level_by_name(button)
question, path = metadata['quiz'][metadata['game_type']].get_new_question()
metadata['message_stack'] += ReadWrite.send(question, context.bot, chat_id, path)
update.effective_message.delete()
self.logger.info('User %s changed level to %s',
update.effective_user,
button)
def __help(self, update, context):
chat_id = update.effective_message.chat_id
context.bot.sendMessage(text=(
"Если у тебя есть идея ответа, то введи её после команды /answer в качестве аргумента, например: /answer "
"Пушкин. Если ответ правильный, то ты сразу перейдешь к следующему уровню. Также не исключено, "
"что автор вопроса добавил подсказку. Чтобы увидеть её вбей команду /hint. Если не получается найти ответ "
"(либо ты уверен, что написал правильно, а глупый бот тебя не понимает), то введи /getanswer, "
"и если режим игры позволяет просматривать ответы, то можешь проверить свои догадки. Также некоторые "
"режими игры позволяют менять уровень, не решив прерыдущий. Для этого введи /setlevel и выбери нужный.\n "
"\n"
"В боте предусмотрено несколько видов и источников загадок. Полный список можно найти, введя /settings и "
"выбрав опцию Игры. \n "
"\n"
"Для игры в групповых чатах предусмотрен режим No spoilers. Если включить его в меню /settings, "
"то бот будет удалять все сообщения, относящиеся к предыдущему вопросу, чтобы остальные участники группы "
"не видели ответов и могли решить загадку самостоятельно.\n "
"\n"
"Если хочешь начать игру сначала, то введи /reset, но учти, что тогда потеряются все сохранения.\n"
), chat_id=chat_id)
@staticmethod
def __credentials(update, context):
chat_id = update.effective_message.chat_id
context.bot.sendMessage(text="""Данный бот создавался только с развлекательными целями и не несёт никакой
коммерческой выгоды. Некоторые из игр в этом боте полностью скопированы с других ресурсов с загадками: Манул
загадко (http://manulapuzzle.ru), Project Euler (https://projecteuler.net), Night Run. Создатели проекта ни
коим образом не претендуют на авторство этих вопросов, а являются всего лишь большими фанатами этих ресурсов
и хотят распространить их среди своих друзей и знакомых. Если ты являешься создателем или причастным к
созданию этих задач и по каким-то причинам не доволен наличием твоих задач или упоминания ресурса в данном
боте, то напиши пожалуйста на почту <EMAIL>. Исходный код бота находится в открытом доступе
https://github.com/qashqay654/QashqayQuizBot""", chat_id=chat_id)
def __game_of_the_day_send(self):
    """Broadcast the daily riddle to all known users and group chats.

    Deletes yesterday's riddle messages, advances the game of the day,
    sends the new question everywhere the feature is enabled, and
    persists progress.  Invoked by the scheduler (see __schedule_gotd).
    """
    if self.gotd_prev_message:
        # Previous riddle exists: move to the next question and clean up
        # yesterday's messages so chats are not cluttered.
        self.game_of_day.next()
        for message in self.gotd_prev_message:
            try:
                # self.updater.bot.edit_message_text(text=message.text,
                #                                    chat_id=message.chat_id,
                #                                    message_id=message.message_id)
                self.updater.bot.delete_message(message.chat_id, message.message_id)
            except:
                # NOTE(review): bare except is best-effort cleanup of
                # already-deleted messages, but it also hides real API
                # errors — consider narrowing.
                self.logger.warning('No message "%s"', message)
        self.gotd_prev_message.clear()
    keyboard = [[InlineKeyboardButton("Посмотреть ответ", callback_data='gotd_answ'),
                 InlineKeyboardButton("Скрыть", callback_data='done')]]
    reply_markup = InlineKeyboardMarkup(keyboard)
    if self.game_of_day:
        question, path = self.game_of_day.get_new_question()
    else:
        # Feature not configured: nothing to send.
        return
    # Fan out to private chats first; iterate over a copy of the keys
    # because entries may be deleted mid-loop.
    user_data = self.updater.dispatcher.user_data
    for user in list(user_data):
        if user_data[user]:
            if 'game_of_day' not in user_data[user]:
                # Chats created before the setting existed default to on.
                user_data[user]['game_of_day'] = True
            if user_data[user]['game_of_day']:
                try:
                    self.gotd_prev_message += ReadWrite.send(question, self.updater.bot,
                                                             user, path,
                                                             reply_markup=reply_markup,
                                                             game_of_day=True
                                                             )
                except Unauthorized as ua:
                    # The user blocked the bot: drop their state.
                    del user_data[user]
                    self.logger.warning("User %s is deleted", user)
    # ... then to group chats, with the extra supergroup-migration case.
    chat_data = self.updater.dispatcher.chat_data
    for chat in list(chat_data):
        if chat_data[chat]:
            if 'game_of_day' not in chat_data[chat]:
                chat_data[chat]['game_of_day'] = True
            if chat_data[chat]['game_of_day']:
                try:
                    self.gotd_prev_message += ReadWrite.send(question, self.updater.bot,
                                                             chat, path,
                                                             reply_markup=reply_markup,
                                                             game_of_day=True)
                except Unauthorized as ua:
                    del chat_data[chat]
                    self.logger.warning("Chat %s is deleted", chat)
                except ChatMigrated as e:
                    # Group became a supergroup: move its state to the new
                    # chat id and resend the riddle there.
                    chat_data[e.new_chat_id] = deepcopy(chat_data[chat])
                    del chat_data[chat]
                    self.logger.warning("Chat %s is migrated", chat)
                    self.gotd_prev_message += ReadWrite.send(question, self.updater.bot,
                                                             e.new_chat_id, path,
                                                             reply_markup=reply_markup,
                                                             game_of_day=True)
    # Persist the question number and sent-message handles so a restart
    # does not resend or lose track of today's riddle.
    # NOTE(review): the file handle is left to GC — consider "with open".
    pickle.dump([self.game_of_day.last_question_num, self.gotd_prev_message],
                open(self.config.game_of_the_day_db_path, 'wb'))
    self.logger.info('Game of the day send')
def __repeat_goth(self, update, context):
chat_id = update.effective_message.chat_id
keyboard = [[InlineKeyboardButton("Посмотреть ответ", callback_data='gotd_answ'),
InlineKeyboardButton("Скрыть", callback_data='done')]]
reply_markup = InlineKeyboardMarkup(keyboard)
question, path = self.game_of_day.get_new_question()
self.gotd_prev_message += ReadWrite.send(question, self.updater.bot,
chat_id, path,
reply_markup=reply_markup,
game_of_day=True
)
def __game_of_the_day_button(self, update, context):
query = update.callback_query
if self.game_of_day:
query.answer(text=self.game_of_day.get_hint(), show_alert=True)
def __schedule_gotd(self):
schedule.every().day.at(self.config.game_of_the_day_time).do(self.__game_of_the_day_send)
print("Scheduler set at " + self.config.game_of_the_day_time)
self.shed_event = schedule.run_continuously()
def __gotd_answer(self, update, context):
chat_id = update.effective_message.chat_id
answer = ' '.join(context.args).lower()
if not answer:
self.gotd_prev_message.append(update.effective_message.reply_text(text="Укажи ответ аргументом после "
"команды /dq, например: "
"/dq 1984"))
return
correctness = self.game_of_day.check_answer(answer)
self.logger.info('User %s answered %s in %s',
update.effective_user,
answer,
"game of the day"
)
if correctness == AnswerCorrectness.CORRECT:
self.logger.info('User %s solved %s',
update.effective_user,
"game of the day"
)
self.gotd_prev_message.append(update.effective_message.reply_text(text="Правильно!"))
elif type(correctness) == str:
self.gotd_prev_message.append(
context.bot.sendMessage(chat_id=chat_id, text=correctness))
else:
self.logger.warning('Wrong answer type "%s"', correctness)
def __send_all_from_admin(self, update, context):
user_id = update.effective_message.from_user.id
chat_id = update.effective_message.chat_id
if user_id == self.config.admin_id:
text = update.effective_message.text[11:].strip() # ' '.join(context.args)
if not text:
update.effective_message.reply_text(text="Нет текста")
return
update.effective_message.reply_text(text="Preview")
keyboard = [[InlineKeyboardButton("Шлем", callback_data='admin_send-1'),
InlineKeyboardButton("Не шлем", callback_data='admin_send-0')],
]
reply_markup = InlineKeyboardMarkup(keyboard)
context.bot.sendMessage(text=text, chat_id=chat_id, reply_markup=reply_markup)
self.admin_text = text
def __send_all_from_admin_button(self, update, context):
query = update.callback_query
button = bool(int(query.data.split('-')[-1]))
query.edit_message_text(text=self.admin_text)
if button:
user_data = self.updater.dispatcher.user_data
for user in list(user_data):
if user_data[user]:
try:
self.updater.bot.sendMessage(text=self.admin_text, chat_id=user)
except Unauthorized as ua:
del user_data[user]
self.logger.warning("User %s is deleted", user)
chat_data = self.updater.dispatcher.chat_data
for chat in list(chat_data):
if chat_data[chat]:
try:
self.updater.bot.sendMessage(text=self.admin_text, chat_id=chat)
except Unauthorized as ua:
del chat_data[chat]
self.logger.warning("Chat %s is deleted", chat)
except ChatMigrated as e:
chat_data[e.new_chat_id] = deepcopy(chat_data[chat])
del chat_data[chat]
self.logger.warning("Chat %s is migrated", chat)
self.updater.bot.sendMessage(text=self.admin_text, chat_id=e.new_chat_id)
self.logger.info("Admin message send %s", self.admin_text)
self.admin_text = ''
def __send_all_from_input(self):
cease_continuous_run = threading.Event()
class MassiveSender(threading.Thread):
@classmethod
def run(cls):
while not cease_continuous_run.is_set():
message = input()
if not message:
continue
confirm = ''
while confirm not in ['yes', 'no']:
print("Are you sure? [yes|no]")
confirm = input()
if confirm == 'no':
continue
else:
print('Sending')
user_data = self.updater.dispatcher.user_data
for user in list(user_data):
if user_data[user]:
try:
self.updater.bot.sendMessage(text=message, chat_id=user)
except Unauthorized as ua:
del user_data[user]
self.logger.warning("User %s is deleted", user)
chat_data = self.updater.dispatcher.chat_data
for chat in list(chat_data):
if chat_data[chat]:
try:
self.updater.bot.sendMessage(text=message, chat_id=chat)
except Unauthorized as ua:
del chat_data[chat]
self.logger.warning("Chat %s is deleted", chat)
except ChatMigrated as e:
chat_data[e.new_chat_id] = deepcopy(chat_data[chat])
del chat_data[chat]
self.logger.warning("Chat %s is migrated", chat)
self.updater.bot.sendMessage(text=message, chat_id=e.new_chat_id)
self.logger.info("Admin message send %s", message)
continuous_thread = MassiveSender()
continuous_thread.daemon = True
continuous_thread.start()
return cease_continuous_run
def init_dispatcher(self, dispatcher):
dispatcher.add_handler(CommandHandler("start", self.__start,
pass_user_data=True, pass_chat_data=True))
dispatcher.add_handler(CommandHandler("hint", self.__hint,
pass_user_data=True, pass_chat_data=True))
dispatcher.add_handler(CommandHandler("answer", self.__answer,
pass_args=True, pass_user_data=True, pass_chat_data=True))
dispatcher.add_handler(CommandHandler("repeat", self.__question,
pass_user_data=True, pass_chat_data=True))
dispatcher.add_handler(CommandHandler("getanswer", self.__get_answer,
pass_user_data=True, pass_chat_data=True))
dispatcher.add_handler(CommandHandler("help", self.__help))
dispatcher.add_handler(CommandHandler("credits", self.__credentials))
dispatcher.add_handler(CommandHandler("reset", self.__reset,
pass_user_data=True, pass_chat_data=True))
dispatcher.add_handler(CommandHandler("dq", self.__gotd_answer))
dispatcher.add_handler(CommandHandler("repeatdq", self.__repeat_goth))
dispatcher.add_handler(CallbackQueryHandler(self.__reset_button, pattern='^reset-'))
dispatcher.add_handler(CommandHandler("settings", self.__settings,
pass_user_data=True, pass_chat_data=True))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_main, pattern='main'))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_done, pattern='done'))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_game, pattern='^m1-'))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_game_button, pattern='^puzzname'))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_spoiler, pattern='^m2-'))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_spoiler_button, pattern='^m2_1-'))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_gotd, pattern='^m3-'))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_gotd_button, pattern='^m3_1-'))
dispatcher.add_handler(CallbackQueryHandler(self.__game_of_the_day_button, pattern='gotd_answ'))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_answer_message, pattern='^m4-'))
dispatcher.add_handler(CallbackQueryHandler(self.__settings_answer_message_button, pattern='^m4_1-'))
dispatcher.add_handler(CommandHandler("adminsend", self.__send_all_from_admin))
dispatcher.add_handler(CallbackQueryHandler(self.__send_all_from_admin_button, pattern='^admin_send-'))
dispatcher.add_handler(CommandHandler("setlevel", self.__set_level,
pass_user_data=True, pass_chat_data=True))
dispatcher.add_handler(CallbackQueryHandler(self.__levels_button, pattern='^game_level-'))
dispatcher.add_handler(MessageHandler(Filters.text, self.__answer))
dispatcher.add_error_handler(self.__error)
# TODO: add random talk
# todo: прописать нормальный логгер вместо принтов
| StarcoderdataPython |
from datetime import datetime, timezone

from django.contrib.gis.db import models as gis_models
from django.contrib.postgres.operations import CreateExtension
from django.db import models, migrations
from django.utils.text import slugify
from django.utils.timezone import now as tz_now
class Dataset(models.Model):
    """A named upload of rows; ``slug`` is derived from ``name`` on save."""

    name = models.CharField(max_length=95, blank=False)
    # Pass the callable itself: the original `default=datetime.now(timezone.utc)`
    # was evaluated once at import time, stamping every Dataset with the
    # moment the server process started instead of its creation time.
    date = models.DateTimeField(default=tz_now, blank=False)
    slug = models.SlugField(max_length=95, blank=True, null=True)

    def save(self, *args, **kwargs):
        # Keep the slug in sync with the (possibly updated) name.
        self.slug = slugify(self.name)
        super().save(*args, **kwargs)

    def __str__(self):
        return f'{self.id} {self.slug}'
class Row(models.Model):
    # One georeferenced observation belonging to a Dataset.
    # NOTE(review): `dataset_id` is a ForeignKey, so Django exposes the
    # related Dataset *object* under this name (and the raw key as
    # `dataset_id_id`); renaming it to `dataset` would be clearer but
    # requires a migration and caller updates.
    dataset_id = models.ForeignKey(Dataset, related_name='rows', on_delete = models.CASCADE)
    # WGS84 (EPSG:4326) location of the record.
    point = gis_models.PointField(srid = 4326, blank = False)
    client_id = models.PositiveIntegerField(blank = False)
    client_name = models.CharField(max_length = 45, blank = False)

    def __str__(self):
        return f'{self.dataset_id} {self.point}'
| StarcoderdataPython |
1916032 | # def prime(num):
# if num < 0:
# return "error negative num given"
# results = []
# for i in range (2, num+1):
# if isPrime(i):
# results.append(i)
# return results
def isPrime(number):
    """Return True if ``number`` is prime.

    Fixes two defects of the original trial-division loop: values below 2
    (0, 1, negatives) were reported as prime because the loop body never
    executed, and divisors were tried all the way up to ``number`` instead
    of stopping at its square root.
    """
    if number < 2:
        return False
    for candidate in range(2, int(number ** 0.5) + 1):
        if number % candidate == 0:
            return False
    return True
def prime_tdd(number):
    """Return every prime up to and including ``number``.

    Args:
        number (int): inclusive upper bound of the search.

    Returns:
        list[int]: primes <= number; or an error string, matching the
        original contract: "Unexpected non integer input" for non-ints,
        "error" for negative numbers.

    Uses a sieve of Eratosthenes (O(n log log n)) instead of per-number
    trial division (O(n^2)); the block is now self-contained and no longer
    depends on the isPrime helper.
    """
    if not isinstance(number, int):
        return "Unexpected non integer input"
    if number < 0:
        return "error"
    if number < 2:
        return []
    sieve = [True] * (number + 1)
    sieve[0] = sieve[1] = False
    for p in range(2, int(number ** 0.5) + 1):
        if sieve[p]:
            # p*p is the first composite not already crossed off.
            for multiple in range(p * p, number + 1, p):
                sieve[multiple] = False
    return [i for i, is_prime in enumerate(sieve) if is_prime]
| StarcoderdataPython |
168964 | import pandas as pd
from re import findall
from sklearn.utils import shuffle
data = pd.read_csv('Trafficking_Data.csv')
urls = data['url'].tolist()
regional_data = []
category_data = []
def extract_region(list_of_urls):
    """Append the subdomain (region) of each URL to ``regional_data``."""
    regional_data.extend(
        findall('http://([A-Za-z]+).', url)[0] for url in list_of_urls
    )
def extract_category(list_of_urls):
    """Append the first path segment (category) of each URL to ``category_data``."""
    category_data.extend(
        findall('http://[A-Za-z]+.backpage.com/([A-Za-z]+)', url)[0]
        for url in list_of_urls
    )
extract_region(urls)
extract_category(urls)
data['region'] = regional_data
data['category'] = category_data
data = shuffle(data)
data.to_csv('updated.csv', header=True, sep=',')
| StarcoderdataPython |
3573411 | import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
from db.dao import add_user_photo_rec, add_photo_photo_rec
from pprint import pprint
USER_NUMBER = 23259
PHOTO_NUMBER = 8837
TOP_K_NUM = 10
def get_recommend_list_by_itemcf():
    """Item-based collaborative filtering over user/photo favourites.

    Reads ../scidata/user_photo_fav.csv (user_id, photo_id, is_fav) and
    returns, per user index, the TOP_K_NUM photo indices (0-based) with
    the highest predicted scores, best first.
    """
    header = ['user_id', 'photo_id', 'is_fav']
    df = pd.read_csv('../scidata/user_photo_fav.csv', names=header)
    # Dense user x photo favourite matrix.
    # NOTE(review): assumes ids are dense and 1-based within
    # USER_NUMBER/PHOTO_NUMBER — out-of-range ids would raise. TODO confirm.
    fav_matrix = np.zeros((USER_NUMBER, PHOTO_NUMBER))
    for row in df.itertuples():
        user_id = row[1] - 1  # real user id need + 1
        photo_id = row[2] - 1
        is_fav = row[3]
        fav_matrix[user_id, photo_id] = is_fav
    # Photo-photo cosine similarity on the matrix columns.
    item_similarity = cosine_similarity(fav_matrix.T)
    # Predicted score per (user, photo) = similarity-weighted favourites.
    rec_result = fav_matrix.dot(item_similarity)
    # Normalise by each item's total similarity mass.
    # NOTE(review): an item with zero similarity everywhere divides by
    # zero here — presumably the data precludes that; verify.
    rec_result = rec_result / np.array([np.abs(item_similarity).sum(axis=1)])
    rec_list = []
    for user_id in range(rec_result.shape[0]):
        # argsort is ascending; last TOP_K_NUM reversed = highest first.
        rec_item_per_user = rec_result[user_id].argsort()[-TOP_K_NUM:][::-1].tolist()
        rec_list.append(rec_item_per_user)
    return rec_list
def do_recommend_for_user():
    """Store item-CF photo recommendations for every user in the database.

    Recommendation indices are 0-based internally; database ids are the
    same values shifted by +1.
    """
    rec_list = get_recommend_list_by_itemcf()
    user_num = len(rec_list)
    rec_photo_num = len(rec_list[0])
    print('rec_list shape: ({0}, {1})'.format(user_num, rec_photo_num))
    for index, photo_indices in enumerate(rec_list):
        recommended = ','.join(str(photo_index + 1) for photo_index in photo_indices)
        pprint(recommended)
        add_user_photo_rec(index + 1, recommended)
def get_recommend_list_by_tag_based():
    """Content-based photo-to-photo recommendations from shared tags.

    Reads ../scidata/photo_tag.csv (photo_id, tag_id) and returns, per
    photo index, the TOP_K_NUM most tag-similar other photo indices
    (0-based), best first.
    """
    header = ['photo_id', 'tag_id']
    df = pd.read_csv('../scidata/photo_tag.csv', names=header)
    photo_num = df.photo_id.unique().shape[0]
    tag_num = df.tag_id.unique().shape[0]
    # Binary photo x tag incidence matrix.
    # NOTE(review): sizing by unique counts assumes ids are dense and
    # 1-based; sparse ids would index out of range. TODO confirm.
    photo_tag_matrix = np.zeros((photo_num, tag_num))
    for row in df.itertuples():
        photo_id = row[1] - 1  # real photo id need + 1
        tag_id = row[2] - 1
        photo_tag_matrix[photo_id, tag_id] = 1
    photo_similarity = cosine_similarity(photo_tag_matrix)
    # Zero the diagonal so a photo never recommends itself.
    np.fill_diagonal(photo_similarity, 0)
    rec_result = photo_similarity
    rec_list = []
    for photo_id in range(rec_result.shape[0]):
        # argsort is ascending; last TOP_K_NUM reversed = highest first.
        rec_item_per_photo = rec_result[photo_id].argsort()[-TOP_K_NUM:][::-1].tolist()
        rec_list.append(rec_item_per_photo)
    return rec_list
def do_recommend_for_photo():
    """Store tag-based related-photo recommendations in the database.

    Indices are 0-based internally; database ids are the same values
    shifted by +1.
    """
    rec_list = get_recommend_list_by_tag_based()
    photo_num = len(rec_list)
    rec_photo_num = len(rec_list[0])
    print('rec_list shape: ({0}, {1})'.format(photo_num, rec_photo_num))
    for index, related in enumerate(rec_list):
        related_ids = ','.join(str(e + 1) for e in related)
        add_photo_photo_rec(index + 1, related_ids)
if __name__ == '__main__':
# do_recommend_for_user()
do_recommend_for_photo()
| StarcoderdataPython |
12854511 | <reponame>githaefrancis/fluent-exchange
import unittest
from app.models import User,Role,Post,Comment
class CommentModelTest(unittest.TestCase):
    """Unit tests for the Comment model."""

    def setUp(self):
        # A user, a post by that user, and a comment on that post.
        self.new_user = User(name="<NAME>", username='fgithae', password='password', email="<EMAIL>", role=Role.query.filter_by(id=1).first())
        self.new_post = Post(user=self.new_user, title="The beginning", content="This is the first post ever in this channel.The fluent debutter", banner_path="images/img1.jpg")
        self.new_comment = Comment(user=self.new_user, post=self.new_post, content="It's actually good")

    def tearDown(self):
        # Delete children before parents to satisfy FK constraints.
        Comment.query.delete()
        Post.query.delete()
        User.query.delete()

    def test_check_instance_variables(self):
        # assertEqual: the assertEquals alias is deprecated and was
        # removed in Python 3.12.
        self.assertEqual(self.new_comment.user, self.new_user)
        self.assertEqual(self.new_comment.post, self.new_post)
        self.assertEqual(self.new_comment.content, "It's actually good")

    def test_save_comment(self):
        self.new_comment.save_comment()
        self.assertTrue(len(Comment.query.all()) > 0)

    def test_delete_comment(self):
        self.new_comment.delete_comment()
        self.assertEqual(self.new_comment.status, 'archived')
| StarcoderdataPython |
254535 | <filename>View/forms.py
from .models import Profile,Deals
from django.forms import ModelForm
from django.contrib.auth.models import User
class ProfileForm(ModelForm):
    """ModelForm exposing every field of the Profile model."""

    # The options class must be named ``Meta`` (case-sensitive): the
    # original ``META`` was ignored by Django, leaving the form without a
    # model binding.
    class Meta:
        model = Profile
        fields = '__all__'
class UserForm(ModelForm):
    """ModelForm for the auth User, hiding admin/bookkeeping fields."""

    class Meta:
        model = User
        # 'is_superuser' was misspelled 'is_supperuser', so the superuser
        # flag was silently left editable on the form.
        exclude = ['is_staff', 'is_superuser', 'is_active', 'date_joined']
| StarcoderdataPython |
5199454 | <reponame>loovien/meida-downloader
# -*- coding: utf-8 -*-
# website: https://loovien.github.io
# author: luowen<<EMAIL>>
# time: 2018/9/29 21:41
# desc:
import unittest
from src.tools.title_builder import title_gen
class TitleTest(unittest.TestCase):
    """Smoke test: title_gen produces a title without raising."""

    def test_title(self):
        generated = title_gen()
        print(generated)
| StarcoderdataPython |
8126333 | #!/usr/bin/env python3
import os
import rospy
from lg_mirror.capture_viewport import CaptureViewport
from lg_mirror.utils import get_viewport_image_topic
from interactivespaces_msgs.msg import GenericMessage
from lg_common.helpers import handle_initial_state, required_param
from sensor_msgs.msg import CompressedImage
from lg_common.helpers import run_with_influx_exception_handler
NODE_NAME = 'mirror_capture_viewport'
def main():
    """Entry point: publish compressed captures of one viewport.

    Initialises the ROS node, reads its parameters, wires a
    CaptureViewport to the director scene topic, and spins until
    shutdown.  Parameter reads must follow init_node.
    """
    rospy.init_node(NODE_NAME)
    viewport = required_param('~viewport')
    # Fall back to the X DISPLAY from the environment when no private
    # param overrides it.
    env_display = os.environ.get('DISPLAY')
    display = rospy.get_param('~display', env_display)
    if display is None:
        raise ValueError('DISPLAY env or private "display" param required')
    # Stringified lowercase bool ("true"/"false") — presumably consumed as
    # a textual flag by the capture pipeline; TODO confirm downstream use.
    show_pointer = str(rospy.get_param('~show_pointer', False)).lower()
    framerate = int(rospy.get_param('~framerate', 30))
    quality = int(rospy.get_param('~quality', 85))  # compression quality (assumed 0-100)
    image_topic = get_viewport_image_topic(viewport)
    image_pub = rospy.Publisher(image_topic, CompressedImage, queue_size=1)
    capture = CaptureViewport(viewport,
                              display,
                              show_pointer,
                              framerate,
                              quality,
                              image_pub)
    # Scene messages reconfigure the capture; replaying the initial state
    # makes the node consistent with the scene active at startup.
    rospy.Subscriber('/director/scene',
                     GenericMessage,
                     capture.handle_scene_msg)
    handle_initial_state(capture.handle_scene_msg)
    rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
| StarcoderdataPython |
56621 | #!/usr/bin/env python
# Scrubs the output of msvc and prints out the dianostics.
#
# The only argument indicates the file containing the input.
#
# This script can produce lots of messages per diagnostic
#
# Copyright (c) 2007-2018 Carnegie Mellon University. All Rights Reserved.
# See COPYRIGHT file for details.
import sys
import re
import os
if len(sys.argv) != 2:
raise TypeError("Usage: " + sys.argv[0] + " <raw-input> > <org-output>")
input = sys.argv[1]
uniqueErrors = {}
regexes = []
regexes.append(re.compile("(.*?)\((\d*)\).*?error (.*?): (.*)"))
regexes.append(re.compile("(.*?)\((\d*),\d*\).*?error (.*?): (.*)"))
regexes.append(re.compile("(.*?)\((\d*)\).*?warning (.*?): (.*)"))
regexes.append(re.compile("(.*?)\((\d*),\d*\).*?warning (.*?): (.*)"))
for line in open(input):
# match regular expressions
for regex in regexes:
parse = re.match(regex, line)
if parse != None:
break
else:
continue
fileLocation = parse.group(1).strip()
lineNumber = parse.group(2).strip()
errorNumber = parse.group(3).strip()
diagonostic = parse.group(4).strip().replace("|", " ")
# print table
tableEntry = " | ".join(
["", errorNumber, fileLocation, lineNumber, diagonostic, ""])
print tableEntry
| StarcoderdataPython |
3599484 | <gh_stars>1-10
import logging
import faulthandler
CRASHLOGGER_NAME = "APPCRASH"
class StreamToLogger(object):
    """File-like adapter exposing a logging handler's underlying stream.

    faulthandler needs an object with a real ``fileno()``; this forwards
    to the handler's stream.  A ``write`` method is added so the object
    actually honours the minimal file protocol its docstring promises
    (the original declared the intent but implemented only ``fileno``).
    """

    def __init__(self, handler):
        # A logging handler whose ``stream`` attribute is a real file.
        self.handler = handler

    def fileno(self):
        # faulthandler resolves the target file descriptor via fileno().
        return self.handler.stream.fileno()

    def write(self, message):
        # Delegate plain writes to the handler's stream.
        self.handler.stream.write(message)
def enable_crashlogger(error_handler):
    """Route faulthandler crash dumps through the given handler's stream."""
    faulthandler.enable(StreamToLogger(error_handler))
| StarcoderdataPython |
3236308 | # =================================================================
#
# Authors: <NAME> <<EMAIL>>
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import glob
import os
import shutil
import tempfile
from StringIO import StringIO
from urllib2 import urlopen
import zipfile
from paver.easy import (Bunch, call_task, cmdopts, info, options,
path, pushd, sh, task)
BASEDIR = os.path.abspath(os.path.dirname(__file__))
options(
base=Bunch(
home=path(BASEDIR),
docs=path('%s/docs' % BASEDIR),
instance=path('%s/instance' % BASEDIR),
pot=path('%s/GeoHealthCheck/translations/en/LC_MESSAGES/messages.po' %
BASEDIR),
static_docs=path('%s/GeoHealthCheck/static/docs' % BASEDIR),
static_lib=path('%s/GeoHealthCheck/static/lib' % BASEDIR),
tmp=path(tempfile.mkdtemp()),
translations=path('%s/GeoHealthCheck/translations' % BASEDIR)
),
)
@task
def setup():
"""setup plugin dependencies"""
config_file = options.base.home / 'GeoHealthCheck/config_main.py'
config_site = options.base.instance / 'config_site.py'
# setup dirs
if not os.path.exists(options.base.static_lib):
options.base.static_lib.mkdir()
if not os.path.exists(options.base.instance):
options.base.instance.mkdir()
data_dir = options.base.instance / 'data'
data_dir.mkdir()
# data_dir.chmod(0777) gives failure on Python 2.7 Paver 1.2.1
os.chmod(path(data_dir), 0777)
# setup config
config_file.copy(config_site)
# setup deps
sh('pip install -r requirements.txt')
skin = 'http://github.com/BlackrockDigital/startbootstrap-sb-admin-2/archive/v3.3.7+1.zip' # noqa
skin_dirs = ['dist', 'vendor']
need_to_fetch = False
for skin_dir in skin_dirs:
skin_dir_path = os.sep.join(
['startbootstrap-sb-admin-2-3.3.7-1', skin_dir])
if not os.path.exists(skin_dir_path):
need_to_fetch = True
if need_to_fetch:
zipstr = StringIO(urlopen(skin).read())
zipfile_obj = zipfile.ZipFile(zipstr)
zipfile_obj.extractall(options.base.static_lib)
for zf_mem in skin_dirs:
src_loc = path(options.base.static_lib /
'startbootstrap-sb-admin-2-3.3.7-1' / zf_mem)
dest_loc = path(options.base.static_lib / zf_mem)
if not os.path.exists(dest_loc):
src_loc.move(dest_loc)
else:
info('directory already exists. Skipping')
shutil.rmtree(path(options.base.static_lib /
'startbootstrap-sb-admin-2-3.3.7-1'))
# install sparklines to static/site/js
with open(path(options.base.static_lib / 'jspark.js'), 'w') as f:
content = urlopen('http://ejohn.org/files/jspark.js').read()
content.replace('red', 'green')
f.write(content)
# install bootstrap-tagsinput to static/lib
select2 = 'https://github.com/select2/select2/archive/4.0.3.zip'
zipstr = StringIO(urlopen(select2).read())
zipfile_obj = zipfile.ZipFile(zipstr)
zipfile_obj.extractall(options.base.static_lib)
dirname = glob.glob(options.base.static_lib / 'select2-*')[0]
dstdir = ''.join(dirname.rsplit('-', 1)[:-1])
try:
os.rename(dirname, dstdir)
except OSError:
shutil.rmtree(dstdir)
os.rename(dirname, dstdir)
# install leafletjs to static/lib
leafletjs = 'http://cdn.leafletjs.com/downloads/leaflet-0.7.5.zip'
zipstr = StringIO(urlopen(leafletjs).read())
zipfile_obj = zipfile.ZipFile(zipstr)
zipfile_obj.extractall(options.base.static_lib / 'leaflet')
# install html5shiv to static/lib
with open(path(options.base.static_lib / 'html5shiv.min.js'), 'w') as f:
url = 'http://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js'
content = urlopen(url).read()
f.write(content)
# install respond to static/lib
with open(path(options.base.static_lib / 'respond.min.js'), 'w') as f:
url = 'http://oss.maxcdn.com/respond/1.4.2/respond.min.js'
content = urlopen(url).read()
f.write(content)
# build i18n .mo files
call_task('compile_translations')
# build local docs
call_task('refresh_docs')
# message user
info('GeoHealthCheck is now built. Edit settings in %s' % config_site)
info('before deploying the application. Alternatively, you can start a')
info('development instance with "python GeoHealthCheck/app.py"')
@task
def create_secret_key():
    """create secret key for SECRET_KEY in instance/config_site.py"""
    # 24 random bytes rendered as hex for copy/paste into config_site.py.
    # NOTE(review): str.encode('hex') is Python 2 only; a Python 3 port
    # would need binascii.hexlify() / bytes.hex() instead.
    info('Secret key: \'%s\'' % os.urandom(24).encode('hex'))
    info('Copy/paste this key to set the SECRET_KEY')
    info('value in instance/config_site.py')
@task
@cmdopts([
    ('email=', 'e', 'email'),
    ('username=', 'u', 'username'),
    ('password=', 'p', 'password')
])
def create(options):
    """create database objects and superuser account

    A superuser is created only when username, password and email are all
    supplied; otherwise the models script runs without extra arguments.
    """
    credentials = [options.get(key, None)
                   for key in ('username', 'password', 'email')]
    args = '%s %s %s' % tuple(credentials) if all(credentials) else ''
    sh('python GeoHealthCheck/models.py create %s' % args)
@task
def upgrade():
    """upgrade database if changed; be sure to backup first!"""
    ghc_dir = path('%s/GeoHealthCheck' % BASEDIR)
    info('Upgrading database...')
    with pushd(ghc_dir):
        sh('python manage.py db upgrade')
@task
def create_wsgi():
    """create WSGI wrapper and Apache2 configuration"""
    instance = options.base.instance
    # WSGI entry point: expose the Flask app as `application`.
    wsgi_script = '%s%sGeoHealthCheck.wsgi' % (instance, os.sep)
    wsgi_lines = [
        'import sys\n',
        'sys.path.insert(0, \'%s\')\n' % BASEDIR,
        'from GeoHealthCheck.app import APP as application'
    ]
    with open(wsgi_script, 'w') as fh:
        fh.writelines(wsgi_lines)
    # Matching Apache2 mod_wsgi configuration snippet.
    wsgi_conf = '%s%sGeoHealthCheck.conf' % (instance, os.sep)
    conf_lines = [
        'WSGIScriptAlias / %s%sGeoHealthCheck.wsgi\n' % (instance, os.sep),
        '<Directory %s%s>\n' % (BASEDIR, os.sep),
        'Order deny,allow\n',
        'Allow from all\n',
        '</Directory>'
    ]
    with open(wsgi_conf, 'w') as fh:
        fh.writelines(conf_lines)
@task
def refresh_docs():
    """Build sphinx docs from scratch"""
    make = sphinx_make()
    static_docs = options.base.static_docs
    # Remove any previously published docs before rebuilding.
    if os.path.exists(static_docs):
        shutil.rmtree(static_docs)
    with pushd(options.base.docs):
        sh('%s clean' % make)
        sh('%s html' % make)
        sh('mkdir %s' % static_docs)
        sh('cp -rp %s/docs/_build/html/* %s' % (BASEDIR, static_docs))
@task
def clean():
    """clean environment"""
    # Remove generated/downloaded trees in the same order as before.
    for target in (options.base.static_lib, options.base.tmp,
                   options.base.static_docs):
        if os.path.exists(target):
            shutil.rmtree(target)
@task
def extract_translations():
    """extract translations wrapped in _() or gettext()"""
    catalogue_dir = path('GeoHealthCheck/translations/en/LC_MESSAGES')
    if not os.path.exists(catalogue_dir):
        catalogue_dir.makedirs()
    # options.base.pot ends in '.po'; the trailing 't' in '%st' turns it
    # into the .pot template path pybabel writes to.
    sh('pybabel extract -F babel.cfg -o %st GeoHealthCheck' % options.base.pot)
@task
@cmdopts([
    ('lang=', 'l', '2-letter language code'),
])
def add_language_catalogue(options):
    """adds new language profile"""
    lang = options.get('lang', None)
    if lang is None:
        raise RuntimeError('missing lang argument')
    command = 'pybabel init -i %s -d %s -l %s'
    sh(command % (options.base.pot, options.base.translations, lang))
@task
def compile_translations():
    """build .mo files"""
    translations_dir = options.base.translations
    sh('pybabel compile -d %s' % translations_dir)
@task
def update_translations():
    """update language strings"""
    # Re-extract the template first, then merge into every catalogue.
    call_task('extract_translations')
    command = 'pybabel update -i %s -d %s'
    sh(command % (options.base.pot, options.base.translations))
def sphinx_make():
    """Return the make command used to drive Sphinx.

    Windows has no native make, so Sphinx's generated make.bat is used
    there; every other platform uses plain make.
    """
    return 'make.bat' if os.name == 'nt' else 'make'
| StarcoderdataPython |
3589597 | """
/******************************************************************************
This source file is part of the Avogadro project.
Copyright 2013 Kitware, Inc.
This source code is released under the New BSD License, (the "License").
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
"""
import argparse
import json
import sys
def getMetaData():
    """Return the descriptor Avogadro uses to register this file format."""
    return {
        'inputFormat': 'xyz',
        'outputFormat': 'xyz',
        'operations': ['read', 'write'],
        'identifier': 'ZYX Example Format',
        'name': 'ZYX Example Format',
        'description': 'Mostly useless file format that reads xyz-style '
                       'files with reversed coordinates. Demonstrates '
                       'the implementation of a user-scripted file format.',
        'fileExtensions': ['zyx'],
        'mimeTypes': ['chemical/x-zyx'],
    }
def write():
    """Convert an xyz stream on stdin to zyx text.

    Copies the atom-count and comment/title lines through untouched, then
    rewrites every atom line with its x/y/z columns swapped to z/y/x.
    Extra columns after the coordinates are appended unchanged.
    """
    result = ""
    # Just copy the first two lines: numAtoms and comment/title
    result += sys.stdin.readline()
    result += sys.stdin.readline()
    for line in sys.stdin:
        words = line.split()
        # Reversed coordinate order: column 3, 2, 1.
        result += '%-3s %9.5f %9.5f %9.5f' % \
            (words[0], float(words[3]), float(words[2]), float(words[1]))
        if len(words) > 4:
            # BUG FIX: the original called list.join(' '), which raises
            # AttributeError; join the extra columns with str.join instead,
            # separated from the coordinates by a space.
            result += ' ' + ' '.join(words[4:])
        result += '\n'
    return result
def read():
    """Convert a zyx stream on stdin back to xyz text.

    Identical to write(): the transformation is its own inverse, since it
    just swaps the first and third coordinate columns.
    """
    result = ""
    # Just copy the first two lines: numAtoms and comment/title
    result += sys.stdin.readline()
    result += sys.stdin.readline()
    for line in sys.stdin:
        words = line.split()
        # Reversed coordinate order: column 3, 2, 1.
        result += '%-3s %9.5f %9.5f %9.5f' % \
            (words[0], float(words[3]), float(words[2]), float(words[1]))
        if len(words) > 4:
            # BUG FIX: the original called list.join(' '), which raises
            # AttributeError; join the extra columns with str.join instead,
            # separated from the coordinates by a space.
            result += ' ' + ' '.join(words[4:])
        result += '\n'
    return result
if __name__ == "__main__":
parser = argparse.ArgumentParser('Testing file format script.')
parser.add_argument('--metadata', action='store_true')
parser.add_argument('--read', action='store_true')
parser.add_argument('--write', action='store_true')
args = vars(parser.parse_args())
if args['metadata']:
print(json.dumps(getMetaData()))
elif args['read']:
print(read())
elif args['write']:
print(write())
| StarcoderdataPython |
9769591 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-18 15:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9.1): adds the boolean `nsfw` flag,
    # defaulting to False, to the RedditCommand model. Migrations that have
    # been applied should not be edited by hand.
    dependencies = [
        ('reddit', '0002_auto_20160213_1617'),
    ]
    operations = [
        migrations.AddField(
            model_name='redditcommand',
            name='nsfw',
            field=models.BooleanField(default=False),
        ),
    ]
| StarcoderdataPython |
6598149 | <reponame>pycampers/zproc
import multiprocessing
from typing import List, Mapping, Sequence, Any, Callable
import zmq
from zproc import util, serializer
from zproc.consts import DEFAULT_NAMESPACE, EMPTY_MULTIPART
from zproc.server.tools import ping
from .result import SequenceTaskResult, SimpleTaskResult
from .worker import worker_process
class Swarm:
    """Launches and supervises a pool of worker processes that consume
    tasks pushed to the zproc server's task proxy over ZMQ."""
    def __init__(self, server_address: str, *, namespace: str = DEFAULT_NAMESPACE):
        #: Passed on from the constructor.
        self.server_address = server_address
        #: Passed on from the constructor.
        self.namespace = namespace
        #: A ``list`` of :py:class:`multiprocessing.Process` objects for the workers spawned.
        self.worker_list = []  # type: List[multiprocessing.Process]
        self._zmq_ctx = util.create_zmq_ctx()
        self._server_meta = util.get_server_meta(self._zmq_ctx, server_address)
        # PUSH socket over which serialized tasks are handed to the task proxy.
        self._task_push = self._zmq_ctx.socket(zmq.PUSH)
        self._task_push.connect(self._server_meta.task_proxy_in)
    def ping(self, **kwargs):
        """Ping the server; kwargs are forwarded to :py:func:`tools.ping`."""
        return ping(self.server_address, **kwargs)
    @property
    def count(self) -> int:
        """
        Returns the number of workers currently alive.
        This property can be set manually,
        in order to change the number of workers that *should* be alive.
        """
        return sum(1 for w in self.worker_list if w.is_alive())
    @count.setter
    def count(self, value: int):
        # Work with the delta between the desired and the current count.
        value -= self.count
        if value > 0:
            # Spawn the missing workers; each reports back over a pipe
            # (a non-empty reply is deserialized — presumably a startup
            # error from worker_process; confirm against its protocol).
            for _ in range(value):
                recv_conn, send_conn = multiprocessing.Pipe()
                process = multiprocessing.Process(
                    target=worker_process, args=[self.server_address, send_conn]
                )
                process.start()
                with recv_conn:
                    rep = recv_conn.recv_bytes()
                    if rep:
                        serializer.loads(rep)
                self.worker_list.append(process)
        elif value < 0:
            # Notify remaining workers to finish up, and close shop.
            for _ in range(-value):
                self._task_push.send_multipart(EMPTY_MULTIPART)
    def start(self, count: int = None):
        """Bring the swarm up to ``count`` workers (CPU count by default)."""
        if count is None:
            self.count = multiprocessing.cpu_count()
        else:
            self.count = count
    def stop(self, force: bool = False):
        """Stop all workers; ``force=True`` terminates instead of draining."""
        if force:
            for p in self.worker_list:
                p.terminate()
        else:
            self.count = 0
    def run(
        self,
        target: Callable = None,
        args: Sequence = None,
        kwargs: Mapping = None,
        *,
        pass_state: bool = False,
        lazy: bool = False,
    ):
        """Submit a single task and return its result (or a lazy handle
        when ``lazy`` is True)."""
        if target is None:
            # NOTE(review): intended as a decorator form, but ``wrapper``
            # re-enters run() with ``target`` still None, so it recurses
            # instead of submitting the decorated callable — confirm
            # against upstream zproc.
            def wrapper(*a, **k):
                return self.run(target, a, k, pass_state=pass_state, lazy=lazy)
            return wrapper
        task_id = util.generate_task_id()
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        # params mirrors the chunked layout used by map_lazy; the chunk
        # index -1 marks a single (non-map) task.
        params = (None, None, args, None, kwargs)
        task = (target, params, pass_state, self.namespace)
        self._task_push.send_multipart(
            [util.encode_chunk_id(task_id, -1), serializer.dumps(task)]
        )
        res = SimpleTaskResult(self.server_address, task_id)
        if lazy:
            return res
        return res.value
    def map_lazy(
        self,
        target: Callable,
        map_iter: Sequence[Any] = None,
        *,
        map_args: Sequence[Sequence[Any]] = None,
        args: Sequence = None,
        map_kwargs: Sequence[Mapping[str, Any]] = None,
        kwargs: Mapping = None,
        pass_state: bool = False,
        num_chunks: int = None,
    ) -> SequenceTaskResult:
        r"""
        Functional equivalent of ``map()`` in-built function,
        but executed in a parallel fashion.
        Distributes the iterables,
        provided in the ``map_*`` arguments to ``num_chunks`` no of worker nodes.
        The idea is to:
        1. Split the the iterables provided in the ``map_*`` arguments into ``num_chunks`` no of equally sized chunks.
        2. Send these chunks to ``num_chunks`` number of worker nodes.
        3. Wait for all these worker nodes to finish their task(s).
        4. Combine the acquired results in the same sequence as provided in the ``map_*`` arguments.
        5. Return the combined results.
        *Steps 3-5 can be done lazily, on the fly with the help of an iterator*
        :param target:
            The ``Callable`` to be invoked inside a :py:class:`Process`.
            *It is invoked with the following signature:*
                ``target(map_iter[i], *map_args[i], *args, **map_kwargs[i], **kwargs)``
            *Where:*
            - ``i`` is the index of n\ :sup:`th` element of the Iterable(s) provided in the ``map_*`` arguments.
            - ``args`` and ``kwargs`` are passed from the ``**process_kwargs``.
            The ``pass_state`` Keyword Argument of allows you to include the ``state`` arg.
        :param map_iter:
            A sequence whose elements are supplied as the *first* positional argument to the ``target``.
        :param map_args:
            A sequence whose elements are supplied as positional arguments (``*args``) to the ``target``.
        :param map_kwargs:
            A sequence whose elements are supplied as keyword arguments (``**kwargs``) to the ``target``.
        :param args:
            The argument tuple for ``target``, supplied after ``map_iter`` and ``map_args``.
            By default, it is an empty ``tuple``.
        :param kwargs:
            A dictionary of keyword arguments for ``target``.
            By default, it is an empty ``dict``.
        :param pass_state:
            Whether this process needs to access the state.
            If this is set to ``False``,
            then the ``state`` argument won't be provided to the ``target``.
            If this is set to ``True``,
            then a :py:class:`State` object is provided as the first Argument to the ``target``.
            Unlike :py:class:`Process` it is set to ``False`` by default.
            (To retain a similar API to in-built ``map()``)
        :param num_chunks:
            The number of worker nodes to use.
            By default, it is set to ``multiprocessing.cpu_count()``
            (The number of CPU cores on your system)
        :return:
            The result is quite similar to ``map()`` in-built function.
            It returns a :py:class:`Iterable` which contains,
            the return values of the ``target`` function,
            when applied to every item of the Iterables provided in the ``map_*`` arguments.
            The actual "processing" starts as soon as you call this function.
            The returned :py:class:`Iterable` only fetches the results from the worker processes.
        .. note::
            - If ``len(map_iter) != len(maps_args) != len(map_kwargs)``,
              then the results will be cut-off at the shortest Sequence.
        See :ref:`worker_map` for Examples.
        """
        if num_chunks is None:
            num_chunks = multiprocessing.cpu_count()
        lengths = [len(i) for i in (map_iter, map_args, map_kwargs) if i is not None]
        assert (
            lengths
        ), "At least one of `map_iter`, `map_args`, or `map_kwargs` must be provided as a non-empty Sequence."
        # Results are cut off at the shortest provided sequence.
        length = min(lengths)
        # NOTE(review): this assertion is strict (>), so
        # length == num_chunks is rejected even though the message says
        # "less than" — confirm the intended boundary.
        assert (
            length > num_chunks
        ), "`length`(%d) cannot be less than `num_chunks`(%d)" % (length, num_chunks)
        chunk_length, extra = divmod(length, num_chunks)
        if extra:
            chunk_length += 1
        task_id = util.generate_task_id((chunk_length, length, num_chunks))
        iter_chunks = util.make_chunks(map_iter, chunk_length, num_chunks)
        args_chunks = util.make_chunks(map_args, chunk_length, num_chunks)
        kwargs_chunks = util.make_chunks(map_kwargs, chunk_length, num_chunks)
        # The target is serialized once and shared by every chunk message.
        target_bytes = serializer.dumps_fn(target)
        for index in range(num_chunks):
            params = (
                iter_chunks[index],
                args_chunks[index],
                args,
                kwargs_chunks[index],
                kwargs,
            )
            task = (params, pass_state, self.namespace)
            self._task_push.send_multipart(
                [
                    util.encode_chunk_id(task_id, index),
                    target_bytes,
                    serializer.dumps(task),
                ]
            )
        return SequenceTaskResult(self.server_address, task_id)
    def map(self, *args, **kwargs) -> list:
        """Eager version of :py:meth:`map_lazy`; blocks for all results."""
        return self.map_lazy(*args, **kwargs).as_list
    def __del__(self):
        # Best-effort cleanup; errors during interpreter shutdown are
        # deliberately ignored.
        try:
            self._task_push.close()
            util.close_zmq_ctx(self._zmq_ctx)
        except Exception:
            pass
| StarcoderdataPython |
3523556 | <reponame>WorksApplications/omni_torch<gh_stars>1-10
import torch
import torch.nn as nn
import omni_torch.networks.blocks as omth_blocks
class CifarNet_Vanilla(nn.Module):
    """Plain CIFAR-10 CNN: two conv stages (32 then 64 channels), each
    ending in 2x2 max-pooling and 2D dropout, followed by a 512-unit
    hidden layer and a 10-way linear classifier emitting raw logits."""
    def __init__(self):
        super(CifarNet_Vanilla, self).__init__()
        # Shared building blocks.
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.dropout_025 = nn.Dropout2d(0.25)
        self.dropout_050 = nn.Dropout(0.50)
        self.relu = nn.ReLU(inplace=True)
        # Stage 1: 3 -> 32 channels (32x32 -> 30x30 after the unpadded conv).
        self.conv1_1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
        self.conv1_2 = nn.Conv2d(32, 32, 3, stride=1, padding=0)
        # Stage 2: 32 -> 64 channels (15x15 -> 13x13 -> pooled to 6x6).
        self.conv2_1 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
        self.conv2_2 = nn.Conv2d(64, 64, 3, stride=1, padding=0)
        # Classifier head: 64 * 6 * 6 = 2304 flattened features.
        self.fc_layer1 = nn.Linear(2304, 512)
        self.fc_layer2 = nn.Linear(512, 10)
    def forward(self, x):
        # Stage 1: conv-relu, conv-relu-pool, spatial dropout.
        out = self.relu(self.conv1_1(x))
        out = self.dropout_025(self.pool(self.relu(self.conv1_2(out))))
        # Stage 2: same pattern at 64 channels.
        out = self.relu(self.conv2_1(out))
        out = self.dropout_025(self.pool(self.relu(self.conv2_2(out))))
        # Flatten and classify; logits are returned (loss applies softmax).
        out = out.view(out.size(0), -1)
        out = self.dropout_050(self.fc_layer1(out))
        return self.fc_layer2(out)
class CifarNet(nn.Module):
    """CifarNet rebuilt from omni_torch blocks; mirrors CifarNet_Vanilla
    with conv/fc stacks expressed as omth_blocks modules."""
    def __init__(self):
        super(CifarNet, self).__init__()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.dropout_050 = nn.Dropout(0.50)
        self.conv_block1 = omth_blocks.Conv_Block(input=3, filters=[32, 32], kernel_sizes=[3, 3], stride=[1, 1],
                                                  padding=[1, 0], batch_norm=None, dropout=[0, 0.25])
        self.conv_block2 = omth_blocks.Conv_Block(input=32, filters=[64, 64], kernel_sizes=[3, 3], stride=[1, 1],
                                                  padding=[1, 0], batch_norm=None, dropout=[0, 0.25])
        self.fc_layer = omth_blocks.fc_layer(2304, [512, 10], activation=[nn.ReLU(), None], batch_norm=False)
    def forward(self, x):
        x = self.pool(self.conv_block1(x))
        x = self.pool(self.conv_block2(x))
        x = x.view(x.size(0), -1)
        # BUG FIX: forward previously referenced self.fc_layer1/self.fc_layer2,
        # which are never defined (the head lives in self.fc_layer), raising
        # AttributeError on the first call.
        # NOTE(review): unlike CifarNet_Vanilla there is no dropout between
        # the two linear layers here; confirm whether omth_blocks.fc_layer
        # can be configured to insert it.
        x = self.fc_layer(x)
        return x
1677016 | <reponame>basilmahmood/Wsimple<filename>setup.py<gh_stars>0
from os import path
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed deterministically
    (the original left it open until garbage collection).
    """
    with open(path.join(path.dirname(__file__), fname)) as fh:
        return fh.read()
def from_here(relative_path):
    """Resolve *relative_path* against the directory holding this file."""
    here = path.dirname(__file__)
    return path.join(here, relative_path)
# NOTE(review): `requirements` is parsed here but never used below —
# install_requires is hard-coded; presumably the two should be kept in sync.
with open('requirements.txt') as f:
    requirements = f.readlines()
# source env/bin/activate
#? pypi
# rm -rf build dist Wsimple.egg-info
# python setup.py sdist bdist_wheel
# twine upload --skip-existing dist/*
# PyPI package metadata (release commands noted above).
setup(
    name="Wsimple",
    version="1.0.5",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=find_packages(include=("images", "wsimple", "wsimple.api" )),
    description="Wsimple.py: a API(Web interface) for Wealthsimple Trade",
    long_description=read('README.md'),
    long_description_content_type="text/markdown",
    url="https://github.com/yusuf8ahmed/Wsimple",
    # NOTE(review): these hard-coded pins duplicate requirements.txt, which
    # is read at the top of the file but never used.
    install_requires=[
        "loguru==0.5.3",
        "requests==2.24.0"],
    classifiers=[
        'Development Status :: 3 - Alpha',
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    platforms = 'any',
    keywords ='wsimple',
    python_requires='>=3',
    zip_safe = False
)
5020012 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Editor: <NAME>
School: BUPT
Date: 2018-03-02
算法思想: 交叉字符串
动态规划,dp[i][j]表示s1前i个字符与s2前j个字符能否组成s3前i+j个字符
"""
class Solution:
    """
    @param s1: A string
    @param s2: A string
    @param s3: A string
    @return: Determine whether s3 is formed by interleaving of s1 and s2
    """
    def isInterleave(self, s1, s2, s3):
        # Dynamic programming: dp[i][j] is True when the first i characters
        # of s1 and the first j characters of s2 can interleave to form the
        # first i + j characters of s3.
        len_1 = len(s1)
        len_2 = len(s2)
        len_3 = len(s3)
        if len_1 + len_2 != len_3:
            return False
        dp = [[False for i in range(len_2 + 1)] for j in range(len_1 + 1)]
        dp[0][0] = True
        # First column: s1 alone must be a prefix of s3.
        # BUG FIX: each cell must also require the previous prefix to match;
        # the original set dp[i][0] whenever s1[i-1] == s3[i-1], so e.g.
        # isInterleave('ab', '', 'cb') wrongly returned True.
        for i in range(1, len_1 + 1):
            dp[i][0] = dp[i - 1][0] and s1[i - 1] == s3[i - 1]
        # First row: s2 alone must be a prefix of s3 (same fix as above).
        for j in range(1, len_2 + 1):
            dp[0][j] = dp[0][j - 1] and s2[j - 1] == s3[j - 1]
        # General case: character i+j of s3 must extend a valid interleaving
        # either from above (taken from s1) or from the left (taken from s2).
        for i in range(1, len_1 + 1):
            for j in range(1, len_2 + 1):
                dp[i][j] = (dp[i - 1][j] and s1[i - 1] == s3[i + j - 1]) or \
                           (dp[i][j - 1] and s2[j - 1] == s3[i + j - 1])
        return dp[len_1][len_2]
if __name__ == '__main__':
    # Quick manual check using the classic interleaving example.
    s1 = 'aabcc'
    s2 = 'dbbca'
    s3 = 'aadbbcbcac'
    # BUG FIX: the Python 2 `print x` statement is a SyntaxError on
    # Python 3; the parenthesized single-argument form behaves identically
    # on both interpreter lines.
    print(Solution().isInterleave(s1, s2, s3))
22848 | from typing import Callable
class Knapsack:
@staticmethod
def best_value(
capacity: int,
sizes: list,
values: list,
quantities,
min_max: Callable = max,
zero_capacity_value=0,
fill_to_capacity=True,
output_item_list=True
):
if capacity < 0:
raise ValueError(f"Capacity cannot be negative: {capacity}")
for s in sizes:
if s <= 0:
raise ValueError(f"Item sizes must be positive: {sizes}")
if len(sizes) != len(values):
raise ValueError(f"The length of sizes {sizes} not match the length of values {values}")
if quantities:
if isinstance(quantities, list):
if len(quantities) != len(sizes):
raise ValueError(f"The length of quantities {quantities} not match the length of sizes {sizes}")
for q in quantities:
if q < 0:
raise ValueError(f"Item quantities cannot contain negative: {quantities}")
elif quantities < 0:
raise ValueError(f"Item quantities cannot be negative: {quantities}")
return Knapsack.best_value_with_limited_items_1d(
capacity=capacity,
sizes=sizes,
values=values,
quantities=quantities,
min_max=min_max,
zero_capacity_value=zero_capacity_value,
fill_to_capacity=fill_to_capacity,
output_dp_table=False,
output_item_list=output_item_list
)
else:
return Knapsack.best_value_with_unlimited_items_1d(
capacity=capacity,
sizes=sizes,
values=values,
min_max=min_max,
zero_capacity_value=zero_capacity_value,
fill_to_capacity=fill_to_capacity,
output_dp_table=False,
output_item_list=output_item_list
)
@staticmethod
def ways_to_fill(
capacity: int,
sizes: list,
quantities,
output_item_list=True
):
if capacity < 0:
raise ValueError(f"Capacity cannot be negative: {capacity}")
for s in sizes:
if s <= 0:
raise ValueError(f"Item sizes must be positive: {sizes}")
if quantities:
if isinstance(quantities, list):
if len(quantities) != len(sizes):
raise ValueError(f"The length of quantities {quantities} not match the length of sizes {sizes}")
for q in quantities:
if q < 0:
raise ValueError(f"Item quantities cannot contain negative: {quantities}")
elif quantities < 0:
raise ValueError(f"Item quantities cannot be negative: {quantities}")
return Knapsack.number_of_ways_to_fill_to_capacity_with_limited_items_1d(
capacity=capacity,
sizes=sizes,
quantities=quantities,
output_dp_table=False,
output_item_list=output_item_list
)
else:
return Knapsack.number_of_ways_to_fill_to_capacity_with_unlimited_items_1d(
capacity=capacity,
sizes=sizes,
output_dp_table=False,
output_item_list=output_item_list
)
    @staticmethod
    def best_value_with_limited_items_2d(
        capacity: int,
        sizes: list,
        values: list,
        min_max: Callable = max,
        zero_capacity_value=0,
        fill_to_capacity=True,
        iterate_sizes_first=True,
        output_dp_table=False,
        output_item_list=True
    ):
        """
        0-1 Knapsack (full 2-D table; each item used at most once).
        Bag Capacity = C
        Items Sizes = [s0, s1, ... ]
        Items Values = [v0, v1, ... ]
        2D-Array "max_value" Init
              Cap=[0, 1, 2, ... c_j-1, c_j, ..., C]
        s_0      0, 0, 0, ...     0,  v_0, ..., v_0   (where c_j-1 < w0 < c_j)
        s_1      0,
        ...      0,     Max Value
        s_i      0,     Other cells will be overwritten later
        ...      0,
        s_N      0,
        The meaning of max_value[i][c]:
            Given the FIRST "i + 1" items, the max value of a bag of size "c" can make
        value_without_item_i = max_value[i-1][c]
        value_with_item_i = max_value[i - 1][c - w[i]] + v[i]
            max_value[i - 1][c - w[i]] means if we put item i into the bag (+v[i]),
            the max value that the rest of the capacity "c - w[i]" can make with a selection of the previous items
        if the capacity of the bag is not large enough for the item i, max_value[i][c] = max_value[i - 1][c]
        otherwise max_value[i][c] = max( value_without_item_i + value_with_item_i )

        When fill_to_capacity is True, unreachable cells stay at +/-infinity
        and the final answer is reported as None; item_lists mirrors the dp
        table with the indices of the chosen items.
        """
        infinity = float("-inf") if min_max == max else float("inf")
        knapsack_init_value = infinity if fill_to_capacity else zero_capacity_value
        knapsack_value = [[knapsack_init_value for _ in range(capacity + 1)] for _ in range(len(sizes))]
        item_lists = None
        if output_item_list:
            item_lists = [[list() for _ in range(capacity + 1)] for _ in range(len(sizes))]
        for i in range(len(sizes)):  # init first column
            knapsack_value[i][0] = zero_capacity_value
        if fill_to_capacity:
            knapsack_value[0][sizes[0]] = values[0]  # init first row, c != w means not filled
            if output_item_list:
                item_lists[0][sizes[0]].append(0)
        else:
            for c in range(sizes[0], capacity + 1):  # init first row, c < w means the bag is empty
                knapsack_value[0][c] = values[0]
                if output_item_list:
                    item_lists[0][c].append(0)
        if iterate_sizes_first:  # we can iterate either of the sizes or capacity first
            for i in range(1, len(sizes)):
                for c in range(1, capacity + 1):
                    if c < sizes[i]:
                        knapsack_value[i][c] = knapsack_value[i - 1][c]
                    else:
                        knapsack_value[i][c] = min_max(knapsack_value[i - 1][c], knapsack_value[i - 1][c - sizes[i]] + values[i])
                    if output_item_list:
                        # Reconstruct the choice: unreachable -> empty list,
                        # skip item i -> copy row above, else take item i.
                        if knapsack_value[i][c] == knapsack_init_value:
                            item_lists[i][c] = list()
                        elif knapsack_value[i][c] == knapsack_value[i - 1][c]:
                            item_lists[i][c] = item_lists[i - 1][c].copy()
                        else:
                            item_lists[i][c] = item_lists[i - 1][c - sizes[i]] + [i]
        else:
            # Same recurrence with the loops swapped; both orders are valid
            # because each row only reads from row i-1.
            for c in range(1, capacity + 1):
                for i in range(1, len(sizes)):
                    if c < sizes[i]:
                        knapsack_value[i][c] = knapsack_value[i - 1][c]
                    else:
                        knapsack_value[i][c] = min_max(knapsack_value[i - 1][c], knapsack_value[i - 1][c - sizes[i]] + values[i])
                    if output_item_list:
                        if knapsack_value[i][c] == knapsack_init_value:
                            item_lists[i][c] = list()
                        elif knapsack_value[i][c] == knapsack_value[i - 1][c]:
                            item_lists[i][c] = item_lists[i - 1][c].copy()
                        else:
                            item_lists[i][c] = item_lists[i - 1][c - sizes[i]] + [i]
        if output_dp_table:
            return (knapsack_value, item_lists) if output_item_list else knapsack_value
        else:
            best_value = knapsack_value[len(sizes) - 1][capacity]
            if output_item_list:
                item_list = item_lists[len(sizes) - 1][capacity]
                return (None, item_list) if best_value == knapsack_init_value else (best_value, item_list)
            else:
                return None if best_value == knapsack_init_value else best_value
@staticmethod
def best_value_with_limited_items_1d(
capacity: int,
sizes: list,
values: list,
quantities=1,
min_max: Callable = max,
zero_capacity_value=0,
fill_to_capacity=True,
output_dp_table=False,
output_item_list=True
):
"""
Rolling dp array: copy row i-1 to row i
We just need one row:
knapsack_value[c] means the max value that a bag with capacity c can make
Each loop will overwrite the knapsack_value[c]
Cannot swap loops
"""
if isinstance(quantities, int):
quantities_list = list()
for i in range(len(sizes)):
quantities_list.append(quantities)
quantities = quantities_list
else:
assert len(sizes) == len(quantities)
infinity = float("-inf") if min_max == max else float("inf")
knapsack_init_value = infinity if fill_to_capacity else zero_capacity_value
knapsack_value = [knapsack_init_value for _ in range(capacity + 1)]
knapsack_value[0] = zero_capacity_value
item_lists = None
if output_item_list:
item_lists = [list() for _ in range(capacity + 1)]
for i in range(len(sizes)): # must loop item sizes first, because we are rolling the rows not columns
for q in range(1, quantities[i] + 1): # it is same as flatten the items: sizes=[2,3] quantities=[1,2] ==> sizes=[2, 3, 3]
# c < sizes[i], knapsack_value[c] won't change
# Capacity is looping backward, otherwise the item will be put in to the knapsack multiple times
for c in range(capacity, sizes[i] - 1, -1):
knapsack_value[c] = min_max(knapsack_value[c], knapsack_value[c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[c] == knapsack_init_value:
item_lists[c] = list()
elif knapsack_value[c] == knapsack_value[c - sizes[i]] + values[i]:
item_lists[c] = item_lists[c - sizes[i]] + [i]
# Another solution
# for c in range(capacity, sizes[i] - 1, -1):
# for q in range(1, min(quantities[i], c // sizes[i]) + 1):
# knapsack_value[c] = min_max(knapsack_value[c], knapsack_value[c - q * sizes[i]] + q * values[i])
# if output_item_list:
# if knapsack_value[c] == knapsack_init_value:
# item_lists[c] = list()
# elif knapsack_value[c] == knapsack_value[c - q * sizes[i]] + q * values[i]:
# item_lists[c] = item_lists[c - q * sizes[i]] + [i] * q
if output_dp_table:
return (knapsack_value, item_lists) if output_item_list else knapsack_value
else:
best_value = knapsack_value[capacity]
if output_item_list:
return (None, item_lists[capacity]) if best_value == knapsack_init_value else (best_value, item_lists[capacity])
else:
return None if best_value == knapsack_init_value else best_value
@staticmethod
def best_value_with_unlimited_items_1d(
capacity: int,
sizes: list,
values: list,
min_max: Callable = max,
zero_capacity_value=0,
fill_to_capacity=True,
iterate_sizes_first=True,
output_dp_table=False,
output_item_list=True
):
""" Similar to rolling row solution, but the two loops can swap the order """
infinity = float("-inf") if min_max == max else float("inf")
knapsack_init_value = infinity if fill_to_capacity else zero_capacity_value
knapsack_value = [knapsack_init_value for _ in range(capacity + 1)]
knapsack_value[0] = zero_capacity_value
item_lists = None
if output_item_list:
item_lists = [list() for _ in range(capacity + 1)]
if iterate_sizes_first:
for i in range(len(sizes)):
for c in range(sizes[i], capacity + 1): # Looping forward, so items can be added multiple times
knapsack_value[c] = min_max(knapsack_value[c], knapsack_value[c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[c] == knapsack_init_value:
item_lists[c] = list()
elif knapsack_value[c] == knapsack_value[c - sizes[i]] + values[i]:
item_lists[c] = item_lists[c - sizes[i]] + [i]
else:
for c in range(1, capacity + 1): # Looping forward, so items can be added multiple times
for i in range(len(sizes)):
if c >= sizes[i]: # c < sizes[i], knapsack_value[c] won't change
knapsack_value[c] = min_max(knapsack_value[c], knapsack_value[c - sizes[i]] + values[i])
if output_item_list:
if knapsack_value[c] == knapsack_init_value:
item_lists[c] = list()
elif knapsack_value[c] == knapsack_value[c - sizes[i]] + values[i]:
item_lists[c] = item_lists[c - sizes[i]] + [i]
if output_dp_table:
return (knapsack_value, item_lists) if output_item_list else knapsack_value
else:
best_value = knapsack_value[capacity]
if output_item_list:
return (None, item_lists[capacity]) if best_value == knapsack_init_value else (best_value, item_lists[capacity])
else:
return None if best_value == knapsack_init_value else best_value
@staticmethod
    def best_value_with_unlimited_items_2d(
            capacity: int,
            sizes: list,
            values: list,
            min_max: Callable = max,
            zero_capacity_value=0,
            fill_to_capacity=True,
            iterate_sizes_first=True,
            output_dp_table=False,
            output_item_list=True
    ):
        """Unbounded knapsack with an explicit 2-D DP table.

        knapsack_value[i][c] is the best (per ``min_max``) value at capacity c
        using the first i + 1 items, each usable unlimited times. Row i derives
        from row i - 1 (skip item i) and from the SAME row at c - sizes[i]
        (take one more copy of item i).

        Parameters mirror best_value_with_unlimited_items_1d; the full table is
        returned when ``output_dp_table`` is True, otherwise the single best
        value (None when ``fill_to_capacity`` is True and the capacity is
        unreachable), optionally paired with the chosen item indices.
        """
        infinity = float("-inf") if min_max == max else float("inf")
        knapsack_init_value = infinity if fill_to_capacity else zero_capacity_value
        knapsack_value = [[knapsack_init_value for _ in range(capacity + 1)] for _ in range(len(sizes))]
        item_lists = None
        if output_item_list:
            item_lists = [[list() for _ in range(capacity + 1)] for _ in range(len(sizes))]
        for i in range(len(sizes)):  # init first column
            knapsack_value[i][0] = zero_capacity_value
        for c in range(sizes[0], capacity + 1):  # init first row, c < w means the bag is empty, c != w means not fill
            if c % sizes[0] == 0 or not fill_to_capacity:
                knapsack_value[0][c] = values[0] * (c // sizes[0])
                if output_item_list:
                    item_lists[0][c].extend([0] * (c // sizes[0]))
        if iterate_sizes_first:  # we can iterate either of the sizes or capacity first
            for i in range(1, len(sizes)):
                for c in range(1, capacity + 1):
                    # if c < sizes[i]:
                    #     knapsack_value[i][c] = knapsack_value[i - 1][c]
                    # else:
                    #     best_value = knapsack_init_value
                    #     for k in range(1, (c // sizes[i]) + 1):
                    #         best_value = min_max(best_value, knapsack_value[i - 1][c - k * sizes[i]] + k * values[i])
                    #     knapsack_value[i][c] = min_max(knapsack_value[i - 1][c], best_value)
                    # Default: do not take item i at all (inherit from row i - 1).
                    knapsack_value[i][c] = knapsack_value[i - 1][c]
                    if output_item_list:
                        item_lists[i][c] = item_lists[i - 1][c].copy()
                    if c >= sizes[i]:
                        # Same-row reference (i, c - sizes[i]) allows unlimited reuse of item i.
                        knapsack_value[i][c] = min_max(knapsack_value[i][c], knapsack_value[i][c - sizes[i]] + values[i])
                        if output_item_list:
                            if knapsack_value[i][c] == knapsack_init_value:
                                item_lists[i][c] = list()
                            elif knapsack_value[i][c] == knapsack_value[i][c - sizes[i]] + values[i]:
                                item_lists[i][c] = item_lists[i][c - sizes[i]] + [i]
        else:
            for c in range(1, capacity + 1):
                for i in range(1, len(sizes)):
                    # if c < sizes[i]:
                    #     knapsack_value[i][c] = knapsack_value[i - 1][c]
                    # else:
                    #     best_value = knapsack_init_value
                    #     for k in range(1, (c // sizes[i]) + 1):
                    #         best_value = min_max(best_value, knapsack_value[i - 1][c - k * sizes[i]] + k * values[i])
                    #     knapsack_value[i][c] = min_max(knapsack_value[i - 1][c], best_value)
                    knapsack_value[i][c] = knapsack_value[i - 1][c]
                    if output_item_list:
                        item_lists[i][c] = item_lists[i - 1][c].copy()
                    if c >= sizes[i]:
                        knapsack_value[i][c] = min_max(knapsack_value[i][c], knapsack_value[i][c - sizes[i]] + values[i])
                        if output_item_list:
                            if knapsack_value[i][c] == knapsack_init_value:
                                item_lists[i][c] = list()
                            elif knapsack_value[i][c] == knapsack_value[i][c - sizes[i]] + values[i]:
                                item_lists[i][c] = item_lists[i][c - sizes[i]] + [i]
        if output_dp_table:
            return (knapsack_value, item_lists) if output_item_list else knapsack_value
        else:
            best_value = knapsack_value[len(sizes) - 1][capacity]
            if output_item_list:
                item_list = item_lists[len(sizes) - 1][capacity]
                return (None, item_list) if best_value == knapsack_init_value else (best_value, item_list)
            else:
                return None if best_value == knapsack_init_value else best_value
@staticmethod
    def number_of_ways_to_fill_to_capacity_with_unlimited_items_2d(
            capacity: int,
            sizes: list,
            output_dp_table=False,
            output_item_list=True
    ):
        """
        number_of_ways[i][c] means given the FIRST i + 1 items, the number of ways to make capacity c
        number_of_ways[i][c] = number_of_ways[i - 1][c] + number_of_ways[i][c - sizes[i]]

        Coin-change style counting with unlimited copies per item: the second
        term references the SAME row i, which is what permits reusing item i.
        Combinations (not permutations) are counted because items are
        introduced one row at a time.

        :param capacity: exact target capacity
        :param sizes: size of each item
        :param output_dp_table: return the full table(s) instead of one cell
        :param output_item_list: also build the explicit combination lists
            (can grow exponentially; disable when only the count is needed)
        :return: count for ``capacity``, with its combo list when requested
        """
        number_of_ways = [[0 for _ in range(capacity + 1)] for _ in range(len(sizes))]
        combo_lists = None
        if output_item_list:
            # combo_lists[i][c] is None while capacity c is unreachable.
            combo_lists = [[None for _ in range(capacity + 1)] for _ in range(len(sizes))]
        for i in range(len(sizes)):  # init first column
            number_of_ways[i][0] = 1  # no item for 0 capacity is 1 way
            if output_item_list:
                combo_lists[i][0] = [[]]  # empty list for no item combo
        for c in range(sizes[0], capacity + 1):  # init first row
            if c % sizes[0] == 0:
                number_of_ways[0][c] = 1
                if output_item_list:
                    combo_lists[0][c] = [[0] * (c // sizes[0])]  # one combo of all the item 0
        for i in range(1, len(sizes)):
            for c in range(1, capacity + 1):
                number_of_ways[i][c] = number_of_ways[i - 1][c]
                if c >= sizes[i]:
                    number_of_ways[i][c] += number_of_ways[i][c - sizes[i]]  # On the same line, no i - 1
                if output_item_list:
                    combo_lists[i][c] = combo_lists[i - 1][c]
                    if c >= sizes[i] and combo_lists[i][c - sizes[i]] is not None:
                        # Extend every combo for c - sizes[i] with one more copy of item i.
                        new_combo_list = list()
                        for combo in combo_lists[i][c - sizes[i]]:
                            new_combo_list.append(combo + [i])
                        combo_lists[i][c] = combo_lists[i][c] + new_combo_list if combo_lists[i][c] is not None else new_combo_list
        if output_dp_table:
            return (number_of_ways, combo_lists) if output_item_list else number_of_ways
        else:
            best_value = number_of_ways[len(sizes) - 1][capacity]
            if output_item_list:
                combo_list = combo_lists[len(sizes) - 1][capacity]
                return (best_value, combo_list)
            else:
                return best_value
@staticmethod
def number_of_ways_to_fill_to_capacity_with_unlimited_items_1d(
capacity: int,
sizes: list,
output_dp_table=False,
output_item_list=True
):
"""
number_of_ways[c] means the number of ways to make capacity c
rolling row[i-1] over to row[i]
number_of_ways[c] = number_of_ways[c] + number_of_ways[c - sizes[i]]
"""
number_of_ways = [0 for _ in range(capacity + 1)]
combo_lists = None
if output_item_list:
combo_lists = [None for _ in range(capacity + 1)]
number_of_ways[0] = 1 # no item for 0 capacity is 1 way
if output_item_list:
combo_lists[0] = [[]] # empty list for no item combo
for i in range(len(sizes)):
for c in range(sizes[i], capacity + 1): # c starts from sizes[i] (c >= sizes[i])
number_of_ways[c] += number_of_ways[c - sizes[i]] # + (c > sizes[i] and c % sizes[i] == 0)
if output_item_list:
if combo_lists[c - sizes[i]] is not None:
new_combo_list = list()
for combo in combo_lists[c - sizes[i]]:
new_combo_list.append(combo + [i])
combo_lists[c] = combo_lists[c] + new_combo_list if combo_lists[c] is not None else new_combo_list
if output_dp_table:
return (number_of_ways, combo_lists) if output_item_list else number_of_ways
else:
best_value = number_of_ways[capacity]
if output_item_list:
combo_list = combo_lists[capacity]
return (best_value, combo_list)
else:
return best_value
@staticmethod
    def number_of_ways_to_fill_to_capacity_with_limited_items_2d(
            capacity: int,
            sizes: list,
            output_dp_table=False,
            output_item_list=True
    ):
        """
        number_of_ways[i][c] means given the FIRST i items, the number of ways to make capacity c
        number_of_ways[i][c] = number_of_ways[i - 1][c] + number_of_ways[i - 1][c - sizes[i]]
        number_of_ways[i - 1][c] means without item[i], only use FIRST i - 1 items, the number of combos
        number_of_ways[i - 1][c - sizes[i]] means with item[i], every combo that make c-sizes[i] can add item[i] to get a new combo that make c

        0/1 (subset-sum style) counting: each item is usable at most once, so
        BOTH recurrence terms reference the previous row i - 1.

        :param capacity: exact target capacity
        :param sizes: size of each item
        :param output_dp_table: return the full table(s) instead of one cell
        :param output_item_list: also build the explicit combination lists
        :return: count for ``capacity``, with its combo list when requested
        """
        number_of_ways = [[0 for _ in range(capacity + 1)] for _ in range(len(sizes))]
        combo_lists = None
        if output_item_list:
            # combo_lists[i][c] is None while capacity c is unreachable.
            combo_lists = [[None for _ in range(capacity + 1)] for _ in range(len(sizes))]
        for i in range(len(sizes)):  # init first column
            number_of_ways[i][0] = 1  # no item for 0 capacity is 1 way
            if output_item_list:
                combo_lists[i][0] = [[]]  # empty list for no item combo
        if sizes[0] <= capacity:  # init first row
            number_of_ways[0][sizes[0]] = 1
            if output_item_list:
                combo_lists[0][sizes[0]] = [[0]]
        for i in range(1, len(sizes)):
            for c in range(1, capacity + 1):
                # if c < sizes[i]:
                #     number_of_ways[i][c] = number_of_ways[i - 1][c]
                # elif c == sizes[i]:
                #     number_of_ways[i][c] = number_of_ways[i - 1][c] + number_of_ways[i - 1][c - sizes[i]]
                number_of_ways[i][c] = number_of_ways[i - 1][c]
                if c >= sizes[i]:
                    number_of_ways[i][c] += number_of_ways[i - 1][c - sizes[i]]
                if output_item_list:
                    if combo_lists[i - 1][c] is not None:
                        combo_lists[i][c] = combo_lists[i - 1][c]
                    if c >= sizes[i] and combo_lists[i - 1][c - sizes[i]] is not None:
                        # Extend every previous-row combo for c - sizes[i] with item i.
                        new_combo_list = list()
                        for combo in combo_lists[i - 1][c - sizes[i]]:
                            new_combo_list.append(combo + [i])
                        combo_lists[i][c] = combo_lists[i][c] + new_combo_list if combo_lists[i][c] is not None else new_combo_list
        if output_dp_table:
            return (number_of_ways, combo_lists) if output_item_list else number_of_ways
        else:
            best_value = number_of_ways[len(sizes) - 1][capacity]
            if output_item_list:
                combo_list = combo_lists[len(sizes) - 1][capacity]
                return (best_value, combo_list)
            else:
                return best_value
@staticmethod
def number_of_ways_to_fill_to_capacity_with_limited_items_1d(
capacity: int,
sizes: list,
quantities=1,
output_dp_table=False,
output_item_list=True
):
"""
number_of_ways[c] means the number of ways to make capacity c
number_of_ways[c] = number_of_ways[c] + number_of_ways[c - sizes[i]]
"""
if isinstance(quantities, int):
quantities_list = list()
for i in range(len(sizes)):
quantities_list.append(quantities)
quantities = quantities_list
else:
assert len(sizes) == len(quantities)
number_of_ways = [0 for _ in range(capacity + 1)]
combo_lists = None
if output_item_list:
combo_lists = [None for _ in range(capacity + 1)]
number_of_ways[0] = 1 # no item for 0 capacity is 1 way
if output_item_list:
combo_lists[0] = [[]] # empty list for no item combo
for i in range(len(sizes)):
for q in range(1, quantities[i] + 1):
for c in range(capacity, sizes[i] - 1, -1): # c >= sizes[i]
number_of_ways[c] += number_of_ways[c - sizes[i]]
if output_item_list:
if combo_lists[c - sizes[i]] is not None:
new_combo_list = list()
for combo in combo_lists[c - sizes[i]]:
new_combo_list.append(combo + [i])
combo_lists[c] = combo_lists[c] + new_combo_list if combo_lists[c] is not None else new_combo_list
if output_dp_table:
return (number_of_ways, combo_lists) if output_item_list else number_of_ways
else:
best_value = number_of_ways[capacity]
if output_item_list:
combo_list = combo_lists[capacity] # It might have duplicates
unique_combo_list = list()
if combo_list:
combo_set = set()
for i, combo in enumerate(combo_list):
t = tuple(combo.sort())
if t not in combo_set:
unique_combo_list.append(combo)
combo_set.add(t)
return (len(unique_combo_list), unique_combo_list)
return (best_value, combo_list)
else:
return best_value
| StarcoderdataPython |
1600550 | <reponame>yukiar/phrase_alignment_cted
import os, glob, pathlib
from xml.etree import ElementTree
class Node:
    """One constituent node of a parse tree, with phrase-alignment annotations
    (one space-separated id string per annotator: pa1/pa2/pa3)."""

    def __init__(self, id, tokens, start, end, pidx, cidx_list, pa1, pa2, pa3):
        self.id = id  # node id, e.g., c0 and t10
        self.tokens = tokens
        self.txt = ' '.join(tokens).strip(' ')
        self.start = start  # token span start
        self.end = end  # token span end
        self.pidx = pidx  # parent (initially an id; replaced by an index via update_pid)
        self.cidx_list = cidx_list  # child indices, [-1] for leaves
        self.pa1 = self._split_ids(pa1)
        self.pa2 = self._split_ids(pa2)
        self.pa3 = self._split_ids(pa3)

    @staticmethod
    def _split_ids(id_string):
        """Split a space-separated id string into a list of stripped ids."""
        return [aid.strip() for aid in id_string.split(' ')]

    def update_pid(self, id_idx_dic):
        """Replace the stored parent id with its list index in the tree."""
        self.pidx = id_idx_dic[self.pidx]

    def get_alignment(self, annotator_label, swap=False):
        """Return this node's alignment pairs for one annotator.

        With swap=False the pairs are (self.id, aligned_id); with swap=True
        they are (aligned_id, self.id). Unknown labels yield None.
        """
        by_label = {'pa1': self.pa1, 'pa2': self.pa2, 'pa3': self.pa3}
        aligned = by_label.get(annotator_label)
        if aligned is None:
            return None
        if swap:
            return [(aid, self.id) for aid in aligned]
        return [(self.id, aid) for aid in aligned]

    def __eq__(self, other):
        if other is None:
            return False
        if not isinstance(other, Node):
            raise TypeError("Must compare against type Node")
        return self.id == other.id

    def __lt__(self, other):
        if not isinstance(other, Node):
            raise TypeError("Must compare against type Node")
        return self.id < other.id
def is_proposer_ancestor(nid, p_cand, tree):
    """Return True iff index ``p_cand`` is a proper ancestor of the node with id ``nid``."""
    node, idx = get_node_by_id(tree, nid)
    # The root is its own parent, hence it has no proper ancestor.
    if idx == node.pidx:
        return False
    ancestors = []
    get_all_ancestors(ancestors, idx, tree)
    return p_cand in ancestors
def is_ancestor(iid, jid, tree):
    """Return True iff the node with id ``jid`` is the node ``iid`` itself or one of its ancestors."""
    _, i_idx = get_node_by_id(tree, iid)
    _, j_idx = get_node_by_id(tree, jid)
    ancestors_of_i = [i_idx]  # include the node itself
    get_all_ancestors(ancestors_of_i, i_idx, tree)
    return j_idx in ancestors_of_i
def find_lca(i, j, tree):
    """Return the index of the lowest common ancestor of the nodes with ids ``i`` and ``j``."""
    _, i_idx = get_node_by_id(tree, i)
    _, j_idx = get_node_by_id(tree, j)
    ancestors_i, ancestors_j = [i_idx], [j_idx]
    get_all_ancestors(ancestors_i, i_idx, tree)
    get_all_ancestors(ancestors_j, j_idx, tree)
    # The post-order tree layout gives deeper nodes smaller indices, so the
    # minimum shared ancestor index is the lowest common ancestor.
    return min(set(ancestors_i) & set(ancestors_j))
def get_all_ancestors(ans, i, tree):
    """Append to ``ans`` the parent chain of node ``i`` up to the root.

    The root is marked by being its own parent. Like the recursive original,
    the root index can appear twice: once as the last real parent and once
    from the terminating step.
    """
    current = i
    while True:
        parent = tree[current].pidx
        ans.append(parent)
        if parent == current:  # reached the self-parented root
            return
        current = parent
def get_all_descendants(des, i, tree):
    """Append to ``des`` every descendant index of node ``i`` in pre-order.

    A leaf is marked by a child list of exactly [-1] and contributes nothing.
    """
    children = tree[i].cidx_list
    if children == [-1]:  # leaf sentinel
        return
    for child in children:
        des.append(child)
        get_all_descendants(des, child, tree)
def get_node_by_id(tree, id):
    """Return ``(node, index)`` for the node whose id matches, or None if absent."""
    for index, node in enumerate(tree):
        if node.id == id:
            return node, index
    return None
def get_idx_by_id(tree, id):
    """Return the list index of the node whose id matches, or None if absent."""
    return next((index for index, node in enumerate(tree) if node.id == id), None)
def load_corpus(path, dev_or_test, bos_eos, text_match):
    """Load all source/target tree pairs plus the three annotators' alignments.

    ``dev_or_test`` optionally selects a sub-directory of ``path``; ``bos_eos``
    shifts token spans to leave room for a <s> symbol; ``text_match`` converts
    the id-based alignment pairs into text-based pairs.
    """
    xml_dir = path if dev_or_test is None else os.path.join(path, dev_or_test)
    # Pair ids are derived from the source file names 's-<id>.xml'.
    pair_ids = [pathlib.Path(p).stem[2:] for p in glob.glob(os.path.join(xml_dir, 's-*.xml'))]
    s_trees, t_trees, s_tokens, t_tokens = [], [], [], []
    for pair_id in pair_ids:
        for prefix, trees, tokens_list in (('s-', s_trees, s_tokens), ('t-', t_trees, t_tokens)):
            tree, tokens = _read_xml(os.path.join(xml_dir, prefix + pair_id + '.xml'), bos_eos)
            trees.append(tree)
            tokens_list.append(tokens)
    annotator_A = get_annotations(s_trees, t_trees, 'pa1')
    annotator_B = get_annotations(s_trees, t_trees, 'pa2')
    annotator_C = get_annotations(s_trees, t_trees, 'pa3')
    if text_match:
        annotator_A = convert_id_to_text(annotator_A, s_trees, t_trees)
        annotator_B = convert_id_to_text(annotator_B, s_trees, t_trees)
        annotator_C = convert_id_to_text(annotator_C, s_trees, t_trees)
    return s_tokens, t_tokens, s_trees, t_trees, annotator_A, annotator_B, annotator_C
def get_annotations(s_trees, t_trees, annot_label):
    """Collect one annotator's alignment pairs per sentence pair.

    Each entry is a set of (source_id, target_id) tuples gathered from both
    directions: source nodes contribute directly, target nodes with swap=True
    so the pair orientation stays (source, target).
    """
    all_annotations = []
    for s_tree, t_tree in zip(s_trees, t_trees):
        pairs = [p for node in s_tree for p in node.get_alignment(annot_label)]
        pairs += [p for node in t_tree for p in node.get_alignment(annot_label, swap=True)]
        all_annotations.append(set(pairs))
    return all_annotations
def convert_id_to_text(annotation, s_trees, t_trees, verbose=False):
    """Map every (source_id, target_id) pair to its (source_text, target_text)."""
    converted = []
    for tree_idx, pairs in enumerate(annotation):
        converted.append({
            (_find_and_get_text(s_id, s_trees[tree_idx], verbose),
             _find_and_get_text(t_id, t_trees[tree_idx], verbose))
            for s_id, t_id in pairs
        })
    return converted
def _find_and_get_text(id, tree, verbose):
if id == '-1':
return '---'
node = list(filter(lambda x: id == x.id, tree))[0]
txt = ' '.join(node.tokens)
if verbose:
return node.id + ' ' + txt.strip(' ')
else:
return txt.strip(' ')
def _read_xml(xml_path, bos_eos):
    """Parse one tree XML file into a (post-order node list, token list) pair."""
    tree = []
    tokens = []
    id_to_index = {}
    # The actual parse tree is the first child of the XML document root.
    root = ElementTree.parse(xml_path).getroot()[0]
    root_id = root.get('id').strip(' ')
    _recursive_trace_postorder(root, root_id, tree, tokens, id_to_index, bos_eos)
    # Parent fields were recorded as ids during the trace; rewrite them as indices.
    for node in tree:
        node.update_pid(id_to_index)
    return tree, tokens
def _recursive_trace_postorder(node, pid, tree, tokens, id_idx_dic, bos_eos):
    """Post-order trace of an XML parse tree into a flat Node list.

    Children are emitted before their parent, so deeper nodes receive smaller
    list indices (find_lca relies on this ordering). Terminal strings are
    appended to ``tokens``; ``id_idx_dic`` maps node id -> index in ``tree``.
    Returns (token span start, token span end, index of the created Node).
    With bos_eos=True every STORED span is shifted by one to leave room for a
    leading <s> symbol, while the values returned to the caller stay unshifted.

    NOTE(review): the final branch reads only node[0] and node[1], so trees
    are assumed to be at most binary above the unary/pre-terminal level --
    confirm for new data.
    """
    if len(node) == 1:  # unary or pre-terminal
        child = node[0]
        nid = node.get('id').strip(' ')
        # Annotator alignments default to '-1' ("aligned to nothing").
        pa1 = node.get('pa1').strip(' ') if 'pa1' in node.attrib else '-1'
        pa2 = node.get('pa2').strip(' ') if 'pa2' in node.attrib else '-1'
        pa3 = node.get('pa3').strip(' ') if 'pa3' in node.attrib else '-1'
        if child.get('id')[0] == 't':  # pre-terminal
            tokens.append(child.text)
            if bos_eos:  # increment index for <s> symbol
                tree.append(Node(nid, [child.text], len(tokens), len(tokens) + 1, pid, [-1], pa1, pa2, pa3))
            else:
                tree.append(Node(nid, [child.text], len(tokens) - 1, len(tokens), pid, [-1], pa1, pa2, pa3))
            id_idx_dic[nid] = len(tree) - 1
            return len(tokens) - 1, len(tokens), len(tree) - 1
        else:  # unary
            start, end, cidx = _recursive_trace_postorder(child, nid, tree, tokens, id_idx_dic, bos_eos)
            if bos_eos:
                tree.append(Node(nid, tokens[start:end], start + 1, end + 1, pid, [cidx], pa1, pa2, pa3))
            else:
                tree.append(Node(nid, tokens[start:end], start, end, pid, [cidx], pa1, pa2, pa3))
            id_idx_dic[nid] = len(tree) - 1
            return start, end, len(tree) - 1
    else:
        nid = node.get('id').strip(' ')
        pa1 = node.get('pa1').strip(' ') if 'pa1' in node.attrib else '-1'
        pa2 = node.get('pa2').strip(' ') if 'pa2' in node.attrib else '-1'
        pa3 = node.get('pa3').strip(' ') if 'pa3' in node.attrib else '-1'
        # Trace both children first (post-order); their spans are contiguous.
        start, _, left_child_idx = _recursive_trace_postorder(node[0], nid, tree, tokens, id_idx_dic, bos_eos)
        _, end, right_child_idx = _recursive_trace_postorder(node[1], nid, tree, tokens, id_idx_dic, bos_eos)
        if bos_eos:  # increment index for <s> symbol
            tree.append(
                Node(nid, tokens[start:end], start + 1, end + 1, pid, [left_child_idx, right_child_idx], pa1, pa2, pa3))
        else:
            tree.append(Node(nid, tokens[start:end], start, end, pid, [left_child_idx, right_child_idx], pa1, pa2, pa3))
        id_idx_dic[nid] = len(tree) - 1
        return start, end, len(tree) - 1
| StarcoderdataPython |
6657724 | # pylint: disable=missing-docstring
import unittest
import numpy as np
import tensorflow as tf
import tf_encrypted as tfe
from tf_encrypted.keras.testing_utils import agreement_test, layer_test
# Seed NumPy so the randomly generated test data is reproducible across runs.
np.random.seed(42)
class TestDense(unittest.TestCase):
    """Checks that tfe.keras.layers.Dense agrees with tf.keras.layers.Dense,
    forward (agreement/layer tests) and backward (manual gradient check)."""
    def setUp(self):
        # Every test builds its own TF graph from scratch.
        tf.reset_default_graph()
    def test_dense_bias(self):
        self._core_dense(use_bias=True)
    def test_dense_nobias(self):
        self._core_dense(use_bias=False)
    def test_dense_relu(self):
        self._core_dense(activation="relu")
    def _core_dense(self, **layer_kwargs):
        """Run the shared forward-pass tests with extra layer kwargs merged in."""
        input_shape = [4, 5]
        kernel = np.random.normal(input_shape[::-1])
        # NOTE(review): input_shape[::-1] is passed as np.random.normal's first
        # (loc) argument, not size= -- possibly np.random.normal(size=input_shape[::-1])
        # was intended; confirm against the upstream test suite.
        initializer = tf.keras.initializers.Constant(kernel)
        base_kwargs = {
            "units": 4,
            "kernel_initializer": initializer,
        }
        # Test-specific kwargs override/extend the defaults.
        kwargs = {**base_kwargs, **layer_kwargs}
        agreement_test(tfe.keras.layers.Dense,
                       kwargs=kwargs,
                       input_shape=input_shape)
        layer_test(tfe.keras.layers.Dense,
                   kwargs=kwargs,
                   batch_input_shape=input_shape)
    def test_backward(self) -> None:
        """Compare tfe's layer.backward() gradients with tf.gradients on an
        equivalent plain-TF graph built from the same constants."""
        input_shape = [1, 5]
        input_data = np.ones(input_shape)
        weights_second_layer = np.ones(shape=[1, 5])
        kernel = np.ones([5, 5])
        initializer = tf.keras.initializers.Constant(kernel)
        with tfe.protocol.SecureNN() as prot:
            private_input = prot.define_private_variable(input_data)
            w = prot.define_private_variable(weights_second_layer)
            tfe_layer = tfe.keras.layers.Dense(
                5,
                input_shape=input_shape[1:],
                kernel_initializer=initializer
            )
            dense_out_pond = tfe_layer(private_input)
            loss = dense_out_pond * w
            # backward
            d_out = w
            grad, d_x = tfe_layer.backward(d_out)
            with tfe.Session() as sess:
                sess.run(tf.global_variables_initializer())
                tfe_loss = sess.run(loss.reveal())
                tfe_d_k = sess.run(grad[0].reveal())
                tfe_d_b = sess.run(grad[1].reveal())
                tfe_d_x = sess.run(d_x.reveal())
        # reset graph
        tf.reset_default_graph()
        with tf.Session() as sess:
            initializer = tf.keras.initializers.Constant(kernel)
            tf_layer = tf.keras.layers.Dense(
                5,
                input_shape=input_shape[1:],
                kernel_initializer=initializer
            )
            x = tf.Variable(input_data, dtype=tf.float32)
            y = tf_layer(x)
            w = tf.Variable(weights_second_layer, dtype=tf.float32)
            loss = y * w
            k, b = tf_layer.trainable_weights
            # backward
            d_x, d_k, d_b = tf.gradients(xs=[x, k, b], ys=loss)
            sess.run(tf.global_variables_initializer())
            tf_loss, tf_d_x, tf_d_k, tf_d_b = sess.run([loss, d_x, d_k, d_b])
        # Secure and plain results are only compared to 2 decimal places.
        np.testing.assert_array_almost_equal(tfe_loss, tf_loss, decimal=2)
        np.testing.assert_array_almost_equal(tfe_d_k, tf_d_k, decimal=2)
        np.testing.assert_array_almost_equal(tfe_d_b, tf_d_b, decimal=2)
        np.testing.assert_array_almost_equal(tfe_d_x, tf_d_x, decimal=2)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
8044175 | """
A simple animated scene that loads from OBJ, uses textures, and does deferred lighting with shadow map.
"""
import logging
import math
from time import time
from pathlib import Path
import pyglet
from pyglet import gl
from euclid3 import Matrix4, Point3
from fogl.debug import DebugWindow
from fogl.framebuffer import FrameBuffer
from fogl.glutil import gl_matrix
from fogl.mesh import ObjMesh, Mesh
from fogl.shader import Program, VertexShader, FragmentShader
from fogl.texture import ImageTexture, Texture, NormalTexture
from fogl.util import try_except_log, load_png
from fogl.vao import VertexArrayObject
from fogl.util import enabled, disabled, debounce
class FoglWindow(pyglet.window.Window):
    """
    Pyglet window subclass that draws a scene every frame.
    Per-frame pipeline: geometry pass into an offscreen G-buffer, a depth-only
    shadow pass from the light's viewpoint, a deferred lighting pass combining
    both, and a final copy to the window.
    """
    def __init__(self, **kwargs):
        # Compile shader programs, load meshes/textures and create the fixed
        # size shadow framebuffer; window-sized buffers are built in on_resize.
        super().__init__(**kwargs)
        local = Path(__file__).parent
        # Shader setup
        self.view_program = Program(
            VertexShader(local / "glsl/view_vertex.glsl"),
            FragmentShader(local / "glsl/view_fragment.glsl")
        )
        self.lighting_program = Program(
            VertexShader(local / "glsl/copy_vertex.glsl"),
            FragmentShader(local / "glsl/copy_fragment.glsl")
        )
        self.copy_program = Program(
            VertexShader(local / "glsl/copy_vertex.glsl"),
            FragmentShader(local / "glsl/simple_copy_frag.glsl")
        )
        # Load a texture
        texture = ImageTexture(*load_png(local / "textures/plasma.png"), unit=3)
        # Load vertex data from an OBJ file as a "mesh"
        # OBJ file belongs to the Blender project.
        self.suzanne = ObjMesh(local / "obj/suzanne.obj", texture=texture)
        # A simple plane
        plane_size = 3
        self.plane = Mesh([
            # position color normal texture coord
            ((plane_size, plane_size, 0.), (1., 1., 1.), (0., 0., -1.), (1., 1., 1.)),
            ((-plane_size, plane_size, 0.), (1., 1., 1.), (0., 0., -1.), (0., 1., 1.)),
            ((plane_size, -plane_size, 0.), (1., 1., 1.), (0., 0., -1.), (1., 0., 1.)),
            ((-plane_size, -plane_size, 0.), (1., 1., 1.), (0., 0., -1.), (0., 0., 1.)),
        ], texture=texture)
        # A framebuffer for rendering the shadow light. It needs only a depth texture.
        self.shadow_size = 256, 256
        self.shadow_buffer = FrameBuffer(self.shadow_size, autoclear=True, depth_unit=3, set_viewport=True)
        self.vao = VertexArrayObject()
    @debounce(0.1)  # Prevent too many events from accumulating
    def on_resize(self, width, height):
        # Rebuild the window-sized G-buffer and lighting target on resize.
        self.size = width, height
        # We need to recreate the offscreen buffer if the window size changes
        # This includes when the window is first created.
        render_textures = dict(
            # These will represent the different channels of the framebuffer,
            # that the shader can render to.
            color=Texture(self.size, unit=0),
            normal=NormalTexture(self.size, unit=1),
            position=NormalTexture(self.size, unit=2),
        )
        self.offscreen_buffer = FrameBuffer(self.size, render_textures, autoclear=True, set_viewport=True)
        render_textures2 = dict(
            color=Texture(self.size, unit=0),
        )
        self.offscreen_buffer2 = FrameBuffer(self.size, render_textures2, autoclear=True, set_viewport=True)
        return pyglet.event.EVENT_HANDLED  # Work around pyglet internals
    @try_except_log
    def on_draw(self):
        """Render one frame: geometry pass, shadow pass, lighting pass, copy."""
        # Prevent trying to draw before things have been set up
        if not hasattr(self, "offscreen_buffer"):
            return
        # Model matrix we'll use to position the main model
        suzanne_model_matrix = (Matrix4
                                .new_identity()
                                .rotatex(-math.pi/2)
                                .rotatez(time()))  # Rotate over time
        plane_model_matrix = Matrix4.new_rotatey(math.pi).translate(0, 0, 2)
        # Render to an offscreen buffer
        with self.offscreen_buffer, self.view_program, \
                enabled(gl.GL_DEPTH_TEST), disabled(gl.GL_CULL_FACE):
            gl.glDepthMask(gl.GL_TRUE)
            w, h = self.size
            aspect = h / w
            # Calculate a view frustum; this is basically our camera.
            near = 5
            far = 15
            width = 2
            height = 2 * aspect
            frustum = (Matrix4.new(
                near / width, 0, 0, 0,
                0, near / height, 0, 0,
                0, 0, -(far + near)/(far - near), -1,
                0, 0, -2 * far * near/(far - near), 0
            ))
            # The view matrix positions the camera in the scene
            view_matrix = (Matrix4
                           .new_identity()
                           .translate(0, 0, -8))
            # Send the matrices to GL
            gl.glUniformMatrix4fv(0, 1, gl.GL_FALSE,
                                  gl_matrix(frustum * view_matrix))
            gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE,
                                  gl_matrix(suzanne_model_matrix))
            gl.glUniform4f(2, 0.3, 0.3, 1, 1)  # Set the "color" uniform to blue
            self.suzanne.draw()
            # We'll also draw a simple plane behind the main model
            gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE,
                                  gl_matrix(plane_model_matrix))
            gl.glUniform4f(2, 0.3, 1, 0.3, 1)  # Set the "color" uniform to green
            self.plane.draw(mode=gl.GL_TRIANGLE_STRIP)
        # Render shadow buffer
        # Basically the same scene as above, but to a different buffer and from a different view
        with self.shadow_buffer, self.view_program, enabled(gl.GL_DEPTH_TEST), disabled(gl.GL_CULL_FACE):
            gl.glDepthMask(gl.GL_TRUE)
            frustum = Matrix4.new_perspective(1, 1, 1, 12)
            view_matrix = (Matrix4
                           .new_identity()
                           .translate(0, 0, -4)
                           .rotatey(0.5)
                           .rotatex(0.3))
            # Recover the light's world position from its (inverted) view matrix.
            light_pos = (view_matrix.inverse() * Point3(0, 0, 0))
            light_view_matrix = frustum * view_matrix
            gl.glUniformMatrix4fv(0, 1, gl.GL_FALSE,
                                  gl_matrix(light_view_matrix))
            gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE,
                                  gl_matrix(suzanne_model_matrix))
            gl.glUniform4f(2, 0.9, 0.3, 0.4, 1)
            self.suzanne.draw()
            gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE,
                                  gl_matrix(plane_model_matrix))
            self.plane.draw(mode=gl.GL_TRIANGLE_STRIP)
        # Now draw the offscreen buffer to another buffer, combining it with the
        # lighting information to get a nice image.
        # Note: This step is pretty pointless here, as we might just draw directly to screen.
        # Just demonstrates how to do it.
        with self.vao, self.offscreen_buffer2, self.lighting_program, disabled(gl.GL_CULL_FACE, gl.GL_DEPTH_TEST):
            gl.glUniform3f(0, *light_pos)
            gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE, gl_matrix(light_view_matrix))
            # Bind some of the offscreen buffer's textures so the shader can read them.
            with self.offscreen_buffer["color"], self.offscreen_buffer["normal"], \
                    self.offscreen_buffer["position"], self.shadow_buffer["depth"]:
                gl.glDrawArrays(gl.GL_TRIANGLES, 0, 6)
        # Now render the finished image to the screen
        with self.vao, self.copy_program, disabled(gl.GL_CULL_FACE, gl.GL_DEPTH_TEST):
            with self.offscreen_buffer2["color"]:
                gl.glDrawArrays(gl.GL_TRIANGLES, 0, 6)
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    # Request an OpenGL 4.5 double-buffered context for the window.
    config = pyglet.gl.Config(major_version=4,
                              minor_version=5,
                              double_buffer=True)
    # This enables the GL error log, really useful for tracking down obscure problems.
    # Requires a recent GL version, though. https://www.khronos.org/opengl/wiki/Debug_Output
    config.debug = True
    w = FoglWindow(config=config, resizable=True)
    # DebugWindow()  # Simple helper that displays all the offscreen textures
    # Scheduling a no-op keeps the pyglet clock ticking so frames keep drawing.
    pyglet.clock.schedule_interval(lambda dt: None, 0.01)
    pyglet.app.run()
| StarcoderdataPython |
1726359 | import numpy as np
import glob
import random
import torch
import torch.utils.data
from analyzer.data.utils.data_raw import *
from analyzer.data.utils.data_misc import *
from analyzer.data.augmentation import Augmentor
class PairDataset():
    '''
    This Dataloader will prepare samples that are pairs for feeding the
    contrastive learning algorithm.
    '''
    def __init__(self, cfg, iter_num: int = -1):
        self.cfg = cfg
        # Optional pre-computed HDF5 chunk file; when unset, chunks are cut
        # from the raw volume on the fly.
        self.chunks_path = self.cfg.SSL.USE_PREP_DATASET
        self.sample_volume_size = (64, 64, 64)
        self.sample_stride = (1, 1, 1)
        self.cl_mode = self.cfg.MODE.PROCESS.replace('cl', '')
        self.augmentor = Augmentor(self.sample_volume_size)
        # Data information if you want to produce input on the fly.
        if not self.cfg.SSL.USE_PREP_DATASET:
            self.volume, self.label = self.get_input()
            self.volume_size = [np.array(self.volume.shape)]
            self.sample_volume_size = np.array(self.sample_volume_size).astype(int)
            self.sample_stride = np.array(self.sample_stride).astype(int)
            # Number of valid chunk positions per dataset, then flattened counts.
            self.sample_size = [count_volume(self.volume_size[x], self.sample_volume_size, self.sample_stride)
                                for x in range(len(self.volume_size))]
            self.sample_num = np.array([np.prod(x) for x in self.sample_size])
            self.sample_num_a = np.sum(self.sample_num)
            self.sample_num_c = np.cumsum([0] + list(self.sample_num))
            self.iter_num = max(iter_num, self.sample_num_a)
            print('Dataset chunks that will be iterated over: {}'.format(self.iter_num))
    def __len__(self):
        if not self.cfg.SSL.USE_PREP_DATASET:
            return self.iter_num
        else:
            with h5py.File(self.chunks_path, 'r') as f:
                return len(f['id'])
    def __getitem__(self, idx):
        return self.create_sample_pair(idx)
    def create_sample_pair(self, idx):
        '''Create a sample pair that will be used for contrastive learning.
        In 'train' mode the augmentor produces the pair; otherwise the raw
        chunk is returned with a channel axis prepended.
        '''
        if not self.cfg.SSL.USE_PREP_DATASET:
            sample = self.reject_sample()
            # NOTE(review): in this on-the-fly branch unique_label/gt_label are
            # never assigned, so the returns below raise NameError -- confirm
            # this path is only used with USE_PREP_DATASET set.
        else:
            with h5py.File(self.chunks_path, 'r') as f:
                sample = f['chunk'][idx]
                unique_label = int(f['id'][idx])
                if 'gt' in list(f.keys()):
                    gt_label = int(f['gt'][idx])
                else:
                    gt_label = None
        if sample.ndim > 3:
            sample = np.squeeze(sample)
        if self.cl_mode == 'train':
            sample_pair = self.augmentor(sample)
            return (sample_pair, unique_label, gt_label)
        else:
            return (np.expand_dims(sample, axis=0).copy(), unique_label, gt_label)
    def create_chunk_volume(self):
        '''
        Function creates small chunk from input volume that is processed
        into the training model.
        '''
        pos = self.get_pos(self.sample_volume_size)
        pos, out_vol, out_label = self.crop_with_pos(pos, self.sample_volume_size)
        return pos, self.create_masked_input(out_vol, out_label)
    def create_masked_input(self, vol: np.ndarray, label: np.ndarray) -> np.ndarray:
        '''
        Create masked input volume, that is pure EM where the mask is not 0. Otherwise all
        values set to 0. Returns the prepared mask.
        :params vol (numpy.ndarray): volume that is EM input.
        :params label (numpy.ndarray): associated label volume.
        '''
        vol[np.where(label == 0)] = 0
        return np.array(vol)
    def get_input(self):
        '''Get input volume and labels (single file or a folder of slices).'''
        emfns = sorted(glob.glob(self.cfg.DATASET.EM_PATH + '*.' + self.cfg.DATASET.FILE_FORMAT))
        labelfns = sorted(glob.glob(self.cfg.DATASET.LABEL_PATH + '*.' + self.cfg.DATASET.FILE_FORMAT))
        if len(emfns) == 1:
            vol = readvol(emfns[0])
            label = readvol(labelfns[0])
        else:
            vol = folder2Vol(chunk_size=self.cfg.DATASET.CHUNK_SIZE, fns=emfns, file_format=self.cfg.DATASET.FILE_FORMAT)
            label = folder2Vol(chunk_size=self.cfg.DATASET.CHUNK_SIZE, fns=labelfns, file_format=self.cfg.DATASET.FILE_FORMAT)
        return vol, label
    def crop_with_pos(self, pos, vol_size):
        # The EM crop is normalised to [0, 1]; the label crop keeps raw ids.
        out_volume = (crop_volume(
            self.volume, vol_size, pos[1:])/255.0).astype(np.float32)
        out_label = crop_volume(
            self.label, vol_size, pos[1:])
        return pos, out_volume, out_label
    def get_pos(self, vol_size):
        # pos holds [dataset id, z, y, x].
        pos = [0, 0, 0, 0]
        # pick a dataset
        # NOTE(review): self.sample_num is a numpy array; randint(0, array - 1)
        # relies on it effectively being a single count -- confirm.
        did = self.index_to_dataset(random.randint(0, self.sample_num - 1))
        pos[0] = did
        # pick a position
        tmp_size = count_volume(
            self.volume_size[did], vol_size, self.sample_stride)
        tmp_pos = [random.randint(0, tmp_size[x]-1) * self.sample_stride[x]
                   for x in range(len(tmp_size))]
        pos[1:] = tmp_pos
        return pos
    def index_to_dataset(self, index):
        # Map a flat sample index to the dataset whose cumulative range holds it.
        return np.argmax(index < self.sample_num_c) - 1
    def reject_sample(self):
        '''function makes sure that sample contains actual objects that are
        sufficiently large enough.'''
        # NOTE(review): loops forever if no chunk ever contains a labeled
        # (non-zero) voxel.
        while True:
            _, sample = self.create_chunk_volume()
            if np.count_nonzero(sample) > 0:
                return sample
| StarcoderdataPython |
# Get even numbers (divisible by 2) from a list using an anonymous (lambda) function
# Build the candidate list 1..20, keep only the even values, and print them.
numberlist = list(range(1, 21))
answer = [number for number in numberlist if number % 2 == 0]
print("the numbers divisible by 2 are :\n", answer)
# A lambda passed to filter() could equally be used as the predicate here.
| StarcoderdataPython |
11203026 | from pythonforandroid.recipe import PythonRecipe
class PycryptodomeRecipe(PythonRecipe):
    """python-for-android build recipe for the pycryptodome package."""
    version = '3.4.6'
    url = 'https://github.com/Legrandin/pycryptodome/archive/v{version}.tar.gz'
    depends = ['setuptools', 'cffi']

    def get_recipe_env(self, arch=None, with_flags_in_cc=True):
        """Extend the base build env so shared objects are linked with the
        cross-compiler ($CC) instead of the host toolchain."""
        env = super(PycryptodomeRecipe, self).get_recipe_env(arch, with_flags_in_cc)
        linker_flags = ' -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions'
        env['LDSHARED'] = env['CC'] + linker_flags
        return env


recipe = PycryptodomeRecipe()
| StarcoderdataPython |
1725479 | <reponame>supdrewin/linux-enable-ir-emitter
import os
import yaml
import sys
import logging
from typing import List
from globals import SAVE_DRIVER_FILE_PATH, ExitCode
from driver.Driver import Driver
class DriverSerializer:
    """(De)serialization helpers for Driver objects stored on disk."""

    @staticmethod
    def _deserialize_saved_drivers() -> List[object]:
        """Load all drivers saved in globals.SAVE_DRIVER_FILE_PATH

        No error catching or type checking

        Returns:
            List of saved driver
        """
        with open(SAVE_DRIVER_FILE_PATH, "r") as save_file:
            return list(yaml.load_all(save_file, Loader=yaml.Loader))

    @staticmethod
    def load_saved_drivers() -> Optional[List[Driver]]:
        # FIX: the original annotation "List[Driver] or None" evaluates to
        # just List[Driver] at definition time; Optional expresses the intent.
        """Load all drivers saved in globals.SAVE_DRIVER_FILE_PATH

        Returns:
            List of saved driver
            None if no driver is saved (exits the process on a corrupted file)
        """
        try:
            if os.path.exists(SAVE_DRIVER_FILE_PATH):
                # A dummy instance lets us compare attribute sets to detect
                # YAML payloads that are not genuine Driver objects.
                dummy_driver = Driver([0], 0, 0, '')
                deserialized = DriverSerializer._deserialize_saved_drivers()
                for driver in deserialized:
                    assert(isinstance(driver, Driver) and dir(dummy_driver) == dir(driver))
                return deserialized
            logging.error("No driver is currently saved.")
        except Exception:  # narrowed from a bare except; any failure means corruption
            logging.critical("The driver file is corrupted.")
            logging.info("Execute 'linux-enable-ir-emitter fix driver' to reset the file.")
            sys.exit(ExitCode.FAILURE)

    @staticmethod
    def save_drivers(driver_list: List[Driver]) -> None:
        """Save all drivers in globals.SAVE_DRIVER_FILE_PATH

        Args:
            driver_list: drivers to save
        """
        with open(SAVE_DRIVER_FILE_PATH, "w") as save_driver_file:
            save_driver_file.write("#Caution: any manual modification of this file may corrupt the operation of the program! You must therefore be very careful.\n")
            save_driver_file.write("#Please consult https://github.com/EmixamPP/linux-enable-ir-emitter/wiki/Manual-configuration before.\n")
            save_driver_file.write("#If you currupt the driver file: execute 'linux-enable-ir-emitter fix driver' to reset the file.\n\n")
            yaml.dump_all(driver_list, save_driver_file)

    @staticmethod
    def add_driver(driver: Driver) -> None:
        """Add a driver to file globals.SAVE_DRIVER_FILE_PATH

        Args:
            driver: driver to add
        """
        saved_drivers_list = None
        try:
            saved_drivers_list = DriverSerializer._deserialize_saved_drivers()
            # if the device is already saved, drop its old entry so it is updated
            for saved_driver in saved_drivers_list.copy():
                if saved_driver.device == driver.device:
                    saved_drivers_list.remove(saved_driver)
            saved_drivers_list.append(driver)
        except Exception:  # file corrupted or nothing saved yet: start fresh
            saved_drivers_list = [driver]
        DriverSerializer.save_drivers(saved_drivers_list)
| StarcoderdataPython |
3586889 | import argparse
import requests
def TEMPLATE(labels) -> str:
    """Render the generated module source defining ATOMIC_NUMBER_LABELS.

    PEP 8 (E731) discourages assigning a lambda to a name; a ``def`` keeps the
    same callable interface while gaining a real name and this docstring.
    """
    return f"""\
ATOMIC_NUMBER_LABELS = {str(labels)}
"""
def main(args: argparse.Namespace):
    """Fetch the periodic-table JSON and emit the atomic-number label module."""
    payload = requests.get(args.input).json()
    symbol_by_number = {
        entry["number"]: entry["symbol"] for entry in payload["elements"]
    }
    rendered = TEMPLATE(symbol_by_number)
    if not args.stdout:
        with open(args.output, "w") as fh:
            fh.write(rendered)
        print(f"Wrote {len(symbol_by_number)} labels to {args.output}")
    else:
        print(rendered)
if __name__ == "__main__":
    # CLI entry point: fetch the periodic-table JSON and write the labels module.
    parser = argparse.ArgumentParser(
        description="Create labels for atomic numbers."
    )
    # Source JSON; defaults to the public Periodic-Table-JSON dataset.
    parser.add_argument(
        "-i",
        "--input",
        help="URL to the JSON file containing the atomic numbers.",
        default="https://raw.githubusercontent.com/Bowserinator/Periodic-Table-JSON/master/PeriodicTableJSON.json",
    )
    # Destination module path inside the package.
    parser.add_argument(
        "-o",
        "--output",
        help="Output file name.",
        default="ocpmodels/datasets/embeddings/atomic_number_labels.py",
    )
    # When set, print the generated module instead of writing it.
    parser.add_argument(
        "-s",
        "--stdout",
        help="Output to stdout instead of a file.",
        action="store_true",
    )
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
def author_picture():
    """Return the fixed list of author-portrait image URLs (COS-hosted)."""
    author_picture_list = [
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "dd4f56aba5524d28beac93dbb2770783.jpg",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "3a09d554769e45fe9c1f68e58d44350a.jpg",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "7f13e9f1d0114525b7f9223ca8a81555.jpg",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "ab6dc8ddca3d44d8804c6350fec60e4d.jpg",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "861ba67ea8114864b336825b58a1801e.jpg"
    ]
    return author_picture_list
def work_picture():
    """Return the fixed list of work/showcase image URLs (COS-hosted)."""
    work_picture_list = [
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "ff2caee02c5347e58e4b649cd799fbe6.png",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "dc6459892d7b4c8a8640bc87ad44ddc9.png",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "0d29185e565048618efc5ac3eea9c818.png",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "fcab5c67cd7643748ca1196fad9fa405.jpg",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "0567a1d739da4352bb25389dee4588da.png",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "b667c94d61054affa71d8a6c19f28db7.jpg",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "593c6672eb564b6fbb54e6960d98ce2d.jpg",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "57fe966743674ea3883f179fb303ae4b.png",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "b60268cf66794fc08f6d1376ec3bcfbb.jpg",
        "https://edu-test-1255999742.cos.ap-chengdu.myqcloud.com/portrait/20190612/"
        "4d3850efa5d6443ea65d296520af25d7.jpg"
    ]
    return work_picture_list
def turtle_code():
    """Return a sample turtle-graphics program as source text (literal payload)."""
    code = "import turtle\n\n" \
           "t = turtle.Turtle()\n" \
           "s = turtle.Screen()\n" \
           "s.bgpic('userupload/sucai/3702/20200118/bgimg.jpg')\n" \
           "t.begin_fill()\n" \
           "turtle.colormode(255)\n" \
           "t.forward(150)\n" \
           "t.right(90)\n" \
           "t.forward(170)\n" \
           "t.right(90)\n" \
           "t.forward(150)\n" \
           "t.right(90)\n" \
           "t.forward(170)\n" \
           "t.fillcolor(250, 255, 230)\n" \
           "t.end_fill()\n" \
           "t.right(30)\n" \
           "t.begin_fill()\n" \
           "t.fillcolor(255, 120, 60)\n" \
           "t.forward(150)\n" \
           "t.right(120)\n" \
           "t.forward(150)\n" \
           "t.end_fill()\n\n" \
           "print('abc')\n"
    return code
def wrong_code():
    """Return a sample turtle program whose last line references an undefined
    name (presumably used to exercise error reporting — confirm with callers)."""
    code = "import turtle\n\n" \
           "t = turtle.Turtle()\n" \
           "t.forward(150)\n" \
           "print(abc)\n"
    return code
def pygame_code():
    """Return a sample pygame program (mouse-cursor demo) as source text."""
    code = "import pygame\n\n" \
           "from pygame.locals import *\n" \
           "background_image = 'userupload/sucai/9816/2019-11-8/pygameBG.jpg'\n" \
           "mouse_image = 'userupload/sucai/9816/pygamemouse.png'\n" \
           "pygame.init()\n" \
           "screen = pygame.display.set_mode((640, 480), 0, 32)\n" \
           "pygame.display.set_caption('hello world')\n" \
           "background = pygame.image.load(background_image)\n" \
           "mouse_cursor = pygame.image.load(mouse_image)\n" \
           "while True:\n" \
           "    screen.blit(background, (0, 0))\n" \
           "    x, y = pygame.mouse.get_pos()\n" \
           "    x -= mouse_cursor.get_width()/2\n" \
           "    y -= mouse_cursor.get_height()/2\n" \
           "    screen.blit(mouse_cursor, (x, y))\n" \
           "    pygame.display.update()"
    return code
def multiple_files_code(file_name, content):
main_code = f"from {file_name} import hello\n\na = hello()\nprint(a)\n\n"
file_code = f"def hello():\n s = '{content}'\n\n return s"
return main_code, file_code
def jieba_files_code(file_name):
    """Return a (main_code, file_code) pair: a jieba segmentation demo program
    and the user-dictionary text file it loads (both as literal source text)."""
    main_code = 'import jieba\n' \
                'import jieba.posseg as pseg\n\n' \
                f'jieba.load_userdict("{file_name}.txt")\n' \
                'jieba.add_word("石墨烯")\n' \
                'jieba.add_word("凱特琳")\n' \
                'jieba.del_word("自定义词")\n' \
                'test_sent = (\n' \
                '    "李小福是创新办主任也是云计算方面的专家; 什么是八一双鹿\n"\n' \
                '    "例如我输入一个带“韩玉赏鉴”的标题,在自定义词库中也增加了此词为N类\n"\n' \
                '    "「台中」正確應該不會被切開。mac上可分出「石墨烯」;此時又可以分出來凱特琳了。"\n' \
                ')\n' \
                'words = jieba.cut(test_sent)\n' \
                'print("/".join(words))\n' \
                'print("=" * 40)\n' \
                'result = pseg.cut(test_sent)\n' \
                'for w in result:\n' \
                '    print(w.word, "/", w.flag, ", ", end=" ")\n' \
                'print("\n" + "=" * 40)\n' \
                'terms = jieba.cut("easy_install is great")\n' \
                'print("/".join(terms))\n' \
                'terms = jieba.cut("python 的正则表达式是好用的")\n' \
                'print("/".join(terms))\n' \
                'print("=" * 40)\n' \
                'testlist = [\n' \
                '    ("今天天气不错", ("今天", "天气")),\n' \
                '    ("如果放到post中将出错。", ("中", "将")),\n' \
                '    ("我们中出了一个叛徒", ("中", "出")),\n' \
                ']\n' \
                'for sent, seg in testlist:\n' \
                '    print("/".join(jieba.cut(sent, HMM=False)))\n' \
                '    word = "".join(seg)\n' \
                '    print(f"{word} Before: {jieba.get_FREQ(word)}, After: {jieba.suggest_freq(seg, True)}")\n' \
                '    print("/".join(jieba.cut(sent, HMM=False)))\n' \
                '    print("-" * 40)\n'
    # Contents of the user dictionary file: "word [freq] [pos-tag]" per line.
    file_code = '创新办 3 i\n' \
                '云计算 5\n' \
                '凱特琳 nz\n' \
                '台中'
    return main_code, file_code
def matplotlib_code():
    """Return a sample matplotlib plotting program as source text."""
    code = 'import matplotlib.pyplot as plt\n' \
           'import numpy\n\n' \
           't = numpy.arange(0., 5., 0.2)\n' \
           'plt.plot(t, t, "r--", t, t ** 2, "bs", t, t ** 3, "g^")\n' \
           'plt.show()\n' \
           'plt.ylabel("no data")\n' \
           'plt.show()\n'
    return code
def three_dimensional_code():
    """Return a sample CadQuery (3-D modelling) program as source text.

    BUG FIX: the original had a stray trailing line-continuation backslash on
    the last string line, which joined ``return code`` onto the assignment and
    made the whole module a SyntaxError; the backslash is removed.
    """
    code = 'import cadquery as cq\n\n' \
           'model = cq.Workplane("XY")\n' \
           'model = model.sphere(20)\n' \
           'show_model(model, cq)\n'
    return code
"""
import io
from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeCone
from OCC.Core.TopLoc import TopLoc_Location
from OCC.Core.TopoDS import TopoDS_Shape
from OCC.Core.gp import gp_Pnt, gp_Trsf, gp_Vec, gp_Ax1, gp_Dir
from OCC.Display.OCCViewer import rgb_color
import cadquery as cq
length = 80.0
width = 60.0
height = 100.0
thickness = 10.0
center_hole_dia = 22.0
cbore_hole_diameter = 2.4
cbore_inset = 12.0
cbore_diameter = 4.4
cbore_depth = 2.1
result = cq.Workplane("XY").box(length, height, thickness).faces(">Z").workplane().hole(center_hole_dia).faces(">Z").workplane().rect(length - cbore_inset, height - cbore_inset, forConstruction=True).vertices().cboreHole(cbore_hole_diameter, cbore_diameter, cbore_depth).edges("|Z").fillet(2.0)
if __name__ == "__main__":
s = io.StringIO()
cq.exporters.exportShape(result, cq.exporters.ExportTypes.STL, s, 0.1)
print(s.getvalue())
"""
def robot_code():
    """Return a sample 'robot' module demo program as source text."""
    code = 'import robot\n\n' \
           'r=robot.robot()\n' \
           'r.up(1)\n' \
           'r.nod(1)\n'
    return code
| StarcoderdataPython |
356375 | <gh_stars>0
###
# Copyright 2019 <NAME>, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
""" List Command for RDMC """
from argparse import ArgumentParser
import redfish.ris
from rdmc_base_classes import RdmcCommandBase, add_login_arguments_group
from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS, InvalidCommandLineError, \
Encryption
class ListCommand(RdmcCommandBase):
    """List command: displays the current value(s) of properties within a
    selected type, including reserved properties (unlike ``get``)."""
    def __init__(self, rdmcObj):
        """Initialize the list command and cache the helper sub-commands."""
        RdmcCommandBase.__init__(self,\
            name='list',\
            usage='list [OPTIONS]\n\n\tDisplays the current values of the ' \
                'properties within\n\ta selected type including'\
                ' reserved properties\n\texample: list\n\n\tNOTE: If ' \
                'you wish to not list all the reserved properties\n\t ' \
                ' run the get command instead',\
            summary='Displays the current value(s) of a' \
                ' property(ies) within a selected type including'\
                ' reserved properties.',\
            aliases=['ls'],\
            argparser=ArgumentParser())
        self.definearguments(self.parser)
        self._rdmc = rdmcObj
        # Reuse the sibling commands for login/select/get behaviour.
        self.lobobj = rdmcObj.commands_dict["LoginCommand"](rdmcObj)
        self.selobj = rdmcObj.commands_dict["SelectCommand"](rdmcObj)
        self.getobj = rdmcObj.commands_dict["GetCommand"](rdmcObj)

    def run(self, line):
        """ Wrapper function for main list function

        :param line: command line input
        :type line: string.
        """
        try:
            (options, args) = self._parse_arglist(line)
        except (InvalidCommandLineErrorOPTS, SystemExit):
            # Asking for help is a success; anything else is a usage error.
            if ("-h" in line) or ("--help" in line):
                return ReturnCodes.SUCCESS
            else:
                raise InvalidCommandLineErrorOPTS("")

        self.listvalidation(options)

        fvals = (None, None)

        if options.filter:
            try:
                # Strip one level of matching surrounding quotes, then split
                # the "[attribute]=[value]" pair.
                if (str(options.filter)[0] == str(options.filter)[-1])\
                        and str(options.filter).startswith(("'", '"')):
                    options.filter = options.filter[1:-1]

                (sel, val) = options.filter.split('=')
                fvals = (sel.strip(), val.strip())
            except:
                raise InvalidCommandLineError("Invalid filter" \
                    " parameter format [filter_attribute]=[filter_value]")

        # uselist=False keeps reserved properties in the output.
        self.getobj.getworkerfunction(args, options, filtervals=fvals, uselist=False)

        return ReturnCodes.SUCCESS

    def listvalidation(self, options):
        """ List data validation function

        :param options: command line options
        :type options: list.
        """
        inputline = list()

        if self._rdmc.app.config._ac__format.lower() == 'json':
            options.json = True

        try:
            _ = self._rdmc.app.current_client
        except:
            # Not logged in yet: build a login command line from either the
            # explicit options or the stored configuration, then log in.
            if options.user or options.password or options.url:
                if options.url:
                    inputline.extend([options.url])
                if options.user:
                    if options.encode:
                        options.user = Encryption.decode_credentials(options.user)
                    inputline.extend(["-u", options.user])
                if options.password:
                    if options.encode:
                        options.password = Encryption.decode_credentials(options.password)
                    inputline.extend(["-p", options.password])
                if options.https_cert:
                    inputline.extend(["--https", options.https_cert])
            else:
                if self._rdmc.app.config.get_url():
                    inputline.extend([self._rdmc.app.config.get_url()])
                if self._rdmc.app.config.get_username():
                    inputline.extend(["-u", self._rdmc.app.config.get_username()])
                if self._rdmc.app.config.get_password():
                    inputline.extend(["-p", self._rdmc.app.config.get_password()])
                if self._rdmc.app.config.get_ssl_cert():
                    inputline.extend(["--https", self._rdmc.app.config.get_ssl_cert()])

        # Three cases: login + select, select only, or validate the current
        # selection (possibly refreshing it).
        if inputline and options.selector:
            if options.includelogs:
                inputline.extend(["--includelogs"])
            if options.path:
                inputline.extend(["--path", options.path])

            inputline.extend(["--selector", options.selector])
            self.lobobj.loginfunction(inputline)
        elif options.selector:
            if options.includelogs:
                inputline.extend(["--includelogs"])
            if options.path:
                inputline.extend(["--path", options.path])
            if options.ref:
                inputline.extend(["--refresh"])

            inputline.extend([options.selector])
            self.selobj.selectfunction(inputline)
        else:
            try:
                inputline = list()
                selector = self._rdmc.app.selector
                if options.includelogs:
                    inputline.extend(["--includelogs"])
                if options.path:
                    inputline.extend(["--path", options.path])
                if options.ref:
                    inputline.extend(["--refresh"])

                inputline.extend([selector])
                self.selobj.selectfunction(inputline)
            except redfish.ris.NothingSelectedError:
                raise redfish.ris.NothingSelectedError

    def definearguments(self, customparser):
        """ Wrapper function for new command main function

        :param customparser: command line input
        :type customparser: parser.
        """
        if not customparser:
            return

        add_login_arguments_group(customparser, full=True)

        customparser.add_argument(
            '--selector',
            dest='selector',
            help="Optionally include this flag to select a type to run"\
                " the current command on. Use this flag when you wish to"\
                " select a type without entering another command, or if you"\
                " wish to work with a type that is different from the one"\
                " you currently have selected.",
            default=None,
        )
        customparser.add_argument(
            '--filter',
            dest='filter',
            help="Optionally set a filter value for a filter attribute."\
                " This uses the provided filter for the currently selected"\
                " type. Note: Use this flag to narrow down your results. For"\
                " example, selecting a common type might return multiple"\
                " objects that are all of that type. If you want to modify"\
                " the properties of only one of those objects, use the filter"\
                " flag to narrow down results based on properties."\
                "\t\t\t\t\t Usage: --filter [ATTRIBUTE]=[VALUE]",
            default=None,
        )
        customparser.add_argument(
            '-j',
            '--json',
            dest='json',
            action="store_true",
            help="Optionally include this flag if you wish to change the"\
                " displayed output to JSON format. Preserving the JSON data"\
                " structure makes the information easier to parse.",
            default=False
        )
        customparser.add_argument(
            '--logout',
            dest='logout',
            action="store_true",
            help="Optionally include the logout flag to log out of the"\
                " server after this command is completed. Using this flag when"\
                " not logged in will have no effect",
            default=None,
        )
        customparser.add_argument(
            '--refresh',
            dest='ref',
            action="store_true",
            help="Optionally reload the data of selected type and clear "\
                "patches from current selection.",
            default=False,
        )
5070336 | <gh_stars>0
import numpy as np
import torch
class DistancesNumpy:
    """A collection of nearly all known distance functions implemented with numpy operators"""

    @staticmethod
    def braycurtis(a, b):
        """Bray-Curtis distance."""
        return np.sum(np.fabs(a - b)) / np.sum(np.fabs(a + b))

    @staticmethod
    def canberra(a, b):
        """Canberra distance."""
        return np.sum(np.fabs(a - b) / (np.fabs(a) + np.fabs(b)))

    @staticmethod
    def chebyshev(a, b):
        """Chebyshev distance: max over |a - b|.

        BUG FIX: the original returned max(a - b) without the absolute value,
        which is wrong whenever b exceeds a on the maximizing coordinate.
        """
        return np.amax(np.fabs(a - b))

    @staticmethod
    def cityblock(a, b):
        """City-block distance (alias of Manhattan).

        BUG FIX: the original called ``self.manhattan`` inside a
        @staticmethod, where ``self`` is undefined (NameError at call time).
        """
        return DistancesNumpy.manhattan(a, b)

    @staticmethod
    def correlation(a, b):
        """Correlation distance (1 - Pearson correlation)."""
        a = a - np.mean(a)
        b = b - np.mean(b)
        return 1.0 - np.mean(a * b) / np.sqrt(np.mean(np.square(a)) * np.mean(np.square(b)))

    @staticmethod
    def cosine(a, b):
        """Cosine distance (1 - cosine similarity)."""
        return 1 - np.dot(a, b) / (np.sqrt(np.dot(a, a)) * np.sqrt(np.dot(b, b)))

    @staticmethod
    def dice(a, b):
        """Dice dissimilarity for boolean vectors."""
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))

    @staticmethod
    def euclidean(a, b):
        """Euclidean (L2) distance."""
        return np.sqrt(np.sum(np.dot((a - b), (a - b))))

    @staticmethod
    def hamming(a, b, w = None):
        """Hamming distance: weighted fraction of disagreeing entries."""
        if w is None:
            w = np.ones(a.shape[0])
        return np.average(a != b, weights = w)

    @staticmethod
    def hellinger_distance(p, q):
        """Hellinger distance between rows of two distributions."""
        return np.linalg.norm((np.sqrt(p) - np.sqrt(q)), ord=2, axis=1) / np.sqrt(2)

    @staticmethod
    def jaccard(u, v):
        """Jaccard-Needham dissimilarity for boolean vectors."""
        return np.double(np.bitwise_and((u != v), np.bitwise_or(u != 0, v != 0)).sum()) / np.double(np.bitwise_or(u != 0, v != 0).sum())

    @staticmethod
    def kulsinski(a, b):
        """Kulsinski dissimilarity for boolean vectors."""
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return (ntf + nft - ntt + len(a)) / (ntf + nft + len(a))

    @staticmethod
    def mahalanobis(a, b, vi):
        """Mahalanobis distance given the inverse covariance matrix *vi*."""
        return np.sqrt(np.dot(np.dot((a - b), vi),(a - b).T))

    @staticmethod
    def manhattan(a, b):
        """Manhattan (L1) distance."""
        return np.sum(np.fabs(a - b))

    @staticmethod
    def matching(a, b):
        """Matching distance (alias of Hamming).

        BUG FIX: the original referenced undefined ``self`` in a @staticmethod.
        """
        return DistancesNumpy.hamming(a, b)

    @staticmethod
    def minkowski(a, b, p):
        """Minkowski distance of order *p*."""
        return np.power(np.sum(np.power(np.fabs(a - b), p)), 1 / p)

    @staticmethod
    def rogerstanimoto(a, b):
        """Rogers-Tanimoto dissimilarity for boolean vectors."""
        nff = ((1 - a) * (1 - b)).sum()
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))

    @staticmethod
    def russellrao(a, b):
        """Russell-Rao dissimilarity for boolean vectors."""
        return float(len(a) - (a * b).sum()) / len(a)

    @staticmethod
    def seuclidean(a, b, V):
        """Standardized Euclidean distance with component variances *V*."""
        return np.sqrt(np.sum((a - b) ** 2 / V))

    @staticmethod
    def sokalmichener(a, b):
        """Sokal-Michener dissimilarity for boolean vectors."""
        nff = ((1 - a) * (1 - b)).sum()
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))

    @staticmethod
    def sokalsneath(a, b):
        """Sokal-Sneath dissimilarity for boolean vectors."""
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return float(2.0 * (ntf + nft)) / np.array(ntt + 2.0 * (ntf + nft))

    @staticmethod
    def sqeuclidean(a, b):
        """Squared Euclidean distance."""
        return np.sum(np.dot((a - b), (a - b)))

    @staticmethod
    def wminkowski(a, b, p, w):
        """Weighted Minkowski distance of order *p* with weights *w*."""
        return np.power(np.sum(np.power(np.fabs(w * (a - b)), p)), 1 / p)

    @staticmethod
    def yule(a, b):
        """Yule dissimilarity for boolean vectors."""
        nff = ((1 - a) * (1 - b)).sum()
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return float(2.0 * ntf * nft / np.array(ntt * nff + ntf * nft))
class DistancesTorch:
    """A collection of nearly all known distance functions implemented with torch operators"""

    @staticmethod
    def braycurtis(a, b):
        """Bray-Curtis distance."""
        return torch.sum(torch.abs(a - b)) / torch.sum(torch.abs(a + b))

    @staticmethod
    def canberra(a, b):
        """Canberra distance."""
        return torch.sum(torch.abs(a - b) / (torch.abs(a) + torch.abs(b)))

    @staticmethod
    def chebyshev(a, b):
        """Chebyshev distance: max over |a - b|.

        BUG FIX: the original returned max(a - b) without the absolute value.
        """
        return torch.max(torch.abs(a - b))

    @staticmethod
    def cityblock(a, b):
        """City-block distance (alias of Manhattan).

        BUG FIX: the original called ``self.manhattan`` inside a
        @staticmethod, where ``self`` is undefined (NameError at call time).
        """
        return DistancesTorch.manhattan(a, b)

    @staticmethod
    def correlation(a, b):
        """Correlation distance (1 - Pearson correlation)."""
        a = a - torch.mean(a)
        b = b - torch.mean(b)
        return 1.0 - torch.mean(a * b) / torch.sqrt(torch.mean(torch.square(a)) * torch.mean(torch.square(b)))

    @staticmethod
    def cosine(a, b):
        """Cosine distance (1 - cosine similarity)."""
        return 1 - torch.dot(a, b) / (torch.sqrt(torch.dot(a, a)) * torch.sqrt(torch.dot(b, b)))

    @staticmethod
    def dice(a, b):
        """Dice dissimilarity for boolean vectors."""
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return ((ntf + nft) / torch.tensor(2.0 * ntt + ntf + nft, dtype=torch.float32)).type(dtype=torch.float32)

    @staticmethod
    def euclidean(a, b):
        """Euclidean (L2) distance."""
        return torch.sqrt(torch.sum(torch.dot((a - b), (a - b))))

    @staticmethod
    def hamming(a, b):
        """Pairwise Hamming distance via cdist with p=0 (expects 2-D inputs)."""
        return torch.cdist(a, b, p=0)

    @staticmethod
    def hellinger(p, q):
        """Hellinger distance between rows of two distributions."""
        return torch.norm((torch.sqrt(p) - torch.sqrt(q)), p=2, dim=1) / np.sqrt(2)

    @staticmethod
    def jaccard(u, v):
        """Jaccard-Needham dissimilarity for boolean vectors."""
        return (torch.bitwise_and((u != v), torch.bitwise_or(u != 0, v != 0)).sum()).type(dtype=torch.float64) \
               / (torch.bitwise_or(u != 0, v != 0).sum()).type(dtype=torch.float64)

    @staticmethod
    def kulsinski(a, b):
        """Kulsinski dissimilarity for boolean vectors."""
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return (ntf + nft - ntt + a.shape[0]) / (ntf + nft + a.shape[0])

    @staticmethod
    def mahalanobis(a, b, vi):
        """Mahalanobis distance given the inverse covariance matrix *vi*."""
        return torch.sqrt(torch.dot(torch.dot((a - b), vi),(a - b).T))

    @staticmethod
    def manhattan(a, b):
        """Manhattan (L1) distance."""
        return torch.sum(torch.abs(a - b))

    @staticmethod
    def matching(a, b):
        """Matching distance (alias of Hamming).

        BUG FIX: the original referenced undefined ``self`` in a @staticmethod.
        """
        return DistancesTorch.hamming(a, b)

    @staticmethod
    def minkowski(a, b, p):
        """Minkowski distance of order *p*.

        BUG FIX: the original used ``torch.power``, which does not exist in
        the torch API; the correct function is ``torch.pow``.
        """
        return torch.pow(torch.sum(torch.pow(torch.abs(a - b), p)), 1 / p)

    @staticmethod
    def rogerstanimoto(a, b):
        """Rogers-Tanimoto dissimilarity for boolean vectors."""
        nff = ((1 - a) * (1 - b)).sum()
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return (2.0 * (ntf + nft)).type(dtype=torch.float32) \
               / (ntt + nff + (2.0 * (ntf + nft))).type(dtype=torch.float32)

    @staticmethod
    def russellrao(a, b):
        """Russell-Rao dissimilarity for boolean vectors."""
        return (a.shape[0] - (a * b).sum()).type(dtype=torch.float32) / a.shape[0]

    @staticmethod
    def seuclidean(a, b, V):
        """Standardized Euclidean distance with component variances *V*."""
        return torch.sqrt(torch.sum((a - b) ** 2 / V))

    @staticmethod
    def sokalmichener(a, b):
        """Sokal-Michener dissimilarity for boolean vectors."""
        nff = ((1 - a) * (1 - b)).sum()
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return (2.0 * (ntf + nft)).type(dtype=torch.float32) \
               / (ntt + nff + 2.0 * (ntf + nft)).type(dtype=torch.float32)

    @staticmethod
    def sokalsneath(a, b):
        """Sokal-Sneath dissimilarity for boolean vectors."""
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return (2.0 * (ntf + nft)).type(dtype=torch.float32) \
               / torch.tensor(ntt + 2.0 * (ntf + nft), dtype=torch.float32)

    @staticmethod
    def sqeuclidean(a, b):
        """Squared Euclidean distance."""
        return torch.sum(torch.dot((a - b), (a - b)))

    @staticmethod
    def wminkowski(a, b, p, w):
        """Weighted Minkowski distance of order *p* with weights *w*."""
        return torch.pow(torch.sum(torch.pow(torch.abs(w * (a - b)), p)), 1 / p)

    @staticmethod
    def yule(a, b):
        """Yule dissimilarity for boolean vectors."""
        nff = ((1 - a) * (1 - b)).sum()
        nft = ((1 - a) * b).sum()
        ntf = (a * (1 - b)).sum()
        ntt = (a * b).sum()
        return (2.0 * ntf * nft / torch.tensor(ntt * nff + ntf * nft)).type(dtype=torch.float32)
| StarcoderdataPython |
4999941 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-07-01 18:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration: creates the Resource model.

    initial = True

    dependencies = [
        # Resource.user references the project's configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Resource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Discriminates the kind of uploaded media.
                ('resource_type', models.CharField(choices=[(b'image', b'Image'), (b'audio', b'Audio')], max_length=255)),
                # Owning user; deleting the user cascades to their resources.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
1995892 | <gh_stars>0
import torch
from torch import nn
from torch.nn import functional as F
from .i_attention_layer import SqueezeExcitation_c64
from .i_attention_layer import SqueezeExcitation_c100
from .i_attention_layer import SpatialAttention
from .i_attention_layer import ConvBNReLU
from .i_attention_layer import SE
from .i_attention_layer import PowerIndex
# modified by zy 20210313
class CARAFE_3_sa_se_3_norm(nn.Module):
    def __init__(self, channels, compressed_channels=64, scale_factor=2, up_kernel=5, encoder_kernel=3):
        """ The unofficial implementation of the CARAFE module.

        The details are in "https://arxiv.org/abs/1905.02188".

        Args:
            channels c: The channel number of the input and the output.
            compressed_channels c_mid: The channel number after compression.
            scale_factor scale: The expected upsample scale.
            up_kernel k_up: The size of the reassembly kernel.
            encoder_kernel k_enc: The kernel size of the encoder.

        Returns:
            X: The upsampled feature map.
        """
        super(CARAFE_3_sa_se_3_norm, self).__init__()
        self.scale = scale_factor

        # Channel compressor: 1x1 conv reduces channels before kernel prediction.
        self.comp = ConvBNReLU(channels, compressed_channels, kernel_size=1, stride=1,
                               padding=0, dilation=1)
        # Content encoder: predicts (scale * k_up)^2 reassembly weights per pixel.
        self.enc = ConvBNReLU(compressed_channels, (scale_factor * up_kernel) ** 2, kernel_size=encoder_kernel,
                              stride=1, padding=encoder_kernel // 2, dilation=1,
                              use_relu=False)
        # Second encoder pass applied after the attention branches.
        self.enc2 = ConvBNReLU((scale_factor * up_kernel) ** 2, (scale_factor * up_kernel) ** 2, kernel_size=encoder_kernel,
                               stride=1, padding=encoder_kernel // 2, dilation=1,
                               use_relu=False)

        self.pix_shf = nn.PixelShuffle(scale_factor)
        self.upsmp = nn.Upsample(scale_factor=scale_factor, mode='nearest')
        self.unfold = nn.Unfold(kernel_size=up_kernel, dilation=scale_factor,
                                padding=up_kernel // 2 * scale_factor)

        # modified by zy 2021-03-13
        # c = 100
        self.se = SE((scale_factor * up_kernel) ** 2, 16)
        self.sa = SpatialAttention()

    def forward(self, X):
        b, c, h, w = X.size()
        h_, w_ = h * self.scale, w * self.scale

        W = self.comp(X)                                # b * m * h * w
        W = self.enc(W)  # modified by zy 2021-01-11: added an extra 3x3 conv
        W_ = W  # NOTE(review): alias, not a copy — the in-place ops below also
        # mutate W_, so "W += W_" doubles the attended tensor rather than adding
        # the pre-attention residual; confirm this is the intended behaviour.
        W *= self.sa(W)
        W += W_
        W_ = W  # NOTE(review): same aliasing applies to the SE branch below.
        W *= self.se(W)
        W += W_
        W = self.enc2(W)                                # b * 100 * h * w
        W = self.pix_shf(W)                             # b * 25 * h_ * w_
        # W = F.softmax(W, dim=1)                       # b * 25 * h_ * w_
        W = F.normalize(W, p=1, dim=1)  # L1-normalize each reassembly kernel

        X = self.upsmp(X)                               # b * c * h_ * w_
        X = self.unfold(X)                              # b * 25c * h_ * w_
        X = X.view(b, c, -1, h_, w_)                    # b * 25 * c * h_ * w_

        X = torch.einsum('bkhw,bckhw->bchw', [W, X])    # b * c * h_ * w_
        return X
if __name__ == '__main__':
    # Smoke test: upsample a (1, 16, 24, 24) tensor by the default factor 2.
    x = torch.Tensor(1, 16, 24, 24)
    carafe = CARAFE_3_sa_se_3_norm(16)
    oup = carafe(x)
    print(oup.size())
9721614 | # -*- coding: utf-8 -*-
"""column type"""
__all__ = ['ColumnType', 'DateColumnType', 'TimeColumnType', 'DateTimeColumnType', 'basic_column_type']
import abc
import datetime as dt
from .default import ValueFetcher
from pyqttable.editor import *
basic_column_type = [int, float, str, bool]
class ColumnType(metaclass=abc.ABCMeta):
    """Abstract column type.

    Converts cell data between its native Python format and the display
    string, and binds an EditorFactory used to create the in-cell editor.
    """

    # Editor factory to create cell editor in table
    EditorFactory = LineEditorFactory()

    @classmethod
    def make(cls, fetcher: ValueFetcher):
        """Build a ColumnType instance from a ValueFetcher."""
        klass = fetcher.get('type')
        if isinstance(klass, cls):
            return klass
        if klass in basic_column_type:
            return BasicColumnType.from_type(klass)
        if klass in (dt.datetime, dt.date, dt.time):
            return DateTimeColumnType.from_type(klass)
        raise TypeError(f'invalid type \'{klass}\'')

    def to_string(self, value):
        """Convert to the display string, raising ValueError on any failure."""
        try:
            return self.to_str(value)
        except Exception:
            raise ValueError(
                f'[{self.__class__.__name__}] '
                f'cannot convert \'{value}\' to string'
            )

    def to_value(self, string):
        """Convert back to the native value, raising ValueError on any failure."""
        try:
            return self.to_val(string)
        except Exception:
            raise ValueError(
                f'[{self.__class__.__name__}] '
                f'cannot convert \'{string}\' to value'
            )

    @abc.abstractmethod
    def to_str(self, value):
        """Convert data from original format to string"""
        ...

    @abc.abstractmethod
    def to_val(self, string):
        """Convert data from string to original format"""
        ...
class BasicColumnType(ColumnType):
    """Column type wrapping a primitive Python type (int/float/str/bool)."""

    def __init__(self, cls):
        self.cls = cls  # the wrapped primitive type

    @classmethod
    def from_type(cls, klass):
        """Pick the proper wrapper for *klass*; bool gets its own subclass."""
        if klass in (int, float, str):
            return cls(klass)
        if klass in (bool,):
            return BoolColumnType(klass)
        raise TypeError(f'invalid type \'{klass}\'')

    def to_str(self, value):
        """Render with str()."""
        return str(value)

    def to_val(self, string):
        """Parse by calling the wrapped type on the string."""
        return self.cls(string)
class BoolColumnType(BasicColumnType):
    """Column type for booleans, edited via a dedicated boolean editor."""

    # Boolean editor factory
    EditorFactory = BoolEditorFactory()

    def to_str(self, value):
        """Render truthiness as the literal 'True' / 'False'."""
        return str(bool(value))

    def to_val(self, string):
        """Only the exact string 'True' parses back to True."""
        return string == 'True'
class DateTimeColumnType(ColumnType):
    """Column type for datetime related variables"""

    # Datetime format to display in table cell
    DtFormat = '%Y-%m-%d %H:%M:%S'
    # Datetime format to display in cell editor
    EditorDtFormat = 'yyyy-MM-dd hh:mm:ss'
    # Datetime editor factory
    EditorFactory = DateTimeEditorFactory(DtFormat, EditorDtFormat)

    def __init__(self, cls):
        self.cls = cls  # the concrete datetime-family type this column holds

    @classmethod
    def from_type(cls, klass):
        """Create the concrete column type for datetime/date/time."""
        if klass == dt.datetime:
            return DateTimeColumnType(klass)
        elif klass == dt.date:
            return DateColumnType(klass)
        elif klass == dt.time:
            return TimeColumnType(klass)
        else:
            raise TypeError(f'invalid type \'{klass}\'')

    def to_str(self, value):
        """Format the value with DtFormat (value must match self.cls)."""
        assert isinstance(value, self.cls), \
            f'invalid {self.cls} given: \'{value}\''
        return value.strftime(self.DtFormat)

    def to_val(self, string):
        """Parse the display string back to a datetime.

        BUG FIX: strptime takes (date_string, format); the original passed
        the arguments swapped, so every parse raised ValueError.
        """
        return dt.datetime.strptime(string, self.DtFormat)
class DateColumnType(DateTimeColumnType):
    """Column type for datetime.date"""

    DtFormat = '%Y-%m-%d'
    EditorDtFormat = 'yyyy-MM-dd'
    EditorFactory = DateEditorFactory(DtFormat, EditorDtFormat)

    def to_val(self, string):
        """Parse via the datetime base class, then keep only the date part."""
        parsed = super().to_val(string)
        return parsed.date()
class TimeColumnType(DateTimeColumnType):
    """Column type for datetime.time"""

    DtFormat = '%H:%M:%S'
    EditorDtFormat = 'hh:mm:ss'
    EditorFactory = TimeEditorFactory(DtFormat, EditorDtFormat)

    def to_val(self, string):
        """Parse via the datetime base class, then keep only the time part."""
        parsed = super().to_val(string)
        return parsed.time()
if __name__ == '__main__':
    # Nothing to run when executed directly; this module only defines types.
    pass
109826 | <gh_stars>0
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script checks the output of 'ovn-sbctl list connections' for error
conditions.
"""
import sys
import os
import json
from collections import namedtuple
from subprocess import check_output, CalledProcessError
# Standard nagios exit/status codes.
NAGIOS_STATUS_OK = 0
NAGIOS_STATUS_WARNING = 1
NAGIOS_STATUS_CRITICAL = 2
NAGIOS_STATUS_UNKNOWN = 3

# Human-readable names for the status codes above.
NAGIOS_STATUS = {
    NAGIOS_STATUS_OK: "OK",
    NAGIOS_STATUS_WARNING: "WARNING",
    NAGIOS_STATUS_CRITICAL: "CRITICAL",
    NAGIOS_STATUS_UNKNOWN: "UNKNOWN",
}

# Result file read by nagios; written atomically via the .tmp sibling.
OUTPUT_FILE = "/var/lib/nagios/ovn_db_connections.out"
OVNSB_DB_CTL = "/var/run/ovn/ovnsb_db.ctl"
TMP_OUTPUT_FILE = OUTPUT_FILE + ".tmp"
# Number of DB connections expected on a healthy unit.
EXPECTED_CONNECTIONS = 2

# One check result: a nagios status code plus a message.
Alert = namedtuple("Alert", "status msg")
def get_uuid(connection):
    """Return the UUID string from an OVN DB connection record.

    The JSON field is a pair like ["uuid", "<value>"]; the value is at index 1.
    """
    record = connection["_uuid"]
    return record[1]
def check_role_target(connection):
    """Validate the target and role fields of an OVN DB connection."""
    uuid = get_uuid(connection)
    target = connection["target"]
    role = connection["role"]
    if target not in ("pssl:6642", "pssl:16642"):
        return Alert(
            NAGIOS_STATUS_CRITICAL,
            "{}: unexpected target: {}".format(uuid, target),
        )
    if role not in ("ovn-controller", ""):
        return Alert(
            NAGIOS_STATUS_CRITICAL,
            "{}: unexpected role: {}".format(uuid, role),
        )
    # Port 6642 without a role means RBAC is turned off.
    if target == "pssl:6642" and role == "":
        return Alert(
            NAGIOS_STATUS_WARNING, "{}: RBAC is disabled".format(uuid)
        )
    # Port 16642 is reserved for the role-less (admin) connection.
    if target == "pssl:16642" and role != "":
        return Alert(
            NAGIOS_STATUS_CRITICAL,
            "{}: target pssl:16642 should not be used by role {}".format(
                uuid, role
            ),
        )
    return Alert(NAGIOS_STATUS_OK, "{}: target and role are OK".format(uuid))
def check_read_only(connection):
    """Alert when an OVN DB connection is in (or may be in) read_only state."""
    uuid = get_uuid(connection)
    writable = connection["read_only"] is False
    if writable:
        return Alert(
            NAGIOS_STATUS_OK, "{}: connection is not read_only".format(uuid)
        )
    return Alert(
        NAGIOS_STATUS_CRITICAL, "{}: connection is read only".format(uuid)
    )
def check_connections(connections):
    """Run checks against OVN DB connections.

    Emits a critical alert when the number of connections differs from
    ``EXPECTED_CONNECTIONS``, runs the per-connection target/role and
    read-only checks, and asserts that exactly one 'ovn-controller'
    connection exists.

    :param connections: list of connection dicts from parse_output().
    :return: list of Alert tuples, one per check performed.
    """
    alerts = []
    controllers_count = 0
    if len(connections) != EXPECTED_CONNECTIONS:
        alerts.append(
            Alert(
                NAGIOS_STATUS_CRITICAL,
                # Use the constant in the message so the text cannot
                # drift from the comparison above (was hardcoded "2").
                "expected {} connections, got {}".format(
                    EXPECTED_CONNECTIONS, len(connections)
                ),
            )
        )
    for conn in connections:
        if conn["role"] == "ovn-controller":
            controllers_count += 1
        alerts.append(check_role_target(conn))
        alerts.append(check_read_only(conn))
    # assert that exactly 1 controller connection exists
    if controllers_count != 1:
        alerts.append(
            Alert(
                NAGIOS_STATUS_CRITICAL,
                "expected 1 ovn-controller connection, got {}".format(
                    controllers_count
                ),
            )
        )
    return alerts
def parse_output(raw):
    """Convert ``ovn-sbctl --format=json`` output into a list of dicts.

    The JSON payload carries a ``headings`` list plus one row per
    connection in ``data``; each row is zipped with the headings.
    """
    status = json.loads(raw)
    headings = status["headings"]
    return [dict(zip(headings, row)) for row in status["data"]]
def write_output_file(output):
    """Write results of checks to the defined location for nagios to check."""
    try:
        # Write to a temp file first so nagios never reads a partial file.
        with open(TMP_OUTPUT_FILE, "w") as output_file:
            output_file.write(output)
    except IOError as err:
        print(
            "Cannot write output file {}, error {}".format(
                TMP_OUTPUT_FILE, err
            )
        )
        sys.exit(1)
    # Atomic on POSIX: the final file appears fully written or not at all.
    os.rename(TMP_OUTPUT_FILE, OUTPUT_FILE)
def is_leader():
    """Check whether the current unit is OVN Southbound DB leader.

    Parses the "Role:" line from `ovs-appctl cluster/status`; returns
    False (with a diagnostic print) when the line is absent.
    """
    cmd = [
        "ovs-appctl",
        "-t",
        OVNSB_DB_CTL,
        "cluster/status",
        "OVN_Southbound",
    ]
    output = check_output(cmd).decode("utf-8")
    output_lines = output.split("\n")
    # Only one "Role:" line is expected; take the first match.
    role_line = [line for line in output_lines if line.startswith("Role:")]
    if len(role_line) > 0:
        _, role = role_line[0].split(":")
        return role.strip() == "leader"
    print("'Role:' line not found in the output of '{}'".format(" ".join(cmd)))
    return False
def aggregate_alerts(alerts):
    """Reduce results down to an overall single status based on the highest
    level.

    Counts critical and warning alerts, then builds a single
    "SEVERITY: detail" string; OK messages are collected but not shown.
    """
    total_crit = 0
    total_warn = 0
    msg_crit = []
    msg_warn = []
    msg_ok = []
    # Bucket each alert by severity.
    for alert in alerts:
        if alert.status == NAGIOS_STATUS_CRITICAL:
            total_crit += 1
            msg_crit.append(alert.msg)
        elif alert.status == NAGIOS_STATUS_WARNING:
            total_warn += 1
            msg_warn.append(alert.msg)
        else:
            msg_ok.append(alert.msg)
    severity = "OK"
    status_detail = ""
    if total_crit > 0:
        severity = "CRITICAL"
        # filter(None, ...) drops the empty initial status_detail so we
        # don't emit a leading "; ".
        status_detail = "; ".join(
            filter(
                None,
                [
                    status_detail,
                    "critical[{}]: {}".format(total_crit, msg_crit),
                ],
            )
        )
    if total_warn > 0:
        # CRITICAL always outranks WARNING in the reported severity.
        if severity != "CRITICAL":
            severity = "WARNING"
        status_detail = "; ".join(
            filter(
                None,
                [
                    status_detail,
                    "warnings[{}]: {}".format(total_warn, msg_warn),
                ],
            )
        )
    if total_crit == 0 and total_warn == 0:
        status_detail = "OVN DB connections are normal"
    return "{}: {}".format(severity, status_detail)
def run_checks():
    """Check health of OVN SB DB connections.

    Only the OVN Southbound DB leader performs the actual check; other
    units report a no-op OK so nagios does not flag them.  The result is
    always written to the nagios output file.
    """
    output = "UNKNOWN"
    try:
        if is_leader():
            cmd = ["ovn-sbctl", "--format=json", "list", "connection"]
            cmd_output = check_output(cmd).decode("utf-8")
            connections = parse_output(cmd_output)
            alerts = check_connections(connections)
            output = aggregate_alerts(alerts)
        else:
            output = "OK: no-op (unit is not the DB leader)"
    except CalledProcessError as error:
        # Fixed typo: was "UKNOWN", which is not a status keyword the
        # nagios-side consumers recognise.
        output = "UNKNOWN: {}".format(error.stdout.decode(errors="ignore"))
    write_output_file(output)
# Entry point: run the checks and write the nagios output file.
if __name__ == "__main__":
    run_checks()
| StarcoderdataPython |
#!/usr/bin/python
# coding: utf-8
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.Qsci import QsciScintilla, QsciLexerXML
import packtools_wrapper
class SimpleXMLEditor(QsciScintilla):
    """Scintilla-based XML editor widget with line numbers, a clickable
    marker margin, brace matching and XML syntax highlighting."""
    # Marker slot id used for the red right-arrow margin markers.
    ARROW_MARKER_NUM = 8
    def __init__(self, parent=None):
        super(SimpleXMLEditor, self).__init__(parent)
        # Set the default font
        font = QtGui.QFont()
        font.setFamily('Courier')
        font.setFixedPitch(True)
        font.setPointSize(10)
        self.setFont(font)
        self.setMarginsFont(font)
        # Margin 0 is used for line numbers
        fontmetrics = QtGui.QFontMetrics(font)
        self.setMarginsFont(font)
        # Width sized for 5-digit line numbers plus a small padding.
        self.setMarginWidth(0, fontmetrics.width("00000") + 6)
        self.setMarginLineNumbers(0, True)
        self.setMarginsBackgroundColor(QtGui.QColor("#cccccc"))
        # Clickable margin 1 for showing markers
        self.setMarginSensitivity(1, True)
        self.connect(self,
            QtCore.SIGNAL('marginClicked(int, int, Qt::KeyboardModifiers)'),
            self.on_margin_clicked)
        self.markerDefine(QsciScintilla.RightArrow,
            self.ARROW_MARKER_NUM)
        self.setMarkerBackgroundColor(QtGui.QColor("#ee1111"),
            self.ARROW_MARKER_NUM)
        # Brace matching: enable for a brace immediately before or after
        # the current position
        #
        self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
        # Current line visible with special background color
        self.setCaretLineVisible(True)
        self.setCaretLineBackgroundColor(QtGui.QColor("#ffe4e4"))
        # Set XML lexer
        # Set style for Python comments (style number 1) to a fixed-width
        # courier.
        lexer = QsciLexerXML()
        lexer.setDefaultFont(font)
        self.setLexer(lexer)
        self.SendScintilla(QsciScintilla.SCI_STYLESETFONT, 1, 'Courier')
        # Don't want to see the horizontal scrollbar at all
        # Use raw message to Scintilla here (all messages are documented
        # here: http://www.scintilla.org/ScintillaDoc.html)
        self.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 0)
        # not too small
        self.setMinimumSize(600, 450)
    def on_margin_clicked(self, nmargin, nline, modifiers):
        """Slot for margin clicks: toggle the arrow marker on that line."""
        # Toggle marker for the line the margin was clicked on
        if self.markersAtLine(nline) != 0:
            self.markerDelete(nline, self.ARROW_MARKER_NUM)
        else:
            self.markerAdd(nline, self.ARROW_MARKER_NUM)
class MainWindow(QtGui.QMainWindow):
    """Main window of the Packtools GUI: hosts the XML editor and wires
    file/URL open actions to the packtools analysis callback."""
    # Emitted with {'xml_source': <file object or URL string>} whenever
    # the user opens a local file or enters a URL.
    new_xml_input_signal = QtCore.pyqtSignal(dict, name="new_xml_input_signal")
    def __init__(self):
        super(MainWindow, self).__init__()
        self.initUI()
    @QtCore.pyqtSlot(dict)
    def analyze_xml_callback(self, params):
        """Run packtools analysis on the emitted XML source and show the
        annotated result (or the exception text) in the editor."""
        if params.has_key('xml_source'):
            results, exc = packtools_wrapper.analyze_xml(params['xml_source'])
            if results:
                self.populateEditor(results['annotations'])
            if exc:
                self.populateEditor(str(exc))
    def initUI(self):
        """Build the editor, actions, menus, toolbar and status bar."""
        self.editor = SimpleXMLEditor(parent=self)
        self.setCentralWidget(self.editor)
        # Action: Exit Application
        exitAction = QtGui.QAction(QtGui.QIcon('resources/exit.png'), 'Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.close)
        # Action: Open Local XML file
        openFile = QtGui.QAction(QtGui.QIcon('resources/open.png'), 'Open local XML File', self)
        openFile.setShortcut('Ctrl+O')
        openFile.setStatusTip('Open local XML File')
        openFile.triggered.connect(self.showOpenXMLDialog)
        # Action: Open URL (remote XML)
        openURL = QtGui.QAction(QtGui.QIcon('resources/web.png'), 'Open URL XML File', self)
        openURL.setShortcut('Ctrl+U')
        openURL.setStatusTip('Open URL XML File')
        openURL.triggered.connect(self.showOpenURLDialog)
        self.statusbar = self.statusBar()
        self.statusbar.showMessage('Packtools version: %s' % packtools_wrapper.PACKTOOLS_VERSION)
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openFile)
        fileMenu.addAction(openURL)
        fileMenu.addAction(exitAction)
        toolbar = self.addToolBar('Exit')
        toolbar.addAction(openFile)
        toolbar.addAction(openURL)
        toolbar.addAction(exitAction)
        self.new_xml_input_signal.connect(self.analyze_xml_callback)
        self.resize(800, 600)
        self.center()
        self.setWindowTitle('Packtools GUI v0.1')
        self.show()
    def center(self):
        """Move the window to the centre of the available desktop area."""
        qr = self.frameGeometry()
        cp = QtGui.QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
    def closeEvent(self, event):
        """Ask for confirmation before closing the application."""
        reply = QtGui.QMessageBox.question(
            self, 'Message', "Are you sure to quit?",
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
            QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
    def showOpenXMLDialog(self):
        """Open a local XML file and emit it for analysis."""
        fname = QtGui.QFileDialog.getOpenFileName(self, 'Open XML file', '.', "XML Files (*.xml)")
        # NOTE(review): the open file handle is emitted while still inside
        # the with-block; this relies on the slot being invoked
        # synchronously (same-thread direct connection) -- confirm.
        with open(fname, 'r') as f:
            self.new_xml_input_signal.emit({'xml_source': f})
    def showOpenURLDialog(self):
        """Prompt for a URL and emit it for analysis."""
        url, ok = QtGui.QInputDialog.getText(self, 'Input URL Dialog', 'Enter valid URL:')
        if ok:
            self.new_xml_input_signal.emit({'xml_source': str(url)})
    def populateEditor(self, text_content, decode_as='utf-8'):
        """Decode *text_content* (bytes, presumably UTF-8) and display it."""
        self.editor.setText(text_content.decode(decode_as))
def main():
    """Create the Qt application, show the main window and enter the
    event loop; the process exits with the loop's return code."""
    application = QtGui.QApplication(sys.argv)
    window = MainWindow()  # keep a reference so the window is not collected
    sys.exit(application.exec_())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
from django.test import TestCase
from .signals import send_newsletter
from .models import Newsletter, Topic
from .script import get_mailing_list
from user.models import Profile
from django.contrib.auth.models import User
import datetime
# Create your tests here.
class NewsletterTest(TestCase):
    """Tests that a Newsletter saves and round-trips its fields."""
    def setUp(self):
        # Create a topic and a newsletter targeting it, then reload the
        # newsletter from the DB for comparison in the tests.
        Topic.objects.create(name="Test category")
        nl_target = Topic.objects.get(name="Test category")
        Newsletter.objects.create(
            global_title="Newletter Test",
            subject_1="Fire and Ice",
            target=nl_target,
            body_1='Some say the world will end in Fire, Some say in Ice',
            time_submitted=datetime.datetime.now()
        )
        self.nl_test = Newsletter.objects.get(global_title="Newletter Test")
    def test_nl_save(self):
        """The most recently saved newsletter matches the fixture."""
        nl = Newsletter.objects.last()
        self.assertEqual(nl.global_title, self.nl_test.global_title)
        self.assertEqual(nl.subject_1, self.nl_test.subject_1)
        self.assertEqual(nl.target, self.nl_test.target)
        self.assertEqual(nl.body_1, self.nl_test.body_1)
    # NOTE(review): not prefixed with "test_", so the test runner never
    # executes this; looks like an unused stub for a signal test.
    def signal_received(self):
        pass
class MailinglistTest(TestCase):
    """Tests building a topic's mailing list from subscribed profiles."""
    def setUp(self):
        # Create a topic and one user whose profile subscribes to it.
        Topic.objects.create(name="Test topic for mail")
        self.topic = Topic.objects.get(name="Test topic for mail")
        self.credentials = {
            'username': 'user_mailtest',
            'email': '<EMAIL>',
            'password': '<PASSWORD>'}
        User.objects.create_user(**self.credentials)
        user = User.objects.get(username='user_mailtest')
        user_pk = user.pk
        profile = Profile.objects.get(user_id=user_pk)
        profile.subscriptions.add(self.topic)
        profile.save()
    def test_get_mailinglist(self):
        """The subscribed user's email appears in the mailing list.

        Renamed from ``get_mailinglist_test``: without the ``test_``
        prefix the unittest/pytest runners never discovered or executed
        this test, so it silently never ran.
        """
        mailinglist = get_mailing_list(self.topic.pk)
        self.assertIn("<EMAIL>", mailinglist)
| StarcoderdataPython |
import base64
from googleapiclient import discovery
class Transcriber(object):
    """Transcribes audio files with the Google Cloud Speech API."""
    def __init__(self, api_key):
        """
        Params:
            api_key (string): Google API key used to build the service.
        """
        self.api_key = api_key
        # Accumulated transcript chunks.  This used to be a *class*
        # attribute, so every Transcriber instance shared (and kept
        # appending to) the same list; it must be per-instance state.
        self.transcript_chunks = []
    def get_speech_service(self):
        """
        Get the Google Speech service.
        """
        return discovery.build('speech', 'v1', developerKey=self.api_key)
    def transcribe(self, filepath):
        """
        Transcribe the given audio file.
        Params:
            filepath (string): The name of the audio file.
        """
        with open(filepath, 'rb') as speech:
            # Base64 encode the binary audio file for inclusion in the JSON
            # request.
            speech_content = base64.b64encode(speech.read())
        service = self.get_speech_service()
        service_request = service.speech().recognize(
            body={
                'config': {
                    'encoding': 'LINEAR16',  # raw 16-bit signed LE samples
                    'sampleRateHertz': 16000,  # 16 khz
                    'languageCode': 'en-US',  # a BCP-47 language tag
                    'enableAutomaticPunctuation': 'true',
                },
                'audio': {
                    'content': speech_content.decode('UTF-8')
                }
            })
        response = service_request.execute()
        return response
    def transcribe_many(self, filepaths):
        """
        Transcribe the given list of audio files.
        Params:
            filepaths (list[string]): The list of audio files.
        """
        items = len(filepaths)
        # loop through the files and transcribe them
        for i, f in enumerate(filepaths, start=1):
            # Parenthesised form is valid in both Python 2 and 3 (the
            # original bare print statement was py2-only).
            print("Transcribing [ %d / %d ] %s ..." % (i, items, f))
            response = self.transcribe(f)
            # read the response and extract the transcript
            for alternatives in response['results']:
                self.transcript_chunks.append(
                    alternatives['alternatives'][0]['transcript'])
        return self.get_transcript_str()
    def get_transcript_str(self, glue=""):
        """
        Returns a string representation of the transcript chunks.
        Params:
            glue (string): The glue to join the chunks. Default value is the
            newline character (\n)
        """
        if not glue:
            glue = "\n"
        return glue.join(self.transcript_chunks)
| StarcoderdataPython |
import paramiko

# Connection settings for the remote host the files are fetched from.
IP = '192.168.127.12'
USER = 'pruebaURJC'
PASSWORD = '<PASSWORD>'  # placeholder; supply the real password out of band

try:
    # Connect over SSH (auto-accepting unknown host keys) and download
    # two source files via SFTP into the current directory.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(IP, port=22, username=USER, password=PASSWORD)
    sftp = ssh.open_sftp()
    sftp.get('./Clases.py', 'Clases.py')
    sftp.get('./__init__.py', '__init__.py')
    sftp.close()
    ssh.close()
except KeyboardInterrupt:
    # NOTE(review): only Ctrl-C is handled here -- network/auth errors
    # (paramiko.SSHException, socket.error) propagate; confirm intended.
    print('Fallo')
# filename: test/vanilla/legacy/Expected/AcceptanceTests/BodyComplexPythonThreeOnly/bodycomplexpython3only/models/_auto_rest_complex_test_service_enums.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class CMYKColors(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """CMYK color constants (case-insensitive string enum)."""
    # NOTE: the mixed value casings mirror the service wire values in
    # this generated code -- do not normalize.
    CYAN = "cyan"
    MAGENTA = "Magenta"
    YELLOW = "YELLOW"
    BLAC_K = "blacK"
class GoblinSharkColor(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Colors possible"""
    PINK = "pink"
    GRAY = "gray"
    BROWN = "brown"
    #: Uppercase RED.
    UPPER_RED = "RED"
    #: Lowercase RED.
    LOWER_RED = "red"
class MyKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Kind discriminator values (case-insensitive string enum)."""
    KIND1 = "Kind1"
| StarcoderdataPython |
import os
# Get box size from xyz file or use the default box size parameter
def get_box_info(input_file_name):
    """Read the simulation box vectors from an xyz file's comment line.

    The second line of the file is expected to contain nine
    comma-separated floats inside square brackets, e.g.
    ``comment [ax,ay,az,bx,by,bz,cx,cy,cz]``.  Falls back to the
    module-level ``default_box_size`` when the list is missing,
    malformed, or not exactly nine numbers long.

    :param input_file_name: path to the .xyz file
    :return: list of nine floats describing the box vectors
    """
    with open(input_file_name) as f:
        load_lines = f.readlines()
    try:
        raw = load_lines[1].split('[')[1].split(']')[0].split(',')
        box_size_parameter = list(map(float, raw))
    # If box info is not correct, use default box parameters.  The bare
    # ``except:`` was narrowed so unrelated errors (e.g.
    # KeyboardInterrupt) are no longer silently swallowed.
    except (IndexError, ValueError):
        box_size_parameter = default_box_size
    # If box info is missing, use default box parameters
    if len(box_size_parameter) != 9:
        box_size_parameter = default_box_size
    return box_size_parameter
# Extract molecule structure from xyz file
def get_molecule_structure(input_file_name):
    """Parse the atom records of an xyz file.

    :param input_file_name: path to the .xyz file (line 1: atom count,
        line 2: comment, then one ``element x y z`` line per atom)
    :return: list of ``[element, x, y, z]`` entries sorted by element

    Fixes versus the original: the debug ``print`` of every parsed line
    was removed, and ``str.split()`` with no argument now handles runs
    of spaces/tabs and the trailing newline in one step -- the old
    ``line[:-1]`` trick chopped the last digit of the final coordinate
    when the file did not end with a newline.
    """
    molecule_structure = []
    with open(input_file_name) as f:
        load_lines = f.readlines()
    for i in range(2, int(load_lines[0]) + 2):
        fields = load_lines[i].split()
        molecule_structure.append(
            [fields[0], float(fields[1]), float(fields[2]), float(fields[3])]
        )
    # Re-rank all atoms based on the elements
    molecule_structure.sort(key=lambda x: x[0])
    return molecule_structure
# Calculate the molecule's center (average position of all atoms) and move it to the center of box
def recenter_molcecule(molecule_structure, box_size_parameter):
    """Translate all atoms so the molecule's centroid sits at the box centre.

    The centroid is the plain average of the atomic coordinates; the box
    centre is taken from the diagonal entries of the flattened 3x3 box
    matrix.  Returns a new list; the input is not modified.
    """
    atom_count = len(molecule_structure)
    # Average each coordinate axis (indices 1..3 of the atom records).
    centroid = [
        sum(atom[axis] for atom in molecule_structure) / atom_count
        for axis in (1, 2, 3)
    ]
    box_center = [
        box_size_parameter[0] / 2,
        box_size_parameter[4] / 2,
        box_size_parameter[8] / 2,
    ]
    shift = [centroid[k] - box_center[k] for k in range(3)]
    return [
        [atom[0], atom[1] - shift[0], atom[2] - shift[1], atom[3] - shift[2]]
        for atom in molecule_structure
    ]
# Find elements type and each type's number
def get_elements_and_number(molecule_structure):
    """Return ``[unique_elements, counts]`` for the given atom list.

    Element order follows first appearance in ``molecule_structure``
    (which the caller has already sorted by element).
    """
    symbols = [atom[0] for atom in molecule_structure]
    # dict preserves insertion order, so this de-duplicates while
    # keeping the first-appearance order of each element.
    unique_symbols = list(dict.fromkeys(symbols))
    counts = [symbols.count(symbol) for symbol in unique_symbols]
    return [unique_symbols, counts]
# Write the info into POSCAR file
def write_POSCAR(input_file_name, box_size_parameter, recenter_molcecule_structure, elements_and_number):
    """Append a VASP POSCAR built from the parsed xyz data.

    The output file name is the input name with ``.xyz`` replaced by
    ``.POSCAR``.  The file is opened in append mode, matching the
    original behaviour (re-running adds another POSCAR to the file).
    """
    box = box_size_parameter
    lines = []
    lines.append("Input file generated from " + input_file_name + "\n")
    lines.append("1.0\n")  # universal scaling factor
    # The three box vectors, one per line.
    for row in (box[0:3], box[3:6], box[6:9]):
        lines.append(" " + str(row[0]) + " " + str(row[1]) + " " + str(row[2]) + "\n")
    lines.append("".join(" " + symbol for symbol in elements_and_number[0]) + "\n")
    lines.append("".join(" " + str(count) for count in elements_and_number[1]) + "\n")
    lines.append("Cartesian\n")
    for atom in recenter_molcecule_structure:
        lines.append(" " + str(atom[1]) + " " + str(atom[2]) + " " + str(atom[3]) + "\n")
    with open(input_file_name[:-4] + ".POSCAR", "a") as f:
        f.writelines(lines)
# Default parameters
# Fallback 100x100x100 orthogonal box (flattened 3x3 matrix).
default_box_size = [100.0,0.0,0.0,0.0,100.0,0.0,0.0,0.0,100.0]
# Get xyz files' name in the same path with this script
files = os.listdir(os.curdir)
xyz_files = []
for single_file in files:
    if single_file[-3:] == "xyz":
        xyz_files.append(single_file)
# Transfer all xyz file into POSCAR
# NOTE(review): the loop variable shadows the builtin ``file`` (py2 name).
for file in xyz_files:
    # Define File name
    input_file_name = file
    # Get molecule structure
    molecule_structure = get_molecule_structure(input_file_name)
    # Get box info
    box_size_parameter = get_box_info(input_file_name)
    # Recenter the molecule center with box center
    recenter_molcecule_structure = recenter_molcecule(molecule_structure,box_size_parameter)
    # Get elements' info, which is essential for make POSCAR
    elements_and_number = get_elements_and_number(recenter_molcecule_structure)
    # Write all info into POSCAR file
    write_POSCAR(input_file_name,box_size_parameter,recenter_molcecule_structure,elements_and_number)
| StarcoderdataPython |
"""
Napisz program wczytujący liczbę naturalną z klawiatury i odpowiadający na pytanie,
czy jej cyfry stanowią ciąg rosnący.
"""
def are_digits_incresing(strnumber, i=1):
    """Return True when the digits of *strnumber* form a strictly
    increasing sequence, scanning from position *i* onwards.

    Bug fixes versus the original:
    * equal neighbouring digits (e.g. "11") caused an infinite loop
      because no branch advanced ``i`` or returned; they now correctly
      yield False;
    * the dead ``len(strnumber) < len(strnumber) - 2`` branch (always
      False) was removed.

    ``i`` keeps its original meaning (start index) and now defaults to 1.
    """
    while i != len(strnumber):
        if strnumber[i] <= strnumber[i - 1]:
            return False
        i += 1
    return True
# Read a natural number and report whether its digits strictly increase.
number = int(input("Enter a number: "))
str_number = str(number)
print(are_digits_incresing(str_number, i=1))
| StarcoderdataPython |
# Copyright: <NAME> <<EMAIL>>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from aqt.qt import *
from operator import itemgetter
from aqt.utils import showInfo, askUser, getText, maybeHideClose, openHelp
import aqt.modelchooser, aqt.clayout
from anki import stdmodels
from aqt.utils import saveGeom, restoreGeom
import collections
class Models(QDialog):
    """Modal dialog listing the collection's note types, with buttons to
    add, rename, delete and (when opened from the main window) edit
    fields/cards, plus LaTeX options."""
    def __init__(self, mw, parent=None, fromMain=False):
        self.mw = mw
        self.parent = parent or mw
        # Fields/Cards buttons are only offered when launched from main.
        self.fromMain = fromMain
        QDialog.__init__(self, self.parent, Qt.Window)
        self.col = mw.col
        self.mm = self.col.models
        self.mw.checkpoint(_("Note Types"))
        self.form = aqt.forms.models.Ui_Dialog()
        self.form.setupUi(self)
        self.form.buttonBox.helpRequested.connect(lambda: openHelp("notetypes"))
        self.setupModels()
        restoreGeom(self, "models")
        # Blocks until the dialog is dismissed.
        self.exec_()
    # Models
    ##########################################################################
    def setupModels(self):
        """Create the action buttons, wire list signals, and populate."""
        self.model = None
        f = self.form; box = f.buttonBox
        t = QDialogButtonBox.ActionRole
        b = box.addButton(_("Add"), t)
        b.clicked.connect(self.onAdd)
        b = box.addButton(_("Rename"), t)
        b.clicked.connect(self.onRename)
        b = box.addButton(_("Delete"), t)
        b.clicked.connect(self.onDelete)
        if self.fromMain:
            b = box.addButton(_("Fields..."), t)
            b.clicked.connect(self.onFields)
            b = box.addButton(_("Cards..."), t)
            b.clicked.connect(self.onCards)
        b = box.addButton(_("Options..."), t)
        b.clicked.connect(self.onAdvanced)
        f.modelsList.currentRowChanged.connect(self.modelChanged)
        f.modelsList.itemDoubleClicked.connect(self.onRename)
        self.updateModelsList()
        f.modelsList.setCurrentRow(0)
        maybeHideClose(box)
    def onRename(self):
        """Prompt for a new name and save it on the current note type."""
        txt = getText(_("New name:"), default=self.model['name'])
        # txt is (text, accepted); only rename on OK with non-empty text.
        if txt[1] and txt[0]:
            self.model['name'] = txt[0]
            self.mm.save(self.model)
            self.updateModelsList()
    def updateModelsList(self):
        """Rebuild the list widget, keeping the current selection row."""
        row = self.form.modelsList.currentRow()
        if row == -1:
            row = 0
        self.models = self.col.models.all()
        self.models.sort(key=itemgetter("name"))
        self.form.modelsList.clear()
        for m in self.models:
            mUse = self.mm.useCount(m)
            mUse = ngettext("%d note", "%d notes", mUse) % mUse
            item = QListWidgetItem("%s [%s]" % (m['name'], mUse))
            self.form.modelsList.addItem(item)
        self.form.modelsList.setCurrentRow(row)
    def modelChanged(self):
        """Selection change: flush the previous model, track the new one."""
        if self.model:
            self.saveModel()
        idx = self.form.modelsList.currentRow()
        self.model = self.models[idx]
    def onAdd(self):
        """Run the AddModel dialog and save the resulting note type."""
        m = AddModel(self.mw, self).get()
        if m:
            txt = getText(_("Name:"), default=m['name'])[0]
            if txt:
                m['name'] = txt
            self.mm.ensureNameUnique(m)
            self.mm.save(m)
            self.updateModelsList()
    def onDelete(self):
        """Delete the selected note type after user confirmation."""
        # At least one note type must always remain in the collection.
        if len(self.models) < 2:
            showInfo(_("Please add another note type first."),
                     parent=self)
            return
        if self.mm.useCount(self.model):
            msg = _("Delete this note type and all its cards?")
        else:
            msg = _("Delete this unused note type?")
        if not askUser(msg, parent=self):
            return
        self.mm.rem(self.model)
        self.model = None
        self.updateModelsList()
    def onAdvanced(self):
        """Show the LaTeX options sub-dialog and store its settings."""
        d = QDialog(self)
        frm = aqt.forms.modelopts.Ui_Dialog()
        frm.setupUi(d)
        frm.latexsvg.setChecked(self.model.get("latexsvg", False))
        frm.latexHeader.setText(self.model['latexPre'])
        frm.latexFooter.setText(self.model['latexPost'])
        d.setWindowTitle(_("Options for %s") % self.model['name'])
        frm.buttonBox.helpRequested.connect(lambda: openHelp("latex"))
        restoreGeom(d, "modelopts")
        d.exec_()
        saveGeom(d, "modelopts")
        # Settings are read back even on Cancel (dialog has no accept check).
        self.model['latexsvg'] = frm.latexsvg.isChecked()
        self.model['latexPre'] = str(frm.latexHeader.toPlainText())
        self.model['latexPost'] = str(frm.latexFooter.toPlainText())
    def saveModel(self):
        """Persist the currently selected note type."""
        self.mm.save(self.model)
    def _tmpNote(self):
        """Build a throwaway note with placeholder field values, used to
        preview fields/cards for the current note type."""
        self.mm.setCurrent(self.model)
        n = self.col.newNote(forDeck=False)
        for name in list(n.keys()):
            n[name] = "("+name+")"
        try:
            if "{{cloze:Text}}" in self.model['tmpls'][0]['qfmt']:
                n['Text'] = _("This is a {{c1::sample}} cloze deletion.")
        except:
            # invalid cloze
            pass
        return n
    def onFields(self):
        """Open the field editor on a temporary preview note."""
        from aqt.fields import FieldDialog
        n = self._tmpNote()
        FieldDialog(self.mw, n, parent=self)
    def onCards(self):
        """Open the card layout editor on a temporary preview note."""
        from aqt.clayout import CardLayout
        n = self._tmpNote()
        CardLayout(self.mw, n, ord=0, parent=self, addMode=True)
    # Cleanup
    ##########################################################################
    # need to flush model on change or reject
    def reject(self):
        """Dialog close: flush pending edits, refresh main UI, save geom."""
        self.saveModel()
        self.mw.reset()
        saveGeom(self, "models")
        QDialog.reject(self)
class AddModel(QDialog):
    """Dialog offering the standard note types plus clones of existing
    ones; ``get()`` returns the chosen model or None on cancel."""
    def __init__(self, mw, parent=None):
        self.parent = parent or mw
        self.mw = mw
        self.col = mw.col
        QDialog.__init__(self, self.parent, Qt.Window)
        self.model = None
        self.dialog = aqt.forms.addmodel.Ui_Dialog()
        self.dialog.setupUi(self)
        # standard models
        # self.models entries are (is_standard, factory_or_model) pairs.
        self.models = []
        for (name, func) in stdmodels.models:
            if isinstance(name, collections.Callable):
                name = name()
            item = QListWidgetItem(_("Add: %s") % name)
            self.dialog.models.addItem(item)
            self.models.append((True, func))
        # add copies
        for m in sorted(self.col.models.all(), key=itemgetter("name")):
            item = QListWidgetItem(_("Clone: %s") % m['name'])
            self.dialog.models.addItem(item)
            self.models.append((False, m))
        self.dialog.models.setCurrentRow(0)
        # the list widget will swallow the enter key
        s = QShortcut(QKeySequence("Return"), self)
        s.activated.connect(self.accept)
        # help
        self.dialog.buttonBox.helpRequested.connect(self.onHelp)
    def get(self):
        """Run the dialog modally; return the selected model or None."""
        self.exec_()
        return self.model
    def reject(self):
        QDialog.reject(self)
    def accept(self):
        """Create (standard) or clone (existing) the selected note type."""
        (isStd, model) = self.models[self.dialog.models.currentRow()]
        if isStd:
            # create
            self.model = model(self.col)
        else:
            # add copy to deck
            self.model = self.mw.col.models.copy(model)
        self.mw.col.models.setCurrent(self.model)
        QDialog.accept(self)
    def onHelp(self):
        openHelp("notetypes")
| StarcoderdataPython |
from PyQt5.QtCore import QObject, pyqtSignal
class CurrentThread(QObject):
    """Qt helper that marshals a callable onto the thread this object
    lives in (presumably the GUI/main thread -- depends on where the
    instance is created)."""
    # Signal carrying (callable, positional-args tuple, kwargs dict).
    _on_execute = pyqtSignal(object, tuple, dict)
    def __init__(self):
        # NOTE(review): ``super(QObject, self)`` skips QObject.__init__
        # in the MRO; the conventional form is ``super(CurrentThread,
        # self).__init__()`` -- confirm this initialises QObject properly.
        super(QObject, self).__init__()
        self._on_execute.connect(self._execute_in_thread)
    def execute(self, f, args, kwargs):
        """Emit the signal so *f* runs in this object's owning thread."""
        self._on_execute.emit(f, args, kwargs)
    def _execute_in_thread(self, f, args, kwargs):
        # Slot invoked in the receiver's thread: just call the payload.
        f(*args, **kwargs)
# Module-level singleton bound to the thread importing this module.
main_thread = CurrentThread()


def run_in_main_thread(f):
    """Decorator routing calls of *f* through ``main_thread.execute``.

    The wrapper always returns None; any return value of *f* is
    discarded because the call is delivered via the Qt signal.
    """
    from functools import wraps

    @wraps(f)  # preserve f's __name__/__doc__ on the wrapper (was missing)
    def result(*args, **kwargs):
        main_thread.execute(f, args, kwargs)
    return result
import os
import pytest
from featuretools import list_primitives
from featuretools.primitives import (
Age,
Count,
Day,
GreaterThan,
Haversine,
Last,
Max,
Mean,
Min,
Mode,
Month,
NumCharacters,
NumUnique,
NumWords,
PercentTrue,
Skew,
Std,
Sum,
Weekday,
Year,
get_aggregation_primitives,
get_default_aggregation_primitives,
get_default_transform_primitives,
get_transform_primitives
)
from featuretools.primitives.base import PrimitiveBase
from featuretools.primitives.utils import (
_get_descriptions,
_get_unique_input_types,
list_primitive_files,
load_primitive_from_file
)
from featuretools.utils.gen_utils import Library
def test_list_primitives_order():
    """Every registered primitive appears in list_primitives() with
    matching description, dask flag, input types and return type."""
    df = list_primitives()
    all_primitives = get_transform_primitives()
    all_primitives.update(get_aggregation_primitives())
    for name, primitive in all_primitives.items():
        assert name in df['name'].values
        row = df.loc[df['name'] == name].iloc[0]
        actual_desc = _get_descriptions([primitive])[0]
        if actual_desc:
            assert actual_desc == row['description']
        assert row['dask_compatible'] == (Library.DASK in primitive.compatibility)
        assert row['valid_inputs'] == ', '.join(_get_unique_input_types(primitive.input_types))
        assert row['return_type'] == getattr(primitive.return_type, '__name__', None)
    # Both primitive categories must be represented in the listing.
    types = df['type'].values
    assert 'aggregation' in types
    assert 'transform' in types
def test_valid_input_types():
    """_get_unique_input_types renders de-duplicated schema strings."""
    actual = _get_unique_input_types(Haversine.input_types)
    assert actual == {'<ColumnSchema (Logical Type = LatLong)>'}
    actual = _get_unique_input_types(GreaterThan.input_types)
    assert actual == {'<ColumnSchema (Logical Type = Datetime)>',
                      "<ColumnSchema (Semantic Tags = ['numeric'])>",
                      '<ColumnSchema (Logical Type = Ordinal)>'}
    actual = _get_unique_input_types(Sum.input_types)
    assert actual == {"<ColumnSchema (Semantic Tags = ['numeric'])>"}
def test_descriptions():
    """_get_descriptions returns each primitive's docstring summary."""
    primitives = {NumCharacters: 'Calculates the number of characters in a string.',
                  Day: 'Determines the day of the month from a datetime.',
                  Last: 'Determines the last value in a list.',
                  GreaterThan: 'Determines if values in one list are greater than another list.'}
    assert _get_descriptions(list(primitives.keys())) == list(primitives.values())
def test_get_default_aggregation_primitives():
    """The default aggregation primitive set is exactly the known ten."""
    primitives = get_default_aggregation_primitives()
    expected_primitives = [Sum, Std, Max, Skew, Min, Mean, Count, PercentTrue,
                           NumUnique, Mode]
    assert set(primitives) == set(expected_primitives)
def test_get_default_transform_primitives():
    """The default transform primitive set is exactly the known eight."""
    primitives = get_default_transform_primitives()
    expected_primitives = [Age, Day, Year, Month, Weekday, Haversine, NumWords,
                           NumCharacters]
    assert set(primitives) == set(expected_primitives)
@pytest.fixture
def this_dir():
    """Absolute directory containing this test module."""
    return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def primitives_to_install_dir(this_dir):
    """Path to the fixture directory of installable custom primitives."""
    return os.path.join(this_dir, "primitives_to_install")
@pytest.fixture
def bad_primitives_files_dir(this_dir):
    """Path to the fixture directory of malformed primitive files."""
    return os.path.join(this_dir, "bad_primitive_files")
def test_list_primitive_files(primitives_to_install_dir):
    """list_primitive_files finds the three known custom primitive files."""
    files = list_primitive_files(primitives_to_install_dir)
    custom_max_file = os.path.join(primitives_to_install_dir, "custom_max.py")
    custom_mean_file = os.path.join(primitives_to_install_dir, "custom_mean.py")
    custom_sum_file = os.path.join(primitives_to_install_dir, "custom_sum.py")
    assert {custom_max_file, custom_mean_file, custom_sum_file}.issubset(set(files))
def test_load_primitive_from_file(primitives_to_install_dir):
    """A file defining one primitive loads as a PrimitiveBase subclass."""
    primitve_file = os.path.join(primitives_to_install_dir, "custom_max.py")
    primitive_name, primitive_obj = load_primitive_from_file(primitve_file)
    assert issubclass(primitive_obj, PrimitiveBase)
def test_errors_more_than_one_primitive_in_file(bad_primitives_files_dir):
    """Loading a file with multiple primitives raises RuntimeError."""
    primitive_file = os.path.join(bad_primitives_files_dir, "multiple_primitives.py")
    error_text = "More than one primitive defined in file {}".format(primitive_file)
    with pytest.raises(RuntimeError) as excinfo:
        load_primitive_from_file(primitive_file)
    assert str(excinfo.value) == error_text
def test_errors_no_primitive_in_file(bad_primitives_files_dir):
    """Loading a file with no primitive raises RuntimeError."""
    primitive_file = os.path.join(bad_primitives_files_dir, "no_primitives.py")
    error_text = "No primitive defined in file {}".format(primitive_file)
    with pytest.raises(RuntimeError) as excinfo:
        load_primitive_from_file(primitive_file)
    assert str(excinfo.value) == error_text
| StarcoderdataPython |
from __future__ import division
__version__ = '0.0.3'
__author__ = '<NAME>'
import md5
from math import sqrt, log, sin, cos, pi
import types
TAU = 2.0*pi
def gaussian(value1, value2):
"""
Converts two flat distributed numbers into a gaussian distribution
Input from 0-1, output mean 0 STDDEV 1.
"""
output1 = sqrt(-2*log(value1))*cos(TAU*value2)
output2 = sqrt(-2*log(value1))*sin(TAU*value2)
return output1, output2
class pghgen(object):
"""
Stores a seed and a separator, and provides a function
that creates pghash objects from tuples.
"""
def __init__(self, joiner=lambda t: str(t), seed=0):
self.seed = seed
if type(joiner) is types.LambdaType:
self.joiner = joiner
else: # If it's not a lambda, then build a lambda on the assumption that it's a tuple or a three-character string
f1 = lambda t: joiner[0] + joiner[1].join(map(str, t)) + joiner[2] if hasattr(t, '__iter__') else str(t)
m1 = lambda t: joiner[1].join(map(f1, t))
self.joiner = m1
def hash(self, tup):
"""
Creates pghash objects from tuples using the stored separator.
"""
return pghash(tup=(self.seed,tup), joiner=self.joiner)
class pghash(object):
"""
Hashes a tuple, and provides the answer in a variety of forms
(hex, float, int, gaussian).
"""
def __init__(self, tup, joiner=lambda t: str(t)):
self.hex = md5.new(joiner(tup)).hexdigest()
def pgvalue(self):
"""
Returns the full integer value of the hash from 0 to 2^^128-1.
"""
return int(self.hex, 16)
def pghalves(self):
"""
Returns the hash value as hex in two halves.
"""
return self.hex[:16], self.hex[16:]
def pgvalues(self):
"""
Returns the hash value as integers in two halves.
"""
return int(self.hex[:16], 16), int(self.hex[16:], 16)
def random(self):
"""
Returns the hash value as a float between 0 and 1.
"""
return self.pgvalue() / 2**128
def randint(self, lbound, ubound):
"""
Returns the hash value as an integer in the range supplied.
"""
return int(self.random() * (ubound-lbound+1) + lbound)
def gauss(self, mu, sigma):
"""
Returns the hash value as a number with a mean of mu and a standard deviation of sigma.
"""
intpair = self.pgvalues()
floatpair = (intpair[0]/2**64, intpair[1]/2**64)
return gaussian(floatpair[0], floatpair[1])[0]*sigma+mu
def normalvariate(self, mu, sigma):
"""
Returns the hash value as a number with a mean of mu and a standard deviation of sigma.
"""
return self.gauss(mu, sigma)
def choice(self, seq):
"""
Returns an element picked out of collection using the hash value as a random number.
"""
return seq[int(self.random() * len(seq))]
| StarcoderdataPython |
6492700 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from logging import getLogger
from concurrent.futures import ProcessPoolExecutor, wait
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from .basics.classifier_characteristics import ClassifierCharacteristics
logger = getLogger('disteval.recursive_selection')
def recursive_feature_selection_roc_auc(clf,
                                        X,
                                        y,
                                        sample_weight=None,
                                        n_features=10,
                                        cv_steps=10,
                                        n_jobs=1,
                                        forward=True,
                                        matching_features=True):
    """Method building a feature set in a recursive fashion. Depending
    on the setting it is run as a forward selection/backward elimination
    searching for a set of n features with the highest/lowest mismatch.
    To get the set with the size n starting from n_total features the
    following approaches are used:

    Forward Selection:
    To get the k+1 set every not yet selected feature is used to
    generate (n_total - k) sets. The set with the best score is the
    k + 1 set. Those steps are repeated until n features are selected.

    Backward Elimination:
    To get k+1 eliminated features every not yet eliminated feature is
    used to generate (n_total - k) sets. The sets consist of all not yet
    eliminated features minus the one that is tested. The set with the
    best score determines the next feature to eliminate. Those steps are
    repeated until n features are eliminated.

    What counts as the best score depends on the settings:
    matching_features:
        forward: min(|auc - 0.5|)
        not forward: max(|auc - 0.5|)
    not matching_features:
        forward: max(auc)
        not forward: min(auc)

    Parameters
    ----------
    clf: object
        Classifier that should be used for the classification.
        It needs a fit and a predict_proba function.
    X : numpy.float32array, shape=(n_samples, n_obs)
        Values describing the samples.
    y : numpy.float32array, shape=(n_samples)
        Array of the true labels.
    sample_weight : None or numpy.float32array, shape=(n_samples)
        If weights are used this has to contains the sample weights.
        None in the case of no weights.
    n_features : int, optional (default=10)
        Number of feature that are selected (forward=True) or eliminated
        (forward=False)
    cv_steps : int, optional (default=10)
        Number of cross-validation folds used to evaluate every
        candidate feature set (must be >= 2; see get_all_auc_scores).
    n_jobs: int, optional (default=1)
        Number of parallel jobs spawned in each a classification in run.
        Total number of used cores is the product of n_jobs from the clf
        and the n_jobs of this function.
    forward: bool, optional (default=True)
        If True it is a 'forward selection'. If False it is a 'backward
        elimination'.
    matching_features: bool, optional (default=True)
        Whether a matching or mismatching feature set should be searched.

    Returns
    -------
    selected_features: list of ints
        Return a list containing the indeces of X, that were
        selected/eliminated. The order corresponds to the order the
        features were selected/eliminated.
    auc_scores: np.array float shape(n_features_total, n_features)
        Return a array containing the auc values for all steps.
        np.nan if the feature was already selected in the specific run.
    """
    # Sanity check: the classifier must expose fit() and predict_proba().
    desired_characteristics = ClassifierCharacteristics()
    desired_characteristics.opts['callable:fit'] = True
    desired_characteristics.opts['callable:predict_proba'] = True
    clf_characteristics = ClassifierCharacteristics(clf)
    assert clf_characteristics.fulfilling(desired_characteristics), \
        'Classifier sanity check failed!'
    if n_features > X.shape[1]:
        # Cannot select more features than exist; clamp and warn.
        logger.info(' \'n_features\' higher than total number of features.'
                    ' \'n_features\' reduced!')
        n_features = X.shape[1]
    # Column j holds the per-feature AUCs of selection step j.
    auc_scores = np.zeros((X.shape[1], n_features))
    selected_features = []
    while len(selected_features) != n_features:
        # One AUC per still-available feature (np.nan for used ones).
        auc_scores_i = get_all_auc_scores(clf,
                                          selected_features,
                                          X,
                                          y,
                                          sample_weight=sample_weight,
                                          cv_steps=cv_steps,
                                          n_jobs=n_jobs,
                                          forward=forward)
        value_best = None
        index_best = None
        for idx, auc in enumerate(auc_scores_i):
            if not np.isfinite(auc):
                # Feature already selected/eliminated in a previous step.
                continue
            if value_best is None:
                value_best = auc
                index_best = idx
            if matching_features:
                # Matching: prefer AUC closest to (forward) or farthest
                # from (backward) 0.5, i.e. least/most discriminative.
                if forward:
                    if np.abs(auc - 0.5) < np.abs(value_best - 0.5):
                        value_best = auc
                        index_best = idx
                else:
                    if np.abs(auc - 0.5) > np.abs(value_best - 0.5):
                        value_best = auc
                        index_best = idx
            else:
                # Mismatching: prefer max (forward) or min (backward) AUC.
                if forward:
                    if auc > value_best:
                        value_best = auc
                        index_best = idx
                else:
                    if auc < value_best:
                        value_best = auc
                        index_best = idx
        auc_scores[:, len(selected_features)] = auc_scores_i
        selected_features.append(index_best)
    return selected_features, auc_scores
def __single_auc_score__(feature_i,
                         clf,
                         cv_indices,
                         X,
                         y,
                         sample_weight=None):
    """Method determining the 'area under curve' for a single test set.

    The classifier is fitted on every cross-validation training split
    and the out-of-fold probabilities are collected, so that every
    sample receives exactly one prediction. This function is intended
    for internal use.

    Parameters
    ----------
    feature_i: int
        Index of the tested feature.
    clf: object
        Classifier that should be used for the classification.
        It needs a fit and a predict_proba function.
    cv_indices: list of tuples
        Indices for all the cross validation steps. They are explicitly
        passed, so all test sets use the same splitting.
    X : numpy.float32array, shape=(n_samples, n_obs)
        Values describing the samples.
    y : numpy.float32array, shape=(n_samples)
        Array of the true labels.
    sample_weight : None or numpy.float32array, shape=(n_samples)
        If weights are used this has to contain the sample weights.
        None in the case of no weights.

    Returns
    -------
    feature_i: int
        Index of the tested feature. It is needed as a return value for
        asynchronous parallel processing.
    auc_score: float
        Returns calculated auc score.
    """
    y_pred = np.zeros_like(y, dtype=float)
    for train_idx, test_idx in cv_indices:
        X_train = X[train_idx]
        X_test = X[test_idx]
        y_train = y[train_idx]
        if sample_weight is None:
            sample_weight_train = None
        else:
            sample_weight_train = sample_weight[train_idx]
        clf = clf.fit(X=X_train,
                      y=y_train,
                      sample_weight=sample_weight_train)
        y_pred[test_idx] = clf.predict_proba(X_test)[:, 1]
    # y_pred covers *all* samples, so the full weight vector must be used.
    # (The previous code passed only the last fold's test weights, whose
    # length does not match y whenever sample_weight is given.)
    auc_score = roc_auc_score(y, y_pred, sample_weight=sample_weight)
    return feature_i, auc_score
def get_all_auc_scores(clf,
                       selected_features,
                       X,
                       y,
                       sample_weight=None,
                       cv_steps=10,
                       n_jobs=1,
                       forward=True,
                       random_state=None):
    """Method determining the 'area under curve' for all not yet
    selected features. In this function also the feature sets for the
    tests are created.

    Parameters
    ----------
    clf: object
        Classifier that should be used for the classification.
        It needs a fit and a predict_proba function.
    selected_features: list of ints
        List of already selected features
    X : numpy.float32array, shape=(n_samples, n_obs)
        Values describing the samples.
    y : numpy.float32array, shape=(n_samples)
        Array of the true labels.
    sample_weight : None or numpy.float32array, shape=(n_samples)
        If weights are used this has to contains the sample weights.
        None in the case of no weights.
    cv_steps: int, optional (default=10)
        Number of cross-validation folds; must be >= 2.
    n_jobs: int, optional (default=1)
        Number of parallel jobs spawned in each a classification in run.
        Total number of used cores is the product of n_jobs from the clf
        and the n_jobs of this function.
    forward: bool, optional (default=True)
        If True it is a 'forward selection'. If False it is a 'backward
        elimination'.
    random_state: None, int or RandomState
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random.

    Returns
    -------
    auc_scores: np.array float shape(n_features_total)
        Return a array containing the auc values. np.nan if the feature
        is already selected.
    """
    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)
    selected_features = np.array(selected_features, dtype=int)
    if cv_steps < 2:
        raise ValueError('\'cv_steps\' must be 2 or higher')
    # Use one shared splitting for every candidate set, so scores are
    # comparable across features.
    cv_iterator = StratifiedKFold(n_splits=cv_steps,
                                  shuffle=True,
                                  random_state=random_state)
    cv_indices = [[train, test] for train, test in cv_iterator.split(X, y)]
    test_features = np.array([int(i) for i in range(X.shape[1])
                              if i not in selected_features], dtype=int)
    # Build the candidate feature set for every feature still in play.
    # (A redundant, never-used `process_args` list was removed here.)
    test_sets = {}
    for feature_i in test_features:
        if forward:
            # Candidate set = already selected features + this feature.
            set_i = np.hstack((selected_features, feature_i))
            test_sets[feature_i] = np.sort(set_i)
        else:
            # Candidate set = all remaining features minus this feature.
            set_i = list(test_features)
            set_i.remove(feature_i)
            test_sets[feature_i] = np.array(set_i)
    auc_scores = np.empty(X.shape[1])
    auc_scores[:] = np.nan
    if n_jobs > 1:
        futures = []
        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
            for feature_i, test_set in test_sets.items():
                futures.append(executor.submit(__single_auc_score__,
                                               feature_i=feature_i,
                                               clf=clf,
                                               cv_indices=cv_indices,
                                               X=X[:, test_set],
                                               y=y,
                                               sample_weight=sample_weight))
        results = wait(futures)
        for future_i in results.done:
            feature_i, auc = future_i.result()
            auc_scores[feature_i] = auc
    else:
        # Serial path. (The previous code re-bound `auc_scores` to an
        # empty list here, so `auc_scores[feature_i] = auc` raised an
        # IndexError whenever n_jobs <= 1.)
        for feature_i, test_set in test_sets.items():
            _, auc = __single_auc_score__(feature_i=feature_i,
                                          clf=clf,
                                          cv_indices=cv_indices,
                                          X=X[:, test_set],
                                          y=y,
                                          sample_weight=sample_weight)
            auc_scores[feature_i] = auc
    return auc_scores
| StarcoderdataPython |
5169438 | import gym
import numpy as np
import matplotlib.pyplot as plt
from windy_gridworld import WindyGridworldEnv
import time, os
def create_state_action_dictionary(env):
    """
    Build the initial Q-table: map every state id (0..env.nS-1) to a
    dict of zero-initialised action values (0..env.nA-1).
    """
    return {state: {action: 0.0 for action in range(env.nA)}
            for state in range(env.nS)}
def epsilon_greedy_action(env, epsilon, s, Q):
    """
    Epsilon-greedy policy: with probability epsilon explore via
    env.action_space.sample(), otherwise exploit the greedy action.
    (argmax over Q[s].values() works because action keys are inserted
    in order 0..nA-1.)
    """
    if np.random.random() >= epsilon:
        return np.argmax(list(Q[s].values()))
    return env.action_space.sample()
def n_step_sarsa(env, n_episodes, n, epsilon, alpha, gamma):
    """n-step SARSA (on-policy TD control, cf. Sutton & Barto ch. 7).

    Runs `n_episodes` episodes on `env` with an epsilon-greedy policy and
    n-step returns. Returns the learned Q-table and a per-state visit
    count reshaped to the grid (env.shape).
    """
    Q = create_state_action_dictionary(env)
    visits_per_state = np.zeros(env.nS)
    for _ in range(n_episodes):
        s_0 = env.reset()
        T = np.inf  # episode length; unknown until a terminal state is hit
        t = 0
        a_t = epsilon_greedy_action(env, epsilon, s_0, Q)
        states = [s_0]
        actions = [a_t]
        rewards = [0]  # rewards[i] = reward received on entering states[i]
        while True:
            #env._render()
            #time.sleep(0.05)
            #os.system('cls')
            if t < T:
                # Still acting: take the stored action, record outcome.
                next_state, r, done, info = env.step(actions[t])
                visits_per_state[next_state] += 1
                rewards.append(r)
                states.append(next_state)
                if done:
                    T = t + 1
                else:
                    next_action = epsilon_greedy_action(env, epsilon, next_state, Q)
                    actions.append(next_action)
            tau = t - n + 1  # time step whose estimate gets updated now
            if tau >= 0:
                # n-step return: discounted rewards from tau+1 to min(tau+n, T)
                G = 0
                for i in range(tau+1, min(tau+n+1,T+1)):
                    G += np.power(gamma, i-tau-1) * rewards[i]
                if tau + n < T:
                    # Bootstrap with the current Q estimate of (s, a) at tau+n.
                    G += np.power(gamma, n) * Q[states[tau+n]][actions[tau+n]]
                Q[states[tau]][actions[tau]] = Q[states[tau]][actions[tau]] + alpha * (G - Q[states[tau]][actions[tau]])
            if tau == T - 1:
                break
            t += 1
    return Q, visits_per_state.reshape(env.shape[0], env.shape[1])
n_episodes = 100
epsilon = 0.1
alpha = 0.5
gamma = 0.9
n = [1, 2, 8, 30]

# Try to (dis)able the king's moves to see the impact
env = WindyGridworldEnv(kings_move=True)

# Run n-step SARSA once per setting of n and collect visit statistics.
total_visits_per_state = []
avg_total_visit = []
for v in n:
    Q, visits_per_state = n_step_sarsa(env, n_episodes, v, epsilon, alpha, gamma)
    avg_total_visit.append(np.mean(visits_per_state))
    total_visits_per_state.append(visits_per_state)

# --------------------
# VISUALIZE RESULTS
# --------------------
fig, ax = plt.subplots(2, 2)
for axis, steps, visits, avg_visits in zip(ax.flat, n,
                                           total_visits_per_state,
                                           avg_total_visit):
    axis.imshow(visits)
    axis.set_xticks(np.arange(env.shape[1]))
    axis.set_yticks(np.arange(env.shape[0]))
    for row in range(env.shape[0]):
        for col in range(env.shape[1]):
            axis.text(col, row, visits[row, col],
                      ha="center", va="center", color="w")
    axis.set_title('Visits Per State With n=%d\nAverage Visits = %d'
                   % (steps, avg_visits))
fig.tight_layout()
plt.show() | StarcoderdataPython |
12818426 | from scipy.stats import entropy, ks_2samp, kstest, anderson
import numpy as np
EPSILON = 10e-10
# ############################################### #
# ##### Distribution Metrics for Testing G: ##### #
# ############################################### #
def calc_Dkl(true_samples, generated_samples, bin_num=100):
    """
    Kullback-Leibler divergence D_KL(true || generated) in bits,
    computed over a binning shared by both sample sets.
    """
    # Shared bins ensure both pdfs are defined on the same support.
    true_pdf, generated_pdf = _calc_mutual_pdf(true_samples,
                                               generated_samples,
                                               bin_num)
    return entropy(true_pdf, generated_pdf, base=2)
def calc_ks(true_samples, generated_samples):
    """
    Kolmogorov-Smirnov test: two-sample when `true_samples` is an array
    of samples, one-sample when it is a callable cdf.
    """
    if isinstance(true_samples, np.ndarray):
        # Two empirical sample sets -> two-sample KS test.
        return ks_2samp(true_samples, generated_samples)
    # `true_samples` is a cdf callable -> one-sample KS test.
    return kstest(rvs=generated_samples, cdf=true_samples,
                  alternative="two-sided")
def calc_l1_cdf(true_samples, generated_samples, bin_num=100):
    """
    L1 distance between the two empirical CDFs.

    Like the total variation distance, but based on the cdf rather than
    the pdf.

    :param true_samples:
    :param generated_samples:
    :param bin_num:
    :return:
    """
    true_pdf, generated_pdf = _calc_mutual_pdf(true_samples,
                                               generated_samples,
                                               bin_num)
    # Cumulative sums of the normalized pdfs give the empirical cdfs.
    cdf_gap = np.abs(np.cumsum(true_pdf) - np.cumsum(generated_pdf))
    return np.sum(cdf_gap)
def calc_anderson(generated_samples):
    """Anderson-Darling test of the generated samples against a normal."""
    result = anderson(generated_samples, dist="norm")
    return result
def _calc_mutual_pdf(true_samples, generated_samples, bin_num):
    """
    Calculate both pdfs over a mutual binning range.

    :param true_samples:
    :param generated_samples:
    :param bin_num:
    :return: (true_pdf, generated_pdf), each normalized to sum to 1
    """
    lo = min(generated_samples.min(), true_samples.min())
    hi = max(generated_samples.max(), true_samples.max())
    edges = np.linspace(lo, hi, num=bin_num, endpoint=True)
    generated_pdf, _ = np.histogram(generated_samples, bins=edges,
                                    density=True)
    # Zero bins would break ratio-based metrics (e.g. KL divergence),
    # so floor them before normalizing.
    generated_pdf[generated_pdf == 0] = EPSILON
    generated_pdf = generated_pdf / generated_pdf.sum()
    true_pdf, _ = np.histogram(true_samples, bins=edges, density=True)
    true_pdf = true_pdf / true_pdf.sum()
    return true_pdf, generated_pdf
# ###################### #
# ##### Testing D: ##### #
# ###################### #
| StarcoderdataPython |
4848492 | import urllib.request, json
from datetime import datetime
from django.utils import timezone
from django.conf import settings
from django.db.models import Q
from fbevents.models import Event
# Assume all the upcoming events are in the first page, paging is ignored
def sync_upcoming_events_with_fb():
    """
    Fetch events from the Facebook Graph API for the configured page and
    upsert the upcoming ones into the local Event table.

    Assumes all upcoming events fit into the first response page
    (paging is ignored).
    """
    fb_events_api = 'https://graph.facebook.com/{}/events?access_token={}&fields=name,id,place,cover,start_time,end_time'.format(settings.FB_PAGE_ID, settings.FB_ACCESS_TOKEN)
    with urllib.request.urlopen(fb_events_api) as url:
        events = json.loads(url.read().decode())
    for event in events['data']:
        end_time = event.get('end_time', None)
        if end_time:
            end_time = datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%S%z')
        start_time = datetime.strptime(event['start_time'], '%Y-%m-%dT%H:%M:%S%z')
        now = timezone.now()
        location = ''
        if 'place' in event and 'name' in event['place']:
            location = event['place']['name']
        # 'cover' is absent for events without a photo; guard it like
        # 'place' and 'end_time' instead of raising KeyError.
        cover = event.get('cover', {}).get('source', '')
        # Keep only events that have not ended (or not started) yet.
        if (end_time and now < end_time) or now < start_time:
            defaults = {
                'name': event['name'],
                'location': location,
                'cover': cover,
                'start_time': event['start_time'],
                'end_time': end_time,
            }
            Event.objects.update_or_create(fb_id=event['id'], defaults=defaults)
def get_upcoming_events():
    """Return events that have not ended (or not started) yet, soonest first."""
    upcoming = Q(end_time__gt=timezone.now()) | Q(start_time__gt=timezone.now())
    return Event.objects.filter(upcoming).order_by('start_time')
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.