| content (stringlengths, 0–1.05M) | origin (stringclasses, 2 values) | type (stringclasses, 2 values) |
|---|---|---|
'''
Python program to add two positive integers without using the '+' operator
'''
'''
x << y
Returns x with the bits shifted to the left by y places (and new bits on the right-hand-side are zeros). This is the same as multiplying x by 2**y.
x >> y
Returns x with the bits shifted to the right by y places. This is the same as //'ing x by 2**y.
x & y
Does a "bitwise and". Each bit of the output is 1 if the corresponding bit of x AND of y is 1, otherwise it's 0.
x | y
Does a "bitwise or". Each bit of the output is 0 if the corresponding bit of x AND of y is 0, otherwise it's 1.
~ x
Returns the complement of x - the number you get by switching each 1 for a 0 and each 0 for a 1. This is the same as -x - 1.
x ^ y
Does a "bitwise exclusive or". Each bit of the output is the same as the corresponding bit in x if that bit in y is 0, and it's the complement of the bit in x if that bit in y is 1.
'''
def add_without_plus_operator(a, b):
    while b != 0:
        carry = a & b        # bits that generate a carry
        a = a ^ b            # sum of the bits, ignoring carries
        b = carry << 1       # shift the carry so it is added on the next pass
    return a


print(add_without_plus_operator(2, 10))
# Note: with Python's unbounded integers the loop only ends once the carry
# dies out. The negative examples below happen to terminate, but a mixed-sign
# pair whose carry would have to clear the infinite run of sign bits
# (e.g. -1 and 1) loops forever, which is why the docstring restricts the
# inputs to positive integers.
print(add_without_plus_operator(-20, 10))
print(add_without_plus_operator(-10, -20))
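# A quick, self-contained check of the operator identities described in the
# docstring above (illustrative assertions, not part of the original program):
assert (6 << 2) == 6 * 2 ** 2                    # left shift multiplies by 2**y
assert ~13 == -13 - 1                            # complement is -x - 1
x, y = 13, 7
assert (x ^ y) + ((x & y) << 1) == x + y         # sum bits + shifted carry rebuild x + y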
|
nilq/baby-python
|
python
|
import timer.helper.thread as thread
class TestThreadIsNone():
def test_real_none(self) -> None:
assert thread.is_none(None) is True
def test_text_none_uppercase(self) -> None:
assert thread.is_none("NONE") is True
def test_text_none_lowercase(self) -> None:
assert thread.is_none("none") is False
def test_random_text(self) -> None:
assert thread.is_none("something") is False
def test_random_number(self) -> None:
assert thread.is_none(123) is False
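# Hedged sketch (the real timer.helper.thread module is not included in this
# file): an is_none() consistent with the tests above would treat None and the
# literal uppercase string "NONE" as missing, and everything else as present.
def _is_none_sketch(value) -> bool:
    return value is None or value == "NONE"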
|
nilq/baby-python
|
python
|
# Copyright (c) 2019, Stefan Grönke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import typing
import ctypes
import struct
import enum
import freebsd_sysctl.libc
import freebsd_sysctl.types
import freebsd_sysctl.flags
from freebsd_sysctl.__version__ import __version__
NULL_BYTES = b"\x00"
CTL_MAXNAME = ctypes.c_uint(24)
T_OID = (ctypes.c_int * 2)
BUFSIZ = 1024 # see /include/stdio.h#L209
class Sysctl:
_name: typing.Optional[str]
_oid: typing.Optional[typing.List[int]]
_kind: typing.Optional[int]
    _fmt: typing.Optional[str]
_size: typing.Optional[int]
_value: typing.Optional[typing.Any]
_description: typing.Optional[str]
def __init__(
self,
name: typing.Optional[str]=None,
oid: typing.Optional[typing.List[int]]=None
) -> None:
self._name = name
self._oid = oid
self._kind = None
self._fmt = None
self._size = None
self._value = None
self._description = None
@property
def oid(self) -> typing.List[int]:
if self._oid is None:
if self.name is None:
raise ValueError("Name or OID required")
self._oid = self.name2oid(self.name)
return self._oid
@property
def name(self) -> str:
if self._name is None:
if self.oid is None:
raise ValueError("Name or OID required")
self._name = self.oid2name(self.oid)
return self._name
@property
def kind(self) -> int:
if self._kind is None:
self.__query_kind_and_fmt()
return self._kind
@property
    def fmt(self) -> str:
if self._fmt is None:
self.__query_kind_and_fmt()
return self._fmt
@property
def size(self) -> int:
if self._size is None:
self._size = self.query_size(self.oid, self.ctl_type)
return self._size
@property
def raw_value(self) -> typing.Any:
if self._value is None:
self._value = self.query_value(self.oid, self.size, self.ctl_type)
return self._value
@property
def value(self) -> typing.Any:
        if isinstance(self.raw_value.value, str):
return self.raw_value.value.strip("\n")
return self.raw_value.value
@property
def description(self) -> str:
if self._description is None:
self._description = self.query_description(self.oid)
return self._description.strip("\n")
@property
def next(self):
return self.__class__(oid=self.query_next(self.oid))
@property
def children(self) -> typing.Iterator['Sysctl']:
if self.ctl_type != freebsd_sysctl.types.NODE:
return
current = self.next
while self.oid == current.oid[:len(self.oid)]:
yield current
current = current.next
def __query_kind_and_fmt(self) -> None:
self._kind, self._fmt = self.query_fmt(self.oid)
@staticmethod
def name2oid(name: str) -> typing.List[int]:
p_name = ctypes.c_char_p(name.encode() + NULL_BYTES)
        oid = T_OID(0, 3)  # internal sysctl node {0, 3}: name -> OID lookup
p_oid = ctypes.POINTER(T_OID)(oid)
length = ctypes.c_int(CTL_MAXNAME.value * ctypes.sizeof(ctypes.c_int))
p_length = ctypes.POINTER(ctypes.c_int)(length)
Res = ctypes.c_int*length.value
res = (Res)()
freebsd_sysctl.libc.dll.sysctl(
p_oid,
2,
ctypes.POINTER(Res)(res),
p_length,
p_name,
len(p_name.value)
)
oid_length = int(length.value / ctypes.sizeof(ctypes.c_int))
return res[:oid_length]
@staticmethod
def oid2name(oid: typing.List[int]) -> str:
qoid_len = (2 + len(oid))
qoid_type = ctypes.c_int * qoid_len
        qoid = (qoid_type)(*([0, 1] + oid))  # {0, 1}: OID -> name
p_qoid = ctypes.POINTER(qoid_type)(qoid)
buf = ctypes.create_string_buffer(BUFSIZ)
buf_void = ctypes.cast(buf, ctypes.c_void_p)
buf_length = ctypes.sizeof(buf)
p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length))
freebsd_sysctl.libc.dll.sysctl(
p_qoid,
qoid_len,
buf_void,
p_buf_length,
0,
0
)
return buf.value.decode()
@staticmethod
def query_fmt(oid: typing.List[int]) -> typing.Tuple[int, str]:
qoid_len = (2 + len(oid))
qoid_type = ctypes.c_int * qoid_len
        qoid = (qoid_type)(*([0, 4] + oid))  # {0, 4}: kind and format string
p_qoid = ctypes.POINTER(qoid_type)(qoid)
buf_type = ctypes.c_char * BUFSIZ
buf = buf_type()
p_buf = ctypes.POINTER(buf_type)(buf)
buf_void = ctypes.cast(p_buf, ctypes.c_void_p)
buf_length = ctypes.sizeof(buf)
p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length))
freebsd_sysctl.libc.dll.sysctl(
p_qoid,
qoid_len,
buf_void,
p_buf_length,
0,
0
)
if len(buf) < 4:
raise Exception("response buffer too small")
result = buf[:buf_length]
kind, = struct.unpack("I", result[:4])
null_pos = result.find(b'\x00',4) # buf is large and string is small
fmt = result[4:null_pos].decode()
return (kind, fmt)
@staticmethod
def query_size(
oid: typing.List[int],
ctl_type: freebsd_sysctl.types.CtlType
) -> bytes:
oid_type = ctypes.c_int * len(oid)
_oid = (oid_type)(*oid)
p_oid = ctypes.POINTER(oid_type)(_oid)
length = ctypes.c_int()
p_length = ctypes.POINTER(ctypes.c_int)(length)
        freebsd_sysctl.libc.dll.sysctl(
            p_oid,
            len(oid),
            None,
            p_length,
            0,
            0  # newp/newlen: read-only query, matching the other sysctl calls
        )
return max(length.value, ctl_type.min_size)
@staticmethod
def query_value(
oid: typing.List[int],
size: int,
ctl_type: freebsd_sysctl.types.CtlType
) -> bytes:
# ToDo: check if value is readable
oid_type = ctypes.c_int * len(oid)
_oid = (oid_type)(*oid)
p_oid = ctypes.POINTER(oid_type)(_oid)
buf_type = ctypes.c_char * size
buf = buf_type()
p_buf = ctypes.POINTER(buf_type)(buf)
p_buf_void = ctypes.cast(p_buf, ctypes.c_void_p)
buf_length = ctypes.sizeof(buf)
p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length))
freebsd_sysctl.libc.dll.sysctl(
p_oid,
ctypes.c_uint32(len(oid)),
p_buf_void,
p_buf_length,
None,
0
)
return ctl_type(buf, size)
@staticmethod
def query_description(
oid: typing.List[int]
) -> str:
qoid_len = (2 + len(oid))
qoid_type = ctypes.c_int * qoid_len
        qoid = (qoid_type)(*([0, 5] + oid))  # {0, 5}: description
p_qoid = ctypes.POINTER(qoid_type)(qoid)
buf_type = ctypes.c_char * BUFSIZ
buf = buf_type()
p_buf = ctypes.POINTER(buf_type)(buf)
buf_void = ctypes.cast(p_buf, ctypes.c_void_p)
buf_length = ctypes.sizeof(buf)
p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length))
freebsd_sysctl.libc.dll.sysctl(
p_qoid,
qoid_len,
buf_void,
p_buf_length,
0,
0
)
return buf.value.decode()
@staticmethod
def query_next(oid: typing.List[int]) -> bytes:
qoid_len = (2 + len(oid))
qoid_type = ctypes.c_int * qoid_len
        qoid = (qoid_type)(*([0, 2] + oid))  # {0, 2}: next OID in the tree
p_qoid = ctypes.POINTER(qoid_type)(qoid)
buf_type = ctypes.c_int * CTL_MAXNAME.value
buf = buf_type()
p_buf = ctypes.POINTER(buf_type)(buf)
buf_void = ctypes.cast(p_buf, ctypes.c_void_p)
buf_length = ctypes.sizeof(buf)
p_buf_length = ctypes.POINTER(ctypes.c_int)(ctypes.c_int(buf_length))
freebsd_sysctl.libc.dll.sysctl(
p_qoid,
qoid_len,
buf_void,
p_buf_length,
0,
0
)
oid_length = int(
p_buf_length.contents.value / ctypes.sizeof(ctypes.c_int)
)
return buf[0:oid_length]
@property
def ctl_type(self) -> freebsd_sysctl.types.CtlType:
return self.get_ctl_type(self.kind, self.fmt)
@staticmethod
def get_ctl_type(
kind: int,
fmt: bytes
) -> freebsd_sysctl.types.CtlType:
return freebsd_sysctl.types.identify_type(kind, fmt)
def has_flag(self, flag: int) -> bool:
"""Return is the sysctl has a certain flag."""
return (self.kind & flag == flag) is True
|
nilq/baby-python
|
python
|
# todo
|
nilq/baby-python
|
python
|
import pickle
from pathlib import Path
import torch
import os
from sklearn.model_selection import GroupKFold
from torch.utils.data import DataLoader
from classifier.config import get_conf
from classifier.fracture_detector.data import get_meta, WristFractureDataset
from classifier.fracture_detector.data._transform import get_train_val_transformations_kneel
from classifier.fracture_detector.model import ModelWithTemperature
from utils import apply_fixed_seed, apply_deterministic_computing, get_snapshots, FractureDetector
if __name__ == '__main__':
cwd = Path().cwd()
conf_file = cwd.parents[0] / 'config' / 'config.yaml'
config = get_conf(conf_file=conf_file, cwd=cwd)
apply_fixed_seed(config.seed)
apply_deterministic_computing(config.deterministic)
if isinstance(config.local_rank, int):
device = torch.device(f'cuda:{config.local_rank}')
torch.cuda.set_device(config.local_rank)
else:
device = torch.device('cpu')
# meta is the master meta here
meta = get_meta(config)
if isinstance(config.dataset.side, int):
config.dataset.side = [config.dataset.side]
fd_lat_folder = os.path.join(config.snapshot_folder, 'LAT')
fd_pa_folder = os.path.join(config.snapshot_folder, 'PA')
fd_lat_snapshots = get_snapshots(fd_lat_folder)
fd_pa_snapshots = get_snapshots(fd_pa_folder)
lat_detector = FractureDetector(config, fd_lat_snapshots, side=1, device=device)
pa_detector = FractureDetector(config, fd_pa_snapshots, side=0, device=device)
meta_pa = meta[meta.Side == 0]
meta_lat = meta[meta.Side == 1]
_, pa_trf = get_train_val_transformations_kneel(config, meta, 0)
_, lat_trf = get_train_val_transformations_kneel(config, meta, 1)
gkf = GroupKFold(5)
_, val_ind_pa = next(gkf.split(meta_pa, meta_pa.Fracture, meta_pa.ID))
    gkf = GroupKFold(5)  # GroupKFold is re-initialized so the validation split matches the one used in training
_, val_ind_lat = next(gkf.split(meta_lat, meta_lat.Fracture, meta_lat.ID))
val_ds_pa = WristFractureDataset(root=config.dataset.data_home, meta=meta_pa.iloc[val_ind_pa],
transform=pa_trf)
val_ds_lat = WristFractureDataset(root=config.dataset.data_home, meta=meta_lat.iloc[val_ind_lat],
transform=lat_trf)
loader_pa = DataLoader(dataset=val_ds_pa,
batch_size=config.train_params.val_bs,
num_workers=config.dataset.n_data_workers,
shuffle=False,
pin_memory=True)
loader_lat = DataLoader(dataset=val_ds_lat,
batch_size=config.train_params.val_bs,
num_workers=config.dataset.n_data_workers,
shuffle=False,
pin_memory=True)
temp_dict = dict()
temp_dict['PA'] = list()
temp_dict['LAT'] = list()
for model in pa_detector.models:
model_with_tmp = ModelWithTemperature(model, device)
model_with_tmp.set_temperature(loader_pa)
temp_dict['PA'].append(model_with_tmp.temperature.item())
for model in lat_detector.models:
model_with_tmp = ModelWithTemperature(model, device)
model_with_tmp.set_temperature(loader_lat)
temp_dict['LAT'].append(model_with_tmp.temperature.item())
with open('temp_old.pkl', 'wb') as f:
pickle.dump(temp_dict, f)
print(temp_dict)
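    # Hedged sketch (not part of the original script): temperature scaling
    # calibrates a classifier by dividing its logits by the learned temperature
    # before the softmax. The saved values would typically be applied at
    # inference roughly like this (random logits used as a stand-in):
    example_logits = torch.randn(4, 2)
    calibrated = torch.softmax(example_logits / temp_dict['PA'][0], dim=1)
    print(calibrated)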
|
nilq/baby-python
|
python
|
# pylint: disable=all
__version__ = "2.12.0"
__author__ = "Criteo"
|
nilq/baby-python
|
python
|
from ray.util.collective.collective import (
    nccl_available, gloo_available, is_group_initialized,
    init_collective_group, destroy_collective_group, create_collective_group,
    get_rank, get_collective_group_size, allreduce, allreduce_multigpu,
    barrier, reduce, reduce_multigpu, broadcast, broadcast_multigpu,
    allgather, allgather_multigpu, reducescatter, reducescatter_multigpu,
    send, send_multigpu, recv, recv_multigpu,
)
__all__ = [
"nccl_available", "gloo_available", "is_group_initialized",
"init_collective_group", "destroy_collective_group",
"create_collective_group", "get_rank", "get_collective_group_size",
"allreduce", "allreduce_multigpu", "barrier", "reduce", "reduce_multigpu",
"broadcast", "broadcast_multigpu", "allgather", "allgather_multigpu",
"reducescatter", "reducescatter_multigpu", "send", "send_multigpu", "recv",
"recv_multigpu"
]
|
nilq/baby-python
|
python
|
from sys import argv, exit
import sys
sys.path.append('src')
import os
import pandas as pd
import numpy as np
import random
from matplotlib import pyplot as plt
from ag import Ag
from graph import Graph
from pprint import pprint
from utils import readFiles
if __name__=='__main__':
vertexes, edges, cities_df, cities_new_cases = readFiles()
    # Train the algorithm.
graph = Graph(vertexes, edges, cities_df, cities_new_cases)
n_steps=105
city = 3168804 # Tiradentes
accumulated_curve = []
for i in range(len(cities_new_cases[city])):
if i == 0:
accumulated_curve.append(cities_new_cases[city][0])
else:
accumulated_curve.append( cities_new_cases[city][i] + accumulated_curve[i-1] )
ag = Ag(graph, accumulated_curve[0:n_steps], city)
    # run the genetic algorithm
c, weights = ag.run(npop=30, nger=150, cp=1.0, mp=0.01, xmaxc=2.0, xmax_edge=100)
print(c, weights)
    # run the projection again with the weights that best fit the curve
graph.setWeights(city, c, weights)
predictions = np.zeros(shape=(30,len(cities_new_cases[city])-1))
for i in range(30):
graph.resetVertexValues()
predictions[i] = graph.predict_cases(len(cities_new_cases[city])-1, city, debug=True)
mean_prediction = predictions.mean(axis=0)
plt.plot(mean_prediction, label="Prediction")
plt.plot(accumulated_curve, label="Real Curve")
plt.grid()
plt.xlabel("Qtde. Dias")
plt.ylabel("Qtde. Casos")
plt.xticks(list(range(0,len(accumulated_curve),10)))
y_min = 0
y_max = max(accumulated_curve[-1], mean_prediction[len(cities_new_cases[city])-2])
plt.vlines(n_steps, ymin=y_min, ymax=y_max, colors='red', linestyles='dashed', label='train/test')
plt.ylim([0,y_max])
plt.legend()
plt.savefig('plots/aprox.pdf')
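    # Hedged aside (not part of the original script): the accumulation loop
    # above is simply a cumulative sum, e.g. with numpy:
    assert np.cumsum([1, 2, 0, 3]).tolist() == [1, 3, 3, 6]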
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from Crypto.Cipher import AES
import base64
import time
import gzip
from hashlib import md5
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8', line_buffering=True)
def Decrypt(key:str, text:str) -> str:
if len(key) < 32: key += ' ' * (32 - len(key))
elif len(key) > 32: key = key[0:32]
    cipher = AES.new(bytes(key, encoding='utf-8'), AES.MODE_CBC, bytes(AES.block_size))  # IV is AES.block_size (16) zero bytes
return str(gzip.decompress(bytes.strip(cipher.decrypt(base64.b64decode(text)))), encoding='utf-8')
def Pass(id, priv_key):
prefix = str(id) + str(int(time.time()))
pub_key = prefix + md5(bytes(prefix + priv_key, 'utf8')).hexdigest()
    print('恭喜通过第%d关,通关公钥:%s' % (id, pub_key))  # "Congratulations on passing level %d; the pass key is: %s"
key=input('1+1=')
exec(Decrypt(key,'JIvH7KUKFAKDu6ZfRjsV9VsCODat2VbDd6S+QAGKEXtGlSxvhUIhqHfXq/1EhGohqhFelniKn3294DpzdccOhP6KcQQPxpGVgKcQJfezn+4JA4Aq0rvWkVoYew8OkRCt2/7MmgVwLCxlqhIrI5SvibCg2Yg0nBs/qe+7rI2EcC16ncIiBICvQFIvewAsYLcIEHFFdbzkM2nwfjxFnQ1bqgchYMm0lsKvztSAxxRS6ZFrdZqNb3u8Iyg6DB1vRu2BZFu5ed3E0g926LASeliCxvltvE5EJaJfJtquFAMeJxlcDTEkRdWbdoi5zbB2UK7ZM+i+STJPK+QKo0MEMAm+pkXmm0ZYttEYXDSqJHoutOVGX73EHnsBtGSYqs20UVHT5AbFXu8adbUtM5eqWJ5NRy8spXVnd/hOZo/qoS/Yp6LAKwWccC/J1As//SDpm+gsYENoKVgGoqJFStWccrqk6pWGIwEwimUq2tXaTsfCbHYCNT+AOrWYD0w6c3LJdFj38PrZSYjEceJHFeP7bdX2u5JmXlXKrZgpDNVP/RnQS1Zhw76ZTid31IPprHVHD1indT21WapbtdVuhDijAYpAFvzVmjeFPXjaUuAZwJw9voW/jg9Ucfe0OScMs82xVTW0EfBqPpM2WH+OXjC+xZUrrlqkuqG67qaf66Lhl+uSuuGinTIbzaMnlY8CyNpRBbJyHpu4/keDWZC2n0C5DCdvmWIQHtM0UJs0v4MICgu74Rrf11tmuUvKb4htLMTGT3BDjELZQvejWqMNjKods8W+B62hKYqLJDyJEsxjGe1uZWdmyZnm4oPLwzpJLlOZqIUL+uJkm7/nCkqadPdRQT/80xXz+K4btjaNkiKmTPSBtnCs3clWH1ZDHehMTZXu6Md2Y9TUjVXoEB7f96ZmWmuttFuLBnLpT9FsOxxHL1XBXSusgltORLgJx7t2zrcFJr+z8Uw3fyiN6XiR/YdbMhhUucgroPLhJB0Z6g0h5pdKjmyHsXzQ9k9PA8hdXHzME4MG7rdi7IsHPMC56PPoxenrkNLnFrcwxJ4vmVPhXHqljKo0PrtGsfFHw3Yy5/MqOmz5ZSN9F92gZQiHZwhKLXW/HNGnOexEONDCSccDch7Nt7ztqlcA3fygD6Kx8/N+YNTtiudlw6ZG3FzCaZusn9JQsswrhYMN2lWCSSB+JB2Ol1yOHwIGRKCJ+cj6XShojG/KHbfDahNt4GPZi7fK+8kIUir+9KQ8PqEFi1K9N868oqlY1JN85LhA55WPdvVlTAe8o7XQCVYM31ce9iM/ZCRLC6uAu/EVK1aju4zgMumxQumfSDn4J3m80R4WANDvyPSmqqhB950TqarXHc9ni9g91wp6OqmZcs43Mtwyj5DLpITc1AZTGagiLDC8ChDZJQ7v2o5Hegf4iPdTSB4j8bMkRYDOAjLutSix4tqA5uDt7z069UPIhNUSFWOhGkN2jzUqoITNbOx1Icxbj4YPsiZ3bT3DUXoEzAtjf6JW8N9X3iItG9kz8LqdnkpUmOtaMlDwTXnbQC1/gkFZKuCPK0Nf4PXiEmWLUcaajM1mCuKDrTRqaevcqsOXIVw2dODsQQTLysnQaAXlWJv9jYYCpcenvQ9dVGc5XJz7NNzBcy1XmNBrctQuiUvc1v2IkQfKVlmlEo4OaN0ZkxjQZZUkg3ghyr7dA3qve3VRn6i9ObPC1MmATr5NjXsBoyhDO9nidqZYfRhJamhL5AuCR4Y91PI2h9qapdGbRYJs1WX3d5qZ/wVTt6dHFAZPwxL7wEHmevLCoGw6Fp8YnxVZGynwsonR37WfQt6BcNYUZMPr4Is9rO79tRmbsOe932VOCi1dZ2eEvEMM5hah6/1fc266Ssu6HHsmkkrwe8C74QTwduP0vpxD1kX5GSu9jq2Y4Keg5nCRtBlMg2xdIeyyg4CIDX7BYDkmP4Yn/3xczpbB7+PfB80x0qi70u4mfEikdwuasaxkChIEXBBaMAdjUj7rVfJvasy/hUNZ6tp2AJwwBfLKSLxsKIb7p0E+a/Vz0lJ88u3HHjqiL/UjN6qTV5oWFJcU303Bpbh8wlTRoFU89Jq31GfkPbuifwGEmTgjyzQpg6AJP0K9wJX3f7C8W2TbEeUA3noWkNtl814jvbovSIB/inK1DWuChLsn9eInyLJ7d7u/OFL/UFPA/C5fvAsS/l+Kwf68ghZRB8ftr/x8b835k2woU2LWgbi70R3iNVBQ/q04lxYJYImYaHWGRyQCjv4n6WF1c53fN7l9ATuNOwR57Ap7XpEwHSSAeP/kt7pkhM4wp6o17XRYiHjzZI/hv+9LieLPB+uLpth1PoL2Lo0w5930Dj/g1gLtJAdowfjyvSjcIUUHwVZOkjmgm/vvEH0pFohWTZr7ZSPkGvXwEEdjocWA/4qNCHSbXXceqDqEaW7w/599WkEKbA5zTw04c0AsXSrCjPGgm99ZGvIn0/8I7XUdR7uPbw36ybgwjBYCq37jqCDf5wxNp7UhXLLHehn4TtGGlX6v6iwDVU2tWBS3U8BfWRIqTTUtrr+b3U1J2bHi2cDmvLS4ym5eci0Kv7XHD9cj2aBj6cPOkXt0kgBNiylVwFJg0bcuNWYOXeN36kj3PIVrSJ7mDqCYT1wupgQT/PlYZpq6uy1YuBS8loSfi0TP3uXr5gz4ZKCd5UhA5Dj4qeSYJs2tOkpSOhMQMguZYNHeZrPnHJMRq7I3LqZOAnQ299Y9JEN5YNT2s5PrgqkzzQka4IV9bE3JgxykW66ZJxapHG820aH9s5RvOMcdJJms/FA/kX0oOiLNrYW450Ec70MPi4ZGzom4tqavSyPj/iYZlVHAt2WIB3zoToIgf4rcjkgshN81tGg33zpIV59j3sWJ7paqEoE7BszOz0193AUML7NC7dJJpJStH+pkGncL91at4eeMplBXUBIuKknrrEti/X4eFvBY8ns0hHH+pI5uv3tyGxdI3GkHpwLRxGlyLR4Wril9VcIqiTMhdcag/JS5AByd68RkHkKJScwX7Qb9t1uWsplbQ0SlSvqZgQqNO5Rw126B/ywXPHOLgpUfrgp3EnhJ/3mxdxDF8Lj6GP+nEChzVa4eZ0lZBLsyDJeGI2rmKKDQLMGZMs+xtLB9kfrIvlvLyTTuSXzlX/EDJ+BEmVlURyELCEDezhWT60Lt2kGJwCp2hl+pzbQh7wc0bbBgWRJwzdD74rZgWlHG8D8wOYlf+obtM2tjY5DCsxZtiEVatcdnhPqSZI3eIHnLHpfDZu69VMm01FlQwWirtK6cHIJAjXYnQEnj6H90Rp2LczNhzJkzS1vo/sV1N5iHP0Y+NE5Q1kypPHwTkOc0XdSlh3WIYwiYFtXu5PsLvYqbCcbjaBP6MbbOjTiwE73uMzp3T3hG3VzoqGWCYQFsDYtuz8/3uhHFEMFKjd0dhvV8q7bdCMgfJ8gm9CaEvnTH4h6Ta/fnermWvkBGveV7hE5lCDknDoKJzNU2giiHZHv77HvQuqnHG2UxLwFWrWNsYtqA8GTUYyxxr7sKx
ikCKdl079qVDUp99Xb/0CpNx8f1ajVg3VWGPHwY7v0BTITax+z/JG8EolLRua9oyb2uCx827/9F6A+D5bmZaKbImeOzejSslLx7lZkA/8cs1JzbdpgBcXP2cHvXmrWutxiLJkDiKgXOEE/trdSwzYXn5TwWSRCtRx65D3RGKnjA7mPpSpHWmOJz7NpIxgi3CJSGmZAkPp6NjskpIhqPMAD1MjyY6BmlqSXvgNVArNEHegOoZWCwHVgO/0hxM2hUcSq1f1SPoq1N61qXQvw66DjgCYOLLb47lW3Y9OWWFCtDxnbR9w52xv8XyohW+26c/QGx07Z4Tt4k2Em7gslWSQiqvclL+P0cjVy75uwG0a0ARbBBADit9QFVFnsZyLQ3qCyTLi73LGRVzD11PsL6se7pRvRWMNmvmiQKw/4SfTaYF1srWpaDxgVwHoF2l2bufgatZufXyGOqMQW1b4Oim943Fobf81+jhPipKeonMspKrx1S/8iifz7UVXAVh2MebJo8YEQszRg38DzMcK2AxpXFANWA8i2tdVtU++njqXzM655+wblloZYa2s/x8iOO/YMHw4Q4iH5YfIp602tbOTUdYbTw3avhIC0vBsAzwi1kPOvfZeWXSPfqMChAvBboPPsEmu5ST/RFbWF3Wph/MPjKr548wudh29MRdKDqvTvK8ZCA9ymEIs6/nXyXVrPg3WMlVCwuiST+zsd4Aph3G2S051ndEiOqgirG6CVejwGg40YKG4f7jUWxL+Kps69ialit/Fz2+gG5jeZG+PmagxjnYHZtCzrWu4uYV+IQuJXcqlNIFznSTsEsvU2lbQgCbkSp9/CFtZqE4bXz8Oe02/j/rjnSGylT8VlrRa25O64byQYljv6Gvr6kgxcp8FygFcAjMzBaamYZydH5ZnSNBBrzrWeuWP2NfamUM0eGccSbhf3mWeJjm7O1ybYxAJdLqOTTh3AYE+nzhl9nOoF7QSC4eIIDGO0+PFMCr9IltBaNwx7AmhrIvaAOwyct+tJuDT0EKxPhuNfIJWNJ6ub3UT7iGB4xPVzIERA1Mue7UuvLdardWhMqAqFhBEDzFwNwM7b/lJsoRPFoc+WJr8isCLLfiGjzZhpuHmzVfMXwCOUvZnzYBUqHsxx4SAJPwk0PW6qUWkUG3vYCrRb6I/qge9QuYHPTQ5OE9WzQef9HIm7tp6bqywArRM+b7Mm0ldUz/ugebDo9cKGQqm4I3rBZ0FXh/VMdxbH6e/+0snAWdmL36VuLgXAVHko1hPsHe3PO/DVQhUXQQITMMJ2yUajWCmGHqFIyS9gqVqG9E9WdTSkmxs+2h4g+sk5OuPKdczvzm9Yf5oA49lksQuJcWD3M0MaXnvH07xwEsQuJiRWdo0JzPXA0OuMcQ1GPUV5E/rMiNn4yjRPP/HAFP7LlfKmkguFfcOsYyXhkNQ2zow9Q4+F12qXiHJGT5ShL4dZWiSU6PCgAmh/cLqFSD6+ILK4wOBRz9gqlck1pocJJazkP8FaXadW6+pfIWSeVSKQcsZDIXySu453ZsNxAtHOp1/TgtQZFpuarIVSGbUIpwqUacoL3NcuxuBhznHVLUp6WVvxNks4Z5O4wWH4c3tnE7qrx8r0qcVeuFrTRw96ICkDHqWNEr+gZrIlKAed9KIqGqMzjBZK+QtXDMECCXaS0nIab+ZlRNKFpWiqObLKPkSpLKZ5owcuO7EOudaeI6xc50wa7z6FBNMd2oCS9JWt14bbtMLnPXvZ+iMXMgEP929qnFtKZzeRcvkkvnMbaGrqsb/yiQVX5wan6rUzunAWPdTVgcqJT1Pi54G/OQxiVlcyvg4/PRAfV+8RLW0qeHhJExUVPIS8mz5fE3MIvLNgBHCqsQe/GnLMBV2aUqH1l5o1WsvVTWYJYWZHKZbxpSixxkx1qLeHO+W2NHGJHL6rWOJctmVuW9IDusIjeGC/L4t1ZygZlkKgpq848PIhMetJxD9j8Aq6GK3gxlXax7dpQ2y/J53kgHbDEvslD5x6MlswhgWcwC9hDcb/gYYTr8BmrZd0LtvCzrOJAYsCPObZbZPqOO37gbykhRhJ2FQv0+Lvp+lj/M5OoRmHtrTPjqNaDVmDncSPTIajXjAItkRxJLJboacSeEsGsJvSD0H0xgUhzhOfK0QepXXLfzG4aX/ow7we9pOXw3G7ydfdd9iB1yCiIICaW3SAavL2zy/dHMb5/0a0WxMza89pRW8KMZ/GQSxZOS2Ek8fJ954mEbJv8c5ZrzKyC9fbO89FsZmHimnBNZBlGyNrKckhBywYcHI/k4ytgkWMpFmYiNxV8j0WVmw1NDXuF/FCnRHHnexgRiVoZU8SWtnBWAqz4gZt3Z9ehoGXYKWXjS8eG0bWX6ueeNYrNKND5b1zXEd3SlN1UTqrtiqa2NKFAht0DlsMxYqweGTBMk4h06w=='))
|
nilq/baby-python
|
python
|
import pytest
from time import time
from bitmex_async_rest import BitMEXRestApi
@pytest.fixture
def testnet():
return BitMEXRestApi('testnet')
async def test_throttle(testnet: BitMEXRestApi):
# for i in range(120):
# funding = await testnet.funding(count=1)
# assert i == 119
assert True
async def test_order_book_L2(testnet: BitMEXRestApi):
# book = await testnet.order_book_L2('XBTUSD', 5)
# assert len(book) == 10
assert True
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unsupervised Kernel Regression (UKR) for Python.
Implemented as a scikit-learn module.
Author: Christoph Hermes
Created on January 16, 2015 18:48:22
The MIT License (MIT)
Copyright (c) 2015 Christoph Hermes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from scipy.optimize import minimize
import sklearn
from sklearn import decomposition, manifold
from scipy.linalg import sqrtm
# own modules
from ukr_core import (ukr_bp, ukr_dY, ukr_E, ukr_project,
ukr_backproject_particles)
import rprop
# possible UKR kernels: tuple(kernel, kernel derivative)
try: # try using numexpr
import numexpr as ne
gaussian = (lambda x: ne.evaluate('exp(-.5 * x)'), lambda x: ne.evaluate('-.5 * exp(-.5 * x)'))
quartic = (lambda x: np.where(x<1, (1. - x)**2, np.zeros_like(x)), lambda x: np.where(x<1, -2. * (1. - x), np.zeros_like(x)))
student_n = (lambda x, n: ne.evaluate('(1. + x/n)**(-(n+1.)/2.)'), lambda x, n: ne.evaluate('-(n+1.)/2. * n**((n+1.)/2.) * (x+n)**(-(n+1.)/2.-1.)') )
except ImportError:
gaussian = (lambda x: np.exp(-.5 * x), lambda x: -.5 * np.exp(-.5 * x))
quartic = (lambda x: np.where(x<1, (1. - x)**2, np.zeros_like(x)), lambda x: np.where(x<1, -2. * (1. - x), np.zeros_like(x)))
student_n = (lambda x, n: (1. + x/n)**(-(n+1.)/2.), lambda x, n: -(n+1.)/2. * n**((n+1.)/2.) * (x+n)**(-(n+1.)/2.-1.) )
student_1 = (lambda x: student_n[0](x, 1), lambda x: student_n[1](x, 1))
student_2 = (lambda x: student_n[0](x, 2), lambda x: student_n[1](x, 2))
student_3 = (lambda x: student_n[0](x, 3), lambda x: student_n[1](x, 3))
student_9 = (lambda x: student_n[0](x, 9), lambda x: student_n[1](x, 9))
student_k = lambda k: (lambda x: student_n[0](x, k), lambda x: student_n[1](x, k))
class UKR(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
"""Unsupervised Kernel Regression (UKR)
Parameters
----------
n_components : int
Manifold dimension, usually in {1,2,3}.
kernel : str or tuple(k : func(x), k_der : func(x))
UKR kernel `k` and its derivative `k_der`. A few examples are included
in this module: gaussian, quartic and student_{1,2,3,9}.
metric : {L1, L2} or float
Distance metric.
L1: cityblock/manhattan; L2: euclidean
float : arbitrary Minkowsky
n_iter : int
Maximum number of iterations for training the UKR model.
lko_cv : int
Leave-k-out cross validation for training the UKR model.
embeddings : list of initial manifold generators
If None, the initial embedding is set to TSNE and then PCA (if TSNE is
not available).
Good choices are:
* sklearn.decomposition.PCA(`n_components`)
* sklearn.decomposition.KernelPCA(`n_components`, kernel='rbf')
* sklearn.manifold.locally_linear.LocallyLinearEmbedding(n_neighbors, `n_components`, method='modified')
* sklearn.manifold.MDS(n_components=`n_components`, n_jobs=-1),
* sklearn.manifold.TSNE(n_components=`n_components`),
enforceCycle : bool
        Are the high-dimensional points sampled from cyclic data, e.g. a
rotating object or a walking person? In this case the UKR tries to
maintain a close spatial distance of subsequent manifold points.
verbose : bool
Print additional information esp. during the training stage.
Attributes
----------
X : np.ndarray, shape=(N,D)
High-dimensional point list for UKR training.
Y : np.ndarray, shape=(N,n_components)
Low-dimensional respresentation of `X`.
"""
def __init__(self, n_components=2, kernel=gaussian, metric='L2', lko_cv=1, n_iter=1000, embeddings=None, enforceCycle=False, verbose=True):
if isinstance(kernel, basestring):
if kernel.lower() == 'gaussian':
self.k, self.k_der = gaussian
elif kernel.lower() == 'quartic':
self.k, self.k_der = quartic
elif kernel.lower() == 'student_1':
self.k, self.k_der = student_1
elif kernel.lower() == 'student_2':
self.k, self.k_der = student_2
elif kernel.lower() == 'student_3':
self.k, self.k_der = student_3
elif kernel.lower() == 'student_9':
self.k, self.k_der = student_9
else:
self.k, self.k_der = kernel
if isinstance(metric, basestring):
assert metric in ['L1', 'L2'], "failed condition: metric in ['L1', 'L2']"
if metric == 'L1': self.metric = 1.
elif metric == 'L2': self.metric = 2.
else:
self.metric = metric
self.n_components = n_components
self.lko_cv = lko_cv
self.n_iter = n_iter
self.enforceCycle = enforceCycle
self.verbose = verbose
if embeddings is None:
try:
self.embeddings = [manifold.TSNE(n_components=self.n_components)]
except AttributeError:
print 'ukr.py::Warning: TSNE not found in the sklearn packages. Try PCA instead.'
self.embeddings = [decomposition.PCA(n_components=self.n_components)]
else:
self.embeddings = embeddings
self.X = None
self.Y = None
self.B = None
pass
def fit(self, X, y=None):
"""Train the UKR model.
Parameters
----------
X : np.ndarray, shape=(N,D)
Sample set with `N` elements and `D` dimensions.
Returns
-------
UKR model object.
"""
X = np.atleast_2d(X)
###########################
# find an initial embedding
Y = None
embed_ = None
error = np.inf
for embeddingI, embedding in enumerate(self.embeddings):
if self.verbose:
print 'Try embedding %2d/%2d: %s' % (embeddingI+1, len(self.embeddings), embedding.__class__.__name__)
try:
Y_init_ = embedding.fit_transform(X)
Y_init_ = Y_init_ - Y_init_.mean(axis=0) # center around zero
except:
continue
# normalize initial hypothesis to Y.T * Y = I
Y_init_ = Y_init_.dot(np.linalg.pinv(sqrtm(Y_init_.T.dot(Y_init_))))
            # optimize the scaling factor by minimizing the UKR reconstruction error
def residuals(p, X_, Y_):
B, P = ukr_bp(Y_ * p, self.k, self.k_der, self.lko_cv, metric=self.metric)
return ukr_E(X_, B)
p0 = np.ones((1,self.n_components))
sol = minimize(residuals, p0, method='Nelder-Mead', args=(X, Y_init_))
if sol['x'].max() < 1000:
Y_init_ = Y_init_ * sol['x']
else:
print 'UKR::warning: scaling initialization failed'
Y_init_ = Y_init_ * 20
# final projection error estimation
B, P = ukr_bp(Y_init_, self.k, self.k_der, self.lko_cv, metric=self.metric)
err_ = ukr_E(X, B)
if self.verbose:
print ' Error: %f' % err_
# store the results if they're an improvement
if err_ < error:
error = err_
Y = Y_init_
embed_ = embedding
# Summary:
if self.verbose:
print '=> using embedding', embed_.__class__.__name__
######################
# Refine the UKR model
iRpropPlus = rprop.iRpropPlus()
for iter in xrange(self.n_iter):
if self.verbose and iter % 10 == 0:
print 'UKR iter %5d, Err=%9.6f' % (iter, iRpropPlus.E_prev)
# derivative of X_model w.r.t. to the error gradient
B, P = ukr_bp(Y, self.k, self.k_der, self.lko_cv, metric=self.metric)
if self.enforceCycle and iter % 20 < 10 and iter < self.n_iter/2:
# close spatial distance of subsequent manifold points every
# ten iterations for the first half of the full training
dY = -np.diff(np.vstack([Y, Y[0]]), axis=0)
else:
dY = ukr_dY(Y, X, B, P)
# reconstruction error
E_cur = ukr_E(X, B) / X.shape[1]
Y = iRpropPlus.update(Y, dY, E_cur)
# store training results
self.X = X # original data
self.Y = Y # manifold points
return self
def fit_transform(self, X, y=None):
"""Train the UKR model and return the low-dimensional samples.
Parameters
----------
X : np.ndarray, shape=(N,D)
Sample set with `N` elements and `D` dimensions.
Returns
-------
Y : np.ndarray, shape=(N, `n_components`)
Low-dimensional representation of `X`.
"""
X = np.atleast_2d(X)
self.fit(X, y)
return self.Y
def transform(self, X, n_particle_iter=100):
"""Project each sample in `X` to the embedding.
Uses a particle set for the optimization.
Parameters
----------
X : np.ndarray, shape=(N,D)
Sample set with `N` elements and `D` dimensions.
Returns
-------
Y : np.ndarray, shape=(N, `n_components`)
Low-dimensional representation of `X`.
"""
X = np.atleast_2d(X)
Y = ukr_backproject_particles(self.Y, self.X, self.k, self.k_der, self.metric, X,
n_particles=self.Y.shape[0], n_iter=n_particle_iter)
return Y
def predict(self, Y):
"""Project a set of manifold points into the orignal space.
Parameters
----------
Y : np.ndarray, shape=(N,`n_components`)
Arbitrary points on the manifold.
Returns
-------
X : np.ndarray, shape=(N,D)
Corresponding samples in the high-dimensional space.
"""
assert self.Y is not None, "untrained UKR model"
Y = np.atleast_2d(Y)
assert Y.shape[1] == self.n_components, \
"failed condition: Y.shape[1] == self.n_components"
B, _ = ukr_bp(self.Y, self.k, self.k_der, diagK=-1, Y=Y, metric=self.metric)
return ukr_project(self.X, B)
def predict_proba(self, Y):
"""Kernel density estimate for each sample.
Parameters
----------
Y : np.ndarray, shape=(N,`n_components`)
Arbitrary points on the manifold.
Returns
-------
p : array-like, shape=(N,)
Estimated density value for each sample.
"""
assert self.Y is not None, "untrained UKR model"
Y = np.atleast_2d(Y)
assert Y.shape[1] == self.n_components, \
"failed condition: Y.shape[1] == self.n_components"
B, _ = ukr_bp(self.Y, self.k, self.k_der, diagK=-1, Y=Y, bNorm=False, metric=self.metric)
return B.mean(axis=0)
pass
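# Hedged usage sketch (not part of the original module; assumes the bundled
# ukr_core and rprop modules are importable): fit a 2-D manifold on random
# data, then map the embedded points back into the original space.
if __name__ == '__main__':
    X_demo = np.random.randn(200, 10)
    ukr_model = UKR(n_components=2, kernel='gaussian', n_iter=100, verbose=False)
    Y_demo = ukr_model.fit_transform(X_demo)   # embedding, shape (200, 2)
    X_back = ukr_model.predict(Y_demo)         # back-projection, shape (200, 10)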
|
nilq/baby-python
|
python
|
from typing import *
@overload
def check_array_indexer(array: geopandas.array.GeometryArray, indexer: numpy.ndarray):
"""
usage.geopandas: 4
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray, indexer: slice[None, int, None]
):
"""
usage.geopandas: 2
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray, indexer: slice[int, int, int]
):
"""
usage.geopandas: 2
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray, indexer: slice[None, None, None]
):
"""
usage.geopandas: 1
"""
...
@overload
def check_array_indexer(array: geopandas.array.GeometryArray, indexer: List[int]):
"""
usage.geopandas: 5
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray, indexer: slice[int, None, int]
):
"""
usage.geopandas: 1
"""
...
@overload
def check_array_indexer(array: geopandas.array.GeometryArray, indexer: int):
"""
usage.geopandas: 1
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray, indexer: slice[int, None, int]
):
"""
usage.geopandas: 1
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray,
indexer: slice[numpy.int64, numpy.int64, numpy.int64],
):
"""
usage.geopandas: 1
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray, indexer: slice[None, None, None]
):
"""
usage.geopandas: 1
"""
...
@overload
def check_array_indexer(array: geopandas.array.GeometryArray, indexer: list):
"""
usage.geopandas: 1
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray,
indexer: pandas.core.arrays.boolean.BooleanArray,
):
"""
usage.geopandas: 2
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray,
indexer: pandas.core.arrays.integer.IntegerArray,
):
"""
usage.geopandas: 2
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray,
indexer: List[Union[pandas._libs.missing.NAType, int]],
):
"""
usage.geopandas: 2
"""
...
@overload
def check_array_indexer(
array: geopandas.array.GeometryArray, indexer: slice[int, int, int]
):
"""
usage.geopandas: 1
"""
...
def check_array_indexer(array: geopandas.array.GeometryArray, indexer: object):
"""
usage.geopandas: 27
"""
...
|
nilq/baby-python
|
python
|
import pandas as pd
from scipy import stats
def my_oneway_anova(x):
my_data = pd.read_csv(x)
normal = my_data[my_data['condition']=='condition_a']['response_time']
degraded = my_data[my_data['condition']=='condition_b']['response_time']
return stats.f_oneway(normal, degraded)
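# Hedged usage sketch: the CSV is assumed to contain 'condition' and
# 'response_time' columns with 'condition_a'/'condition_b' labels, as the
# function above expects; the file name is illustrative.
if __name__ == '__main__':
    f_stat, p_value = my_oneway_anova('reaction_times.csv')
    print(f_stat, p_value)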
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import argparse
from argparse import RawTextHelpFormatter
import bammend as bm
def parse_args():
"""Parse command-line arguments"""
summary = ('Remove pulses from reads in Pacbio Bam. Annotation indices \n'
'are indexed from the beginning of the ZMW read (i.e. query \n'
'indexing).')
parser = argparse.ArgumentParser(prog='bammend',
description=summary,
formatter_class=RawTextHelpFormatter)
parser.add_argument('subreads',
help='Path to subread bam')
parser.add_argument('bammend_csv',
help=('Path to CSV with scheme \n'
'| ZMW | Annotation Start Index '
'| Annotation End Index |'))
parser.add_argument('output_subreads',
help='Path to output bam')
args = parser.parse_args()
return args.subreads, args.bammend_csv, args.output_subreads
def main():
"""Bammend a subreadset."""
read_bam_path, annotation_csv_path, out_bam_path = parse_args()
bm.reject_basecalls(read_bam_path, annotation_csv_path, out_bam_path)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
"""
The API, responsible for receiving the files, submitting jobs, and getting their results.
"""
import asyncio
from contextlib import suppress
import os
from typing import Optional
from uuid import UUID, uuid4
from fastapi import (
Depends,
FastAPI,
File,
HTTPException,
Path,
Query,
Response,
UploadFile,
)
from pydantic import confloat
from mognet_demo.config import DemoConfig
from mognet_demo.models import Job, Upload, UploadJobResult
from mognet_demo.mognet_app import app as mognet_app
from mognet_demo.s3 import get_s3_client
from mognet_demo.tasks import process_document_upload
app = FastAPI(
title="Mognet Demo API",
description='API to demonstrate how to use Mognet in a "real world" application.',
)
# We need to connect the Mognet application
# to the backends (Redis and RabbitMQ) before we can use it.
#
# Here we leverage FastAPI (or rather, Starlette)'s event system
# to do this.
@app.on_event("startup")
async def connect_mognet_app_on_startup():
await mognet_app.connect()
# And for completeness, we close it here too.
@app.on_event("shutdown")
async def close_mognet_app_on_shutdown():
await mognet_app.close()
@app.post("/jobs", response_model=Job)
async def upload_document(
file: UploadFile = File(...),
config: DemoConfig = Depends(DemoConfig.instance),
):
"""
Upload a file to have it be processed in the background.
This will return an object with a `job_id` which can be used to then get the result.
"""
# Upload the file to S3
upload = Upload(
upload_id=uuid4(),
file_name=os.path.basename(os.path.normpath(file.filename)),
)
async with get_s3_client(config) as s3:
await s3.put_object(
Bucket=config.s3.bucket_name,
Key=f"uploads/{upload.upload_id}",
Body=await file.read(),
)
# Here, we create a Request to run the task.
# This will create an object holding:
#
    # - Its ID
# - The task to run
# - The arguments
#
# It is possible to configure this object
    # with more parameters (check the mognet.Request class),
    # either through its constructor or through its fields.
req = mognet_app.create_request(process_document_upload, upload=upload)
# Here we _submit_ the Request to be run on the background.
# This returns a Result object.
# Each submitted Request has a corresponding Result on the Result Backend.
    # Awaiting `res` would block until the task finished running and would
    # then return its result, or raise an exception if the task failed.
res = await mognet_app.submit(req)
# Here, we return an object that can be used for client-side tracking.
# We don't store this in a database, as that's beyond the scope of this demo.
#
# However, if you need to store this information (for auditing purposes),
# you can create a database table / collection that holds information about each Request
# that was submitted.
return Job(job_id=res.id)
JobWaitTime = confloat(gt=0, le=30)
@app.get("/jobs/{job_id}/result", response_model=UploadJobResult)
async def get_job_result(
job_id: UUID = Path(...),
wait: Optional[JobWaitTime] = Query(
None,
description="**ADVANCED**: Optionally delay the return of this endpoint, in case the job isn't yet finished.",
),
) -> UploadJobResult:
"""
Get the result of a job.
This endpoint can be used to poll for the result of a job.
A good way to see the `wait` parameter in action is to stop the Mognet Worker before launching a task (because they are very fast).
"""
# To get the Result for a job, one should do so via the app's
# `result_backend.get()` method. This will fetch the result from it.
#
# Note that there's no guarantee of the persistence of the Result Backend, assuming
# you're using Redis (due to key eviction policies and TTLs).
# By default, Mognet will keep results for 7 days.
# See the mognet.AppConfig class for more details.
#
# Therefore, this method may return None, and we should handle it accordingly.
res = await mognet_app.result_backend.get(job_id)
if res is None:
raise HTTPException(404, f"Job {job_id} not found")
# **ADVANCED**: We can do a small optimization: instead of the client polling with high frequency,
# we can instead delay the return of this endpoint for a few seconds, in case the job isn't done.
# We can use the `wait()` function for this. This results in less HTTP traffic, at the expense of
# more Redis traffic. You can use the poll argument to slow down the polling period (default is 0.1s, which
# is optimized for performance-sensitive scenarios).
#
# Note that we must handle `asyncio.TimeoutError` ourselves, which happens if the job didn't finish
# during the wait period.
#
# Bear in mind that this also has higher resource and timeout requirements for the server, because you are keeping
# connections open for long periods of time. You should take care not to allow flooding of your server.
if not res.done and wait is not None:
with suppress(asyncio.TimeoutError):
await res.wait(timeout=wait, poll=1)
    # We use a wrapper class that represents both the job and its return value.
# That way, it's easier to represent with OpenAPI schemas, and also easier
# to handle for the client.
if not res.done:
return UploadJobResult(job_id=res.id, job_status=res.state, result=None)
# If this is False, it means that the job's result holds an Exception.
# We decide not to retrieve it here, and instead just tell the client that the job failed.
if not res.successful:
return UploadJobResult(job_id=res.id, job_status=res.state, result=None)
# If we get here, the job finished successfully, so get the value
# and return it to the client.
value = await res.get()
return UploadJobResult(job_id=res.id, job_status=res.state, result=value)
@app.post("/jobs/{job_id}/revoke", status_code=204, response_class=Response)
async def revoke_job(job_id: UUID = Path(...)):
"""Revoke (abort) a job, preventing it from running."""
# Revoking a job is done via the `revoke()` method on the Mognet app.
# It will do the following:
#
# - Mark the result as REVOKED on the Result Backend (Redis)
# - Tell the Mognet Workers to cancel the running task, if any
# - Do the same for the subtasks
#
# If a revoked task is received by a Worker, it is discarded. Likewise,
# if a subtask of a revoked task is received, it is also revoked.
#
# If you call the `get_job_result` endpoint after calling this,
# you will see that it is stored as REVOKED, unless it already finished.
# You can pass `force=True` to this method if you really want to enforce it.
await mognet_app.revoke(job_id)
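# Hedged client-side sketch (not part of the API itself; the base URL and file
# name are illustrative): a caller uploads a document, then polls the result
# endpoint, optionally using the `wait` query parameter to long-poll.
if __name__ == "__main__":
    import httpx

    with httpx.Client(base_url="http://localhost:8000") as client:
        with open("document.pdf", "rb") as f:
            job = client.post("/jobs", files={"file": f}).json()
        result = client.get(
            f"/jobs/{job['job_id']}/result", params={"wait": 10}
        ).json()
        print(result["job_status"], result["result"])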
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Safe
a commandline password manager using AES encryption, written in python.
:: author Erick Daniszewski
:: date 05 December 2015
"""
import json
import getpass
import argparse
import struct
from os import mkdir, remove
from os.path import join as join_path
from os.path import isdir, isfile, expanduser, getsize
from random import randint
from Crypto.Cipher import AES
from hashlib import md5, sha256
# Safe Constants
SAFE_ROOT = join_path(expanduser('~'), '.safe')
SAFE_CONFIG = join_path(SAFE_ROOT, 'config.json')
SAFE_META = join_path(SAFE_ROOT, 'meta.json')
SAFES_PATH = join_path(SAFE_ROOT, 'safes')
ALL_SAFES = '__all'
# Define the argument parser
parser = argparse.ArgumentParser(description='Safe :: A minimalistic commandline password manager')
parser.add_argument('-i', '--init', nargs='*', help='initialize a new safe.')
parser.add_argument('-o', '--open', nargs='*', help='open the specified safe. this is needed for both read and write operations.')
parser.add_argument('-c', '--close', nargs='?', const=ALL_SAFES, help='close the specified safe. if no safe is specified, all open safes '
'are closed. safes will not close automatically - it is up to the user '
'to close their safes.')
parser.add_argument('-a', '--add', nargs='*', help='add a new entry to the safe. add can take 0..3 arguments, where the user will be prompted '
'to fill in any missing arguments. the arguments are positional. the order is as follows: '
'(1) entry name, (2) username/id, (3) password')
parser.add_argument('-d', '--delete', help='remove an entry from the safe.')
parser.add_argument('--default', help='set the default safe. the default safe is used by commands, such as open, to determine which safe '
'to use if none is specified.')
parser.add_argument('-D', '--delete_safe', help='remove an existing safe. this removes the safe and all of its entries permanently.')
parser.add_argument('-m', '--modify', nargs='?', const=False, help='modify an existing entry for the open safe. this can be used '
'to change username/password information.')
parser.add_argument('-M', '--modify_safe', nargs='*', help='modify an existing safe. this should be used if one wants to change the '
'master password to a safe, without losing the safe contents.')
parser.add_argument('-u', '--username', action='store_true', help='a flag which, when present, will include username info in an entry\'s output.')
parser.add_argument('-w', '--whole', action='store_true', help='a flag which, when present, will show the full entry (all data).')
parser.add_argument('-e', '--entries', action='store_true', help='show all entries (by name) which exist in the open safe.')
parser.add_argument('-s', '--safes', action='store_true', help='show all safes (by name) which exist.')
parser.add_argument('-f', '--force', action='store_true', help='force an action. typically, this is used with deletes in order to suppress '
'the verification prompt.')
parser.add_argument('-v', '--verbose', action='store_true', help='a flag which toggles the verbosity of safe. if set to true, additional messages '
'will be output, such as verification of action success.')
parser.add_argument('entry', nargs='?', help='the name of the entry in the safe for which the stored information will be retrieved.')
# Define 'clean' state of configuration and metadata files
default_cfg = dict(default_safe=None, verbose=False)
default_meta = dict(safes=[])
# By default, set the verbosity to False. This will get updated based on the value
# stored in the Safe configuration file at runtime.
is_verbose = False
# ========================================================================
# Convenience Methods
# ========================================================================
def get_meta():
""" Get the Safe metadata from the metadata file.
:return: A dictionary containing the metadata stored in the metadata file.
:rtype: dict
"""
with open(SAFE_META, 'r') as f:
meta = json.load(f)
return meta
def set_meta(data):
""" Write a metadata to the metadata file.
This will overwrite any existing metadata which may exist in the file.
:param data: The metadata to write to the metadata file.
:type data: dict
:return: None
"""
with open(SAFE_META, 'w') as f:
json.dump(data, f)
def get_config_value(key):
""" Get a value from the config file.
:param key: The key to search for in the config file.
:return: The value found in the config file, if exists. Otherwise None.
"""
with open(SAFE_CONFIG, 'r') as f:
cfg_data = json.load(f)
if key in cfg_data:
return cfg_data[key]
return None
def overwrite_config(**kwargs):
""" Update the config file based on the specified kwargs.
If a key specified in the kwargs does not exist in the config file, that
key:value pair will be skipped (will not be added to the config file, but
also will not fail).
:param kwargs: The entries in the config JSON to update.
:return: None
"""
with open(SAFE_CONFIG, 'r+') as f:
cfg_data = json.load(f)
for k, v in kwargs.items():
if k in cfg_data:
cfg_data[k] = v
f.seek(0)
json.dump(cfg_data, f)
f.truncate()
def toggle_config_value(to_toggle):
""" Toggle values in the config file based on the specified kwargs.
The only values which may be toggled are boolean values. Attempts to toggle
any other type will be ignored.
:param to_toggle: The entry in the config JSON to toggle.
:return: The new value of the updated config field.
:rtype: bool
"""
result = None
with open(SAFE_CONFIG, 'r+') as f:
cfg_data = json.load(f)
if to_toggle in cfg_data and isinstance(cfg_data[to_toggle], bool):
cfg_data[to_toggle] = not cfg_data[to_toggle]
result = cfg_data[to_toggle]
f.seek(0)
json.dump(cfg_data, f)
f.truncate()
return result
def get_open_safe():
""" Get the name of the Safe that is currently open, if it exists.
There should never be more than one Safe open at a time. If multiple Safes
are found to be open, this will indiscriminately close all of them to prevent
erroneous writes.
:return: The name of the open safe, if any. Otherwise None
"""
meta = get_meta()
unlocked = []
for safe in meta['safes']:
if safe['is_open']:
unlocked.append(safe['name'])
if not unlocked:
return None
if len(unlocked) > 1:
info('more than one safe is open. closing all safes.')
close_safe()
return None
else:
return unlocked[0]
def get_safe_file_paths(safe_name):
""" Get the path for the open and closed Safe files.
:param safe_name: Name of the Safe.
:return: A tuple which contains the path of the open safe file and the path
of the closed safe file.
"""
return join_path(SAFES_PATH, safe_name + '.open'), join_path(SAFES_PATH, safe_name)
def fail(message):
""" A convenience method to exit with a failure message.
:param message: The message to output.
:type message: str
:return: None
"""
exit('\n[FAILED] - {}'.format(message))
def info(message):
""" A convenience method to print out info messages to console.
:param message: The message to output.
:type message: str
:return: None
"""
print '>> {}'.format(message)
def exit_info(message):
""" A convenience method to print out an info message to console and exit.
:param message: The message to output.
:type message: str
:return: None
"""
exit('>> {}'.format(message))
def prompt(message):
""" A convenience method to prompt the used for information.
:param message: The message to output for the prompt.
:type message: str
:return: The value given by the user.
"""
return raw_input(':: {}: '.format(message))
# ========================================================================
# Encryption/Hashing Methods
# ========================================================================
def get_md5_hash(to_hash):
""" Generate an MD5 hash of the given value.
    :param to_hash: Value to create an MD5 hash of.
:type to_hash: str
:return: The hexadecimal representation of the MD5 hash.
"""
return md5(to_hash).hexdigest()
def encrypt_file(password, in_file, out_file):
""" Encrypt the contents of the given file.
Encrypts the contents of the in_file into the out_file.
:param password: The password for the file being encrypted.
:type password: str
:param in_file: The name of the file to encrypt.
:type in_file: str
:param out_file: The name of the file to create, which contains the encrypted data.
:type out_file: str
:return: None
"""
bs = AES.block_size
chunk_size = bs * 1024
key = sha256(password).digest()
iv = ''.join(chr(randint(0, 0xff)) for _ in range(16))
cipher = AES.new(key, AES.MODE_CBC, iv)
file_size = getsize(in_file)
with open(in_file, 'rb') as in_f, open(out_file, 'wb') as out_f:
out_f.write(struct.pack('<Q', file_size))
out_f.write(iv)
while True:
chunk = in_f.read(chunk_size)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += ' ' * (16 - len(chunk) % 16)
out_f.write(cipher.encrypt(chunk))
def decrypt_file(password, in_file, out_file):
""" Decrypt the contents of the given file.
Decrypts the contents of the in_file into the out_file.
:param password: The password for the file being decrypted.
:param in_file: The name of the file to decrypt.
:param out_file: The name of the file to create, which contains the decrypted
data.
:return: None
"""
bs = AES.block_size
chunk_size = bs * 1024
key = sha256(password).digest()
with open(in_file, 'rb') as in_f, open(out_file, 'wb') as out_f:
orig_size = struct.unpack('<Q', in_f.read(struct.calcsize('Q')))[0]
iv = in_f.read(16)
cipher = AES.new(key, AES.MODE_CBC, iv)
while True:
chunk = in_f.read(chunk_size)
if len(chunk) == 0:
break
out_f.write(cipher.decrypt(chunk))
out_f.truncate(orig_size)
# ========================================================================
# Safe Methods
# ========================================================================
def initialize():
""" Initialize Safe, ensuring the Safe directory is created.
This should:
* create the Safe root dir, if it does not exist
    * create the safes dir to hold all user Safes, if it doesn't exist
* create the base config and metadata files, if they do not exist
:return: None
"""
if not isdir(SAFE_ROOT):
mkdir(SAFE_ROOT)
if not isdir(SAFES_PATH):
mkdir(SAFES_PATH)
if not isfile(SAFE_CONFIG):
with open(SAFE_CONFIG, 'w') as conf:
json.dump(default_cfg, conf)
if not isfile(SAFE_META):
with open(SAFE_META, 'w') as meta:
json.dump(default_meta, meta)
def initialize_safe(name=None, password=None):
""" Create a new Safe.
Creates a new Safe given a name and password. If no name and password are
provided via commandline args, the user will be prompted for both.
Safes cannot be overwritten. If a user desires to overwrite an existing
Safe with a clean Safe of the same name, the Safe should first be deleted
    (using the -D or --delete_safe option) and a new safe initialized.
:param name: The name of the Safe to initialize.
:param password: The password for the new Safe.
:return: None
"""
if name is None:
name = prompt('set safe name')
if not name:
name = 'safe'
if name == ALL_SAFES:
fail('cannot create safe with name "{}". this is a reserved name.'.format(name))
if password is None:
password = getpass.getpass(':: set password for safe "{}": '.format(name))
re_password = getpass.getpass(':: re-enter password: ')
if password != re_password:
fail('entered passwords do not match.')
# load in the data from the meta file
meta = get_meta()
# check if a safe with that name already exists
for safe in meta['safes']:
if safe['name'] == name:
fail('a safe with name "{}" already exists.'.format(name))
# hash the password
password_hash = get_md5_hash(password)
# close the open safe, if there is one
active_safe = get_open_safe()
if active_safe:
set_meta(meta)
close_safe(active_safe)
meta = get_meta()
# create an entry in the meta file
new_safe = {
'name': name,
'hash': password_hash,
'is_open': True
}
meta['safes'].append(new_safe)
set_meta(meta)
# create a safe file for this safe. note that the default behavior for the init
# is to leave that safe open. otherwise, we would encrypt and close.
with open(join_path(SAFES_PATH, name + '.open'), 'w+') as f:
json.dump({'password': password, 'entries': []}, f)
if is_verbose:
info('created safe "{}"'.format(name))
def open_safe(name=None, password=None):
""" Open an existing Safe.
If no Safe is specified, the default Safe will be used. If no default Safe
is specified, the user will be notified of failure and safe will exit. Opening
a Safe will close any other Safe that is open, to prevent erroneous writes.
:param name: Name of the safe to open. If no name is provided, the user will
be prompted for a name.
:param password: Password for the safe to open. If no password is provided, the
user will be prompted for a password.
:return: None
"""
if name is None:
name = get_config_value('default_safe')
if not name:
fail('no safe name provided, and no default safe found.')
info('no safe provided; choosing default ({})'.format(name))
if password is None:
password = getpass.getpass(':: password: ')
# close the open safe, if any
active_safe = get_open_safe()
if active_safe:
close_safe(active_safe)
meta = get_meta()
for safe in meta['safes']:
if safe['name'] == name:
if safe['hash'] == get_md5_hash(password):
if not safe['is_open']:
safe_paths = get_safe_file_paths(name)
decrypt_file(password, safe_paths[1], safe_paths[0])
remove(safe_paths[1])
safe['is_open'] = True
else:
info('the safe is already open.')
else:
fail('password incorrect for safe')
elif safe['is_open']:
close_safe(safe['name'])
set_meta(meta)
if is_verbose:
info('opened safe "{}"'.format(name))
def close_safe(name=None):
""" Close an existing open Safe.
Closes the specified Safe. All Safes will be closed if no specific Safe is
specified. If a specified Safe does not exist, a message may be logged,
otherwise nothing will happen, as a non-existent Safe can technically be
considered a closed Safe.
:param name: The name of the Safe to close.
:return: None
"""
meta = get_meta()
def encrypt_safe(safe_name):
safe_paths = get_safe_file_paths(safe_name)
if isfile(safe_paths[0]):
with open(safe_paths[0], 'r') as s:
data = json.load(s)
password = data['password']
encrypt_file(password, safe_paths[0], safe_paths[1])
remove(safe_paths[0])
if name is None:
for safe in meta['safes']:
if safe['is_open']:
encrypt_safe(safe['name'])
safe['is_open'] = False
else:
for safe in meta['safes']:
if safe['name'] == name:
encrypt_safe(name)
safe['is_open'] = False
break
set_meta(meta)
if is_verbose:
info('closed safe "{}"'.format(name))
def set_default(safe_name):
""" Set the default Safe.
This updates the 'default_safe' field in the Safe config file.
:param safe_name: The name of the safe to be the default.
:return: None
"""
meta = get_meta()
safe_exists = False
for safe in meta['safes']:
if safe['name'] == safe_name:
safe_exists = True
break
if safe_exists:
overwrite_config(default_safe=safe_name)
if is_verbose:
info('"{}" is now the default safe'.format(safe_name))
else:
fail('could not set default safe. "{}" does not exist.'.format(safe_name))
def add_entry(name=None, username=None, password=None):
""" Add an entry to the currently open Safe.
An entry consists of:
1. name (entry identifier, i.e. 'github')
2. username (user identifier; be it a username or email)
3. password (the password associated with the given username)
Currently, only one username/password can be associated with a given entry
identifier. If no Safe is open, adding an entry fails. If an entry name is
already used in the open Safe, adding an entry fails.
:param name: Name of the Safe entry.
:param username: Username associated with the entry.
:param password: Password associated with the entry.
:return: None
"""
if name is None:
name = prompt('entry name')
if username is None:
username = prompt('username')
if password is None:
password = prompt('password')
entry_data = {
'name': name,
'username': username,
'pass': password
}
safe = get_open_safe()
if safe:
safe_file = join_path(SAFES_PATH, safe + '.open')
with open(safe_file, 'r') as f:
safe_data = json.load(f)
for entry in safe_data['entries']:
if entry['name'] == name:
fail('entry "{}" already exists'.format(name))
safe_data['entries'].append(entry_data)
with open(safe_file, 'w') as f:
json.dump(safe_data, f)
else:
exit_info('no open safes found. open a safe with the --open option.')
if is_verbose:
info('added entry "{}" to safe "{}"'.format(name, safe))
def get_entry(name, show_all=False, show_username=False):
""" Retrieves an entry by name from the currently open Safe.
By default, this will print out only the password associated with the
specified entry. Additional commandline flags can be added in order to
show additional information:
* -u, --username -> show the username along with the password
* -w, --whole -> show the whole json entry
:param name: Name of the entry to retrieve information for.
:param show_all: Flag designating that all JSON info be shown.
:param show_username: Flag designating that username info be shown.
:return: None
"""
safe = get_open_safe()
if safe:
safe_file = join_path(SAFES_PATH, safe + '.open')
with open(safe_file, 'r') as f:
safe_data = json.load(f)
found = False
for entry in safe_data['entries']:
if entry['name'] == name:
found = True
if show_all:
print json.dumps(entry, indent=2, separators=(',', ':\t'))
elif show_username:
print entry['username']
print entry['pass']
else:
print entry['pass']
if not found:
fail('no entry found with name "{}".'.format(name))
else:
exit_info('no open safes found. open a safe with the --open option.')
def modify_entry(name=None):
""" Modify an entry in the currently opened Safe.
:param name: The name of the entry to modify.
:return: None
"""
safe = get_open_safe()
if safe:
if name is None:
name = prompt('entry to modify')
info('leave a prompt blank to leave the record unchanged.')
new_username = prompt('new username')
new_password = prompt('new password')
if not new_username and not new_password:
exit_info('no fields specified for modification.')
safe_file = join_path(SAFES_PATH, safe + '.open')
with open(safe_file, 'r') as f:
safe_data = json.load(f)
for entry in safe_data['entries']:
if entry['name'] == name:
if new_password:
entry['pass'] = new_password
if new_username:
entry['username'] = new_username
break
with open(safe_file, 'w') as f:
json.dump(safe_data, f)
else:
exit_info('no open safes found. open a safe with the --open option.')
if is_verbose:
info('successfully modified "{}" in safe "{}"'.format(name, safe))
def modify_safe(name=None, password=None):
""" Modify a Safe.
:param name: The name of the Safe to modify.
:return: None
"""
if name is None:
name = prompt('safe to modify')
if password is None:
password = getpass.getpass(':: password: ')
meta = get_meta()
found = False
for safe in meta['safes']:
if safe['name'] == name:
found = True
if safe['hash'] == get_md5_hash(password):
info('leave a prompt blank to leave the record unchanged.')
new_password = getpass.getpass(':: new password: ')
                re_new_password = getpass.getpass(':: re-enter new password: ')
                if new_password != re_new_password:
fail('entered passwords do not match.')
safe_paths = get_safe_file_paths(name)
if not safe['is_open']:
decrypt_file(password, safe_paths[1], safe_paths[0])
with open(safe_paths[0], 'r') as f:
file_data = json.load(f)
file_data['password'] = new_password
with open(safe_paths[0], 'w') as f:
json.dump(file_data, f)
if not safe['is_open']:
encrypt_file(new_password, safe_paths[0], safe_paths[1])
safe['hash'] = get_md5_hash(new_password)
else:
fail('password incorrect for safe')
if not found:
exit_info('safe with name "{}" not found.'.format(name))
set_meta(meta)
if is_verbose:
info('successfully modified safe "{}"'.format(name))
def delete_entry(name):
""" Delete an entry from the currently open Safe.
:param name: Name of the entry to delete.
:type name: str
:return: None
"""
safe = get_open_safe()
if safe:
safe_file = get_safe_file_paths(safe)[0]
with open(safe_file, 'r') as f:
safe_data = json.load(f)
entry_list = safe_data['entries']
entry_list[:] = [x for x in entry_list if not x['name'] == name]
with open(safe_file, 'w') as f:
json.dump(safe_data, f)
else:
exit_info('no open safes found. open a safe with the --open option.')
if is_verbose:
info('successfully removed entry "{}"'.format(name))
def delete_safe(name, force=False):
""" Delete a Safe and all of its contents.
:param name: The name of the safe to delete.
:param force: Flag which designates whether to prompt for validation or not.
(default: False)
:return: None
"""
if not force:
verify = prompt('delete safe "{}" and all of its contents? (y/N)'.format(name)) or 'n'
if verify.lower() == 'n':
exit_info('aborting safe delete.')
safe_paths = get_safe_file_paths(name)
if isfile(safe_paths[0]):
remove(safe_paths[0])
if isfile(safe_paths[1]):
remove(safe_paths[1])
meta = get_meta()
safes = meta['safes']
safes[:] = [x for x in safes if not x['name'] == name]
set_meta(meta)
if is_verbose:
info('successfully deleted safe "{}"'.format(name))
def list_entries():
""" List all entries in the Safe that is currently open.
:return: None
"""
safe = get_open_safe()
if safe:
safe_file = get_safe_file_paths(safe)[0]
with open(safe_file, 'r') as f:
safe_data = json.load(f)
if len(safe_data['entries']) == 0:
exit_info('no entries in the safe "{}".'.format(safe))
entries = [entry['name'] for entry in safe_data['entries']]
print '\n '.join(['Entries:'] + entries)
else:
exit_info('no open safes found. open a safe with the --open option.')
def list_safes():
""" List all initialized Safes.
The Safe that is currently open will be denoted with a '*' next to the
name. By design, either 0 or 1 Safe should be open at any given time, so
there should never be more than one safe marked as open.
:return: None
"""
meta = get_meta()
safes = []
for safe in meta['safes']:
name = ''
if safe['is_open']:
name += '* '
name += safe['name']
safes.append(name)
if len(safes) == 0:
exit_info('no safes exist.')
print '\n '.join(['Safes:'] + safes)
# ========================================================================
# Safe Main
# ========================================================================
if __name__ == '__main__':
# initialize Safe and get any arguments passed to it.
initialize()
args = parser.parse_args()
# ---------------------------------
# Get/Set the verbosity of Safe
# ---------------------------------
if args.verbose:
is_verbose = toggle_config_value('verbose')
info('set verbosity to {}'.format(is_verbose))
else:
is_verbose = get_config_value('verbose')
# ---------------------------------
# Initialize a new Safe
# ---------------------------------
if args.init is not None:
count = len(args.init)
if count == 0:
initialize_safe()
elif count == 1:
initialize_safe(name=args.init[0])
elif count == 2:
initialize_safe(name=args.init[0], password=args.init[1])
else:
parser.error('too many arguments given for --init. (accepts 0, 1, or 2 arguments)')
# ---------------------------------
# Open a Safe
# ---------------------------------
if args.open is not None:
count = len(args.open)
if count == 0:
open_safe()
elif count == 1:
open_safe(name=args.open[0])
elif count == 2:
open_safe(name=args.open[0], password=args.open[1])
else:
parser.error('too many arguments given for --open. (accepts 0, 1, or 2 arguments)')
# ---------------------------------
# Add data to a Safe
# ---------------------------------
if args.add is not None:
count = len(args.add)
if count == 0:
add_entry()
elif count == 1:
add_entry(name=args.add[0])
elif count == 2:
add_entry(name=args.add[0], username=args.add[1])
elif count == 3:
add_entry(name=args.add[0], username=args.add[1], password=args.add[2])
else:
parser.error('too many arguments given for --add. (accepts 0, 1, 2, or 3 arguments)')
# ---------------------------------
# Modify an entry from a Safe
# ---------------------------------
if args.modify is not None:
if args.modify:
modify_entry(args.modify)
else:
modify_entry()
# ---------------------------------
# Modify a Safe
# ---------------------------------
if args.modify_safe is not None:
count = len(args.modify_safe)
if count == 0:
modify_safe()
elif count == 1:
modify_safe(name=args.modify_safe[0])
elif count == 2:
modify_safe(name=args.modify_safe[0], password=args.modify_safe[1])
else:
parser.error('too many arguments given for --modify_safe. (accepts 0, 1, or 2 arguments)')
# ---------------------------------
# Delete data from a Safe
# ---------------------------------
if args.delete:
delete_entry(args.delete)
# ---------------------------------
# Delete a Safe
# ---------------------------------
if args.delete_safe:
delete_safe(args.delete_safe, args.force)
# ---------------------------------
# Set the default Safe
# ---------------------------------
if args.default:
set_default(args.default)
# ---------------------------------
# Close an open Safe
# ---------------------------------
if args.close:
if args.close == ALL_SAFES:
close_safe()
else:
close_safe(args.close)
# ---------------------------------
# Lookup info from a Safe
# ---------------------------------
if args.entry:
get_entry(args.entry, show_all=args.whole, show_username=args.username)
# ---------------------------------
# Lookup entries in a Safe
# ---------------------------------
if args.entries:
list_entries()
# ---------------------------------
# Lookup all Safes
# ---------------------------------
if args.safes:
list_safes()
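    # ---------------------------------
    # Usage sketch (illustration only; the script name and the exact parser
    # definition live earlier in this file, so the flag spellings below are
    # assumptions inferred from the handling above):
    #   python safe.py --init personal mypassword   # create and open a safe
    #   python safe.py --add github user@example.com hunter2
    #   python safe.py --entry github               # print the stored password
    #   python safe.py --close personal             # encrypt and close the safe
    # ---------------------------------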
|
nilq/baby-python
|
python
|
# coding: utf-8
from __future__ import absolute_import
from unittest.mock import patch
from xcube_hub import api
from xcube_hub.controllers.callbacks import put_callback_by_cubegen_id
from xcube_hub.models.callback import Callback
from test import BaseTestCase
class TestCallbacksController(BaseTestCase):
"""CallbacksController integration test stubs"""
def test_put_callback_by_job_id(self):
"""Test case for put_callback_by_job_id
Add a callback for a job
"""
callback = Callback(state={'error': 'dasds'}, sender='on_end')
res = put_callback_by_cubegen_id(body=callback.to_dict(), cubegen_id='test_id', token_info={
'access_token': 'dfevgdf',
'user_id': 'helge',
'email': 'helge@mail.org'
})
expected = ({'progress': [{'sender': 'on_end', 'state': {'error': 'dasds'}}]}, 200)
self.assertEqual(expected, res)
# Test whether the controller returns an error when the service raises an exception
def side_effect(user_id, email, cubegen_id, value):
raise api.ApiError(400, 'test')
with patch('xcube_hub.core.callbacks.put_callback') as p:
p.side_effect = side_effect
res = put_callback_by_cubegen_id(body=callback.to_dict(), cubegen_id='test_id', token_info={
'access_token': 'dfevgdf',
'user_id': 'helge',
'email': 'helge@mail.org'
})
self.assertEqual(400, res[1])
self.assertEqual('test', res[0]['message'])
self.assertGreater(len(res[0]['traceback']), 0)
if __name__ == '__main__':
import unittest
unittest.main()
|
nilq/baby-python
|
python
|
"""AI Engines
Here is a set of AI and ML patterns for advanced research of business data.
"""
|
nilq/baby-python
|
python
|
from unittest.mock import MagicMock
class AsyncMock(MagicMock):
async def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
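# Minimal usage sketch (an assumption, not part of the original helper): awaiting
# the mock behaves like calling a plain MagicMock, so return_value works as usual.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        fake = AsyncMock(return_value=42)
        assert await fake(1, 2) == 42

    asyncio.run(_demo())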
|
nilq/baby-python
|
python
|
from django.db import models
from django.test import TestCase
from django_fsm import FSMField, transition, can_proceed
class TestExceptTargetTransitionShortcut(models.Model):
state = FSMField(default="new")
@transition(field=state, source="new", target="published")
def publish(self):
pass
@transition(field=state, source="+", target="removed")
def remove(self):
pass
class Meta:
app_label = "testapp"
class Test(TestCase):
def setUp(self):
self.model = TestExceptTargetTransitionShortcut()
def test_usecase(self):
self.assertEqual(self.model.state, "new")
self.assertTrue(can_proceed(self.model.remove))
self.model.remove()
self.assertEqual(self.model.state, "removed")
self.assertFalse(can_proceed(self.model.remove))
|
nilq/baby-python
|
python
|
# Problem: https://www.hackerrank.com/challenges/py-check-strict-superset/problem
set_A = set(input().split())
n = int(input())
ind = 0
for _ in range(n):
set_n = set(input().split())
union_set = set_A.union(set_n)
if (union_set == set_A) and (len(set_A) > len(set_n)):
ind += 1
if ind == n:
print(True)
else: print(False)
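# An equivalent, more direct formulation (sketch only, not executed here because
# the input above has already been consumed): the ">" operator on sets tests for
# a proper (strict) superset directly.
#
#   set_A = set(input().split())
#   print(all(set_A > set(input().split()) for _ in range(int(input()))))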
|
nilq/baby-python
|
python
|
import sys
import os
import os.path
import glob
def compareOutputs( expected, actual, message ):
expected = expected.strip().replace('\r','').split('\n')
actual = actual.strip().replace('\r','').split('\n')
diff_line = 0
max_line_to_compare = min( len(expected), len(actual) )
for index in xrange(0,max_line_to_compare):
if expected[index].strip() != actual[index].strip():
diff_line = index + 1
break
if diff_line == 0 and len(expected) != len(actual):
diff_line = max_line_to_compare+1
if diff_line == 0:
return None
def safeGetLine( lines, index ):
index += -1
if index >= len(lines):
return ''
return lines[index].strip()
return """ Difference in %s at line %d:
Expected: '%s'
Actual: '%s'
""" % (message, diff_line,
safeGetLine(expected,diff_line),
safeGetLine(actual,diff_line) )
def safeReadFile( path ):
try:
return file( path, 'rt' ).read()
except IOError, e:
return '<File "%s" is missing: %s>' % (path,e)
def runAllTests( jsontest_executable_path, input_dir = None ):
if not input_dir:
input_dir = os.getcwd()
tests = glob.glob( os.path.join( input_dir, '*.json' ) )
failed_tests = []
for input_path in tests:
print 'TESTING:', input_path,
pipe = os.popen( "%s %s" % (jsontest_executable_path, input_path) )
process_output = pipe.read()
status = pipe.close()
base_path = os.path.splitext(input_path)[0]
actual_output = safeReadFile( base_path + '.actual' )
actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
file(base_path + '.process-output','wt').write( process_output )
if status:
print 'parsing failed'
failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
else:
expected_output_path = os.path.splitext(input_path)[0] + '.expected'
expected_output = file( expected_output_path, 'rt' ).read()
detail = ( compareOutputs( expected_output, actual_output, 'input' )
or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
if detail:
print 'FAILED'
failed_tests.append( (input_path, detail) )
else:
print 'OK'
if failed_tests:
print
print 'Failure details:'
for failed_test in failed_tests:
print '* Test', failed_test[0]
print failed_test[1]
print
print 'Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
len(failed_tests) )
return 1
else:
print 'All %d tests passed.' % len(tests)
return 0
if __name__ == '__main__':
    if len(sys.argv) < 2 or len(sys.argv) > 3:
print "Usage: %s jsontest-executable-path [input-testcase-directory]" % sys.argv[0]
sys.exit( 1 )
jsontest_executable_path = os.path.normpath( os.path.abspath( sys.argv[1] ) )
if len(sys.argv) > 2:
input_path = os.path.normpath( os.path.abspath( sys.argv[2] ) )
else:
input_path = None
status = runAllTests( jsontest_executable_path, input_path )
sys.exit( status )
|
nilq/baby-python
|
python
|
from unittest import mock
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from django_countries.fields import Country
from django_prices_vatlayer.models import VAT
from prices import Money, MoneyRange, TaxedMoney, TaxedMoneyRange
from saleor.core.taxes.vatlayer import (
DEFAULT_TAX_RATE_NAME,
apply_tax_to_price,
get_tax_rate_by_name,
get_taxed_shipping_price,
get_taxes_for_address,
get_taxes_for_country,
)
from saleor.core.utils import get_country_name_by_code
from saleor.dashboard.taxes.filters import get_country_choices_for_vat
from ..utils import get_redirect_location
@pytest.fixture
def compare_taxes():
def fun(taxes_1, taxes_2):
assert len(taxes_1) == len(taxes_2)
for rate_name, tax in taxes_1.items():
value_1 = tax["value"]
value_2 = taxes_2.get(rate_name)["value"]
assert value_1 == value_2
return fun
def test_get_tax_rate_by_name(taxes):
rate_name = "pharmaceuticals"
tax_rate = get_tax_rate_by_name(rate_name, taxes)
assert tax_rate == taxes[rate_name]["value"]
def test_get_tax_rate_by_name_fallback_to_standard(taxes):
rate_name = "unexisting tax rate"
tax_rate = get_tax_rate_by_name(rate_name, taxes)
assert tax_rate == taxes[DEFAULT_TAX_RATE_NAME]["value"]
def test_get_tax_rate_by_name_empty_taxes(product):
rate_name = "unexisting tax rate"
tax_rate = get_tax_rate_by_name(rate_name)
assert tax_rate == 0
def test_view_checkout_with_taxes(
settings, client, request_checkout_with_item, vatlayer, address
):
settings.DEFAULT_COUNTRY = "PL"
checkout = request_checkout_with_item
checkout.shipping_address = address
checkout.save()
product = checkout.lines.first().variant.product
product.meta = {"taxes": {"vatlayer": {"code": "standard", "description": ""}}}
product.save()
response = client.get(reverse("checkout:index"))
response_checkout_line = response.context[0]["checkout_lines"][0]
line_net = Money(amount="8.13", currency="USD")
line_gross = Money(amount="10.00", currency="USD")
assert response_checkout_line["get_total"].tax.amount
assert response_checkout_line["get_total"] == TaxedMoney(line_net, line_gross)
assert response.status_code == 200
def test_view_update_checkout_quantity_with_taxes(
client, request_checkout_with_item, vatlayer, monkeypatch
):
monkeypatch.setattr(
"saleor.checkout.views.to_local_currency", lambda price, currency: price
)
variant = request_checkout_with_item.lines.get().variant
response = client.post(
reverse("checkout:update-line", kwargs={"variant_id": variant.id}),
{"quantity": 3},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
assert response.status_code == 200
assert request_checkout_with_item.quantity == 3
@pytest.mark.parametrize(
"price, charge_taxes, expected_price",
[
(
Money(10, "USD"),
False,
TaxedMoney(net=Money(10, "USD"), gross=Money(10, "USD")),
),
(
Money(10, "USD"),
True,
TaxedMoney(net=Money("8.13", "USD"), gross=Money(10, "USD")),
),
],
)
def test_get_taxed_shipping_price(
site_settings, vatlayer, price, charge_taxes, expected_price
):
site_settings.charge_taxes_on_shipping = charge_taxes
site_settings.save()
shipping_price = get_taxed_shipping_price(price, taxes=vatlayer)
assert shipping_price == expected_price
def test_view_taxes_list(admin_client, vatlayer):
url = reverse("dashboard:taxes")
response = admin_client.get(url)
tax_list = response.context["taxes"].object_list
assert response.status_code == 200
assert tax_list == list(VAT.objects.order_by("country_code"))
def test_view_tax_details(admin_client, vatlayer):
tax = VAT.objects.get(country_code="PL")
tax_rates = [(rate_name, tax["value"]) for rate_name, tax in vatlayer.items()]
tax_rates = sorted(tax_rates)
url = reverse("dashboard:tax-details", kwargs={"country_code": "PL"})
response = admin_client.get(url)
assert response.status_code == 200
assert response.context["tax"] == tax
assert response.context["tax_rates"] == tax_rates
def test_configure_taxes(admin_client, site_settings):
url = reverse("dashboard:configure-taxes")
data = {
"include_taxes_in_prices": False,
"display_gross_prices": False,
"charge_taxes_on_shipping": False,
}
response = admin_client.post(url, data)
assert response.status_code == 302
assert get_redirect_location(response) == reverse("dashboard:taxes")
site_settings.refresh_from_db()
assert not site_settings.include_taxes_in_prices
assert not site_settings.display_gross_prices
assert not site_settings.charge_taxes_on_shipping
@mock.patch("saleor.dashboard.taxes.views.messages", create=True)
@mock.patch("saleor.dashboard.taxes.views.call_command", create=True)
def test_fetch_tax_rates(mocked_call_command, mocked_messages, admin_client):
"""Ensure a valid fetch VAT rates request is correctly handled,
    and leads to the proper VAT fetching command being invoked."""
url = reverse("dashboard:fetch-tax-rates")
response = admin_client.post(url)
# Ensure the request was successful
assert response.status_code == 302
assert get_redirect_location(response) == reverse("dashboard:taxes")
assert mocked_messages.success.call_count == 1
# Ensure the get VAT rates (mocked) command was invoked
mocked_call_command.assert_called_once_with("get_vat_rates")
@mock.patch("saleor.dashboard.taxes.views.messages", create=True)
@mock.patch("saleor.dashboard.taxes.views.logger", create=True)
@mock.patch(
"saleor.dashboard.taxes.views.call_command",
side_effect=ImproperlyConfigured("Test"),
create=True,
)
def test_fetch_tax_rates_improperly_configured(
mocked_call_command, mocked_logger, mocked_messages, admin_client
):
"""Ensure a failing VAT rate fetching is leading to an error being
returned, and that error is handled."""
url = reverse("dashboard:fetch-tax-rates")
response = admin_client.post(url)
# Ensure the request was successful
assert response.status_code == 302
assert get_redirect_location(response) == reverse("dashboard:taxes")
# Ensure error was logged to the logger
# and the error was returned to the client
assert mocked_logger.exception.call_count == 1
assert mocked_messages.warning.call_count == 1
# Ensure the get VAT rates (mocked) command was invoked
mocked_call_command.assert_called_once_with("get_vat_rates")
def test_fetch_tax_rates_invalid_method(admin_client):
"""Ensure the GET method is not allowed for tax rates fetching"""
url = reverse("dashboard:fetch-tax-rates")
assert admin_client.get(url).status_code == 405
def test_tax_list_filters_empty(admin_client, vatlayer):
qs = VAT.objects.order_by("country_code")
url = reverse("dashboard:taxes")
data = {"country_code": [""], "sort_by": [""]}
response = admin_client.get(url, data)
assert response.status_code == 200
assert list(response.context["filter_set"].qs) == list(qs)
def test_tax_list_filters_country_code(admin_client, vatlayer):
qs = VAT.objects.filter(country_code="PL")
url = reverse("dashboard:taxes")
data = {"country_code": ["PL"], "sort_by": [""]}
response = admin_client.get(url, data)
assert response.status_code == 200
assert list(response.context["filter_set"].qs) == list(qs)
def test_tax_list_filters_sort_by(admin_client, vatlayer):
qs = VAT.objects.order_by("-country_code")
url = reverse("dashboard:taxes")
data = {"country_code": [""], "sort_by": ["-country_code"]}
response = admin_client.get(url, data)
assert response.status_code == 200
assert list(response.context["filter_set"].qs) == list(qs)
def test_get_country_choices_for_vat(vatlayer):
expected_choices = [("DE", "Germany"), ("PL", "Poland")]
choices = get_country_choices_for_vat()
assert choices == expected_choices
def test_get_taxes_for_address(address, vatlayer, compare_taxes):
taxes = get_taxes_for_address(address)
compare_taxes(taxes, vatlayer)
def test_get_taxes_for_address_fallback_default(settings, vatlayer, compare_taxes):
settings.DEFAULT_COUNTRY = "PL"
taxes = get_taxes_for_address(None)
compare_taxes(taxes, vatlayer)
def test_get_taxes_for_address_other_country(address, vatlayer, compare_taxes):
address.country = "DE"
address.save()
tax_rates = get_taxes_for_country(Country("DE"))
taxes = get_taxes_for_address(address)
compare_taxes(taxes, tax_rates)
def test_get_taxes_for_country(vatlayer, compare_taxes):
taxes = get_taxes_for_country(Country("PL"))
compare_taxes(taxes, vatlayer)
def test_get_country_name_by_code():
country_name = get_country_name_by_code("PL")
assert country_name == "Poland"
def test_apply_tax_to_price_do_not_include_tax(site_settings, taxes):
site_settings.include_taxes_in_prices = False
site_settings.save()
money = Money(100, "USD")
assert apply_tax_to_price(taxes, "standard", money) == TaxedMoney(
net=Money(100, "USD"), gross=Money(123, "USD")
)
assert apply_tax_to_price(taxes, "medical", money) == TaxedMoney(
net=Money(100, "USD"), gross=Money(108, "USD")
)
taxed_money = TaxedMoney(net=Money(100, "USD"), gross=Money(100, "USD"))
assert apply_tax_to_price(taxes, "standard", taxed_money) == TaxedMoney(
net=Money(100, "USD"), gross=Money(123, "USD")
)
assert apply_tax_to_price(taxes, "medical", taxed_money) == TaxedMoney(
net=Money(100, "USD"), gross=Money(108, "USD")
)
def test_apply_tax_to_price_do_not_include_tax_fallback_to_standard_rate(
site_settings, taxes
):
site_settings.include_taxes_in_prices = False
site_settings.save()
money = Money(100, "USD")
taxed_money = TaxedMoney(net=Money(100, "USD"), gross=Money(123, "USD"))
assert apply_tax_to_price(taxes, "space suits", money) == taxed_money
def test_apply_tax_to_price_include_tax(taxes):
money = Money(100, "USD")
assert apply_tax_to_price(taxes, "standard", money) == TaxedMoney(
net=Money("81.30", "USD"), gross=Money(100, "USD")
)
assert apply_tax_to_price(taxes, "medical", money) == TaxedMoney(
net=Money("92.59", "USD"), gross=Money(100, "USD")
)
def test_apply_tax_to_price_include_fallback_to_standard_rate(taxes):
money = Money(100, "USD")
assert apply_tax_to_price(taxes, "space suits", money) == TaxedMoney(
net=Money("81.30", "USD"), gross=Money(100, "USD")
)
taxed_money = TaxedMoney(net=Money(100, "USD"), gross=Money(100, "USD"))
assert apply_tax_to_price(taxes, "space suits", taxed_money) == TaxedMoney(
net=Money("81.30", "USD"), gross=Money(100, "USD")
)
def test_apply_tax_to_price_raise_typeerror_for_invalid_type(taxes):
with pytest.raises(TypeError):
assert apply_tax_to_price(taxes, "standard", 100)
def test_apply_tax_to_price_no_taxes_return_taxed_money():
money = Money(100, "USD")
taxed_money = TaxedMoney(net=Money(100, "USD"), gross=Money(100, "USD"))
assert apply_tax_to_price(None, "standard", money) == taxed_money
assert apply_tax_to_price(None, "medical", taxed_money) == taxed_money
def test_apply_tax_to_price_no_taxes_return_taxed_money_range():
money_range = MoneyRange(Money(100, "USD"), Money(200, "USD"))
taxed_money_range = TaxedMoneyRange(
TaxedMoney(net=Money(100, "USD"), gross=Money(100, "USD")),
TaxedMoney(net=Money(200, "USD"), gross=Money(200, "USD")),
)
assert apply_tax_to_price(None, "standard", money_range) == taxed_money_range
assert apply_tax_to_price(None, "standard", taxed_money_range) == taxed_money_range
def test_apply_tax_to_price_no_taxes_raise_typeerror_for_invalid_type():
with pytest.raises(TypeError):
assert apply_tax_to_price(None, "standard", 100)
|
nilq/baby-python
|
python
|
#These variables are needed to make local variables in functions global
custom_end=''
homework_end=''
assignment_end=''
test_end=''
quiz_end=''
final_end=''
custom_advance_details=''
def quizzes():
while True:
        quiz_weight=input('How much do your quizzes weigh? or if not applicable type n/a ')
        #category_weight asks for the weight of the current category; in this case, it would be quizzes
if quiz_weight=='n/a':
Menu()
elif quiz_weight.isdecimal()==False:
            print('Please use an integer')
else:
while True:
quiz_amount=input('How many quizzes have you taken? ')
#ask for the number of assignments completed in this category
if quiz_amount.isdigit()==False:
                    print('Please use an integer')
elif quiz_amount.isalpha():
print('Please use an integer')
else:
quiz_value=[]
for scores in range(int(quiz_amount)):
quiz_scores=input('Please insert scores one at a time ')
while quiz_scores.isalpha()==True:
print('Please use an integer')
quiz_scores=input('Please insert scores one at a time ')
else:
quiz_scores=float(quiz_scores)
#quiz_scores has to be converted into a float in order to do the calculations
quiz_value.append(quiz_scores)
                            #This allows the user to input grades and have them iterated over and added to a list for future reference
quiz_weighp=int(quiz_weight)/100
# Quiz_weighp converts the category weight from an integer to a float
quiz_total=sum(quiz_value)
# Adds all the inputted quiz grades
quiz_final=quiz_total/int(quiz_amount)
# Divides the sum of all the quiz grades by the amount resulting in the final grade for the category excluding its weight
global quiz_end
quiz_end=int(quiz_final)*float(quiz_weighp)
                    # Multiplies the final grade of the category by the corresponding weight to output how much this category affects the final grade
print('Your quiz average was',quiz_final,"%. Your quizzes weigh", quiz_end,"% of your final grade.")
Menu()
# This function is repeated for the other hardcoded categories like tests and assignments, except for the custom category. A worked example of the weighting arithmetic follows below.
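# Worked example of the weighting arithmetic used in each category function above
# (illustration only; this helper is an assumption and is never called by the
# program): quiz scores of 80, 90 and 100 with a 20% category weight average to 90,
# which contributes 90 * 0.20 = 18 points toward the final grade.
def _weighted_contribution(scores, weight):
    average = sum(scores) / len(scores)
    return average * (weight / 100)
# _weighted_contribution([80, 90, 100], 20) == 18.0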
def Test():
while True:
test_weight=input('How much does your Test weigh? or if not applicable type n/a ')
if test_weight=='n/a':
Menu()
elif test_weight.isdecimal()==False:
            print('Please use an integer')
else:
while True:
                test_amount=input('How many tests have you taken? ')
if test_amount.isdigit()==False:
                    print('Please use an integer')
else:
test_value=[]
for scores in range(int(test_amount)):
test_scores=input('Please insert scores one at a time ')
while test_scores.isalpha()==True:
print("Please use an integer")
test_scores=input('Please insert scores one at a time ')
else:
test_scores=float(test_scores)
test_value.append(test_scores)
test_weighp=int(test_weight)/100
test_total=sum(test_value)
test_final=test_total/int(test_amount)
global test_end
test_end=int(test_final)*float(test_weighp)
                    print('Your test average was',test_final,"%. Your tests weigh", test_end,"% of your final grade.")
Menu()
def assignments():
while True:
        assignment_weight=input('How much do your assignments weigh? or if not applicable type n/a ')
if assignment_weight=='n/a':
Menu()
elif assignment_weight.isdecimal()==False:
            print('Please use an integer')
else:
while True:
assignment_amount=input('How many assignments have you completed? ')
if assignment_amount.isdigit()==False:
                    print('Please use an integer')
else:
assignment_value=[]
for scores in range(int(assignment_amount)):
assignment_scores=input('Please insert scores one at a time ')
while assignment_scores.isalpha()==True:
print("Please use an integer")
assignment_scores=input('Please insert scores one at a time ')
else:
assignment_scores=float(assignment_scores)
assignment_value.append(assignment_scores)
assignment_weighp=int(assignment_weight)/100
assignment_total=sum(assignment_value)
assignment_final=assignment_total/int(assignment_amount)
global assignment_end
assignment_end=int(assignment_final)*float(assignment_weighp)
print('Your assignments average was',assignment_final,"%. Your assignments weigh", assignment_end,"% of your final grade.")
Menu()
def homework():
while True:
homework_weight=input('How much does your homework weigh? or if not applicable type n/a ')
if homework_weight=='n/a':
Menu()
elif homework_weight.isdecimal()==False:
            print('Please use an integer')
else:
while True:
homework_amount=input('How many homework assignments have you completed? ')
if homework_amount.isdigit()==False:
                    print('Please use an integer')
else:
homework_value=[]
for scores in range(int(homework_amount)):
homework_scores=input('Please insert scores one at a time ')
while homework_scores.isalpha()==True:
print("Please use an integer")
homework_scores=input("Please insert scores one at a time")
else:
homework_scores=float(homework_scores)
homework_value.append(homework_scores)
homework_weighp=int(homework_weight)/100
homework_total=sum(homework_value)
homework_final=homework_total/int(homework_amount)
global homework_end
homework_end=int(homework_final)*float(homework_weighp)
                    print('Your homework average was',homework_final,"%. Your homework weighs", homework_end,"% of your final grade.")
Menu()
def custom():
    #The custom function allows the user to add categories they do not see on the menu; however, it only displays the average of all the categories added.
    #They will only see the combined weight of the custom categories together; if they want more details, they must use custom_advance_details.
custom_list=[]
custom_scores=[]
global custom_advance_details
custom_advance_details=[]
# The custom_advanced_details allows the user to view all the categories they added and their pertaining grade instead of just the average of all of them on the menu.
global custom_end
custom_end=sum(custom_list)
    #The counter's job is to signal the program to end the loop once the user has entered all their categories
    counter=0
    #the variable_customs function's job is to make the program more versatile when adding new categories. It is nearly the same as the category functions above except it takes more parameters.
    #the menu's custom grade will be the sum of all the categories computed in this function, each multiplied by its weight.
def variable_customs(name,weight,amount):
#print('so the category is', name,'with a', weight, '%', 'and a total of', amount )
for numbers in range(int(amount)):
scores=input('Please provide your scores for this category one at a time ')
while scores.isalpha()==True:
print("Please use an integer")
scores=input('Please provide your scores for this category one at a time ')
else:
scores=float(scores)
custom_scores.append(scores)
weigh_percentage=int(weight)/100
custom_totalgrades=sum(custom_scores)
custom_final=custom_totalgrades/int(amount)
custom_final_weight=int(custom_final)*float(weigh_percentage)
custom_details=custom_name,'average is', custom_final, '% and weighs', custom_final_weight,'% of your final grade.'
custom_advance_details.append(custom_details)
custom_list.append(custom_final_weight)
custom_amount=input('How many custom categories would you like to add? or enter n/a to return to menu ')
while custom_amount:
if custom_amount=='n/a':
Menu()
elif custom_amount.isalpha()==True:
            print('Please use an integer or decimal ')
custom_amount=input('How many custom categories would you like to add? or enter n/a to return to menu ')
else:
print('You will now be asked to insert information for each category one at a time\n ')
for numbers in range(int(custom_amount)):
custom_name=input('Please provide a name for category ')
while custom_name.isdigit()==True:
print("Please use letters only")
custom_name=input('Please provide a name for category ')
else:
#custom_scores is mentioned here again to clear the value because without it, it would just keep adding from the previous values
custom_scores=[]
custom_weight=input('Enter the weight of category ')
while custom_weight.isdecimal()==False:
print('Please use an integer')
custom_weight=input('Enter the weight of category ')
else:
                        number_custom_grades=input('Please provide the number of assignments completed for this category ')
                        while number_custom_grades.isdecimal()==False:
                            print('Please use an integer')
                            number_custom_grades=input('Please provide the number of assignments completed for this category ')
else:
counter+=1
variable_customs(custom_name,custom_weight,number_custom_grades)
custom_end=sum(custom_list)
while counter>=int(custom_amount):
Menu()
if custom_end=="":
custom_end=0
elif custom_end>-1:
custom_end=custom_end
if homework_end=='':
homework_end=0
elif homework_end>-1:
homework_end=homework_end
if test_end=='':
test_end=0
elif test_end>-1:
    test_end=test_end
if quiz_end=="":
quiz_end=0
elif quiz_end>-1:
quiz_end=quiz_end
if assignment_end=='':
assignment_end=0
elif assignment_end>-1:
assignment_end=assignment_end
if final_end=='':
final_end=0
elif final_end>-1:
final_end=final_end
# The code above allows the function final to determine which categories to use. If the user did not insert any grade in a category, it will default to 0
def final():
global final_end
    final_end=custom_end+homework_end+test_end+assignment_end+quiz_end
if final_end<70:
print("Your final grade is",final_end,"% Unfortunately you failed.")
while True:
rmenu=input("Would you like to return to menu? y/n")
if rmenu=="y":
Menu()
elif rmenu=="n":
exit()
else:
print("Please use a valid option")
elif 79>=final_end>=70:
print("Your final grade is",final_end,"% So you passed good job!")
while True:
rmenu=input("Would you like to return to menu? y/n")
if rmenu=="y":
Menu()
elif rmenu=="n":
exit()
else:
print("Please use a valid option")
elif 89>=final_end>=80:
print("Your final grade is",final_end,"% Good job you are above average!")
while True:
rmenu=input("Would you like to return to menu? y/n")
if rmenu=="y":
Menu()
elif rmenu=="n":
exit()
else:
print("Please use a valid option")
elif final_end>=90:
print( 'Your final Grade is',final_end,'% Congratulations, you aced the class!')
while True:
rmenu=input("Would you like to return to menu? y/n")
if rmenu=="y":
Menu()
elif rmenu=="n":
exit()
else:
print("Please use a valid option")
# function final is the final function that adds all the final grades together
def Menu():
print("Hello, welcome to the Will I fail Calculator")
print('This program will help calculate your final grade by finding the average weight of each pertaining category of your grade\nand then adding them together to output the final grade.')
print('1: Grade Quizzes Current weight is',quiz_end,'%')
print('2: Grade Test Current weight is',test_end,'%')
print('3: Grade Assignments Current weight is',assignment_end,'%')
print('4: Grade Homework Current weight is',homework_end,'%')
print('5: Grade Custom Current weight is',custom_end,'%')
print("6: Display each individual custom course's grade instead of the entire average")
print('7: Calculate Final Grade (all other pertaining grades have to be filled before hand) Current final grade is', final_end,'%')
    print('8: Close program')
Grader=input('Please Choose a category to start with by entering any of the following numbers above and press return key to confirm ')
if Grader=='1':
quizzes()
elif Grader=='2':
Test()
elif Grader=='3':
assignments()
elif Grader=='4':
homework()
elif Grader=='5':
custom()
elif Grader=='6':
print('Below are all the categories you added along with their grades')
for details in custom_advance_details:
details=str(details)
details=details.replace("'","")
details=details.replace(",","")
            #These replacements allow the list to be output like a normal sentence without all the punctuation.
print(details)
menu_return=input('would you like to return to the Menu? y/n? ')
if menu_return=='y':
Menu()
else:
exit()
elif Grader=='7':
final()
elif Grader=='8':
exit()
else:
print('That is not one of the options')
Menu()
Menu()
|
nilq/baby-python
|
python
|
from msal.authority import *
from msal.exceptions import MsalServiceError
from tests import unittest
class TestAuthority(unittest.TestCase):
COMMON_AUTH_ENDPOINT = \
'https://login.microsoftonline.com/common/oauth2/v2.0/authorize'
COMMON_TOKEN_ENDPOINT = \
'https://login.microsoftonline.com/common/oauth2/v2.0/token'
def test_wellknown_host_and_tenant(self):
# Test one specific sample in straightforward way, for readability
a = Authority('https://login.microsoftonline.com/common')
self.assertEqual(a.authorization_endpoint, self.COMMON_AUTH_ENDPOINT)
self.assertEqual(a.token_endpoint, self.COMMON_TOKEN_ENDPOINT)
# Test all well known authority hosts, using same real "common" tenant
for host in WELL_KNOWN_AUTHORITY_HOSTS:
a = Authority('https://{}/common'.format(host))
# Note: this "common" tenant endpoints always point to its real host
self.assertEqual(
a.authorization_endpoint, self.COMMON_AUTH_ENDPOINT)
self.assertEqual(a.token_endpoint, self.COMMON_TOKEN_ENDPOINT)
@unittest.skip("As of Jan 2017, the server no longer returns V1 endpoint")
def test_lessknown_host_will_return_a_set_of_v1_endpoints(self):
# This is an observation for current (2016-10) server-side behavior.
# It is probably not a strict API contract. I simply mention it here.
less_known = 'login.windows.net' # less.known.host/
v1_token_endpoint = 'https://{}/common/oauth2/token'.format(less_known)
a = Authority('https://{}/common'.format(less_known))
self.assertEqual(a.token_endpoint, v1_token_endpoint)
self.assertNotIn('v2.0', a.token_endpoint)
def test_unknown_host(self):
with self.assertRaisesRegexp(MsalServiceError, "invalid_instance"):
Authority('https://unknown.host/tenant_doesnt_matter_in_this_case')
def test_unknown_host_valid_tenant_and_skip_host_validation(self):
# When skipping host (a.k.a. instance) validation,
# the Tenant Discovery will always use WORLD_WIDE service as instance,
# so, if the tenant happens to exist there, it will find some endpoints.
a = Authority('https://incorrect.host/common', validate_authority=False)
self.assertEqual(a.authorization_endpoint, self.COMMON_AUTH_ENDPOINT)
self.assertEqual(a.token_endpoint, self.COMMON_TOKEN_ENDPOINT)
def test_unknown_host_unknown_tenant_and_skip_host_validation(self):
with self.assertRaisesRegexp(MsalServiceError, "invalid_tenant"):
Authority('https://unknown.host/invalid', validate_authority=False)
class TestAuthorityInternalHelperCanonicalize(unittest.TestCase):
def test_canonicalize_tenant_followed_by_extra_paths(self):
self.assertEqual(
canonicalize("https://example.com/tenant/subpath?foo=bar#fragment"),
("https://example.com/tenant", "example.com", "tenant"))
def test_canonicalize_tenant_followed_by_extra_query(self):
self.assertEqual(
canonicalize("https://example.com/tenant?foo=bar#fragment"),
("https://example.com/tenant", "example.com", "tenant"))
def test_canonicalize_tenant_followed_by_extra_fragment(self):
self.assertEqual(
canonicalize("https://example.com/tenant#fragment"),
("https://example.com/tenant", "example.com", "tenant"))
def test_canonicalize_rejects_non_https(self):
with self.assertRaises(ValueError):
canonicalize("http://non.https.example.com/tenant")
def test_canonicalize_rejects_tenantless(self):
with self.assertRaises(ValueError):
canonicalize("https://no.tenant.example.com")
def test_canonicalize_rejects_tenantless_host_with_trailing_slash(self):
with self.assertRaises(ValueError):
canonicalize("https://no.tenant.example.com/")
class TestAuthorityInternalHelperInstanceDiscovery(unittest.TestCase):
def test_instance_discovery_happy_case(self):
self.assertEqual(
instance_discovery("https://login.windows.net/tenant"),
"https://login.windows.net/tenant/.well-known/openid-configuration")
def test_instance_discovery_with_unknown_instance(self):
with self.assertRaisesRegexp(MsalServiceError, "invalid_instance"):
instance_discovery('https://unknown.host/tenant_doesnt_matter_here')
def test_instance_discovery_with_mocked_response(self):
mock_response = {'tenant_discovery_endpoint': 'http://a.com/t/openid'}
endpoint = instance_discovery(
"https://login.microsoftonline.in/tenant.com", response=mock_response)
self.assertEqual(endpoint, mock_response['tenant_discovery_endpoint'])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
class BarItem(object):
valid_options = set(['full_text', 'short_text', 'color', 'min_width',
'align', 'name', 'instance', 'urgent', 'separator',
'separator_block_width'])
COLOR_DEFAULT = '#FFFFFF'
def __init__(self, name):
assert(len(name) > 0)
self.name = name
self.output = {'name': name.lower()}
def update(self):
pass
def get(self):
return self.output
def set(self, option, value):
assert option in self.valid_options
self.output[option] = value
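if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of the original module):
    # build one status bar item and print the dictionary it would emit.
    item = BarItem('clock')
    item.set('full_text', '12:00')
    item.set('color', BarItem.COLOR_DEFAULT)
    print(item.get())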
|
nilq/baby-python
|
python
|
# a1.py advice for manhattan distance taken from
# https://stackoverflow.com/questions/39759721/calculating-the-manhattan-distance-in-the-eight-puzzle
# https://www.geeksforgeeks.org/sum-manhattan-distances-pairs-points/
from search import *
import time
import random
# ...
SOLVED_STATE = (1, 2, 3, 4, 5, 6, 7, 8, 0)
NUM_RANDOM_MOVES = 100
#PYCHARM_DEBUG = True
# ______________________________________________________________________________
# A* heuristics
# Duck Puzzle
class DuckPuzzle(Problem):
""" almost the same as eightpuzzle but now the board looks like a duck facing to the left
1 2
3 4 5 6 goal state
7 8 *
Tiles slide into the blank (the *) as in the regular 8-puzzle, but now the board has a different shape which changes
the possible moves.
"""
def __init__(self, initial, goal=(1, 2, 3, 4, 5, 6, 7, 8, 0)):
""" Define goal state and initialize a problem """
super().__init__(initial, goal)
def find_blank_square(self, state):
"""Return the index of the blank square in a given state"""
return state.index(0)
def actions(self, state):
""" Return the actions that can be executed in the given state.
The result would be a list, since there are only four possible actions
in any given state of the environment """
possible_actions = ['UP', 'DOWN', 'LEFT', 'RIGHT']
index_blank_square = self.find_blank_square(state)
unable_to_move_up = (0, 1, 4, 5)
unable_to_move_down = (2, 6, 7, 8)
unable_to_move_left = (0, 2, 6)
unable_to_move_right = (1, 5, 8)
# check which tuples the blank square is in and remove corresponding move option
if index_blank_square in unable_to_move_left:
possible_actions.remove('LEFT')
if index_blank_square in unable_to_move_up:
possible_actions.remove('UP')
if index_blank_square in unable_to_move_right:
possible_actions.remove('RIGHT')
if index_blank_square in unable_to_move_down:
possible_actions.remove('DOWN')
return possible_actions
def result(self, state, action):
""" Given state and action, return a new state that is the result of the action.
Action is assumed to be a valid action in the state
duckPuzzle Shape should induce some special cases here.
1 2
3 4 5 6
7 8 *
        the numbers at indices 0, 1, and 2 will forever be trapped in that corner of the board. Therefore the
        tile at index 3 is a special case. Tile indices 0, 1, & 2 are also their own special cases.
"""
# blank is the index of the blank square
blank = self.find_blank_square(state)
new_state = list(state)
delta = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1} # for most numbers in normal places
delta_case1 = {'UP': -2, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1} # specifically for tile at index #3
delta_case2 = {'UP': -2, 'DOWN': 2, 'LEFT': -1, 'RIGHT': 1} # Special case for tiles at index 0, 1, & 2
blank_case2 = (0, 1, 2)
        if blank == 3:
neighbor = blank + delta_case1[action]
elif blank in blank_case2:
neighbor = blank + delta_case2[action]
else:
neighbor = blank + delta[action]
new_state[blank], new_state[neighbor] = new_state[neighbor], new_state[blank]
return tuple(new_state)
def goal_test(self, state):
""" Given a state, return True if state is a goal state or False, otherwise """
return state == self.goal
def check_solvability(self, state):
""" Checks if the given state is solvable """
inversion = 0
for i in range(len(state)):
for j in range(i + 1, len(state)):
if (state[i] > state[j]) and state[i] != 0 and state[j] != 0:
inversion += 1
return inversion % 2 == 0
def h(self, node):
""" Return the heuristic value for a given state. Default heuristic function used is
h(n) = number of misplaced tiles """
return sum(s != g for (s, g) in zip(node.state, self.goal))
def manhattan(self, node):
# Goal state is puzzle = (1, 2, 3, 4, 5, 6, 7, 8, 0)
# for my simplicity index has been shortened to `i`
# adapted from the manhattan function in test_search.py
currentstate = node.state # Grab the current state of the EightPuzzle object passed via the node
i_target = {0: [2, 2], 1: [0, 0], 2: [0, 1], 3: [0, 2], 4: [1, 0], 5: [1, 1], 6: [1, 2], 7: [2, 0], 8: [2, 1]}
i_state = {}
index = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
for i in range(len(currentstate)):
i_state[currentstate[i]] = index[i] # initialize i_state dictionary
manhattan_distance_x = 0
manhattan_distance_y = 0
for i in range(9):
manhattan_distance_x += abs(i_target[i][0] - i_state[i][0]) # two indices because index was 2d array
manhattan_distance_y += abs(i_target[i][1] - i_state[i][1])
return manhattan_distance_y + manhattan_distance_x
def a_max(self, node):
""" Return the biggest heuristic value from either manhattan or h
        Always guarantees the most efficient result, since the larger heuristic dominates
        the smaller one"""
h = self.h(node)
manhattan = self.manhattan(node)
return max(h, manhattan)
def get_state(self):
"""Obtain Current state of EightPuzzle Object, could be useful for display"""
return self.initial
# END duck_puzzle class
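def _duck_move_sanity_check():
    # Quick illustration of the delta handling above (an assumption, not part of
    # the original assignment and never called by it): from the solved board the
    # blank sits at index 8, so moving it UP swaps it with the tile three
    # positions earlier (index 5).
    puzzle = DuckPuzzle(SOLVED_STATE)
    assert puzzle.result(SOLVED_STATE, 'UP') == (1, 2, 3, 4, 5, 0, 7, 8, 6)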
def display_Dpuzz(state):
""" Helper function that displays duck_puzzle formatted properly (like a duck)"""
# I am going to call the state the "board". Since the state is a tuple, all the object values are stored in an
# array so that they may be printed. The blank square or 0 will be notated by a *
# logically the same as display(state) above but for duckpuzzle()
board = [0, 0, 0, 0, 0, 0, 0, 0, 0] # Initialize the array with zeroes
for i in range(9):
board[i] = state[i]
if board[i] == 0:
board[i] = '*'
# logic below for printing a duck array QUACK
if i <= 1:
print(board[i], ' ', end='')
if i == 1:
print()
elif 5 >= i > 1:
print(board[i], ' ', end='')
if i == 5:
print()
print(' ', end='')
else:
print(board[i], ' ', end='')
print()
print()
def make_rand_duckPuzz():
state = SOLVED_STATE
puzz = DuckPuzzle(state)
for _ in range(NUM_RANDOM_MOVES):
possible_actions = puzz.actions(state=state) # All currently valid moves for 0
action = random.choice(possible_actions) # Pick a valid move at random
state = puzz.result(state=state, action=action) # Apply it to random state, set state to new state
return DuckPuzzle(state) # Will always be a solvable puzzle as valid moves have been applied to a solved state
def make_n_Dpuzz(n): # Create an Array of n Random puzzles
puzzles = []
for _ in range(n):
puzzles.append(make_rand_duckPuzz())
return puzzles
#-----------------End Duck Puzzle functions-----------------------
class EightPuzzle(Problem):
""" The problem of sliding tiles numbered from 1 to 8 on a 3x3 board, where one of the
squares is a blank. A state is represented as a tuple of length 9, where element at
index i represents the tile number at index i (0 if it's an empty square) """
def __init__(self, initial, goal=(1, 2, 3, 4, 5, 6, 7, 8, 0)):
""" Define goal state and initialize a problem """
super().__init__(initial, goal)
def find_blank_square(self, state):
"""Return the index of the blank square in a given state"""
return state.index(0)
def actions(self, state):
""" Return the actions that can be executed in the given state.
The result would be a list, since there are only four possible actions
in any given state of the environment """
possible_actions = ['UP', 'DOWN', 'LEFT', 'RIGHT']
index_blank_square = self.find_blank_square(state)
if index_blank_square % 3 == 0:
possible_actions.remove('LEFT')
if index_blank_square < 3:
possible_actions.remove('UP')
if index_blank_square % 3 == 2:
possible_actions.remove('RIGHT')
if index_blank_square > 5:
possible_actions.remove('DOWN')
return possible_actions
def result(self, state, action):
""" Given state and action, return a new state that is the result of the action.
Action is assumed to be a valid action in the state """
# blank is the index of the blank square
blank = self.find_blank_square(state)
new_state = list(state)
delta = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1}
neighbor = blank + delta[action]
new_state[blank], new_state[neighbor] = new_state[neighbor], new_state[blank]
return tuple(new_state)
def goal_test(self, state):
""" Given a state, return True if state is a goal state or False, otherwise """
return state == self.goal
def check_solvability(self, state):
""" Checks if the given state is solvable """
inversion = 0
for i in range(len(state)):
for j in range(i + 1, len(state)):
if (state[i] > state[j]) and state[i] != 0 and state[j] != 0:
inversion += 1
return inversion % 2 == 0
def h(self, node):
""" Return the heuristic value for a given state. Default heuristic function used is
h(n) = number of misplaced tiles """
return sum(s != g for (s, g) in zip(node.state, self.goal))
def manhattan(self, node):
# Goal state is puzzle = (1, 2, 3, 4, 5, 6, 7, 8, 0)
# for my simplicity index has been shortened to `i`
# adapted from the manhattan function in test_search.py
currentstate = node.state # Grab the current state of the EightPuzzle object passed via the node
i_target = {0: [2, 2], 1: [0, 0], 2: [0, 1], 3: [0, 2], 4: [1, 0], 5: [1, 1], 6: [1, 2], 7: [2, 0], 8: [2, 1]}
i_state = {}
index = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
# x = 0
# y = 0
for i in range(len(currentstate)):
i_state[currentstate[i]] = index[i] # initialize i_state dictionary
manhattan_distance_x = 0
manhattan_distance_y = 0
for i in range(9):
manhattan_distance_x += abs(i_target[i][0] - i_state[i][0]) # two indices because index was 2d array
manhattan_distance_y += abs(i_target[i][1] - i_state[i][1])
return manhattan_distance_y + manhattan_distance_x
def a_max(self, node):
""" Return the biggest heuristic value from either manhattan or h
        Always guarantees the most efficient result, since the larger heuristic dominates
        the smaller one"""
h = self.h(node)
manhattan = self.manhattan(node)
return max(h, manhattan)
def get_state(self):
"""Obtain Current state of EightPuzzle Object, could be useful for display"""
return self.initial
def display(state):
""" Helper function that displays the state of 8 puzzle (tuple) in 3x3 form"""
# I am going to call the state the "board". Since the state is a tuple, all the object values are stored in an
# array so that they may be printed. The blank square or 0 will be notated by a *
board = [0, 0, 0, 0, 0, 0, 0, 0, 0] # Initialize the array with zeroes
for i in range(9):
board[i] = state[i]
if board[i] == 0:
board[i] = '*'
# logic below for printing a 3x3 array
if i <= 2:
print(board[i], ' ', end='')
if i == 2:
print()
elif 5 >= i > 2:
print(board[i], ' ', end='')
if i == 5:
print()
else:
print(board[i], ' ', end='')
print()
print()
def make_rand_8puzzle():
state = SOLVED_STATE
puzz = EightPuzzle(state)
for _ in range(NUM_RANDOM_MOVES):
possible_actions = puzz.actions(state=state) # All currently valid moves for 0
action = random.choice(possible_actions) # Pick a valid move at random
state = puzz.result(state=state, action=action) # Apply it to random state, set state to new state
return EightPuzzle(state) # Will always be a solvable puzzle as valid moves have been applied to a solved state
def make_n_puzzles(n): # Create an Array of n Random puzzles
puzzles = []
for _ in range(n):
puzzles.append(make_rand_8puzzle())
return puzzles
# -------------------------------------End EightPuzzle Functions--------------------------------
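# Illustrative check of the Manhattan heuristic above (an assumption, not part of the
# original assignment and never called by it; it relies on Node from the search module
# imported at the top). Swapping tile 1 and the blank between opposite corners costs
# 4 moves each, and this implementation also counts the blank, so it reports 8.
def _manhattan_sanity_check():
    puzzle = EightPuzzle(SOLVED_STATE)
    node = Node((0, 2, 3, 4, 5, 6, 7, 8, 1))
    assert puzzle.manhattan(node) == 8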
# ----------------------------- A* Search variations!-------------------------------------------
def astar_search(problem, h=None, display=False):
"""A* search is best-first graph search with f(n) = g(n)+h(n).
You need to specify the h function when you call astar_search, or
else in your Problem subclass."""
h = memoize(h or problem.h, 'h')
return best_first_graph_search(problem, lambda n: n.path_cost + h(n), display)
# Modify astar_search to use Manhattan Distance Heuristic
def astar_manhattan(problem, h=None):
"""Modification on A* search to use Manhattan Distance as Heuristic"""
h = memoize(h or problem.manhattan, 'h')
return best_first_graph_search(problem, lambda n: n.path_cost + h(n))
# modified astar_search to use maximum of misplaced tile heuristic
def astar_max(problem, h=None):
h = memoize(h or problem.a_max, 'h')
return best_first_graph_search(problem, lambda n: n.path_cost + h(n))
# ---------------------------------END A* Search Variations-------------------------------------
def best_first_graph_search(problem, f, display=False):
"""Search the nodes with the lowest f scores first.
You specify the function f(node) that you want to minimize; for example,
if f is a heuristic estimate to the goal, then we have greedy best
first search; if f is node.depth then we have breadth-first search.
There is a subtlety: the line "f = memoize(f, 'f')" means that the f
values will be cached on the nodes as they are computed. So after doing
a best first search you can examine the f values of the path returned."""
f = memoize(f, 'f')
node = Node(problem.initial)
frontier = PriorityQueue('min', f)
frontier.append(node)
explored = set()
FledTheFrontier = 0 # Tracks how many nodes have fled the frontier (were removed from it) yeeehaw
test = 0
while frontier:
test += 1
node = frontier.pop()
FledTheFrontier += 1 # Account for all fleeing nodes
if problem.goal_test(node.state):
if display:
print(len(explored), "paths have been expanded and", len(frontier), "paths remain in the frontier")
return [node, FledTheFrontier] # Return FledFromFrontier here so that we may access it outside the func
explored.add(node.state)
for child in node.expand(problem):
if child.state not in explored and child not in frontier:
frontier.append(child)
elif child in frontier:
if f(child) < frontier[child]:
del frontier[child]
frontier.append(child)
return None
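# A minimal sketch (illustrative only, never called by the rest of this file): compare the
# three A* variants defined above on a single random puzzle built with make_rand_8puzzle.
def compare_heuristics_on_one_puzzle():
    puzz = make_rand_8puzzle()
    for search in (astar_search, astar_manhattan, astar_max):
        solved_node, removed_from_frontier = search(puzz)
        print(search.__name__, 'solution length:', solved_node.path_cost,
              'nodes removed from frontier:', removed_from_frontier)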
# ----------Small Functions to generate 10 (or n) puzzles for some statistical analysis----------
def eight_puzzle_analysis():
# print("data for 10 puzzles using A* search using misplaced tile Heuristic (default)")
puzzles = make_n_puzzles(10)
for puzz in puzzles:
display(puzz.get_state())
start_time = time.time_ns()
finished_puzzle = astar_search(puzz)
elapsed_time = (time.time_ns() - start_time) / 1000000000
# Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER-
print(elapsed_time, end='')
print(',', finished_puzzle[0].path_cost, end='')
print(',', finished_puzzle[1])
print()
# print("data for 10 puzzles using modified A* with manhattan distance heuristic")
for puzz in puzzles:
start_time = time.time_ns()
finished_puzzle = astar_manhattan(puzz)
elapsed_time = (time.time_ns() - start_time) / 1000000000
# Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER-
print(elapsed_time, end='')
print(',', finished_puzzle[0].path_cost, end='')
print(',', finished_puzzle[1])
print()
# print("data for 10 puzzles using modified A* with max misplaced distance heuristic")
for puzz in puzzles:
start_time = time.time_ns()
finished_puzzle = astar_max(puzz)
elapsed_time = (time.time_ns() - start_time) / 1000000000
# Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER-
print(elapsed_time, end='')
print(',', finished_puzzle[0].path_cost, end='')
print(',', finished_puzzle[1])
def duck_puzzle_analysis():
puzzles = make_n_Dpuzz(10)
for puzz in puzzles:
#display_Dpuzz(puzz.get_state())
start_time = time.time_ns()
finished_puzzle = astar_search(puzz)
elapsed_time = (time.time_ns() - start_time) / 1000000000
# Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER-
print(elapsed_time, end='')
print(',', finished_puzzle[0].path_cost, end='')
print(',', finished_puzzle[1])
print()
# print("data for 10 puzzles using modified A* with manhattan distance heuristic")
for puzz in puzzles:
start_time = time.time_ns()
finished_puzzle = astar_manhattan(puzz)
elapsed_time = (time.time_ns() - start_time) / 1000000000
# Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER-
print(elapsed_time, end='')
print(',', finished_puzzle[0].path_cost, end='')
print(',', finished_puzzle[1])
print()
# print("data for 10 puzzles using modified A* with max misplaced distance heuristic")
for puzz in puzzles:
start_time = time.time_ns()
finished_puzzle = astar_max(puzz)
elapsed_time = (time.time_ns() - start_time) / 1000000000
# Output data in CSV Format where first column is TIME (s), Second is LENGTH, and Third is FRONTIER-
print(elapsed_time, end='')
print(',', finished_puzzle[0].path_cost, end='')
print(',', finished_puzzle[1])
return 0
def single_duck_puzzle():
#debugging to find where misplaced action
puzzle = make_rand_duckPuzz()
#display_Dpuzz(puzzle.get_state())
#single_duck_puzzle()
# eight_puzzle_analysis()
duck_puzzle_analysis()
|
nilq/baby-python
|
python
|
from requests_oauthlib import OAuth2Session
from flask import Flask, request, redirect, session, url_for
from flask.json import jsonify
import os
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
app = Flask(__name__)
# This information is obtained upon registration of a new Facebook OAuth
# application here: https://developers.facebook.com/apps/
client_id = "354731726140076"
client_secret = "ce6012f0684ade3c7cb3938ba20f7446"
authorization_base_url = 'https://www.facebook.com/dialog/oauth'
token_url = 'https://graph.facebook.com/oauth/access_token'
redirect_uri = 'https://bstore21.heliohost.us/loginsucess.html'
@app.route("/")
def demo():
"""Step 1: User Authorization.
    Redirect the user/resource owner to the OAuth provider (i.e. Facebook)
    using a URL with a few key OAuth parameters.
"""
facebook = OAuth2Session(client_id, redirect_uri=redirect_uri)
facebook = facebook_compliance_fix(facebook)
authorization_url, state = facebook.authorization_url(authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
# Step 2: User authorization, this happens on the provider.
@app.route("/callback", methods=["GET"])
def callback():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
    facebook = OAuth2Session(client_id, redirect_uri=redirect_uri, state=session['oauth_state'])
    facebook = facebook_compliance_fix(facebook)
    token = facebook.fetch_token(token_url, client_secret=client_secret,
                                 authorization_response=request.url)
# At this point you can fetch protected resources but lets save
# the token and show how this is done from a persisted token
# in /profile.
session['oauth_token'] = token
return redirect(url_for('.profile'))
@app.route("/profile", methods=["GET"])
def profile():
"""Fetching a protected resource using an OAuth 2 token.
"""
    facebook = OAuth2Session(client_id, token=session['oauth_token'])
    return jsonify(facebook.get('https://graph.facebook.com/me').json())
if __name__ == "__main__":
# This allows us to use a plain HTTP callback
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
app.secret_key = os.urandom(24)
app.run(debug=True)
|
nilq/baby-python
|
python
|
############################################################################
# This Python file is part of PyFEM, the code that accompanies the book: #
# #
# 'Non-Linear Finite Element Analysis of Solids and Structures' #
# R. de Borst, M.A. Crisfield, J.J.C. Remmers and C.V. Verhoosel #
# John Wiley and Sons, 2012, ISBN 978-0470666449 #
# #
# The code is written by J.J.C. Remmers, C.V. Verhoosel and R. de Borst. #
# #
# The latest stable version can be downloaded from the web-site: #
# http://www.wiley.com/go/deborst #
# #
# A github repository, with the most up to date version of the code, #
# can be found here: #
# https://github.com/jjcremmers/PyFEM #
# #
# The code is open source and intended for educational and scientific #
# purposes only. If you use PyFEM in your research, the developers would #
# be grateful if you could cite the book. #
# #
# Disclaimer: #
# The authors reserve all rights but do not guarantee that the code is #
# free from errors. Furthermore, the authors shall not be liable in any #
# event caused by the use of the program. #
############################################################################
############################################################################
# Description: The Python file of the bezier interpolation example as #
# presented in section 15.3, page 487-490. #
# #
# Usage: python beziertest.py #
############################################################################
from numpy import zeros,dot
C = zeros(shape=[4,4,4])
C[0,0,0] = 1.0
C[0,1,1] = 1.0
C[0,1,2] = 0.5
C[0,1,3] = 0.25
C[0,2,2] = 0.5
C[0,2,3] = 0.5
C[0,3,3] = 0.25
C[1,0,0] = 0.25
C[1,1,0] = 0.5
C[1,1,1] = 0.5
C[1,2,0] = 0.25
C[1,2,1] = 0.5
C[1,2,2] = 1.0
C[1,2,3] = 0.5
C[1,3,3] = 0.5
C[2,0,0] = 0.5
C[2,1,0] = 0.5
C[2,1,1] = 1.0
C[2,2,2] = 1.0
C[2,3,3] = 1.0
C[3,0,0] = 1.0
C[3,1,1] = 1.0
C[3,2,2] = 1.0
C[3,3,3] = 1.0
coords = zeros( shape=(10,2) )
coords[0,:] = [0.0 , 0.0 ]
coords[1,:] = [1.0 , 1.0 ]
coords[2,:] = [1.0 , 3.0 ]
coords[3,:] = [2.0 , 3.0 ]
coords[4,:] = [2.5 , 1.5 ]
coords[5,:] = [1.5 , 0.5 ]
coords[6,:] = [3.0 , 0.0 ]
coords[7,:] = [3.2 , 2.0 ]
coords[8,:] = [3.8 , 2.5 ]
coords[9,:] = [4.0 , 0.0 ]
elems = zeros( shape=(4,4) , dtype=int )
elems[0,:] = [ 0 , 1 , 2 , 3 ]
elems[1,:] = [ 1 , 2 , 3 , 4 ]
elems[2,:] = [ 3 , 4 , 5 , 6 ]
elems[3,:] = [ 6 , 7 , 8 , 9 ]
output = []
length = 0.
from pyfem.util.BezierShapeFunctions import getElemBezierData
for elemNodes,Celem in zip(elems,C):
sdata = getElemBezierData ( coords[elemNodes,:] , Celem , \
order = 100 , elemType='Line4')
for idata in sdata:
x = dot(idata.h,coords[elemNodes,:])
output.append(x)
length += idata.weight
print("The length of the curve is ",length)
from pylab import plot, show, xlabel, ylabel
plot( [x[0] for x in output], [x[1] for x in output], '-' )
plot( [x[0] for x in coords], [x[1] for x in coords], 'ro-' )
show()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''
@todo: turn this into a real test runner for everything in the test subdir.
'''
import sys
from aisutils.BitVector import BitVector
from aisutils import binary
import ais_msg_1
import ais_msg_8
import sls.waterlevel
if __name__=='__main__':
# Try to parse some binary message
if False:
nmeaStr='!AIVDM,1,1,,A,85OpLV1Kf98p96dWWPLSViUfJlU@SV>cDF2Wq5>`=u8CnEFGCIOq,0*70,r003669983,1165795916'
msgPayload = nmeaStr.split(',')[5]
print 'nmea string: ',nmeaStr
print 'message payload:',msgPayload
bv = binary.ais6tobitvec(msgPayload)
print len(bv), bv
msgDict = ais_msg_8.bin_broadcastDecode(bv)
ais_msg_8.bin_broadcastPrintFields(msgDict)
bv = bv[39:]
print 'dac: ',bv[:10],int(bv[:10])
bv = bv[10:]; print 'fid: ',bv[: 6],int(bv[: 6])
bv = bv[ 6:]; print 'bits:',bv[:16],int(bv[:10])
bv = bv[10:]; print 'len: ',len(bv)
# Position message
if False:
nmeaStr = '!AIVDM,1,1,,B,15Mt9B001;rgAFhGKLaRK1v2040@,0*2A'
msgPayload = nmeaStr.split(',')[5]
print 'nmea string: ',nmeaStr
print 'message payload:',msgPayload
bv = binary.ais6tobitvec(msgPayload)
msgDict = ais_msg_1.positionDecode(bv)
ais_msg_1.positionPrint(msgDict)
# SLS try for waterlevel
if True:
bvStr = '010111101000001000100101000001010100110101001100011000001000000000110001100101110101000000001001010011101101000000000000001000000100000000000000'
bv = BitVector(bitstring=bvStr)
print type(bv)
msgDict= sls.waterlevel.decode(bv)
sls.waterlevel.printFields(msgDict)
|
nilq/baby-python
|
python
|
import streamlit as st
import pandas as pd
from itertools import groupby
from datetime import datetime
import re
from pretty_html_table import build_table
st.set_page_config(layout='wide')
JIRA_REGEX = r"[A-Z]{2,}-\d+"
def parse_blame(chunk):
branch = chunk[0].split()[0]
line_start = chunk[0].split()[1]
author = chunk[1][7:]
author_mail = chunk[2][13:-1]
author_time_int = chunk[3][12:]
author_time = datetime.fromtimestamp(int(author_time_int))
filename = chunk[-2][9:]
comment_text = chunk[-1]
comment = comment_text[comment_text.find("TODO"):]
jira_tickets = re.findall(JIRA_REGEX, comment)
jira_ticket = jira_tickets[0] if jira_tickets else None
return author, author_mail, author_time, filename, comment, branch, line_start, jira_ticket
def apply_tags(comment):
comment = comment.lower()
tags = []
uncertainty_words = {"uncertainty": ["?", "maybe", "perhaps", "should we", "probably", "might", "not sure"]}
hacky_words = {"hacky": ["temporary", "hack", "hacky"]}
fixme_words = {"fix": ["fixme", "fix", "bug", "incorrect"]}
dependency_words = {"dependency": ["once", "when", "blocked"]}
for tw in [uncertainty_words, hacky_words, fixme_words, dependency_words]:
for k, v in tw.items():
for w in v:
if w in comment:
tags.append(k)
return tags
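# Example (illustrative, traced by hand): apply_tags("TODO: fix this later?") returns
# ["uncertainty", "fix"], since the comment contains "?" and the substring "fix".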
def make_table(df):
return build_table(df, 'blue_light', font_size='medium',
font_family='Century Gothic, sans-serif',
text_align='left',
width='auto',
index=False,
escape=False)
def make_path_clickable(branch, path, line_start, prefix='https://github.com/pytorch/pytorch'):
return f'<a target="_blank" href="{prefix}/blob/{branch}/{path}#L{line_start}">{path}</a>'
col1, col2, col3 = st.columns(3)
with col1:
st.title("✅ Let's Do it")
with col3:
st.header("#TODO tracker")
st.sidebar.write("Run **todo_script.sh** to get blame results for a repo")
blame_txt = st.sidebar.file_uploader("Submit the git blame results.txt")
if blame_txt:
content = blame_txt.readlines()
content = [line.decode("utf-8").strip() for line in content]
chunks = (list(g) for k, g in groupby(content, key=lambda x: x != '--blame-end--') if k)
data = []
for chunk in chunks:
data.append(list(parse_blame(chunk)))
df = pd.DataFrame(data, columns=['author', 'author_mail', 'author_time', 'filename', 'comment', "branch", "line_start", "jira_ticket"])
df['tags'] = df.comment.map(apply_tags)
df['clickable_filename'] = df.apply(lambda x: make_path_clickable(x['branch'], x['filename'], x['line_start']), axis=1)
unique_emails = df.author_mail.unique()
search_option = st.sidebar.selectbox("search option", ['by email', 'by path prefix'])
if search_option == 'by email':
author_mail_input = st.sidebar.selectbox("email (e.g. rongr@fb.com)", [""] + unique_emails.tolist())
if author_mail_input != '':
to_show_df = df[df.author_mail == author_mail_input].copy()
st.write(make_table(to_show_df[['author', 'author_mail', 'author_time', 'clickable_filename', 'comment', 'tags', 'jira_ticket']]), unsafe_allow_html=True)
if search_option == 'by path prefix':
path_input = st.sidebar.text_input("path prefix (e.g. caffe2/contrib)")
if path_input:
length = len(path_input)
to_show_df = df[df.filename.map(lambda x: x[:length] == path_input)].copy()
st.write(make_table(to_show_df[['author', 'author_mail', 'author_time', 'clickable_filename', 'comment', 'tags', 'jira_ticket']]), unsafe_allow_html=True)
|
nilq/baby-python
|
python
|
import ast
import csv
import korbinian
import korbinian.utils as utils
import numpy as np
import os
import pandas as pd
import sys
import time
# import debugging tools
from korbinian.utils import pr, pc, pn, aaa
def get_TM_indices_from_TMSEG_topo_str(topo_str, TM_symbol="H"):
"""Get TM indices from TMSEG topology string.
Code is not very elegant in comparison to a regex approach, but works fine.
Parameters
----------
topo_str : str
Topology string output from TMSEG.
H = TM helix
Note that TM orientation (N-cyto or N-out) is currently not extracted.
E.g. "11111111111111HHHHHHHHHHHHHHHHHHH222222222222222222222222222222222222222222222222222222222222222222222222HHHHHHHHHHHHHHHHHHHHHHHHH"
"111111111111111111111111HHHHHHHHHHHHHHHHHHHHH222222222222222HHHHHHHHHHHHHHHHHHHH111111111111111111111111111111111111HHHHHHHHHHHHHHHHHHHHHHH"
"22222222222222222222222222222222HHHHHHHHHHHHHHHHHHHHHH1111111111111111111111111HHHHHHHHHHHHHHHHHHHHHHH22222222222222222222222222222222222222"
"2222HHHHHHHHHHHHHHHHHHHHH11111111111111111111111111111111111111"
Returns
-------
TM_indices : tuple
Nested tuple with start and end of all TM helices in topology string.
UniProt indexing is used ("HHH111" is (1:3), not (0:3))
E.g.
        ((15, 33),
(106, 130),
(155, 175),
(191, 210),
(247, 269),
(302, 323),
(349, 371),
(414, 434))
"""
if TM_symbol in topo_str:
# get indices (eg. [28, 29, 30, 31, 32, 33, 34, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 72, 73, 74, 75, 76])
M_indices = get_list_TM_residues_from_topo_string(topo_str, TM_symbol)
#SiPe_indices = get_signal_peptide_indices_from_TMSEG_topo(topo_str)
# get borders to TM regions(eg. [28, 34, 58, 68, 72, 76])
m_borders = []
m_borders.append(M_indices[0])
m_borders = korbinian.prot_list.parse_OMPdb.check_for_border(M_indices, m_borders)
# add the last membrane index (e.g. 33)
m_borders.append(M_indices[-1])
# convert to nested tuples
TM_indices = convert_alternating_list_to_nested_tuples(m_borders)
return TM_indices
else:
return ()
def slice_nonTMD_in_prot_list(df):
"""Using existing indices and sequence, slices out all the TMD sequences.
Originally from TMSEG fasta parse code.
Parameters
----------
df : pd.DataFrame
Returns
-------
df : pd.Dataframe
returns the same dataframe, with added sliced sequences
"""
# glance at the watch
start = time.clock()
# set to be an empty string, which avoids the error related to inserting a python list into a cell
# "ValueError: Must have equal len keys and value when setting with an iterable"
df['list_of_TMDs_excl_SP'] = ""
for n, acc in enumerate(df.index):
''' ~~ SLICE nonTMD sequence ~~ '''
# list of TMDs excluding signal peptides
list_of_TMDs_excl_SP = df.loc[acc, 'list_of_TMDs']
# set value to avoid errors adding a list to a cell
df.set_value(acc, 'list_of_TMDs_excl_SP', list_of_TMDs_excl_SP)
seqstart = 0
        # if any protein in the list contains a SP
if 'SP01_end' in df.columns:
# if THIS PARTICULAR PROTEIN contains a signal peptide sequence
if isinstance(df.loc[acc, 'SP01_seq'], str):
# change sequence start for nonTM to the end of the signal peptide
seqstart = int(df.loc[acc, 'SP01_end'])
# add the SP01 to the list of TMDs
df.set_value(acc, 'list_of_TMDs', ["SP01"] + list_of_TMDs_excl_SP)
# sequence from N-term. to first TMD
TM01_start = int(df.loc[acc, 'TM01_start'])
# NOTE THAT THIS USED TO BE nonTMD_first = df.loc[acc, 'full_seq'][0: TM01_start -1], but indexing missed the last nonTM residue.
nonTMD_first = df.loc[acc, 'full_seq'][seqstart: TM01_start - 1]
# start the sequence with the first segment
sequence_list = [nonTMD_first]
# only for multipass proteins, generate sequences between TMDs
if len(list_of_TMDs_excl_SP) == 0:
# no TMDs are annotated, skip to next protein
continue
# for multipass proteins
elif len(list_of_TMDs_excl_SP) > 1:
for TM_Nr in range(len(list_of_TMDs_excl_SP) - 1):
# the TMD is the equivalent item in the list
TMD = list_of_TMDs_excl_SP[TM_Nr]
# the next TMD, which contains the end index, is the next item in the list
next_TMD = list_of_TMDs_excl_SP[TM_Nr + 1]
# define start of next TMD
start_next = int(df.loc[acc, '%s_start' % next_TMD])
# end of current TMD
end_current = int(df.loc[acc, '%s_end' % TMD])
# middle sequence between TMDs
# note the "start_next - 1", used to convert uniprot indices to python indices
between_TM_and_TMplus1 = df.loc[acc, 'full_seq'][end_current: start_next - 1]
sequence_list.append(between_TM_and_TMplus1)
last_TMD = list_of_TMDs_excl_SP[-1]
# sequence from last TMD to C-term.
lastTM_end = int(df.loc[acc, '%s_end' % last_TMD])
seqlen = int(df.loc[acc, 'seqlen'])
nonTMD_last = df.loc[acc, 'full_seq'][lastTM_end:seqlen]
sequence_list.append(nonTMD_last)
# join all the sequences together
sequence = "".join(sequence_list)
df.loc[acc, 'nonTMD_seq'] = sequence
df.loc[acc, 'len_nonTMD'] = len(sequence)
if n % 50 == 0 and n != 0:
sys.stdout.write(".")
sys.stdout.flush()
if n % 500 == 0:
sys.stdout.write("\n")
sys.stdout.flush()
# glance at the watch again. Ruminate on time passed
sys.stdout.write("\ntime taken to slice nonTMD sequences : {:0.03f} s".format(time.clock() - start))
return df
def get_list_TM_residues_from_topo_string(Topo_data, TM_symbol):
# get list of membrane indices
# note that this is UNIPROT indexing, not python indexing
m_list = [i+1 for i, topo in enumerate(Topo_data) if topo == TM_symbol] # find(Topo_data)
return m_list
def convert_alternating_list_to_nested_tuples(x):
return tuple(zip(x[::2], x[1::2]))
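# Example (illustrative): convert_alternating_list_to_nested_tuples([15, 33, 106, 130])
# returns ((15, 33), (106, 130)), i.e. alternating start/end borders become per-TM tuples.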
def parse_TMSEG_results_DEPRECATED(pathdict, s, logging):
"""DEPRECATED METHOD BASED ON LARGE FILE OF ALL TMSEG RESULTS
USE METHODS BASED ON INDIVIDUAL TMSEG DATAFILES INSTEAD.
"""
logging.info("~~~~~~~~~~~~ starting parse_TMSEG_results_DEPRECATED ~~~~~~~~~~~~")
# create or open dataframe for protein list summary
if os.path.isfile(pathdict["prot_list_summary_csv"]):
df_PLS = pd.read_csv(pathdict["prot_list_summary_csv"], index_col=0)
else:
df_PLS = pd.DataFrame(columns=["v", "date"])
# get the timestamp for current time
t = time.ctime(time.time())
list_number = s['list_number']
# define the uniprot directory with selected records
uniprot_dir = os.path.join(s["data_dir"], 'uniprot')
selected_uniprot_records_flatfile = os.path.join(uniprot_dir, 'selected', 'List%02d_selected_uniprot_records_flatfile.txt' % list_number)
n_aa_before_tmd = s["n_aa_before_tmd"]
n_aa_after_tmd = s["n_aa_after_tmd"]
list_parsed_csv = pathdict["list_parsed_csv"]
# check if the lists tab says to analyse the signal peptides
analyse_sp = True if "SiPe" in s["regions"] else False
output = korbinian.prot_list.uniprot_parse.parse_flatfile_to_csv(selected_uniprot_records_flatfile, n_aa_before_tmd, n_aa_after_tmd, analyse_sp, logging, list_parsed_csv, slice=False)
logging.info(output)
TMSEG_fastalike_path = pathdict['TMSEG_fastalike']
TMSEG_top_txtoutput_path = pathdict['TMSEG_top_txtoutput']
TMSEG_nonTM_outpath = pathdict['TMSEG_nonTM']
df_parsed = pd.read_csv(pathdict["list_parsed_csv"], sep=",", quoting=csv.QUOTE_NONNUMERIC, index_col=0, low_memory=False)
columns_to_keep = ['organism_domain', 'uniprot_acc', 'uniprot_all_accessions', 'uniprot_entry_name', 'uniprot_features',
'uniprot_orgclass', 'uniprot_SiPe', 'singlepass', 'typeI', 'typeII', 'uniprot_KW', 'organism', 'prot_descr', 'membrane',
'multipass', 'gene_name', 'comments_subcellular_location_uniprot', 'uniprot_SiPe', 'full_seq']
# # for datasets without SP found, turn off analyse_sp
# if analyse_sp == True and 'SP01_start' in df_parsed.columns:
# columns_to_keep = ['SP01_start', 'SP01_end', 'SP01_seq']
# else:
# analyse_sp == False
acc_list_orig = list(df_parsed.index)
if os.path.isfile(TMSEG_fastalike_path):
df_PLS.loc["TMSEG_fastalike_path", :] = ("exists", t)
sys.stdout.write("Extracting topology from TMSEG_fastalike file.")
# DEPRECATED drop the full sequence, and get from TMSEG
#df_parsed.drop('full_seq', axis=1, inplace=True)
# read data from file
# list will have acc, seq, topology, acc, seq, topology etc
input_data = []
with open(TMSEG_fastalike_path) as data_file:
for line in data_file:
line = line.strip()
if line[0] == '>':
line = line[1:]
line = line.split(' ')
line = line[0].split('|')
uniprot_acc = line[0]
input_data.append(uniprot_acc)
else:
input_data.append(line)
# initialise pandas dataframe with uniprot accession as index
df_TMSEG = pd.DataFrame(index=input_data[0::3])
# add the signal peptide definitions from UniProt, to be used for slicing the nonTMD etc later
if analyse_sp:
for col in ['SP01_start', 'SP01_end', 'SP01_seq']:
df_TMSEG[col] = df_parsed[col]
# drop unnecessary columns from df_parsed, to be merged later
df_parsed = df_parsed[columns_to_keep]
# add selected columns from input_data list
#df_TMSEG['uniprot_entry_name'] = input_data[1::5]
#df_TMSEG['prot_descr'] = input_data[2::5]
df_TMSEG['full_seq'] = input_data[1::3]
df_TMSEG['topo'] = input_data[2::3]
acc_list_TMSEG = df_TMSEG.index.tolist()
TMSEG_avail_list = set(acc_list_TMSEG).intersection(set(acc_list_orig))
TMSEG_unavail_list = list(set(acc_list_orig) - set(acc_list_TMSEG))
df_PLS.loc["n_prot_TMSEG_file"] = (len(acc_list_TMSEG), t)
# create a boolean whether the TMSEG topology is available
df_parsed.loc[TMSEG_avail_list,"TMSEG_avail"] = True
df_parsed.loc[TMSEG_unavail_list, "TMSEG_avail"] = False
# drop proteins from df_TMSEG that are not in the listxx_parsed.csv
df_TMSEG = df_TMSEG.loc[TMSEG_avail_list, :]
fa_dir = pathdict['TMSEG_unavail_fa_dir']
utils.make_sure_path_exists(fa_dir)
for acc in TMSEG_unavail_list:
out_fasta = os.path.join(fa_dir, "{}.fasta".format(acc))
seq = df_parsed.loc[acc, "full_seq"]
with open(out_fasta, "w") as f:
f.write(">{}\n{}".format(acc, seq))
n_prot_TMSEG_file_not_in_list = len(set(acc_list_TMSEG) - set(acc_list_orig))
logging.info("n_prot_TMSEG_file_not_in_list as not in listxx_parsed.csv = {} ({} remaining)".format(n_prot_TMSEG_file_not_in_list, len(TMSEG_avail_list)))
df_PLS.loc["n_prot_TMSEG_file_not_in_list"] = (n_prot_TMSEG_file_not_in_list, t)
if df_TMSEG.shape[0] == 0:
return sys.stdout.write('no remaining proteins in list!')
# get list of uniprot accessions of proteins where no transmembrane region was predicted
list_nonTMD = []
for acc in df_TMSEG.index:
if 'N' in df_TMSEG.loc[acc, 'topo']:
list_nonTMD.append(acc)
# write list of nonTM proteins to file
# outpath = '/Volumes/Musik/Databases/TMSEG/humanU90_nonTMD.txt'
file = open(TMSEG_nonTM_outpath, 'w')
for line in list_nonTMD:
file.write('{}\n'.format(line))
file.close()
# drop proteins that do not contain TM regions
df_TMSEG = df_TMSEG.drop(list_nonTMD)
# create a boolean whether the TMSEG topology is available
TMSEG_avail_and_TM = set(TMSEG_avail_list) - set(list_nonTMD)
TMSEG_avail_but_SOL = set(acc_list_orig).intersection(set(list_nonTMD))
df_parsed["membrane"] = np.nan
df_parsed.loc[TMSEG_avail_and_TM, "membrane"] = True
df_parsed.loc[TMSEG_avail_but_SOL, "membrane"] = False
# add seqlen and indices for all TMD and SiPe regions
df_TMSEG["seqlen"] = df_TMSEG.full_seq.apply(lambda x: len(x))
#df_TMSEG['M_indices'] = df_TMSEG.topo.apply(get_list_TM_residues_from_topo_string)
#df_TMSEG['SiPe_indices'] = df_TMSEG.topo.apply(get_list_TM_residues_from_topo_string, args=("S"))
df_TMSEG['TM_indices'] = df_TMSEG.topo.apply(get_TM_indices_from_TMSEG_topo_str)
        df_TMSEG['SiPe_indices'] = df_TMSEG.topo.apply(get_TM_indices_from_TMSEG_topo_str, args=("S",))
# # Creating new list (nested list)
# nested_list_of_membrane_borders = []
#
# ########################################################################################
# # #
# # Extract the membrane indices in UniProt Indexing style #
# # #
# ########################################################################################
# # Filling nest with lists of start and end-points
# for m_index_list in df_TMSEG.M_indices:
# m_borders = []
# # add the first membrane index (e.g. 13)
# m_borders.append(m_index_list[0])
# m_borders = korbinian.prot_list.parse_OMPdb.check_for_border(m_index_list, m_borders)
# # add the last membrane index (e.g. 33)
# m_borders.append(m_index_list[-1])
# nested_list_of_membrane_borders.append(m_borders)
#
# # DEPRECATED
# #FOR CONSISTENCY, LEAVE INDEXING STYLE AS UNIPROT
# # ########################################################################################
# # # #
# # # Convert to python indexing style (NECESSARY?? NOT COMPAT WITH UNIPROT!) #
# # # #
# # ########################################################################################
# # array_membrane_borders = np.array(nested_list_of_membrane_borders)
# # nested_list_m_borders_python_indexstyle = []
# # for subarray in array_membrane_borders:
# # # convert to array
# # subarray = np.array(subarray)
# # # add 1 to the second index number, to allow slicing
# # subarray[1::2] = subarray[1::2] + 1
# # # add to list with corrected values, python index style
# # nested_list_m_borders_python_indexstyle.append(list(subarray))
#
# # Creating new column, which contains start and end-points
# #df_TMSEG["Membrane_Borders"] = nested_list_m_borders_python_indexstyle
#
# df_TMSEG["Membrane_Borders"] = nested_list_of_membrane_borders
#
# # Creating new column, which contains the number of TMDS
# #df_TMSEG["number_of_TMDs"] = df_TMSEG.Membrane_Borders.apply(lambda x: len(x) / 2)
#
# df_TMSEG["TM_indices"] = df_TMSEG["Membrane_Borders"].apply(lambda x: tuple(zip(x[::2], x[1::2])))
# create a list of [TM01, TM02, TM03, etc.
long_list_of_TMDs = []
for i in range(1, 50):
long_list_of_TMDs.append("TM{:02d}".format(i))
## for the .set_value function, set dtype as object
df_TMSEG["list_of_TMDs"] = ""
df_TMSEG["list_of_TMDs"].astype(object)
sys.stdout.write('slicing TMD and nonTMD sequences:\n')
for n, acc in enumerate(df_TMSEG.index):
# get nested tuple of TMDs
nested_tup_TMs = df_TMSEG.loc[acc, "TM_indices"]
# slice long list of TMD names to get an appropriate list for that protein [TM01, TM02, TM03, etc.
len_nested_tup_TMs = len(nested_tup_TMs)
list_of_TMDs = long_list_of_TMDs[:len_nested_tup_TMs]
# add that list to the dataframe (could also be added as a stringlist, but that's irritating somehow)
#df_TMSEG.loc[acc, 'list_of_TMDs'] = list_of_TMDs
df_TMSEG.set_value(acc, "list_of_TMDs", list_of_TMDs)
# set seq for slicing
full_seq = df_TMSEG.loc[acc, "full_seq"]
# topo = dft.loc[acc, "Topology"]
# iterate through all the TMDs of that protein, slicing out the sequences
for i, TMD in enumerate(list_of_TMDs):
TMD = list_of_TMDs[i]
start, end = nested_tup_TMs[i]
# with UniProt indexing, need to slice with -1, not like python index style
df_TMSEG.loc[acc, "%s_start" % TMD] = start
df_TMSEG.loc[acc, "%s_end" % TMD] = end
# for python indexing of the TMD rather than uniprot, the start should be minus 1
python_indexing_tuple = (start - 1, end)
df_TMSEG.loc[acc, "%s_seq" % TMD] = utils.slice_with_listlike(full_seq, python_indexing_tuple)
df_TMSEG.loc[acc, "%s_seqlen" % TMD] = len(df_TMSEG.loc[acc, "%s_seq" % TMD])
# dft.loc[acc, TMD + "_top"] = utils.slice_with_listlike(topo, tup)
#DEPRECATED, ONLY REINSTATE IF YOU REALLY WANT TMSEG SP DEFINITIONS TO STAY
# # add signal peptides and their corresponding values to list_of_TMDs
# if analyse_sp == True:
# if type(df_parsed.loc[acc, 'SP01_seq']) == str:
# list_of_TMDs.append('SP01')
# df_TMSEG.set_value(acc, "list_of_TMDs", list_of_TMDs)
# # code necessary for TMSEG signal peptides - depreciated by MO 20.04.2017
# SiPe_indices = df_TMSEG.loc[acc, 'SiPe_indices']
# if SiPe_indices != []:
# df_TMSEG.loc[acc, 'SP01_start'] = SiPe_indices[0]
# df_TMSEG.loc[acc, 'SP01_end'] = SiPe_indices[-1]
# df_TMSEG.loc[acc, 'SP01_seq'] = full_seq[SiPe_indices[0]:SiPe_indices[-1]+1]
# list_of_TMDs.append('SP01')
# df_TMSEG.set_value(acc, "list_of_TMDs", list_of_TMDs)
if n % 50 == 0 and n != 0:
sys.stdout.write(". ")
sys.stdout.flush()
if n % 500 == 0:
sys.stdout.write("\n")
sys.stdout.flush()
# slice out the nonTM segments with a function
# note that for some reason, this is very slow after merging the dataframes
df_TMSEG = slice_nonTMD_in_prot_list(df_TMSEG)
#df_TOP = pd.merge(df_parsed, df_TMSEG, how="left", left_on=True, suffixes=('_list_parsed', ""))# left_index=True, right_index=False,
df_TOP = df_parsed.merge(df_TMSEG, how="left", suffixes=('_list_parsed', "")) # left_index=True, right_index=False,
# actually, I'd prefer to keep these for troubleshooting purposes
# cols_to_drop = ['M_indices', 'SiPe_indices', 'Membrane_Borders', 'TM_indices']
# df_TMSEG.drop(cols_to_drop, axis=1, inplace=True)
elif os.path.isfile(TMSEG_top_txtoutput_path):
df_PLS.loc["TMSEG_top_txtoutput_path", :] = ("exists", t)
""" PARSE DATA WITH THE FOLLOWING FORMAT, proteins listed one after each other
IMPORTANT : this format is sub-optimal, because the sequences come from uniprot, and the predictions from TMPRED
Can only be trusted when they are from the same date: best to use TMPRED output which also contains the orig sequence.
---
ID: A4ZUB1
# TRANSMEM 6 18 4
# TRANSMEM 50 67 7
SIG: SIGNAL 1 22 {ECO:0000255}.
TMH: TRANSMEM 53 69 Helical. {ECO:0000255}.
---
"""
# if the regions column in the lists tab is "TM01" instead of the usual "TM", take only the first TM
take_only_the_first_TM = s["regions"] == "TM01"
# create dataframe for text topology (dftt)
dftt = pd.DataFrame()
with open(TMSEG_top_txtoutput_path, "r") as f:
acc_counter = 0
for line in f:
if line[0:4] == "ID: ":
acc = line.split(" ")[1].strip("\n")
dftt.loc[acc_counter, "acc"] = acc
acc_counter += 1
# reset the TM_counter
TM_counter = 1
if line[0:10] == "# TRANSMEM":
if TM_counter > 1:
if take_only_the_first_TM:
# skip to next line, as the first TM is already taken
continue
# split by tab
split = line.split("\t")
# the start is split[1] (end is not really necessary here)
start = split[1]
# note that acc_counter += 1 is already + 1 for the next protein,
# therefore the dftt.loc is acc_counter-1
dftt.loc[acc_counter - 1, "TM{:02d}_start".format(TM_counter)] = start
end = split[2]
# note that acc_counter += 1 is already + 1 for the next protein,
# therefore the dftt.loc is acc_counter-1
dftt.loc[acc_counter - 1, "TM{:02d}_end".format(TM_counter)] = end
TM_counter += 1
# add an extra number_of_TMDs column, so they can be counted consistently
dftt["number_of_TMDs"] = 0
for row in dftt.index:
# drop TM02_start etc if they don't contain data
subset = dftt.loc[row, :].dropna()
# count columns
n_cols = subset.shape[0]
# calculate number of columns (TM01_start, TM01_end) /2, which is the number of TMDs
number_of_TMDs = int((n_cols - 2) / 2)
dftt.loc[row, "number_of_TMDs"] = number_of_TMDs
dftt.loc[row, "list_of_TMDs"] = str(["TM{:02d}".format(n) for n in range(1, number_of_TMDs + 1)])
# set the acc as the index, so it can be merged with df_parsed
dftt.set_index("acc", drop=False, inplace=True)
# save temp csv with TMSEG output
TMSEG_txtoutput_parsed_csv = TMSEG_top_txtoutput_path[:-4] + "TMSEG_txtoutput_parsed.csv"
dftt.to_csv(TMSEG_txtoutput_parsed_csv)
df = pd.merge(dftt, df_parsed, left_index=True, right_index=True, suffixes=('', '_list_parsed'))
# convert from string to python list
if isinstance(df['list_of_TMDs'][0], str):
df['list_of_TMDs'] = df['list_of_TMDs'].dropna().apply(lambda x: ast.literal_eval(x))
# (re)define sequence length
df["seqlen"] = df["full_seq"].str.len()
# slice out all the TMD sequences
for n, acc in enumerate(df.index):
list_of_TMDs = df.loc[acc, "list_of_TMDs"]
# add that list to the dataframe (could also be added as a stringlist, but that's irritating somehow)
# set seq for slicing
full_seq = df.loc[acc, "full_seq"]
# iterate through all the TMDs of that protein, slicing out the sequences
for i in range(len(list_of_TMDs)):
TMD = list_of_TMDs[i]
tuple_slice_indices = (df.loc[acc, "%s_start" % TMD], df.loc[acc, "%s_end" % TMD])
df.loc[acc, "%s_seq" % TMD] = utils.slice_with_listlike(full_seq, tuple_slice_indices)
df.loc[acc, "%s_seqlen" % TMD] = len(df.loc[acc, "%s_seq" % TMD])
# add signal peptides and their corresponding values to list_of_TMDs
if analyse_sp == True:
if type(df_parsed.loc[acc, 'SP01_seq']) == str:
list_of_TMDs.append('SP01')
df.set_value(acc, "list_of_TMDs", list_of_TMDs)
start = time.clock()
# slice out the nonTM segments with a function
# note that for some reason, this is very slow after merging the dataframes
df_TOP = slice_nonTMD_in_prot_list(df)
sys.stdout.write("\ntime taken : {:0.03f} s".format(time.clock() - start))
else:
raise FileNotFoundError("None of the TMSEG combined output files were found.")
# define number of TMDs (includes Signal peptides!)
df_TOP["number_of_TMDs"] = df_TOP["list_of_TMDs"].dropna().apply(lambda x : len(x))
df_TOP['parse_TMSEG'] = True
df_TOP.to_csv(pathdict["list_parsed_csv"], sep=",", quoting=csv.QUOTE_NONNUMERIC)
logging.info("\n~~~~~~~~~~~~ parse_TMSEG_results_DEPRECATED is finished ~~~~~~~~~~~~")
# def get_signal_peptide_indices_from_TMSEG_topo(Topo_data):
# # as above for membrane regions
# sp_list = [i for i, topo in enumerate(Topo_data) if topo == "S"] # find(Topo_data)
# return sp_list
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Swets NDVI filtering
author: Laust Færch @ DHI GRAS
Created on 2020/08/29
Based on the article:
Swets, D.L, Reed, B.C., Rowland, J.D., Marko, S.E., 1999. A weighted least-squares approach to temporal
NDVI smoothing. In: Proceedings of the 1999 ASPRS Annual Conference, Portland, Oregon, pp. 526-536.
"""
import numpy as np
from numba import jit
from scipy import interpolate
from scipy.ndimage.filters import generic_filter
def _interpolate1d(data):
# we need at least 2 non-nan elements
if np.sum(~np.isnan(data)*1) < 2:
return data
good = ~np.isnan(data)
# scipy interpolation
finterp = interpolate.interp1d(np.flatnonzero(good),
data[good], kind='linear',
fill_value=np.nan,
bounds_error=False)
yinterp = finterp(np.arange(data.shape[0]))
return yinterp
# calculate the weight of each sample based on neighbours
def _calc_weights(y):
# for class local_peak, sloping_points, local_valley
class_weights = [1.5, 0.5, 0.005] # weights defined in article
left_shift = (y - np.roll(y, -1)) >= 0
right_shift = (y - np.roll(y, 1)) >= 0
peaks = left_shift & right_shift
valleys = (~left_shift) & (~right_shift)
slopes = (~peaks) & (~valleys)
weights = np.zeros_like(y)
weights[peaks] = class_weights[0]
weights[slopes] = class_weights[1]
weights[valleys] = class_weights[2]
return weights
# calculate the weighted linear regression
@jit(nopython=True)
def _calc_linreg(x, y, w):
eps = 1e-8
sw = np.sum(w)
sy = np.sum(w * y)
sx = np.sum(w * x)
sxy = np.sum(w * x * y)
sx2 = np.sum(w * x ** 2)
num = (sw * sxy - sx * sy)
denom = (sw * sx2 - sx ** 2)
if denom == 0:
b = 0
else:
b = num / denom
a = (sy - b * sx) / (sw + eps)
return a, b
@jit(nopython=True)
def _calc_linreg_wrapper_a(xyw):
n = int(np.round(xyw.shape[0] / 3))
xyw = xyw.reshape(3, n)
a, b = _calc_linreg(xyw[0, :], xyw[1, :], xyw[2, :])
return a
@jit(nopython=True)
def _calc_linreg_wrapper_b(xyw):
n = int(np.round(xyw.shape[0] / 3))
xyw = xyw.reshape(3, n)
a, b = _calc_linreg(xyw[0, :], xyw[1, :], xyw[2, :])
return b
def _piecewise_linreg(xyw, window_width=3):
n = int(np.round((window_width - 1) / 2))
piece_a = generic_filter(xyw.T, _calc_linreg_wrapper_a, size=(3, window_width), mode='nearest')
piece_b = generic_filter(xyw.T, _calc_linreg_wrapper_b, size=(3, window_width), mode='nearest')
# pad array
piece_a = np.pad(piece_a[1, :], n, 'edge')
piece_b = np.pad(piece_b[1, :], n, 'edge')
smooth_a = np.convolve(piece_a, np.ones(window_width) / window_width, mode='valid')
smooth_b = np.convolve(piece_b, np.ones(window_width) / window_width, mode='valid')
y_est = smooth_b * xyw[:, 0] + smooth_a
return y_est
# Apply the swets filter on 1d array
def _apply_swets1d(y):
window_width = 3 # window width
# dont smooth if all nan
if np.all(np.isnan(y)):
return y
y_smoothed = np.zeros_like(y)
y_smoothed[:] = np.nan
x = np.flatnonzero(~np.isnan(y))
w = _calc_weights(y[x])
xyw = np.stack((x, y[x], w), axis=1)
y_smoothed[x] = _piecewise_linreg(xyw, window_width)
return y_smoothed
# slow: 3024x3024x7 array takes approx 1 hour on my machine
def swets_filter(data, do_interpolate=True, axis=2, invert=False):
"""
:param data: np.array((y,x,t))
NDVI raster timeseries. Each image in timeseries is shape y,x. Images are stacked along t (time dimension)
nan values are initially ignored by swets. At the last step, nan values are replaced by linear interpolation
:param do_interpolate: bool
True if we want to apply interpolation of nan values
:param axis: int
        Axis of the time dimension in the array. Filtering/interpolation will be applied along this axis.
    :param invert: bool
        Inversion of the data and output. This is useful for albedo, where bad cloud masking forces the
        values up instead of down as in NDVI.
:return: y_smoothed: np.array(y,x,t)
"""
if invert:
data = data * -1
print('running filter...')
y_smoothed = np.apply_along_axis(_apply_swets1d, axis, data)
if do_interpolate:
print('running interpolation...')
y_smoothed = np.apply_along_axis(_interpolate1d, axis, y_smoothed)
if invert:
y_smoothed = y_smoothed * -1
return y_smoothed.astype(data.dtype)
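# A minimal usage sketch (synthetic data and assumed shapes; not part of the original module):
if __name__ == "__main__":
    ndvi = np.random.rand(10, 10, 7).astype(np.float32)
    ndvi[2, 3, 4] = np.nan  # simulate a cloud-masked gap in the time series
    smoothed = swets_filter(ndvi, do_interpolate=True, axis=2)
    print(smoothed.shape, smoothed.dtype)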
|
nilq/baby-python
|
python
|
import json
from django import forms
from commons.file import file_utils
from commons import file_name_tools
from wikis.attachments import attachment_tool
from django.core.exceptions import ValidationError
from datetime import datetime
from uzuwiki.settings_static_file_engine import MAX_FILE_SIZE, MAX_FILE_SIZE_MESSAGE
from logging import getLogger
logger = getLogger(__name__)
def validate_attachment_file_size(file):
    # Check that the uploaded file is not too large
if file.size > MAX_FILE_SIZE:
raise ValidationError(MAX_FILE_SIZE_MESSAGE)
class FileUploadFileForm(forms.Form):
wiki_id = forms.CharField(label="WikiID")
page_name = forms.CharField(label="ページ名")
file = forms.FileField(label="ファイル", validators=[validate_attachment_file_size])
def __init__(self, wiki_id, page_dirs, **kwargs):
super().__init__(**kwargs)
self.fields['wiki_id'].initial = wiki_id
self.fields['page_name'].initial = file_name_tools.page_dirs_to_page_name(page_dirs)
def put(self, request):
file = request.FILES['file']
        # Get the file path for the page
file_name = file_name_tools.page_name_to_file_name(self.data["page_name"])
        # Get the list of attachment files
attachment_file_data = attachment_tool.get_or_new(self.data["wiki_id"], file_name)
        # Save the attachment file.
record = file_utils.put_static_file(self.data["wiki_id"], file.name, file)
attachment_file_data["attachments"].append(record)
timestamp = datetime.now().isoformat()
if "created_at" not in attachment_file_data:
attachment_file_data["created_at"] = timestamp
attachment_file_data["updated_at"] = timestamp
        # Save the attachment metadata (JSON) file.
file_utils.put_file(self.data["wiki_id"], file_name + ".attachments.json", json.dumps(attachment_file_data))
class Meta:
fields = ("wiki_id", "page_name", "file")
|
nilq/baby-python
|
python
|
# Jared Dyreson
# CPSC 386-01
# 2021-11-29
# jareddyreson@csu.fullerton.edu
# @JaredDyreson
#
# Lab 00-04
#
# Some filler text
#
"""
This module contains a basic "factory" pattern for generating new Display instances
"""
import abc
import dataclasses
import functools
import json
import pathlib
import pygame
import sys
import time
import typing
from datetime import datetime
from Invaders.UI.button import Button
from Invaders.Dataclasses.direction import Direction
from Invaders.Dataclasses.player import Player
from Invaders.Dataclasses.point import Point
class Display:
"""
Not fully virtual class for each display to
inherit from
"""
def __init__(
self, width: int = 900, height: int = 900, color=pygame.Color("black")
):
# Checks for errors encountered
_, num_fail = pygame.init()
if num_fail > 0:
print(f"[FATAL] There were {num_fail} error(s) produced!")
sys.exit(-1)
else:
print("[+] Game successfully initialised")
pygame.font.init()
self.width, self.height = width, height
self._display_surface = pygame.display.set_mode(
(self.width, self.height), pygame.HWSURFACE
)
self.last_position = Point(-1, -1)
self.background_color = color
self.fps_meter = pygame.time.Clock()
@abc.abstractmethod
def draw(self):
"""
Abstract draw class that must be implemented
"""
raise NotImplementedError(
f"Display.draw isn abstract method and should not be invoked directly"
)
def get_surface(self) -> pygame.Surface:
"""
Obtain the current display surface
to a given window
@return - pygame.Surface
"""
return self._display_surface
def clear_text(self) -> None:
"""
This removes all text from the screen
"""
self._display_surface.fill(self.background_color)
def draw_image(self, img_object: pygame.Surface, position: Point) -> None:
"""
Draw an image object (in the form of a surface) to the screen
at a given position
@param img_object : currently loaded pygame surface that represents an image
@param position : Cartesian coordinates that represent where on the screen to be drawn to
"""
self._display_surface.blit(img_object, dataclasses.astuple(position))
def write_text(
self, text: str, position: Point, font, color=pygame.Color("white")
) -> None:
"""
Write text to the screen, thanks to @NICE
for helping with this!
@param text - stuff we want to write to the screen
@param position - where on the screen should it be writing to
@param font - current font used
@param color - selected color
"""
lines = [line.split(" ") for line in text.splitlines()]
space = font.size(" ")[0]
x, y = dataclasses.astuple(position)
self.last_position = position
for line in lines:
for word in line:
word_surface = font.render(word, 0, color)
width, height = word_surface.get_size()
if x + width >= self.width + 100:
x = position.x
y += height
self._display_surface.blit(word_surface, (x, y))
x += width + space
x = position.x
y += height
def center(self) -> Point:
"""
Obtain the center of the current scene
@return Point
"""
return Point(self.width // 4, self.height // 4)
class HighScoreDisplay(Display):
"""
Class that represents the high score display
"""
def __init__(self, current_score: int, username: str):
super().__init__()
self.title_position = Point(250, 45)
self.logo_position = Point(575, 435)
self.break_from_draw = False
self.back_button = Button(
self._display_surface,
Point(300, 575),
300,
50,
"Quit",
functools.partial(self.terminate_intro),
)
self.scoreboard_file = pathlib.Path("scores/scoreboard.json")
self.scores = self.obtain_high_score_list(self.scoreboard_file)
self.scores.append(
Player(username, current_score,
datetime.now().strftime("%m/%d/%Y %H:%M"))
)
self.scores = sorted(self.scores, reverse=True)
def obtain_high_score_list(self, path: pathlib.Path) -> typing.List[Player]:
"""
Read in high score list found in a json file
that is then loaded and sorted by the score obtained
by a given player
@param path - path to JSON file
@return - typing.List[Player]
"""
with open(path, "r") as fp:
contents = json.load(fp)
return [Player(**element) for element in contents["players"]]
def terminate_intro(self):
"""
This terminates the current scene
"""
self.break_from_draw = True
self._display_surface.fill(self.background_color)
master = {"players": []}
for score in self.scores:
master["players"].append(dataclasses.asdict(score))
with open(self.scoreboard_file, "w") as fp:
json.dump(master, fp)
pygame.quit()
sys.exit()
def draw(self):
"""
Draw all the high scores in a row like
manner
"""
draw_loop = True
while draw_loop and not self.break_from_draw:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.terminate_intro()
self.write_text(
f"HIGH SCORES", self.title_position, pygame.font.SysFont(
None, 50)
)
self.write_text(
self.back_button.contents,
self.back_button.center(),
pygame.font.SysFont(None, 30),
)
self.back_button.draw()
for i, score in enumerate(self.scores[0:5]):
x, y = dataclasses.astuple(self.center())
self.write_text(
score.name,
Point((x - 50), y + i * 50),
pygame.font.SysFont(None, 33),
)
self.write_text(
str(score.score),
Point((x - 50) + 200, y + i * 50),
pygame.font.SysFont(None, 33),
)
self.write_text(
score.tod,
Point((x - 50) + 400, y + i * 50),
pygame.font.SysFont(None, 33),
)
pygame.display.flip()
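# Usage sketch (hypothetical; assumes a display is available and scores/scoreboard.json
# exists with a {"players": [...]} structure):
#   scene = HighScoreDisplay(current_score=1200, username="player1")
#   scene.draw()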
|
nilq/baby-python
|
python
|
from intent_parser.server.intent_parser_server import app, IntentParserServer
import logging.config
import os
logger = logging.getLogger(__name__)
def _setup_logging():
logging.basicConfig(level=logging.INFO,
format="[%(levelname)-8s] %(asctime)-24s %(filename)-23s line:%(lineno)-4s %(message)s")
logger.addHandler(logging.FileHandler('intent_parser_server.log'))
# Switch flask to production mode using WSGI
def run():
app.config['DEBUG'] = False
_setup_logging()
intent_parser_server = IntentParserServer(os.environ.get("SBH_USERNAME"),
os.environ.get("SBH_PASSWORD"),
os.environ.get("AUTHN"),
'')
intent_parser_server.initialize()
return app
my_app = run()
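# Deployment sketch (assumed invocation; the module path below is hypothetical and depends
# on where this file lives in the package): gunicorn "your_package.wsgi_module:my_app"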
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 10 21:32:34 2019
@author: teddy
"""
from docx import Document
from docx.shared import RGBColor
#from docx.dml.color import ColorFormat
def getText(filename):
doc = Document(filename)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
hidden = '\n'.join(fullText)
# hidden_word = list(hidden.split('\n'))
document = Document()
run = document.add_paragraph().add_run(hidden)
font = run.font
font.color.rgb = RGBColor(0, 0, 0)
run = document.add_paragraph().add_run('Warning! Keyword stuffing!')
font = run.font
font.color.rgb = RGBColor(255, 0, 0)
document.save('Show_All.docx')
return 'Done!'
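# Usage sketch (assumes python-docx is installed and an input .docx file exists):
#   getText('input.docx')  # writes Show_All.docx and returns 'Done!'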
|
nilq/baby-python
|
python
|
########################################################
# Copyright (c) 2015-2017 by European Commission. #
# All Rights Reserved. #
########################################################
extends("BaseKPI.py")
"""
Consumption (Wh)
------------------
Indexed by
* scope
* delivery point
* energy (including fuels)
* test case
* technology
* asset name
Return the annual volumes of energy demand for a given technology or contract.
We here consider the flexible demand after optimization (using the consumption of the corresponding assets).
"""
TECHNO_TO_CONSIDER = DEMAND_TYPES|PRODUCTION_TYPES|{F_GAS_CONSUMPTION}
def computeIndicator(context, indexFilter, paramsIndicator, kpiDict):
timeStepDuration = getTimeStepDurationInHours(context)
selectedScopes = indexFilter.filterIndexList(0, getScopes())
selectedDeliveryPoints = indexFilter.filterIndexList(1, getDeliveryPoints(context))
selectedEnergies = indexFilter.filterIndexList(2, getEnergies(context, includedEnergies=PRODUCED_ENERGIES))
selectedTestCases = indexFilter.filterIndexList(3, context.getResultsIndexSet())
selectedTechnologies = indexFilter.filterIndexList(4, getTechnologies(context, includeFinancialAssetTypes=True, includedTechnologies=TECHNO_TO_CONSIDER))
selectedAssets = indexFilter.filterIndexList(5, getAssets(context, includeFinancialAssets=True, includedTechnologies=TECHNO_TO_CONSIDER))
selectedAssetsByScope = getAssetsByScope(context, selectedScopes, includeFinancialAssets=True, includedAssetsName=selectedAssets, includedTechnologies = selectedTechnologies)
consumptionDict = getConsumptionDict(context, selectedScopes, selectedTestCases, selectedEnergies, selectedDeliveryPoints, selectedAssetsByScope, indexByAsset=True)
for index in consumptionDict:
kpiDict[index] = consumptionDict[index].getSumValue() * timeStepDuration * MW_TO_W_CONVERSION
return kpiDict
def get_indexing(context) :
baseIndexList = [getScopesIndexing(), getDeliveryPointsIndexing(context), getEnergiesIndexing(context, includedEnergies=PRODUCED_ENERGIES), getTestCasesIndexing(context),
getTechnologiesIndexing(context, includeFinancialAssetTypes=True, includedTechnologies=TECHNO_TO_CONSIDER),
getAssetsIndexing(context, includeFinancialAssets=True, includedTechnologies=TECHNO_TO_CONSIDER)]
return baseIndexList
IndicatorLabel = "Consumption"
IndicatorUnit = "Wh"
IndicatorDeltaUnit = "Wh"
IndicatorDescription = "Total consumption per technology"
IndicatorParameters = []
IndicatorIcon = ""
IndicatorCategory = "Results"
IndicatorTags = " Power System, Gas System, Power Markets "
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
##########################################################################
ZipMe : GAE Content Downloader
##########################################################################
Just add this lines in your app.yaml :
- url: /zipme
script: zipme.py
##########################################################################
""" # manatlan
#from google.appengine.ext import webapp
#from google.appengine.ext.webapp.util import run_wsgi_app
import webapp2 as webapp
from google.appengine.api import users
#import wsgiref.handlers
import zipfile
import datetime
import os,re,sys,stat
from cStringIO import StringIO
def createZip(path):
def walktree (top = ".", depthfirst = True):
names = os.listdir(top)
if not depthfirst:
yield top, names
for name in names:
try:
st = os.lstat(os.path.join(top, name))
except os.error:
continue
if stat.S_ISDIR(st.st_mode):
for (newtop, children) in walktree (os.path.join(top, name),
depthfirst):
yield newtop, children
if depthfirst:
yield top, names
list=[]
for (basepath, children) in walktree(path,False):
for child in children:
f=os.path.join(basepath,child)
if os.path.isfile(f):
f = f.encode(sys.getfilesystemencoding())
list.append( f )
f=StringIO()
file = zipfile.ZipFile(f, "w")
for fname in list:
nfname=os.path.join(os.path.basename(path),fname[len(path)+1:])
file.write(fname, nfname , zipfile.ZIP_DEFLATED)
file.close()
f.seek(0)
return f
class ZipMaker(webapp.RequestHandler):
def get(self):
if users.is_current_user_admin():
folder = os.path.dirname(__file__)
self.response.headers['Cache-Control'] = 'public, max-age=60'
# self.response.headers['Last-Modified'] = lastmod.strftime("%a, %d %b %Y %H:%M:%S GMT")
expires = datetime.datetime.now() + datetime.timedelta(minutes=1)
self.response.headers['Expires'] = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
ffdate = datetime.datetime.now()
fdate = ffdate.strftime("%d-%b-%Y_%H-%M-%S")
self.response.headers['Content-Type'] ='application/zip; name="zipme_%s_%s.zip"' % (fdate, os.path.basename(folder))
self.response.headers['Content-Disposition'] = 'attachment; filename="zipme_%s_%s.zip"' % (fdate, os.path.basename(folder))
fid=createZip(folder)
while True:
buf=fid.read(2048)
if buf=="": break
self.response.out.write(buf)
fid.close()
else:
self.response.headers['Content-Type'] = 'text/html'
self.response.out.write("<a href=\"%s\">You must be admin</a>." %
users.create_login_url("/zipme"))
#def main():
app = webapp.WSGIApplication(
[('/zipme', ZipMaker)],
debug=False)
# wsgiref.handlers.CGIHandler().run(application)
# run_wsgi_app(application)
#if __name__ == "__main__":
# main()
|
nilq/baby-python
|
python
|
""" Models for mongo database """
# from pymongo.write_concern import WriteConcern
from pymodm import MongoModel, fields
class Testing(MongoModel):
onefield = fields.CharField()
# NOTE: do not touch connection here, see experiments/mongo.py
# class Meta:
# connection_alias = 'test'
# # write_concern = WriteConcern(j=True)
# FIXME: two fields are missing in ExternalAccounts
class wf_do(MongoModel):
dc_identifier = fields.CharField()
dc_title = fields.CharField()
dc_subject = fields.CharField()
dc_creator = fields.CharField()
dc_contributor = fields.CharField()
dc_publisher = fields.CharField()
dc_type = fields.CharField()
dc_format = fields.CharField()
dc_date = fields.DateTimeField()
dc_coverage_x = fields.FloatField()
dc_coverage_y = fields.FloatField()
dc_coverage_z = fields.FloatField()
dc_coverage_t_min = fields.DateTimeField()
dc_coverage_t_max = fields.DateTimeField()
dcterms_available = fields.DateTimeField()
dcterms_dateAccepted = fields.DateTimeField()
dc_rights = fields.CharField()
dcterms_isPartOf = fields.CharField()
fileId = fields.CharField()
irods_path = fields.CharField()
# class Meta:
# write_concern = WriteConcern(j=True)
# connection_alias = MYDB
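# Usage sketch (assumes a pymodm connection has been configured elsewhere, e.g. with
# pymodm.connection.connect("mongodb://localhost:27017/mydb")):
#   Testing(onefield="hello").save()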
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.1 on 2019-05-10 22:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wells', '0082_auto_20190510_0000'),
('wells', '0082_merge_20190510_1926'),
]
operations = [
]
|
nilq/baby-python
|
python
|
from unittest import TestCase
from semanticpy.vector_space import VectorSpace
from nose.tools import *
class TestSemanticPy(TestCase):
def setUp(self):
self.documents = ["The cat in the hat disabled", "A cat is a fine pet ponies.", "Dogs and cats make good pets.","I haven't got a hat."]
def it_should_search_test(self):
vectorSpace = VectorSpace(self.documents)
eq_(vectorSpace.search(["cat"]), [0.14487566959813258, 0.1223402602604157, 0.07795622058966725, 0.05586504042763477])
def it_should_find_return_similarity_rating_test(self):
vectorSpace = VectorSpace(self.documents)
eq_(vectorSpace.related(0), [1.0, 0.9922455760198575, 0.08122814162371816, 0.0762173599906487])
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
from yices import *
cfg = Config()
cfg.default_config_for_logic('QF_BV')
ctx = Context(cfg)
bv32_t = Types.bv_type(32)
x = Terms.new_uninterpreted_term(bv32_t, 'x')
y = Terms.new_uninterpreted_term(bv32_t, 'y')
zero = Terms.bvconst_integer(32, 0)
fmla0 = Terms.bvsgt_atom(x, zero)
fmla1 = Terms.bvsgt_atom(y, zero)
fmla2 = Terms.bvslt_atom(Terms.bvadd(x, y), x)
ctx.assert_formulas([fmla0, fmla1, fmla2])
status = ctx.check_context()
if status == Status.SAT:
model = Model.from_context(ctx, 1)
model_string = model.to_string(80, 100, 0)
print(model_string)
xval = model.get_value(x)
yval = model.get_value(y)
print('x = {0}\ny = {1}'.format(xval, yval))
cfg.dispose()
ctx.dispose()
Yices.exit()
|
nilq/baby-python
|
python
|
"""A virtual pumpkin which flash neopixels and play sound"""
import random
import math
import time
import board
import digitalio
import audioio
import busio
import adafruit_vl53l0x
import adafruit_thermistor
import neopixel
#########################
# -- slide switch to enable/disable running loop
slide_switch = digitalio.DigitalInOut(board.SLIDE_SWITCH)
#########################
# -- Audio setup
spkren = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
spkren.switch_to_output()
spkren.value = 0
audioout = audioio.AudioOut(board.SPEAKER)
laugh1 = audioio.WaveFile(open("laugh1.wav", "rb"))
laugh2 = audioio.WaveFile(open("laugh2.wav", "rb"))
laughs = [laugh1, laugh2]
music1 = audioio.WaveFile(open("thriller16k.wav", "rb"))
music2 = audioio.WaveFile(open("ghostbusters16k.wav", "rb"))
musics = [music1, music2]
# -- initialise random generator
temp = adafruit_thermistor.Thermistor(board.TEMPERATURE,
10000, 10000, 25, 3950)
seed = int(math.modf(temp.temperature)[0]*10000000)
random.seed(seed)
#########################
# -- Distance sensor
i2c = busio.I2C(board.SCL, board.SDA)
vl53 = adafruit_vl53l0x.VL53L0X(i2c)
#########################
# -- neopixels
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10)
ORANGE = (255, 75, 0)
COLORS = [(0, 0, 0), (255, 0, 0), (255, 255, 0), (0, 255, 0), (0, 255, 255),
(0, 0, 255), (255, 0, 255), (255, 255, 255)]
MAXBRIGHT = 0.7
pixels.brightness = 0.0
pixels.fill(ORANGE)
#########################
# -- animation 1
def anim1(ao):
pixels.fill(ORANGE)
while not ao or ao.playing:
pixels.brightness = MAXBRIGHT
time.sleep(0.15)
pixels.brightness = 0.0
time.sleep(0.2)
def anim2(ao):
pixels.fill(COLORS[0])
while not ao or ao.playing:
pix1 = random.randrange(10)
pix2 = random.randrange(10)
while pix2 == pix1:
pix2 = random.randrange(10)
pix3 = random.randrange(10)
while pix3 == pix1 or pix3 == pix2:
pix3 = random.randrange(10)
pixels[pix1] = COLORS[random.randrange(1, 8)]
pixels[pix2] = COLORS[random.randrange(1, 8)]
pixels[pix3] = COLORS[random.randrange(1, 8)]
pixels.brightness = MAXBRIGHT
time.sleep(0.2)
pixels.brightness = MAXBRIGHT
pixels[pix1] = COLORS[0]
pixels[pix2] = COLORS[0]
pixels[pix3] = COLORS[0]
#########################
# -- Main loop
def pumpkin():
# -- Wait for trigger
print("WAITING TRIGGER")
distance = 1000000
while distance > 1000:
time.sleep(1)
distance = vl53.range
print("Distance: ", distance)
random.randrange(5)
# -- Play random laugh
laugh = random.randrange(len(laughs))
print("laugh: ", laugh)
audioout.play(laughs[laugh])
anim1(audioout)
# -- Play random music
music = random.randrange(len(musics))
print("music: ", music)
audioout.play(musics[music])
anim2(audioout)
print("completed")
time.sleep(10)
while slide_switch.value:
pumpkin()
|
nilq/baby-python
|
python
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import json
import ctypes as ct
from .._constants import VSCODE_CREDENTIALS_SECTION
def _c_str(string):
return ct.c_char_p(string.encode("utf-8"))
class _SECRET_SCHEMA_ATTRIBUTE(ct.Structure):
_fields_ = [
("name", ct.c_char_p),
("type", ct.c_uint),
]
class _SECRET_SCHEMA(ct.Structure):
_fields_ = [
("name", ct.c_char_p),
("flags", ct.c_uint),
("attributes", _SECRET_SCHEMA_ATTRIBUTE * 2),
]
_PSECRET_SCHEMA = ct.POINTER(_SECRET_SCHEMA)
try:
_libsecret = ct.cdll.LoadLibrary("libsecret-1.so.0")
_libsecret.secret_password_lookup_sync.argtypes = [
ct.c_void_p,
ct.c_void_p,
ct.c_void_p,
ct.c_char_p,
ct.c_char_p,
ct.c_char_p,
ct.c_char_p,
ct.c_void_p,
]
_libsecret.secret_password_lookup_sync.restype = ct.c_char_p
_libsecret.secret_password_free.argtypes = [ct.c_char_p]
except OSError:
_libsecret = None
def _get_user_settings_path():
app_data_folder = os.environ["HOME"]
return os.path.join(app_data_folder, ".config", "Code", "User", "settings.json")
def _get_user_settings():
path = _get_user_settings_path()
try:
with open(path) as file:
data = json.load(file)
environment_name = data.get("azure.cloud", "Azure")
return environment_name
except IOError:
return "Azure"
def _get_refresh_token(service_name, account_name):
if not _libsecret:
return None
err = ct.c_int()
attributes = [_SECRET_SCHEMA_ATTRIBUTE(_c_str("service"), 0), _SECRET_SCHEMA_ATTRIBUTE(_c_str("account"), 0)]
pattributes = (_SECRET_SCHEMA_ATTRIBUTE * 2)(*attributes)
schema = _SECRET_SCHEMA()
pschema = _PSECRET_SCHEMA(schema)
ct.memset(pschema, 0, ct.sizeof(schema))
schema.name = _c_str("org.freedesktop.Secret.Generic") # pylint: disable=attribute-defined-outside-init
schema.flags = 2 # pylint: disable=attribute-defined-outside-init
schema.attributes = pattributes # pylint: disable=attribute-defined-outside-init
p_str = _libsecret.secret_password_lookup_sync(
pschema,
None,
ct.byref(err),
_c_str("service"),
_c_str(service_name),
_c_str("account"),
_c_str(account_name),
None,
)
if err.value == 0:
return p_str.decode("utf-8")
return None
def get_credentials():
try:
environment_name = _get_user_settings()
credentials = _get_refresh_token(VSCODE_CREDENTIALS_SECTION, environment_name)
return credentials
except Exception: # pylint: disable=broad-except
return None
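# Usage sketch (not part of the original module): this file is meant to be imported
# from its parent package, so the call below assumes that package context.
#
#     token = get_credentials()
#     if token is None:
#         pass  # libsecret unavailable, settings unreadable, or no cached VS Code token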
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Northwestern University.
#
# invenio-subjects-mesh is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""MeSH subjects_mesh.yaml writer."""
from pathlib import Path
import yaml
def write_yaml(
entries,
filepath=Path(__file__).parent / "vocabularies/subjects_mesh.yaml"
):
"""Write the MeSH yaml file.
Return filepath to written file.
"""
with open(filepath, "w") as f:
yaml.dump(list(entries), f)
return filepath
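# A minimal usage sketch (not part of the original module). The entry dicts and the
# /tmp output path below are hypothetical; real entries come from the MeSH converter.
if __name__ == "__main__":
    demo_entries = [
        {"id": "D000001", "scheme": "MeSH", "subject": "Calcimycin"},
        {"id": "D000002", "scheme": "MeSH", "subject": "Temefos"},
    ]
    print(write_yaml(demo_entries, filepath=Path("/tmp/subjects_mesh_demo.yaml")))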
|
nilq/baby-python
|
python
|
# Copyright (c) 2013 eBay Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The QoS Specs Implementation"""
from oslo_db import exception as db_exc
from oslo_log import log as logging
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.i18n import _, _LE, _LW
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
CONTROL_LOCATION = ['front-end', 'back-end', 'both']
def create(context, name, specs=None):
"""Creates qos_specs.
    :param specs: dictionary that contains specifications for QoS
e.g. {'consumer': 'front-end',
'total_iops_sec': 1000,
'total_bytes_sec': 1024000}
"""
consumer = specs.get('consumer')
if consumer:
# If we need to modify specs, copy so we don't cause unintended
# consequences for the caller
specs = specs.copy()
del specs['consumer']
values = dict(name=name, consumer=consumer, specs=specs)
LOG.debug("Dict for qos_specs: %s", values)
qos_spec = objects.QualityOfServiceSpecs(context, **values)
qos_spec.create()
return qos_spec
def update(context, qos_specs_id, specs):
"""Update qos specs.
:param specs: dictionary that contains key/value pairs for updating
existing specs.
e.g. {'consumer': 'front-end',
'total_iops_sec': 500,
'total_bytes_sec': 512000,}
"""
LOG.debug('qos_specs.update(): specs %s' % specs)
try:
qos_spec = objects.QualityOfServiceSpecs.get_by_id(context,
qos_specs_id)
if 'consumer' in specs:
qos_spec.consumer = specs['consumer']
# If we need to modify specs, copy so we don't cause unintended
# consequences for the caller
specs = specs.copy()
del specs['consumer']
# Update any values in specs dict
qos_spec.specs.update(specs)
qos_spec.save()
except db_exc.DBError:
LOG.exception(_LE('DB error:'))
raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id,
qos_specs=specs)
return qos_spec
def delete(context, qos_specs_id, force=False):
"""Marks qos specs as deleted.
    The 'force' parameter is a flag that determines whether deletion should
    continue when there are entities associated with the qos specs.
    force=True indicates the caller would like to mark the qos specs as
    deleted even if entities are still associated with the target qos specs.
    Trying to delete qos specs that are still associated with entities will
    raise a QoSSpecsInUse exception if force=False (default).
"""
if qos_specs_id is None:
msg = _("id cannot be None")
raise exception.InvalidQoSSpecs(reason=msg)
qos_spec = objects.QualityOfServiceSpecs.get_by_id(
context, qos_specs_id)
qos_spec.destroy(force)
def delete_keys(context, qos_specs_id, keys):
"""Marks specified key of target qos specs as deleted."""
if qos_specs_id is None:
msg = _("id cannot be None")
raise exception.InvalidQoSSpecs(reason=msg)
qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, qos_specs_id)
# Previous behavior continued to delete keys until it hit first unset one,
# so for now will mimic that. In the future it would be useful to have all
# or nothing deletion of keys (or at least delete all set keys),
# especially since order of keys from CLI to API is not preserved currently
try:
for key in keys:
try:
del qos_spec.specs[key]
except KeyError:
raise exception.QoSSpecsKeyNotFound(
specs_key=key, specs_id=qos_specs_id)
finally:
qos_spec.save()
def get_associations(context, qos_specs_id):
"""Get all associations of given qos specs."""
try:
types = objects.VolumeTypeList.get_all_types_for_qos(context,
qos_specs_id)
except db_exc.DBError:
LOG.exception(_LE('DB error:'))
msg = _('Failed to get all associations of '
'qos specs %s') % qos_specs_id
LOG.warning(msg)
raise exception.CinderException(message=msg)
result = []
for vol_type in types:
result.append({
'association_type': 'volume_type',
'name': vol_type.name,
'id': vol_type.id
})
return result
def associate_qos_with_type(context, specs_id, type_id):
"""Associate qos_specs with volume type.
Associate target qos specs with specific volume type.
:param specs_id: qos specs ID to associate with
:param type_id: volume type ID to associate with
:raises VolumeTypeNotFound: if volume type doesn't exist
:raises QoSSpecsNotFound: if qos specs doesn't exist
:raises InvalidVolumeType: if volume type is already associated
with qos specs other than given one.
:raises QoSSpecsAssociateFailed: if there was general DB error
"""
try:
get_qos_specs(context, specs_id)
res = volume_types.get_volume_type_qos_specs(type_id)
if res.get('qos_specs', None):
if res['qos_specs'].get('id') != specs_id:
msg = (_("Type %(type_id)s is already associated with another "
"qos specs: %(qos_specs_id)s") %
{'type_id': type_id,
'qos_specs_id': res['qos_specs']['id']})
raise exception.InvalidVolumeType(reason=msg)
else:
db.qos_specs_associate(context, specs_id, type_id)
except db_exc.DBError:
LOG.exception(_LE('DB error:'))
LOG.warning(_LW('Failed to associate qos specs '
'%(id)s with type: %(vol_type_id)s'),
dict(id=specs_id, vol_type_id=type_id))
raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
type_id=type_id)
def disassociate_qos_specs(context, specs_id, type_id):
"""Disassociate qos_specs from volume type."""
try:
get_qos_specs(context, specs_id)
db.qos_specs_disassociate(context, specs_id, type_id)
except db_exc.DBError:
LOG.exception(_LE('DB error:'))
LOG.warning(_LW('Failed to disassociate qos specs '
'%(id)s with type: %(vol_type_id)s'),
dict(id=specs_id, vol_type_id=type_id))
raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
type_id=type_id)
def disassociate_all(context, specs_id):
"""Disassociate qos_specs from all entities."""
try:
get_qos_specs(context, specs_id)
db.qos_specs_disassociate_all(context, specs_id)
except db_exc.DBError:
LOG.exception(_LE('DB error:'))
LOG.warning(_LW('Failed to disassociate qos specs %s.'), specs_id)
raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
type_id=None)
def get_all_specs(context, filters=None, marker=None, limit=None, offset=None,
sort_keys=None, sort_dirs=None):
"""Get all non-deleted qos specs."""
return objects.QualityOfServiceSpecsList.get_all(
context, filters=filters, marker=marker, limit=limit, offset=offset,
sort_keys=sort_keys, sort_dirs=sort_dirs)
def get_qos_specs(ctxt, spec_id):
"""Retrieves single qos specs by id."""
if spec_id is None:
msg = _("id cannot be None")
raise exception.InvalidQoSSpecs(reason=msg)
if ctxt is None:
ctxt = context.get_admin_context()
return objects.QualityOfServiceSpecs.get_by_id(ctxt, spec_id)
|
nilq/baby-python
|
python
|
# Task 3. Variant 30.
# Write a program that prints the name "Илья Арнольдович Файзильберг" and asks for his pen name.
# Shemenev A.V
# 14.03.16
print("Герой нашей программы Илья Арнольдович Файзильберг")
print("Под каким именем мы знаем этого человека? Ваш ответ:")
x=input()
print ("Все верно, псевдоним - " +x)
input()
|
nilq/baby-python
|
python
|
from models.generators.fcn32s import FCN32s
from models.generators.fcn16s import FCN16s
from models.generators.fcn8s import FCN8s
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from messenger import views
urlpatterns = [
url(r'^messenger/send', views.send, name='send'),
url(r'^messenger/read', views.read, name='read')
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import argparse
import progressbar
import requests
import os
import sys
sourceapp = "AS50559-DIVD_NL"
def rest_get(call,resource,retries=3):
url = "https://stat.ripe.net/data/{}/data.json?resource={}&sourceapp={}".format(call,resource,sourceapp)
try:
response = requests.get(url, timeout = 1)
except KeyboardInterrupt:
sys.exit()
except:
if retries > 0:
return rest_get(call,resource,retries-1)
else:
return "Timeout"
reply = response.json()
return reply['data']
def get_info(line) :
# Get abuse info
# https://stat.ripe.net/data/abuse-contact-finder/data.<format>?<parameters>
abuse_reply = rest_get("abuse-contact-finder",line)
contacts = abuse_reply['anti_abuse_contacts']['abuse_c']
if len(contacts) > 0 :
abuse_email = contacts[0]['email']
else:
abuse_email = "Not found"
# Get ASN
# https://stat.ripe.net/data/network-info/data.json?resource=194.5.73.5
asn_reply = rest_get("network-info",line)
asn = asn_reply['asns'][0]
prefix = asn_reply['prefix']
# Get ASN info
if asn in asns:
asn_data = asns[asn]
else:
asn_data = rest_get("as-overview",asn)
asns[asn] = asn_data
holder = asn_data['holder']
# Get geolocation
if prefix in locations:
location_data = locations[prefix]
else:
location_data = rest_get("maxmind-geo-lite",prefix)
city=location_data['located_resources'][0]['locations'][0]['city']
country=location_data['located_resources'][0]['locations'][0]['country']
print('"{}","{}","{}","{}","{}","{}","{}"'.format(line,abuse_email,prefix,asn,holder,country,city))
if args.output :
outfile.write('"{}","{}","{}","{}","{}","{}","{}"\n'.format(line,abuse_email,prefix,asn,holder,country,city))
outfile.flush()
parser = argparse.ArgumentParser(description='Get abuse and location information for IPs', allow_abbrev=False)
parser.add_argument('input', type=str, metavar="INPUT.txt", nargs="*", default="/dev/stdin", help="Either a list files with one IP address per line or a IP address [default: stdin]")
parser.add_argument('--output', "-o", type=str, metavar="OUTPUT.csv", help="output csv file")
args = parser.parse_args()
if isinstance(args.input,str):
files = [args.input]
else :
files = args.input
asns = {}
locations = {}
if args.output :
outfile = open(args.output,"w")
if args.output :
outfile.write('ip,abuse,prefix,asn,holder,country,city\n')
print('ip,abuse,prefix,asn,holder,country,city')
for f in files:
if os.path.isfile(f):
file = open(f,"r")
for line in file.readlines():
line = line.strip()
try:
get_info(line)
except:
print("Error with '{}'".format(line), file=sys.stderr)
else:
try:
get_info(f)
except:
print("Error with '{}'".format(line), file=sys.stderr)
|
nilq/baby-python
|
python
|
# Each MNIST example in CSV form is one line of 785 numbers between 0 and 255:
# the first number is the label, the remaining 784 are the 28 x 28 pixel values.
# Here the dataset is loaded directly through tensorflow.keras.datasets instead.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
img_rows, img_cols = 28, 28
#plot the first image in the dataset
# plt.imshow(x_train[0])
# reshape for channel last
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
#one-hot encoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
#create model
model = Sequential()
# layers
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=input_shape)) # kernel size is filter matrix size
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(10, activation='softmax')) # softmax makes output sum up to 1
# compile model using accuracy to measure model performance
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# train the model
seqModel = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=3)
# evaluate model on test set
model.evaluate(x_train,y_train,batch_size=128) # returns loss and accuracy
# predict first 10 images in the test set
print(model.predict(x_test[:10]))
# actual results for first 10 images in test set
print(y_test[:10])
# save model as hdf5 file
from tensorflow.keras.models import load_model
model.save('mnist.h5')
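# A short follow-up sketch: reload the model just saved above and re-score the test
# set to confirm the HDF5 round trip works ('mnist.h5' is the file written by save).
reloaded = load_model('mnist.h5')
loss, acc = reloaded.evaluate(x_test, y_test, batch_size=128)
print('reloaded test accuracy:', acc)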
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test
@file marine-integrations/mi/dataset/parser/test/
@author Jeff Roy
@brief Test code for a wc_sbe_cspp data parser
wc_sbe_cspp is based on cspp_base.py
test_wc_sbe_cspp.py fully tests all of the capabilities of the
base parser. That level of testing is omitted from this test suite
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.wc_sbe.cspp.resource import RESOURCE_PATH
from mi.core.exceptions import RecoverableSampleException
from mi.dataset.parser.cspp_base import \
METADATA_PARTICLE_CLASS_KEY, \
DATA_PARTICLE_CLASS_KEY
from mi.dataset.parser.wc_sbe_cspp import \
WcSbeCsppParser, \
WcSbeEngRecoveredDataParticle, \
WcSbeEngTelemeteredDataParticle, \
WcSbeMetadataRecoveredDataParticle, \
WcSbeMetadataTelemeteredDataParticle, \
WcSbeDataTypeKey
log = get_logger()
@attr('UNIT', group='mi')
class WcSbeCsppParserUnitTestCase(ParserUnitTestCase):
"""
wc_sbe_cspp Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
WcSbeDataTypeKey.WC_SBE_CSPP_TELEMETERED: {
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: WcSbeMetadataTelemeteredDataParticle,
DATA_PARTICLE_CLASS_KEY: WcSbeEngTelemeteredDataParticle,
}
},
WcSbeDataTypeKey.WC_SBE_CSPP_RECOVERED: {
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: WcSbeMetadataRecoveredDataParticle,
DATA_PARTICLE_CLASS_KEY: WcSbeEngRecoveredDataParticle,
}
},
}
def test_simple(self):
"""
Read test data and pull out data particles
Assert that the results are those we expected.
"""
file_path = os.path.join(RESOURCE_PATH, '11079364_WC_SBE.txt')
stream_handle = open(file_path, 'rU')
# Note: since the recovered and telemetered parser and particles are common
# to each other, testing one is sufficient, will be completely tested
# in driver tests
parser = WcSbeCsppParser(self.config.get(WcSbeDataTypeKey.WC_SBE_CSPP_RECOVERED),
stream_handle,
self.exception_callback)
particles = parser.get_records(20)
log.debug("*** test_simple Num particles %s", len(particles))
self.assert_particles(particles, '11079364_WC_SBE_recov.yml', RESOURCE_PATH)
stream_handle.close()
def test_simple_telem(self):
"""
Read test data and pull out data particles
Assert that the results are those we expected.
"""
file_path = os.path.join(RESOURCE_PATH, '11079364_WC_SBE.txt')
stream_handle = open(file_path, 'rU')
# Note: since the recovered and telemetered parser and particles are common
# to each other, testing one is sufficient, will be completely tested
# in driver tests
parser = WcSbeCsppParser(self.config.get(WcSbeDataTypeKey.WC_SBE_CSPP_TELEMETERED),
stream_handle,
self.exception_callback)
particles = parser.get_records(20)
log.debug("*** test_simple Num particles %s", len(particles))
self.assert_particles(particles, '11079364_WC_SBE_telem.yml', RESOURCE_PATH)
# check the first particle, which should be the metadata particle (recovered)
stream_handle.close()
def test_get_many(self):
"""
Read test data and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
file_path = os.path.join(RESOURCE_PATH, '11079364_WC_SBE.txt')
stream_handle = open(file_path, 'rU')
# Note: since the recovered and telemetered parser and particles are common
# to each other, testing one is sufficient, will be completely tested
# in driver tests
parser = WcSbeCsppParser(self.config.get(WcSbeDataTypeKey.WC_SBE_CSPP_TELEMETERED),
stream_handle,
self.exception_callback)
# try to get 2000 particles, there are more data records in the file
# so should get 2000 including the meta data
particles = parser.get_records(2000)
log.debug("*** test_get_many Num particles %s", len(particles))
self.assertEqual(len(particles), 2000)
stream_handle.close()
def test_bad_data(self):
"""
Ensure that bad data is skipped when it exists.
"""
# the first useful record in this file is corrupted and will be ignored
# we expect to get the metadata particle with the
# timestamp from the 2nd data record and all of the valid engineering
# data records
file_path = os.path.join(RESOURCE_PATH, '11079364_BAD_WC_SBE.txt')
stream_handle = open(file_path, 'rU')
log.info(self.exception_callback_value)
parser = WcSbeCsppParser(self.config.get(WcSbeDataTypeKey.WC_SBE_CSPP_RECOVERED),
stream_handle,
self.exception_callback)
parser.get_records(20)
self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))
stream_handle.close()
|
nilq/baby-python
|
python
|
from datetime import datetime
from os.path import dirname, join
import pytest # noqa
from city_scrapers_core.constants import ADVISORY_COMMITTEE, PASSED
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.cuya_audit import CuyaAuditSpider
test_response = file_response(
join(dirname(__file__), "files", "cuya_audit.html"),
url="http://bc.cuyahogacounty.us/en-US/Audit-Committee.aspx",
)
test_detail_response = file_response(
join(dirname(__file__), "files", "cuya_audit_detail.html"),
url="http://bc.cuyahogacounty.us/en-US/AuditCommitteeMtg-090519.aspx",
)
spider = CuyaAuditSpider()
freezer = freeze_time("2019-09-19")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
parsed_item = [item for item in spider._parse_detail(test_detail_response)][0]
freezer.stop()
def test_count():
assert len(parsed_items) == 5
def test_title():
assert parsed_item["title"] == "Audit Committee"
def test_description():
assert parsed_item["description"] == ""
def test_start():
assert parsed_item["start"] == datetime(2019, 9, 5, 9, 0)
def test_end():
assert parsed_item["end"] == datetime(2019, 9, 5, 23, 30)
def test_time_notes():
assert parsed_item["time_notes"] == ""
def test_id():
assert parsed_item["id"] == "cuya_audit/201909050900/x/audit_committee"
def test_status():
assert parsed_item["status"] == PASSED
def test_location():
assert parsed_item["location"] == spider.location
def test_source():
assert (
parsed_item["source"]
== "http://bc.cuyahogacounty.us/en-US/AuditCommitteeMtg-090519.aspx"
)
def test_links():
assert parsed_item["links"] == [
{
"href": "http://bc.cuyahogacounty.us/ViewFile.aspx?file=9RBPLk%2fewj3DObNVTeTkjQ%3d%3d", # noqa
"title": "Agenda",
}
]
def test_classification():
assert parsed_item["classification"] == ADVISORY_COMMITTEE
def test_all_day():
assert parsed_item["all_day"] is False
|
nilq/baby-python
|
python
|
from django.db import models
from djchoices import DjangoChoices, ChoiceItem
class UserStatuses(DjangoChoices):
enter_address = ChoiceItem()
enter_name = ChoiceItem()
start = ChoiceItem()
allowed = ChoiceItem()
enter_org_name = ChoiceItem()
enter_role = ChoiceItem()
allowed_group = ChoiceItem()
approve = ChoiceItem()
enter_user_org = ChoiceItem()
# enter_family = ChoiceItem()
class Organisation(models.Model):
vk_id = models.IntegerField()
name = models.CharField(max_length=100, null=True, blank=True)
class UserData(models.Model):
address = models.CharField(max_length=100, null=True, blank=True)
name = models.CharField(max_length=100, null=True, blank=True)
vk_id = models.IntegerField(null=True)
is_organisation = models.BooleanField(default=False)
approved = models.BooleanField(default=None, null=True)
organisation = models.ForeignKey(to=Organisation, on_delete=models.SET_NULL, null=True)
def __str__(self):
return f"{self.name} vk.com/id{self.vk_id}\n"
class VkSession(models.Model):
user = models.ForeignKey(UserData, null=True, blank=True, on_delete=models.SET_NULL)
status = models.CharField(max_length=100, choices=UserStatuses.choices, default=UserStatuses.start)
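# A small usage sketch (illustrative only; names and IDs are hypothetical and
# assume the app's migrations have been applied):
#
#     org = Organisation.objects.create(vk_id=123, name="Example Org")
#     user = UserData.objects.create(vk_id=456, name="Ivan", organisation=org)
#     VkSession.objects.create(user=user, status=UserStatuses.enter_name)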
|
nilq/baby-python
|
python
|
my_list= [3, 4, 6, 2]
my_list1 = list(("Hello World"))
|
nilq/baby-python
|
python
|
from itertools import product as product
from math import sqrt as sqrt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Function
# from utils.box_utils import decode, nms
# from utils.config import Config
class L2Norm(nn.Module):
def __init__(self, n_channels, scale):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.gama = scale
self.eps = 1e-10
        # one learnable scale weight per channel, initialised to `scale` in reset_parameters
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()
    def reset_parameters(self):
        init.constant_(self.weight, self.gama)
def forward(self,x):
norm=x.pow(2).sum(dim=1,keepdim=True).sqrt()+self.eps
# x/norm
x=torch.div(x,norm)
out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
return out
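# A minimal self-check sketch (shapes are illustrative, not from the original code):
# pass a random conv4_3-style feature map through L2Norm and confirm the shape is kept.
if __name__ == "__main__":
    layer = L2Norm(n_channels=512, scale=20)
    features = torch.randn(1, 512, 38, 38)
    print(layer(features).shape)  # expected: torch.Size([1, 512, 38, 38])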
|
nilq/baby-python
|
python
|
import base64
import io
import json
import os
import os.path
import re
import subprocess
import time
import urllib
import uuid
import zipfile
from collections import defaultdict
from datetime import datetime, timedelta
from itertools import islice
from operator import itemgetter
from os import listdir
from os.path import isfile, join, isdir
from urllib.parse import urlparse
import boto3
import pytz
import requests
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from django import template
from django.conf import settings
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect, render
from django.template import RequestContext, loader
from django.urls import reverse
from django.utils.dateparse import parse_datetime
from .models import Jobs
from console.models import *
def credentials_check(f):
def wrap(request, *args, **kwargs):
count = credentials.objects.filter().count()
count1 = github.objects.filter().count()
count2 = local_directory.objects.filter().count()
if (count != 0 and count1 != 0 and count2 != 0):
result = credentials.objects.raw('SELECT * FROM console_credentials LIMIT 1;')
global AWS_ACCESS_KEY_ID
global AWS_SECRET_ACCESS_KEY
global bucket_name
AWS_ACCESS_KEY_ID = result[0].aws_access_key_id
AWS_SECRET_ACCESS_KEY = result[0].aws_secret_access_key
bucket_name = result[0].bucket_name
else:
return HttpResponseRedirect("/settings/")
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
@credentials_check
def drive(request):
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
global proc
if 'start' in request.POST:
try:
exist_controller = controller.objects.latest('id')
controller_mode = exist_controller.training
except:
controller_mode = ''
if controller_mode != '':
proc = subprocess.Popen(["python", updated_local_directory_name+"/manage.py", "drive",controller_mode])
else:
proc = subprocess.Popen(["python", updated_local_directory_name+"/manage.py", "drive"])
elif 'stop' in request.POST:
try:
proc.kill()
except:
print("No proc is running")
template = loader.get_template('console/home.html')
return HttpResponse(template.render({}, request))
def kill_proc(request):
try:
autopilot_proc.kill()
except:
print("no autopilot proc")
return HttpResponseRedirect('/jobs/')
def save_local_directory(request):
message = ""
try:
credential = credentials.objects.latest('id')
aws_key_id = credential.aws_access_key_id
except:
aws_key_id = ''
if request.method == "POST":
local_directory_name = request.POST.get('local_directory')
if local_directory_name != None:
try:
exist_local_directory= local_directory.objects.latest('id')
local_directory.objects.filter(id=exist_local_directory.id).update(name=local_directory_name)
message = "Local Directory has been updated"
except:
new_local_directory = local_directory(name=local_directory_name)
new_local_directory.save()
message = "Local Directory has been saved"
try:
updated_name = github.objects.latest('id')
updated_repo_name = updated_name.name
updated_extension = updated_name.extension
except:
updated_repo_name = ''
updated_extension = ''
try:
updated_controller = controller.objects.latest('id')
updated_training_controller = updated_controller.training
except:
updated_training_controller = ''
try:
updated_local_directory = local_directory.objects.latest('id')
updated_local_directory_name = updated_local_directory.name
except:
updated_local_directory_name = ''
template = loader.get_template('console/local_directory.html')
return HttpResponse(template.render({'status': message,'local_directory': updated_local_directory_name, 'training_controller': updated_training_controller,
'updated_extension': updated_extension, 'updated_repo': updated_repo_name,
'AWS_KEY': aws_key_id}, request))
@credentials_check
def display_data_folders(request):
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
list_data = os.popen('ls '+updated_local_directory_name+'/data/').read()
directories = list_data.split()
dataFolders = []
for dir in directories:
direcPath = os.popen('echo '+updated_local_directory_name+'/data/' + dir).read()
direcPath = direcPath.split()
if os.path.isdir(direcPath[0]):
try:
if os.path.exists(direcPath[0] + '/donkeycar-console.json') == True:
print("it exists")
else:
with open(direcPath[0] + '/donkeycar-console.json', 'w') as outfile:
noImages = os.popen('ls -l '+updated_local_directory_name+'/data/' + dir + ' | grep .jpg | wc -l').read()
noImages.strip()
print(noImages)
noImages = int(noImages)
year = os.popen('date +"%Y"').read()
time = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $8}'").read()
month = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $6}'").read()
day = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $7}'").read()
date = year + " " + month + " " + day + " " + time
d = datetime.strptime(date, '%Y\n %b\n %d\n %H:%M\n')
d = d.strftime('%Y-%m-%d %H:%M')
json.dump({"name": dir, "no": noImages, "date": d, "remarks": []}, outfile)
with open(direcPath[0] + '/donkeycar-console.json', 'r') as result:
data = json.load(result)
dataFolders.append(data)
except json.JSONDecodeError:
os.system('sudo rm -r ' + direcPath[0] + '/donkeycar-console.json')
dataFolders.sort(key=itemgetter('date'), reverse=True)
iterator = islice(dataFolders, 10)
for item in iterator:
print(item)
dir = item["name"]
direcPath = os.popen('echo ' + updated_local_directory_name + '/data/' + dir).read()
direcPath = direcPath.split()
with open(direcPath[0] + '/donkeycar-console.json', 'r') as outfile:
data = json.load(outfile)
tmp = data["no"]
noImages = os.popen('ls -l ' + updated_local_directory_name + '/data/' + dir + ' | grep .jpg | wc -l').read()
data["no"] = noImages
with open(direcPath[0] + '/donkeycar-console.json', 'w') as jsonFile:
json.dump(data, jsonFile)
print(dataFolders)
context = {
'result': dataFolders,
}
return render(request, 'console/data_folders.html', context)
@credentials_check
def getfiles(request):
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
result = request.GET.get('dir', '')
print(result)
zip_io = io.BytesIO()
direcPath = os.popen('echo '+updated_local_directory_name+'/data/').read()
direcPath = direcPath.split()
with zipfile.ZipFile(zip_io, mode='w', compression=zipfile.ZIP_DEFLATED) as backup_zip:
for f in os.listdir(direcPath[0] + result):
backup_zip.write(direcPath[0] + result + '/' + f)
response = HttpResponse(zip_io.getvalue(), content_type='application/x-zip-compressed')
response['Content-Disposition'] = 'attachment; filename=%s' % result + ".zip"
response['Content-Length'] = zip_io.tell()
return response
@credentials_check
def delete_data(request):
name= request.GET.get('name', '')
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
os.system('sudo rm -r '+updated_local_directory_name+'/data/'+name)
return HttpResponseRedirect('/data/')
@credentials_check
def delete_data_folder_comment(request):
comment= request.GET.get('comment', '')
name= request.GET.get('name', '')
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
    if (comment and name):
direcPath = os.popen('echo '+updated_local_directory_name+'/data/' + name).read()
direcPath = direcPath.split()
with open(direcPath[0] + '/donkeycar-console.json', 'r') as outfile:
data = json.load(outfile)
with open(direcPath[0] + '/donkeycar-console.json', 'w') as writefile:
(data['remarks']).remove(comment)
json.dump(data, writefile)
return HttpResponseRedirect('/data/')
@credentials_check
def add_data_folder_comment(request):
data_name = request.POST['name']
print(data_name)
data_comment = request.POST['var']
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
direcPath = os.popen('echo '+updated_local_directory_name+'/data/' + data_name).read()
direcPath = direcPath.split()
with open(direcPath[0] + '/donkeycar-console.json', 'r') as outfile:
data = json.load(outfile)
print(data['remarks'])
print(len(data['remarks']))
with open(direcPath[0] + '/donkeycar-console.json', 'w') as writefile:
(data['remarks']).append(data_comment)
json.dump(data, writefile)
return HttpResponse('success')
def sizify(value):
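    # Convert a raw byte count into a short human-readable string,
    # e.g. sizify(2048) -> '2.0 kb', sizify(5 * 1024 * 1024) -> '5.0 mb' (illustrative).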
if value < 512000:
value = value / 1024.0
ext = 'kb'
elif value < 4194304000:
value = value / 1048576.0
ext = 'mb'
else:
value = value / 1073741824.0
ext = 'gb'
return '%s %s' % (str(round(value, 2)), ext)
@credentials_check
def list_jobs(request):
jobs = Jobs.objects.order_by('-date')[:30]
for job in jobs:
        job.tubs = re.findall("'(.*?)'", job.tubs)
if job.size != 'N/A':
job.size=sizify(int(job.size))
context = {
'models': jobs,
}
template = loader.get_template('console/jobs.html')
return HttpResponse(template.render(context, request))
def grouping(l):
d = defaultdict(list)
print(d)
for key,value, role in l:
print("key_l",key,value,role)
new_key = str(key) + "?" + value
d[new_key].append(role)
for new_key in d:
d[new_key] = ' | '.join(d[new_key])
print(d.items())
return list(d.items())
def save_controller_settings(request):
message = ""
try:
credential = credentials.objects.latest('id')
aws_key_id = credential.aws_access_key_id
except:
aws_key_id = ''
if request.method == "POST":
training_controller = request.POST.get('training_controller')
if training_controller != None :
try:
exist_controller = controller.objects.latest('id')
controller.objects.filter(id=exist_controller.id).update(training=training_controller)
message = "Controller settings have been updated"
except:
new_controller = controller(
training=training_controller)
new_controller.save()
message = "Controller settings have been updated"
try:
updated_name = github.objects.latest('id')
updated_repo_name = updated_name.name
updated_extension = updated_name.extension
except:
updated_repo_name = ''
updated_extension = ''
try:
updated_controller = controller.objects.latest('id')
updated_training_controller = updated_controller.training
except:
updated_training_controller = ''
try:
updated_local_directory = local_directory.objects.latest('id')
updated_local_directory_name = updated_local_directory.name
except:
updated_local_directory_name = ''
template = loader.get_template('console/controller.html')
return HttpResponse(template.render({'local_directory': updated_local_directory_name,'controller_message': message,'training_controller':updated_training_controller,'updated_extension':updated_extension,'updated_repo':updated_repo_name,'AWS_KEY':aws_key_id}, request))
@credentials_check
def list_jobs_success(request):
jobs = Jobs.objects.order_by('-date')[:30]
for job in jobs:
        job.tubs = re.findall("'(.*?)'", job.tubs)
if job.size != 'N/A':
job.size=sizify(int(job.size))
context = {
'models': jobs,
'success': "New Job Added !"
}
template = loader.get_template('console/jobs.html')
return HttpResponse(template.render(context, request))
def save_credentials(request):
message = ""
if request.method == "POST":
id = uuid.uuid4()
bucket_name = "donkeycar-console-"+ str(id)
UPDATED_AWS_ACCESS_KEY_ID = request.POST.get('key1')
UPDATED_AWS_SECRET_ACCESS_KEY = request.POST.get('key2')
if ((UPDATED_AWS_ACCESS_KEY_ID != None) & (UPDATED_AWS_SECRET_ACCESS_KEY != None)):
client = boto3.client('s3', aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID,
aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY)
sts = boto3.client('sts', aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID,
aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY)
try:
response = sts.get_caller_identity()
try:
client.create_bucket(Bucket=bucket_name)
conn = S3Connection(aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID,
aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(bucket_name)
k = bucket.new_key('models/')
k.set_contents_from_string('')
k = bucket.new_key('data/')
k.set_contents_from_string('')
count = credentials.objects.filter().count()
if count == 0:
credential = credentials(
aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID,
aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY,
bucket_name= bucket_name)
credential.save()
message = "Credentials have been updated !"
else:
credential = credentials.objects.latest('id')
credentials.objects.filter(id=credential.id).update(aws_access_key_id=UPDATED_AWS_ACCESS_KEY_ID,
aws_secret_access_key=UPDATED_AWS_SECRET_ACCESS_KEY)
message = "Credentials have been updated !"
except Exception as e1:
print(e1)
message = "Can't Create S3 bucket: Check IAM Permissions and re-enter your credentials"
except Exception as e:
print(e)
message = "Incorrect Credentials"
try:
credential = credentials.objects.latest('id')
aws_key_id = credential.aws_access_key_id
except:
aws_key_id = ''
try:
updated_name = github.objects.latest('id')
updated_repo_name = updated_name.name
updated_extension = updated_name.extension
except:
updated_repo_name = ''
updated_extension = ''
try:
updated_controller = controller.objects.latest('id')
updated_training_controller = updated_controller.training
except:
updated_training_controller = ''
try:
updated_local_directory = local_directory.objects.latest('id')
updated_local_directory_name = updated_local_directory.name
except:
updated_local_directory_name = ''
template = loader.get_template('console/credentials.html')
return HttpResponse(template.render({'message': message,'local_directory': updated_local_directory_name,'training_controller':updated_training_controller,'AWS_KEY': aws_key_id,'updated_repo':updated_repo_name,'updated_extension':updated_extension}, request))
def save_github_repo(request):
message = ""
try:
credential = credentials.objects.latest('id')
aws_key_id = credential.aws_access_key_id
except:
aws_key_id = ''
if request.method == "POST":
repo = request.POST.get('repo')
extension = request.POST.get('extension')
print(repo)
result = os.system('git ls-remote ' + repo)
if result == 0:
if repo != None:
try:
exist_repo = github.objects.latest('id')
github.objects.filter(id=exist_repo.id).update(name=repo)
github.objects.filter(id=exist_repo.id).update(extension=extension)
message = "Github Repository has been updated"
except:
new_github = github(name=repo,extension=extension)
new_github.save()
message = "Github Repository has been updated"
else:
message = "Please enter a git repository"
try:
updated_name = github.objects.latest('id')
updated_repo_name = updated_name.name
updated_extension = updated_name.extension
except:
updated_repo_name = ''
updated_extension = ''
try:
updated_controller = controller.objects.latest('id')
updated_training_controller = updated_controller.training
except:
updated_training_controller = ''
try:
updated_local_directory = local_directory.objects.latest('id')
updated_local_directory_name = updated_local_directory.name
except:
updated_local_directory_name = ''
template = loader.get_template('console/github.html')
return HttpResponse(template.render({'status': message,'local_directory': updated_local_directory_name,'training_controller':updated_training_controller,'updated_extension':updated_extension,'updated_repo':updated_repo_name,'AWS_KEY':aws_key_id}, request))
@credentials_check
def delete_remark(request):
id = request.GET.get('id', '')
remarks.objects.filter(id=id).delete()
return HttpResponseRedirect('/jobs/')
@credentials_check
def delete_job(request):
id= request.GET.get('id', '')
Jobs.objects.filter(id=id).delete()
return HttpResponseRedirect('/jobs/')
def add_remark(request):
job_id = request.POST['id']
print(job_id)
comment = request.POST['var']
print(comment)
remark = remarks(remark=comment)
remark.save()
job = Jobs.objects.get(id=job_id)
job.Comments.add(remark)
return HttpResponse('success')
def verify_logs(id,AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,bucket_name):
conn = S3Connection(aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(bucket_name)
s3 = boto3.resource('s3',aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
for key in bucket.list():
if key.name == 'job_'+ str(id) +'.log':
url_to_download= "https://s3.amazonaws.com/"+bucket_name+"/"+ key.name
Jobs.objects.filter(id=id).update(log_url=url_to_download)
object_acl = s3.ObjectAcl(bucket_name, key.name)
object_acl.put(ACL='public-read')
if key.name == 'job_'+ str(id) +'_commands.log':
url1_to_download= "https://s3.amazonaws.com/"+bucket_name+"/"+ key.name
Jobs.objects.filter(id=id).update(commands_log_url=url1_to_download)
object_acl = s3.ObjectAcl(bucket_name, key.name)
object_acl.put(ACL='public-read')
@credentials_check
def cancel_request(request):
client = boto3.client('ec2', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1')
id = request.GET.get('id', '')
job = Jobs.objects.get(id=id)
client.terminate_instances(
InstanceIds=[
job.instance_id
]
)
Jobs.objects.filter(id=id).update(state='Canceled')
Jobs.objects.filter(id=id).update(duration='0')
return HttpResponseRedirect('/jobs/')
def convert_timedelta(duration):
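    # Split a timedelta into whole hours, minutes and seconds,
    # e.g. convert_timedelta(timedelta(minutes=75)) -> (1, 15, 0) (illustrative).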
days, seconds = duration.days, duration.seconds
hours = days * 24 + seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 60)
return hours, minutes, seconds
@credentials_check
def copy_local(request):
id = request.GET.get('id', '')
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
path = os.popen('echo '+updated_local_directory_name+'/models/').read()
path = path.split()
try:
updated_repo = github.objects.latest('id')
extension = updated_repo.extension
except:
extension = ''
if extension != '':
model_name = 'job_' + str(id) + extension
else:
model_name = 'job_' + str(id)
response_url = download_s3(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,bucket_name)
o = urlparse(response_url)
key_path = o.path.split('/', 1)[1]
s3 = boto3.resource('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
if( os.path.exists(path[0]+model_name) == True ):
print("it exists")
else:
s3.Object(bucket_name,key_path.split('/', 1)[1] + '/' + model_name).download_file(path[0] + model_name)
return HttpResponseRedirect('/jobs/')
def download_s3(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,bucket_name):
s3 = boto3.client('s3',aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1')
url = '{}/{}/{}'.format(s3.meta.endpoint_url,bucket_name,'models')
return url
@credentials_check
def autopilot(request):
id = request.GET.get('id', '')
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
path = os.popen('echo '+updated_local_directory_name+'/models/').read()
path = path.split()
try:
updated_repo = github.objects.latest('id')
extension = updated_repo.extension
except:
extension = ''
if extension != '':
model_name = 'job_' + str(id) + extension
else:
model_name = 'job_' + str(id)
job_name = 'job_' + str(id)
response_url = download_s3(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,bucket_name)
o = urlparse(response_url)
key_path = o.path.split('/', 1)[1]
s3 = boto3.resource('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
if (os.path.exists(path[0] + job_name) == True):
print("it exists")
else:
s3.Object(bucket_name, key_path.split('/', 1)[1] + '/' + model_name).download_file(
path[0] + job_name)
global autopilot_proc
autopilot_proc = subprocess.Popen(["python", updated_local_directory_name+"/manage.py", "drive", "--model", updated_local_directory_name+"/models/" + job_name])
return HttpResponseRedirect('/jobs/')
def get_car_status_autopilot(request):
try:
poll = autopilot_proc.poll()
if poll == None:
response = 'Autopilot'
else:
response = ''
except:
response = ''
return HttpResponse(response)
def get_car_status_training(request):
try:
poll = proc.poll()
if poll == None:
response = 'Training'
return HttpResponse(response)
else:
response = ''
except:
response = ''
return HttpResponse(response)
@credentials_check
def home(request):
template = loader.get_template('console/home.html')
return HttpResponse(template.render({}, request))
def upload_to_s3(AWS_SECRET_ACCESS_KEY,AWS_ACCESS_KEY_ID,bucket_name):
s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1')
url = '{}/{}/{}'.format(s3.meta.endpoint_url, bucket_name, 'data')
return url
@credentials_check
def create_job(request):
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
choices = ['g2.2xlarge', 'g2.8xlarge', 'p2.xlarge', 'p3.2xlarge', 'p3.8xlarge']
errorMessage = ""
conn = S3Connection(aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
message = ""
job_number = Jobs.objects.filter().count()
if request.method == "POST":
checked_data = request.POST.getlist('chk[]')
instance_type = request.POST.get('choice')
availability_zone = request.POST.get('AZ')
max_time = request.POST.get('max_time')
request_time = request.POST.get('request_time')
if max_time == '':
max_time = 15
if request_time == '':
request_time = 2
try:
availability_zone = availability_zone.split()
price = availability_zone[1]
except:
print("no availability")
if len(checked_data) == 0 or int(max_time) >= 60:
if len(checked_data) == 0 and int(max_time) >= 60:
message = " No selected items and EC2 Termination Time maximum must be 60 minutes "
elif len(checked_data) == 0:
message = " No selected items"
elif int(max_time) >= 60:
message = "EC2 Termination Time maximum must be 60 minutes "
else:
job = Jobs(
tubs=checked_data,
state="Pending",
job_number=job_number + 1,
instance=instance_type,
price=price,
availability_zone=availability_zone[0],
instance_max=max_time)
job.save()
selected_data = ""
dataPath = os.popen('echo '+updated_local_directory_name+'/data/').read()
dataPath = dataPath.split()
for dir in checked_data:
selected_data += " " + dir
print(selected_data)
if len(selected_data) != 0:
try:
updated_repo = github.objects.latest('id')
extension= updated_repo.extension
except:
extension = ''
if extension != '' :
model_name = 'job_' + str(job.id)+ extension
else:
model_name = 'job_' + str(job.id)
job_name = 'job_' + str(job.id)
os.chdir(dataPath[0])
current_path = os.popen('pwd').read()
print(current_path)
os.system('tar -zcf job_' + str(job.id) + '.tar.gz ' + selected_data)
tarfile_size = os.popen("ls -sh job_" + str(job.id) + ".tar.gz | awk '{print $1}'").read()
print(tarfile_size)
Jobs.objects.filter(id=job.id).update(tarfile_size=tarfile_size)
current_path = os.popen('pwd').read()
current_path = current_path.split()
response_url = upload_to_s3(AWS_SECRET_ACCESS_KEY,AWS_ACCESS_KEY_ID,bucket_name)
o = urlparse(response_url)
path = o.path.split('/', 1)[1]
s3 = boto3.resource('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
tarfile_name = 'job_' + str(job.id) + '.tar.gz'
s3.meta.client.upload_file(os.path.join(current_path[0], tarfile_name), bucket_name,
path.split('/', 1)[1] + '/' + tarfile_name)
if instance_type != '':
termination_time = (Jobs.objects.get(id=job.id)).instance_max
github_repo = github.objects.latest('id')
try:
request_id = launch_ec2_instance(model_name,job_name,AWS_SECRET_ACCESS_KEY,AWS_ACCESS_KEY_ID,github_repo.name,request_time,availability_zone[0],instance_type,termination_time,bucket_name)
Jobs.objects.filter(id=job.id).update(request_id=request_id)
except Exception as e:
print(e)
job.delete()
Jobs.objects.filter(id=job.id).update(date=datetime.now())
else:
errorMessage = " Enter an instance type "
print(errorMessage)
job.delete()
os.system('rm -r job_' + str(job.id) + '.tar.gz ')
return HttpResponseRedirect('/jobs/success/')
list_data = os.popen('ls '+updated_local_directory_name+'/data/').read()
directories = list_data.split()
dataFolders = []
print(directories)
for dir in directories:
direcPath = os.popen('echo '+updated_local_directory_name+'/data/' + dir).read()
direcPath = direcPath.split()
if os.path.isdir(direcPath[0]):
if os.path.exists(direcPath[0] + '/donkeycar-console.json') == True:
with open(direcPath[0] + '/donkeycar-console.json', 'r') as outfile:
data = json.load(outfile)
print(data)
tmp = data["no"]
noImages = os.popen('ls -l ' + updated_local_directory_name + '/data/' + dir + ' | grep .jpg | wc -l').read()
data["no"] = noImages
with open(direcPath[0] + '/donkeycar-console.json', 'w') as jsonFile:
json.dump(data, jsonFile)
else:
with open(direcPath[0] + '/donkeycar-console.json', 'w') as outfile:
noImages = os.popen('ls -l '+updated_local_directory_name+'/data/' + dir + ' | grep .jpg | wc -l').read()
noImages.strip()
noImages = int(noImages)
year = os.popen('date +"%Y"').read()
time = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $8}'").read()
month = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $6}'").read()
day = os.popen("ls -ldc "+updated_local_directory_name+"/data/" + dir + " | awk '{print $7}'").read()
date = year + " " + month + " " + day + " " + time
d = datetime.strptime(date, '%Y\n %b\n %d\n %H:%M\n')
d = d.strftime('%Y-%m-%d %H:%M')
json.dump({"name": dir, "no": noImages, "date": d, "remarks": []}, outfile)
with open(direcPath[0] + '/donkeycar-console.json', 'r') as result:
data = json.load(result)
dataFolders.append(data)
dataFolders.sort(key=itemgetter('date'), reverse=True)
jobs = Jobs.objects.order_by('-date')[:30]
for job in jobs:
if job.size != 'N/A':
job.size = sizify(int(job.size))
context = {
'models': jobs,
'result': dataFolders,
'message': message,
'errorMessage': errorMessage,
'choices': choices,
}
return render(request, 'console/create_job.html',context)
@credentials_check
def update_status_by_id(request):
client = boto3.client('ec2', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1')
now = datetime.now(pytz.utc)
id = request.GET.get('id', '')
job = Jobs.objects.get(id=id)
verify_logs(job.id, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,bucket_name)
if job.request_id != "0":
if now > job.date + timedelta(minutes=job.request_time):
try:
response = client.describe_spot_instance_requests(
SpotInstanceRequestIds=[
job.request_id
]
)
value = response['SpotInstanceRequests'][0]['Status']['Code']
Jobs.objects.filter(id=job.id).update(request_state=value)
instance_id = response['SpotInstanceRequests'][0]['InstanceId']
Jobs.objects.filter(id=job.id).update(instance_id=instance_id)
except Exception as e:
print(e)
now = datetime.now(pytz.utc)
print("now", now)
if job.state == 'Pending':
if job.request_state == 'schedule-expired':
Jobs.objects.filter(id=job.id).update(state='Failed')
Jobs.objects.filter(id=job.id).update(duration='0')
else:
conn = S3Connection(aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(bucket_name)
for key in bucket.list('models'):
name = key.name.split('/')
print(key)
date = key.last_modified
print(date)
print("job.date" + str(job.date))
try:
updated_repo = github.objects.latest('id')
extension = updated_repo.extension
except:
extension = ''
if extension != '':
model_name = 'job_' + str(job.id) + extension
else:
model_name = 'job_' + str(job.id)
if name[1] == model_name:
Jobs.objects.filter(id=job.id).update(state='succeeded')
Jobs.objects.filter(id=job.id).update(size=key.size)
duration = parse_datetime(date) - job.date
hours, minutes, seconds = convert_timedelta(duration)
time = str(minutes) + " m and " + str(seconds) + " s"
print(time)
Jobs.objects.filter(id=job.id).update(duration=time)
elif now > job.date + timedelta(minutes=job.instance_max):
Jobs.objects.filter(id=job.id).update(state='Failed')
Jobs.objects.filter(id=job.id).update(duration='0')
job = Jobs.objects.get(id=id)
if job.request_state == 'instance-terminated-by-user' and job.state == 'Pending':
Jobs.objects.filter(id=job.id).update(state='Failed')
Jobs.objects.filter(id=job.id).update(duration='0')
return HttpResponseRedirect('/jobs/')
def launch_ec2_instance(model_name,job_name,AWS_SECRET_ACCESS_KEY,AWS_ACCESS_KEY_ID,github_repo,request_time,availability_zone,instance_type,termination_time,bucket_name):
print("Launching Ec2 Instance")
client = boto3.client('ec2', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY, region_name='us-east-1')
print("Starting")
tarfile = job_name + '.tar.gz'
termination_time_s = str(termination_time)
termination_time_before_minute = str(termination_time - 2)
try:
print("trying")
print(github_repo)
response = client.request_spot_instances(
InstanceCount=1,
Type='one-time',
ValidFrom=datetime.now(pytz.utc) + timedelta(seconds=4),
ValidUntil=datetime.now(pytz.utc) + timedelta(minutes=int(request_time)),
LaunchSpecification={
'ImageId': 'ami-f3a9c18c',
'InstanceType': instance_type,
'Placement': {
'AvailabilityZone': availability_zone
},
'BlockDeviceMappings': [
{'DeviceName': '/dev/sda1',
'Ebs': {
'DeleteOnTermination': True,
'VolumeSize': 40
}
}
],
'UserData': base64.b64encode(b'''#!/bin/bash
timestamp() {
date +"%T"
}
echo " Start The Script "
timestamp
echo "sudo halt" | at now + ''' + (termination_time_s).encode('utf8') + b''' minutes
export LC_ALL="en_US.UTF-8"
export LC_CTYPE="en_US.UTF-8"
source /home/ubuntu/env/bin/activate
echo " Configure AWS credentials "
timestamp
aws --version
aws configure set aws_access_key_id ''' + (AWS_ACCESS_KEY_ID).encode('utf8')
+ b''' && aws configure set aws_secret_access_key ''' + (
AWS_SECRET_ACCESS_KEY).encode('utf8')
+ b''' && aws s3 cp s3://''' + (bucket_name).encode(
'utf8') + b'''/data/''' + tarfile.encode('utf8') + b''' /home/ubuntu
echo "aws s3 cp /var/log/cloud-init-output.log s3://''' + (
bucket_name).encode(
'utf8') + b'''/''' + job_name.encode(
'utf8') + b'''_commands.log && aws s3 cp /''' + job_name.encode(
'utf8') + b'''.log s3://''' + (bucket_name).encode(
'utf8') + b'''/" | at now + ''' + (termination_time_before_minute).encode('utf8') + b''' minutes
echo " Cloning the github repository "
timestamp
git clone -b master --single-branch ''' + (github_repo).encode('utf8') + b''' donkeycar
echo " Install the dependencies "
timestamp
pip install -e donkeycar
echo " Create d2 repository "
timestamp
donkey createcar --path ~/d2
donkey createcar ~/d2
echo " Uncompress the tar file "
timestamp
sudo tar -zxf /home/ubuntu/''' + tarfile.encode('utf8') + b''' -C /root/d2/data
export PATH=/usr/local/cuda/bin:$PATH
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64"
export CUDA_HOME=/usr/local/cuda
echo " Install tensorflow-gpu "
timestamp
pip install tensorflow-gpu==1.10
echo " Start Training "
timestamp
python ~/d2/manage.py train --model /root/d2/models/''' + model_name.encode(
'utf8') + b''' >> ''' + job_name.encode('utf8') + b'''.log
echo " Finish Training "
timestamp
echo " Upload the model to S3 "
timestamp
aws s3 cp /root/d2/models/''' + model_name.encode('utf8') + b''' s3://''' + (
bucket_name).encode('utf8') + b'''/models/
echo " Finish uploading the model to S3 "
timestamp
aws s3 cp /''' + job_name.encode('utf8') + b'''.log s3://''' + (
bucket_name).encode('utf8') + b'''/
aws s3 cp /var/log/cloud-init-output.log s3://''' + (bucket_name).encode(
'utf8') + b'''/''' + job_name.encode('utf8') + b'''_commands.log
echo "sudo halt" | at now + 1 minutes
''').decode("ascii")
}
)
request_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId']
    except Exception as e:
        print(e)
        request_id = None
    return request_id
@credentials_check
def list_jobs_timeout(request):
jobs = Jobs.objects.order_by('-date')[:30]
for job in jobs:
import re
list = re.findall("'(.*?)'", job.tubs)
job.tubs = list
if job.size != 'N/A':
job.size=sizify(int(job.size))
context = {
'models': jobs,
'timeout': "No Job was created ! Please Try again"
}
template = loader.get_template('console/jobs.html')
return HttpResponse(template.render(context, request))
@credentials_check
def delete_empty_folders(request):
try:
Local_directory = local_directory.objects.latest('id')
updated_local_directory_name = Local_directory.name
except:
updated_local_directory_name = ''
list_data = os.popen('ls ' + updated_local_directory_name + '/data/').read()
directories = list_data.split()
print(directories)
for dir in directories:
direcPath = os.popen('echo ' + updated_local_directory_name + '/data/' + dir).read()
direcPath = direcPath.split()
if os.path.isdir(direcPath[0]):
noImages = os.popen(
'ls -l ' + updated_local_directory_name + '/data/' + dir + ' | grep .jpg | wc -l').read()
            noImages = noImages.strip()
print(noImages)
noImages = int(noImages)
if noImages == 0 :
os.system('sudo rm -r '+direcPath[0])
return HttpResponseRedirect('/data/')
def check_availability_zone(instance_type):
client = boto3.client('ec2', aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,region_name='us-east-1')
response = client.describe_spot_price_history(
InstanceTypes=[
instance_type
],
ProductDescriptions=[
'Linux/UNIX',
],
MaxResults=6,
)
    List = response['SpotPriceHistory']
    List.sort(key=lambda entry: float(entry['SpotPrice']))
models = { az['AvailabilityZone'] for az in List}
listAZ = list(models)
newlist=[]
for l in listAZ:
listA = [x for x in List if x['AvailabilityZone']== l]
newlist.append(l + " " + listA[0]['SpotPrice'] + "/H")
return newlist
def display_local_repo(request):
choices_dir = os.popen("find ~/ -type d -exec test -e '{}'/models -a -e '{}'/data \; -print").read()
list = choices_dir.split('\n')
dire =""
for l in list :
dire = dire + l + "##"
return HttpResponse(dire)
def display_availability(request,name):
response = check_availability_zone(name)
return HttpResponse(response)
|
nilq/baby-python
|
python
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# This file should only be present in a source checkout, and never in a release
# package, to allow us to determine whether we're running in a development or
# production mode.
|
nilq/baby-python
|
python
|
import math
if __name__ != "common":
from objects import glob
import time
import json
from common.ripple import userUtils
def load_achievement_data(ACHIEVEMENT_BASE, ACHIEVEMENT_KEYS, ACHIEVEMENT_STRUCT):
LENGTH = 0
ACHIEVEMENTS = []
for struct in ACHIEVEMENT_STRUCT:
LENGTH = max(LENGTH, len(ACHIEVEMENT_KEYS[struct]) * ACHIEVEMENT_STRUCT[struct])
entry = {x:0 for x in ACHIEVEMENT_STRUCT}
for i in range(LENGTH):
for struct in ACHIEVEMENT_STRUCT:
entry[struct] = math.floor(i / ACHIEVEMENT_STRUCT[struct]) % len(ACHIEVEMENT_KEYS[struct])
format_data = {x:ACHIEVEMENT_KEYS[x][entry[x]] for x in ACHIEVEMENT_KEYS}
ACHIEVEMENTS.append({x: ACHIEVEMENT_BASE[x].format_map(format_data) for x in ACHIEVEMENT_BASE})
return ACHIEVEMENTS, LENGTH
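# Illustrative sketch of the expected inputs (hypothetical values, not the real config):
# ACHIEVEMENT_BASE holds format templates, ACHIEVEMENT_KEYS the substitution values,
# and ACHIEVEMENT_STRUCT the stride at which each key advances.
#   BASE   = {"name": "{mode} Level {level}"}
#   KEYS   = {"mode": ["osu!", "taiko"], "level": ["1", "2", "3"]}
#   STRUCT = {"mode": 3, "level": 1}
#   achievements, length = load_achievement_data(BASE, KEYS, STRUCT)
#   # length == 6; achievements[0]["name"] == "osu! Level 1",
#   # achievements[4]["name"] == "taiko Level 2"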
def get_usercache(userID):
user_cache = glob.redis.get("lets:user_achievement_cache:{}".format(userID))
if user_cache is None:
user_cache = {}
else:
user_cache = json.loads(user_cache.decode("utf-8"))
if "version" not in user_cache:
# Load from sql database
user_cache["version"] = userUtils.getAchievementsVersion(userID)
db_achievements = [x["achievement_id"] for x in glob.db.fetchAll("SELECT achievement_id FROM users_achievements WHERE user_id = %s", [userID])]
if "achievements" in user_cache:
user_cache["achievements"] += db_achievements
else:
user_cache["achievements"] = db_achievements
# Remove duplicates after merge
user_cache["achievements"] = list(set(user_cache["achievements"]))
return user_cache
def add_pending_achievement(userID, achievementID):
user_cache = get_usercache(userID)
if len([x for x in user_cache["achievements"] if x in [achievementID, -achievementID]]) > 0:
print("Tried to add achievement:{} to user:{}, but failed due to duplicate entry.".format(achievementID, userID))
return
user_cache["achievements"].append(-achievementID)
# Remove duplicates after merge
user_cache["achievements"] = list(set(user_cache["achievements"]))
glob.redis.set("lets:user_achievement_cache:{}".format(userID), json.dumps(user_cache), 1800)
userUtils.unlockAchievement(userID, achievementID)
|
nilq/baby-python
|
python
|
from hashlib import pbkdf2_hmac, md5
import binascii
from Crypto.Cipher import AES
import os
import sys
def generate_key(title_id, pwd):
# remove 00 padding from title id
title_idGen = title_id[2:]
# get secret string, append title id, and convert to binary string
secret = binascii.unhexlify('fd040105060b111c2d49' + title_idGen)
# get md5 hash of secret
hashed_secret = md5(secret).digest()
# key is a pbkdf2 hash with sha1 base using hashed_secret as salt and 20 iterations
non_encrypted_key = binascii.hexlify(pbkdf2_hmac('sha1', pwd.encode(), hashed_secret, 20, 16))
title_id += '0000000000000000'
title_id = binascii.unhexlify(title_id)
ckey = binascii.unhexlify(get_ckey())
title_key = binascii.unhexlify(non_encrypted_key)
encryptor = AES.new(key=ckey, mode=AES.MODE_CBC, IV=title_id)
encrypted_title_key = encryptor.encrypt(title_key)
# return as hexstring
return binascii.hexlify(encrypted_title_key)
def get_ckey() -> str:
if not os.path.exists('ckey.txt'):
        print('Common key was not found. Please create a file called ckey.txt and write the common key in the first line.')
sys.exit(0)
with open('ckey.txt', 'r') as f:
return f.readline().replace('\r', '').replace('\n', '')
def verify_ckey():
if md5(get_ckey().upper().encode()).hexdigest() == '35ac5994972279331d97094fa2fb97fc':
return True
def main(tid, password='mypass'):
    return generate_key(tid, password)
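# Usage sketch (illustrative only; the title id below is a placeholder, not a real title,
# and a valid ckey.txt must exist next to the script):
#   encrypted = main('00050000deadbeef', password='mypass')
#   print(encrypted.decode())  # hex string of the AES-CBC encrypted title key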
|
nilq/baby-python
|
python
|
# https://github.com/python-poetry/poetry/issues/11
import glob
import os
from distutils.command.build_ext import build_ext
from distutils.core import Extension
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
def filter_extension_module(name, lib_objs, lib_headers):
return Extension(
"thumbor.ext.filters.%s" % name,
["thumbor/ext/filters/%s.c" % name] + lib_objs,
libraries=["m"],
include_dirs=["thumbor/ext/filters/lib"],
depends=["setup.py"] + lib_objs + lib_headers,
extra_compile_args=["-Wall", "-Wextra", "-Werror", "-Wno-unused-parameter"],
)
def gather_filter_extensions():
files = glob.glob("thumbor/ext/filters/_*.c")
lib_objs = glob.glob("thumbor/ext/filters/lib/*.c")
lib_headers = glob.glob("thumbor/ext/filters/lib/*.h")
return [
filter_extension_module(f[0:-2].split("/")[-1], lib_objs, lib_headers)
for f in files
]
class BuildFailed(Exception):
pass
class ExtBuilder(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except (DistutilsPlatformError, FileNotFoundError):
pass
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (
CCompilerError,
DistutilsExecError,
DistutilsPlatformError,
ValueError,
):
pass
def build(setup_kwargs):
"""Needed for the poetry building interface."""
if "CFLAGS" not in os.environ:
os.environ["CFLAGS"] = ""
setup_kwargs.update(
dict(
ext_modules=gather_filter_extensions(),
cmdclass={"build_ext": ExtBuilder},
packages=["thumbor"],
package_dir={"thumbor": "thumbor"},
include_package_data=True,
package_data={"": ["*.xml"]},
)
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Created by: Michael Lan
import os
import sys
import re
from PySide2.QtWidgets import QApplication, QMainWindow
from PySide2 import QtGui, QtWidgets, QtCore
from ui_main import Ui_Dialog
from pyside_material import apply_stylesheet
from dbconnection import connect, insert_data, close, validate_duplicate
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.name, self.dni, self.phone, self.addres, self.birth = ('', '', '', '', '')
self.dni_register = ''
self.path = 'dbCrokiAlitas.db'
self.ui.txtName.installEventFilter(self)
self.ui.txtDNI.installEventFilter(self)
self.ui.txtPhone.installEventFilter(self)
self.ui.txtAddres.installEventFilter(self)
self.ui.txtBirth.installEventFilter(self)
self.ui.btnSaveUser.clicked.connect(self.save_user)
#----------------------------------------------------------------------
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FocusOut:
if self.ui.txtName is obj:
if self.validate_txt(self.ui.txtName.text()):
self.name = self.ui.txtName.text().title().strip()
self.ui.txtName.setText(self.name)
else:
self.name = ''
self.ui.txtName.setText('')
self.ui.txtName.setPlaceholderText('Por favor ingrese un nombre válido')
if self.ui.txtDNI is obj:
if self.validate_num(self.ui.txtDNI.text().replace(',','')):
self.dni = int(self.ui.txtDNI.text().replace(',',''))
self.ui.txtDNI.setText('{:,}'.format(self.dni))
else:
self.dni = ''
self.ui.txtDNI.setText('')
self.ui.txtDNI.setPlaceholderText('Por favor, escriba un número')
if self.ui.txtPhone is obj:
if self.validate_num(self.ui.txtPhone.text()):
self.phone = int(self.ui.txtPhone.text())
self.ui.txtPhone.setText(str(self.phone))
else:
self.phone = ''
self.ui.txtPhone.setText('')
self.ui.txtPhone.setPlaceholderText('Por favor, escriba un número')
if self.ui.txtDNIRegister is obj:
if self.validate_num(self.ui.txtDNIRegister.text()):
self.dni_register = int(self.ui.txtDNIRegister.text())
else:
self.ui.txtDNIRegister.setText('')
self.ui.txtDNIRegister.setPlaceholderText('Por favor, escriba un número')
return super(MainWindow, self).eventFilter(obj, event)
#----------------------------------------------------------------------
def save_user(self):
user = dict(
name = self.name,
dni = self.dni,
phone = self.phone,
addres = self.ui.txtAddres.text(),
birth = self.ui.txtBirth.text(),
)
self.conn = connect(self.path)
if all([user[value] for value in ['name', 'dni', 'phone']]):
if not validate_duplicate(self.conn, 'users', 'dni', user['dni']):
print (user)
try:
with self.conn as conn:
insert_data(conn, 'users', **user)
except:
close(self.conn)
finally:
close(self.conn)
self.ui.lblBanner.setText(f'Usuario {self.name} creado correctamente')
self.ui.txtName.setText('')
self.ui.txtDNI.setText('')
self.ui.txtPhone.setText('')
self.ui.txtAddres.setText('')
self.ui.txtBirth.setText('')
self.ui.txtName.setPlaceholderText('')
self.ui.txtDNI.setPlaceholderText('')
self.ui.txtPhone.setPlaceholderText('')
else:
self.ui.lblBanner.setText(f"El usuario {user['dni']} ya existe")
else:
self.ui.lblBanner.setText('Verifique los datos')
#----------------------------------------------------------------------
def validate_txt(self, *fields):
validator = [re.match(r'^[a-z\sáéíóú.]+$', field, re.I) for field in fields]
return all(validator)
#----------------------------------------------------------------------
def validate_num(self, *fields):
validator = [re.match(r'^[0-9]+$', field) for field in fields]
return all(validator)
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
apply_stylesheet(app, theme='light_red.xml', light_secondary=True)
font = QtGui.QFont()
font.setFamily("Ubuntu Mono")
font.setPointSize(15)
font.setWeight(75)
font.setBold(True)
font2 = QtGui.QFont()
font2.setPointSize(10)
font2.setBold(True)
window.ui.lblTitle.setFont(font)
window.ui.lblBanner.setFont(font2)
window.show()
sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
""" $lic$
Copyright (C) 2016-2017 by The Board of Trustees of Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
If you use this program in your research, we request that you reference the
TETRIS paper ("TETRIS: Scalable and Efficient Neural Network Acceleration with
3D Memory", in ASPLOS'17. April, 2017), and that you send us a citation of your
work.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import itertools
from collections import namedtuple
from .phy_dim2 import PhyDim2
NODE_REGION_LIST = ['dim',
'origin',
'type',
]
class NodeRegion(namedtuple('NodeRegion', NODE_REGION_LIST)):
'''
A node region defined by the dimension and origin offset.
The `type` attribute specifies the region type, which could be `PROC` for
computation processing nodes or 'DATA' for data storage nodes.
NOTES: we cannot overload __contains__ and __iter__ as a node container,
because the base namedtuple already defines them.
'''
# Type enums.
PROC = 0
DATA = 1
NUM = 2
def __new__(cls, *args, **kwargs):
ntp = super(NodeRegion, cls).__new__(cls, *args, **kwargs)
if not isinstance(ntp.dim, PhyDim2):
raise TypeError('NodeRegion: dim must be a PhyDim2 object.')
if not isinstance(ntp.origin, PhyDim2):
raise TypeError('NodeRegion: origin must be a PhyDim2 object.')
if ntp.type not in range(cls.NUM):
raise ValueError('NodeRegion: type must be a valid type enum.')
return ntp
def contains_node(self, coordinate):
''' Whether the region contains the given coordinate. '''
min_coord = self.origin
max_coord = self.origin + self.dim
return all(cmin <= c and c < cmax for c, cmin, cmax
in zip(coordinate, min_coord, max_coord))
def node_iter(self):
''' Iterate through all nodes in the region. '''
gens = []
for o, d in zip(self.origin, self.dim):
            gens.append(range(o, o + d))
cnt = 0
for tp in itertools.product(*gens):
coord = PhyDim2(*tp)
assert self.contains_node(coord)
cnt += 1
yield coord
def rel2abs(self, rel_coordinate):
''' Convert relative node coordinate to absolute node coordinate. '''
if not isinstance(rel_coordinate, PhyDim2):
raise TypeError('NodeRegion: relative coordinate must be '
'a PhyDim2 object.')
abs_coordinate = self.origin + rel_coordinate
if not self.contains_node(abs_coordinate):
raise ValueError('NodeRegion: relative coordinate {} is not '
'in node region {}'.format(rel_coordinate, self))
return abs_coordinate
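# Usage sketch (illustrative only; assumes PhyDim2(h, w) from .phy_dim2):
#   region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(1, 1), type=NodeRegion.PROC)
#   region.contains_node(PhyDim2(2, 2))   # True
#   list(region.node_iter())              # the 4 coordinates covered by the region
#   region.rel2abs(PhyDim2(0, 1))         # PhyDim2(1, 2)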
|
nilq/baby-python
|
python
|
# Basic Frame Differencing Example
#
# Note: You will need an SD card to run this example.
#
# This example demonstrates using frame differencing with your OpenMV Cam. It's
# called basic frame differencing because there's no background image update.
# So, as time passes the background image may change resulting in issues.
import sensor, image, pyb, os, time
TRIGGER_THRESHOLD = 5
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
# Replace the image with the "abs(NEW-OLD)" frame difference.
img.difference("temp/bg.bmp")
hist = img.get_histogram()
    # The code below compares the 99th percentile value (i.e. the non-outlier
    # maximum) against the 90th percentile value (i.e. a non-maximum value).
    # The difference between the two values grows as more pixels change in the
    # difference image.
diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
triggered = diff > TRIGGER_THRESHOLD
print(clock.fps(), triggered) # Note: Your OpenMV Cam runs about half as fast while
# connected to your computer. The FPS should increase once disconnected.
|
nilq/baby-python
|
python
|
import tetris.input.gamepad as gp
import pygame
pygame.init()
if __name__ == "__main__":
sticks = []
qs = gp.PygameEventReader.q
for stick in gp.get_available():
print(gp.display_gamepad_info(stick) + '\n')
sticks.append(gp.GamepadWrapper(stick.get_id()))
print('Listening...')
while True:
event = qs.get()
print(event)
|
nilq/baby-python
|
python
|
import datetime # enables the start time elements in date and time format
# Interaction for patient with learning disabilities
# "psychol_assessment",
# "iapt",
# "cmh_for_smi"
# Mental health interaction 1: Psychological assessment
def psychol_assessment(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "psychological assessment",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "psychological assessment",
"start": encounter["start"] + datetime.timedelta(minutes=60),
"cost": 96, # NHS Ref cost for IAPT - PSSRU
"glucose": 0, # dummy glucose impact, to be updated
"carbon": 50, # update for accurate carbon
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {0: 0.5, 13: 0.5}
next_environment_id_to_time = {
0: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
13: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Mental health interaction 2: IAPT
def iapt(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "iapt",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "iapt",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 96, # NHS Ref cost for IAPT - PSSRU
"glucose": 0, # dummy glucose impact, to be updated
"carbon": 50, # update for accurate carbon
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {0: 0.5, 13: 0.5}
next_environment_id_to_time = {
0: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
13: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Mental health interaction 3: Community mental health for severe mental illness
def cmh_for_smi(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "cmh for smi",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "cmh for smi",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 96, # NHS Ref cost for IAPT - PSSRU
"glucose": 0, # dummy glucose impact, to be updated
"carbon": 50, # update for accurate carbon
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {0: 0.5, 13: 0.5}
next_environment_id_to_time = {
0: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
13: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
|
nilq/baby-python
|
python
|
import cv2
import utils  # local helper module assumed to provide buildmap_1 (fisheye remap tables)
def temp(input, output):
    img = cv2.imread(input)
    xmap, ymap = utils.buildmap_1(Ws=800, Hs=800, Wd=800, Hd=800, fov=193.0)
    cv2.imwrite(output, cv2.remap(img, xmap, ymap, cv2.INTER_LINEAR))
|
nilq/baby-python
|
python
|
from datetime import date
# random person
class Person:
# def __new__(cls, name, age):
# print("New object called")
# # super.__new__(cls[name, age])
def __init__(self, name, age):
print('__init__ called')
self.name = name
self.age = age
@classmethod
def fromBirthYear(cls, name, birthyear):
print("Factory method called")
return cls(name, date.today().year - birthyear)
def display(self):
print(self.name, self.age)
@staticmethod
def fromFathersAge(name, fatherAge, fatherPersonAgeDiff):
return Person(name, date.today().year - fatherAge + fatherPersonAgeDiff)
# person = Person('Sudeep', 19)
# person.display()
person1 = Person.fromBirthYear('John', 1985)
person1.display()
# print(id(person), id(person1))
# class Man(Person):
# sex = 'Male'
# man = Man.fromBirthYear('John', 1985)
# print(isinstance(man, Man))
# man1 = Man.fromFathersAge('John', 1965, 20)
# print(isinstance(man1, Man), type(man1))
|
nilq/baby-python
|
python
|
import logging
from discord.ext import tasks, commands
from naotomori.cogs.source.anime import _9anime, gogoanime
from naotomori.cogs.sourcecog import SourceCog
logger = logging.getLogger('NaoTomori')
class AnimeCog(SourceCog):
"""
AnimeCog: extends the SourceCog.
"""
def __init__(self, bot):
"""
Constructor: initialize the cog.
:param bot: The Discord bot.
"""
logger.info("Initializing AnimeCog")
super().__init__(bot)
# Replace this with your own 'Anime API' if you want to use a different anime source
# self.source = _9anime._9Anime()
logger.info("Setting GoGoAnime as anime source")
self.source = gogoanime.GoGoAnime()
@commands.command(
brief='Set the anime source for retrieving new anime (set source to "none" to remove the anime source)')
async def setAnimeSource(self, ctx, source: str):
"""
Set the anime source, i.e. where it will retrieve the anime from.
:param ctx: The context.
:param source: Name of the anime source.
"""
logger.info("Receiving setAnimeSource command")
successful = self._setAnimeSource(source)
if successful:
self.bot.get_cog('DatabaseCog').updateValue("anime_source", source)
if source.lower() == "none":
self.list.clear()
logger.info('Successfully removed the anime source')
await ctx.send(f'Successfully removed the anime source.')
return
        else:
logger.error('Unknown or unsupported anime source')
await ctx.send('Unknown or unsupported anime source.')
return
self.list.clear()
self.fillCache()
logger.info(f'Successfully set the anime source to {source}')
await ctx.send(f'Successfully set the anime source to {source}.')
def _setAnimeSource(self, source):
"""
Set the anime source, i.e. where it will retrieve the anime from.
:param source: Name of the anime source.
:return True if successful, False otherwise.
"""
if source.lower() == "gogoanime":
self.source = gogoanime.GoGoAnime()
# elif source.lower() == "9anime":
# self.source = _9anime._9Anime()
elif source.lower() == "none":
self.source = None
else:
return False
return True
@commands.command(brief='Ignore an anime (don\'t send pings for a certain anime)')
async def ignoreAnime(self, ctx, *args):
"""
Ignore an anime.
:param ctx: The context.
:param args: Name of the anime.
"""
logger.info("Receiving ignoreAnime command")
await super(AnimeCog, self).ignore(ctx, True, *args)
@commands.command(brief='Unignore an anime')
async def unignoreAnime(self, ctx, *args):
"""
Unignore an anime.
:param ctx: The context.
:param args: Name of the anime.
"""
logger.info("Receiving unignoreAnime command")
await super(AnimeCog, self).unignore(ctx, True, *args)
@tasks.loop(minutes=5)
async def checkNewLoop(self):
"""
Loop that periodically calls checkNew to check for new anime.
"""
await self.checkNew()
|
nilq/baby-python
|
python
|
def params_to_string(task: dict) -> dict:
for k in task['parameters'].keys():
        if isinstance(task['parameters'][k], (int, float)):
            task['parameters'][k] = str(task['parameters'][k])
return task
|
nilq/baby-python
|
python
|
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize('./source/cython_functions.pyx',compiler_directives={'language_level' : "3"}))
|
nilq/baby-python
|
python
|
from .chem import BOND_TYPES, BOND_NAMES, set_conformer_positions, draw_mol_image, update_data_rdmol_positions, \
update_data_pos_from_rdmol, set_rdmol_positions, set_rdmol_positions_, get_atom_symbol, mol_to_smiles, \
remove_duplicate_mols, get_atoms_in_ring, get_2D_mol, draw_mol_svg, GetBestRMSD
from .distgeom import Embed3D, get_d_from_pos
from .transforms import AddHigherOrderEdges, AddEdgeLength, AddPlaceHolder, AddEdgeName, AddAngleDihedral, CountNodesPerGraph
from .torch import ExponentialLR_with_minLr, repeat_batch, repeat_data, get_optimizer, get_scheduler, clip_norm
from .evaluation import evaluate_conf, evaluate_distance, get_rmsd_confusion_matrix
from .sde import GaussianFourierProjection
__all__ = ["BOND_TYPES", "BOND_NAMES", "set_conformer_positions", "draw_mol_image",
"update_data_rdmol_positions", "update_data_pos_from_rdmol", "set_rdmol_positions",
"set_rdmol_positions_", "get_atom_symbol", "mol_to_smiles", "remove_duplicate_mols",
"get_atoms_in_ring", "get_2D_mol", "draw_mol_svg", "GetBestRMSD",
"Embed3D", "get_d_from_pos",
"AddHigherOrderEdges", "AddEdgeLength", "AddPlaceHolder", "AddEdgeName",
"AddAngleDihedral", "CountNodesPerGraph",
"ExponentialLR_with_minLr",
"repeat_batch", "repeat_data",
"get_optimizer", "get_scheduler", "clip_norm",
"evaluate_conf", "evaluate_distance", "get_rmsd_confusion_matrix",
"GaussianFourierProjection",]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu June 17 11:53:42 2021
@author: Pavan Tummala
"""
import os, numpy as np
import cv2
import random
import torch
import torch.utils.data as data
import xml.etree.ElementTree as ET
from abc import ABCMeta, abstractmethod
import scipy.cluster.vq as vq
import pickle
import pandas as pd
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from cv2 import imread, resize
from numpy import concatenate
from sklearn.metrics import accuracy_score
from sklearn.semi_supervised import LabelPropagation
from sklearn.model_selection import train_test_split
import argparse
from imblearn.under_sampling import RandomUnderSampler
from skimage import feature
import warnings
from scipy.sparse import issparse
from sklearn.naive_bayes import _BaseDiscreteNB
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import check_X_y, check_array
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.validation import check_is_fitted
warnings.filterwarnings("ignore")
"""
Data Loader reading the files, extracting individual objects from each image
"""
class DataLoader(data.Dataset):
def __init__(self,data_path="", trainval='trainval',transform=None):
self.data_path = data_path
self.transform = transform
self.trainval = trainval
self.__init_classes()
self.names, self.labels, self.lable_set, self.bounding_box = self.__dataset_info()
def __getitem__(self, index):
self.data = []
self.lables = []
x = imread(self.data_path+'JPEGImages/'+self.names[index]+'.jpg')
x_min, y_min, x_max, y_max = self.bounding_box[index]
for i in range(len(x_min)):
sub_img = x[y_min[i]:y_max[i],x_min[i]:x_max[i]]
sub_img = cv2.resize(sub_img, (64, 64),
interpolation=cv2.INTER_NEAREST)
self.data.append(sub_img)
self.lables.append(self.lable_set[index][i])
if self.transform !=None:
x = self.transform(x)
y = self.labels[index]
def __fetchdata__(self):
return self.data, self.lables
def __len__(self):
return len(self.names)
def __dataset_info(self):
with open(self.data_path+'ImageSets/Main/'+self.trainval+'.txt') as f:
annotations = f.readlines()
annotations = [n[:-1] for n in annotations]
names = []
labels = []
lable_set = []
bounding_box = []
for af in annotations:
filename = os.path.join(self.data_path,'Annotations',af)
tree = ET.parse(filename+'.xml')
objs = tree.findall('object')
num_objs = len(objs)
bdg_box = [obj.find('bndbox') for obj in objs]
x_min = [int(box.find('xmin').text.lower().strip()) for box in bdg_box]
y_min = [int(box.find('ymin').text.lower().strip()) for box in bdg_box]
x_max = [int(box.find('xmax').text.lower().strip()) for box in bdg_box]
y_max = [int(box.find('ymax').text.lower().strip()) for box in bdg_box]
coords = (x_min, y_min, x_max, y_max)
boxes_cl = np.zeros((num_objs), dtype=np.int32)
temp_lbls = []
for ix, obj in enumerate(objs):
cls = self.class_to_ind[obj.find('name').text.lower().strip()]
boxes_cl[ix] = cls
temp_lbls.append(cls)
lbl = np.zeros(self.num_classes)
lbl[boxes_cl] = 1
labels.append(lbl)
names.append(af)
lable_set.append(temp_lbls)
bounding_box.append(coords)
return np.array(names), np.array(labels).astype(np.float32), lable_set, bounding_box
def __init_classes(self):
self.classes = ('aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self.num_classes = len(self.classes)
self.class_to_ind = dict(zip(self.classes, range(self.num_classes)))
"""
local binary pattern
"""
class LocalBinaryPatterns:
def __init__(self, numPoints, radius):
# store the number of points and radius
self.numPoints = numPoints
self.radius = radius
def describe(self, image, eps=1e-7):
# compute the Local Binary Pattern representation
# of the image, and then use the LBP representation
# to build the histogram of patterns
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
lbp = feature.local_binary_pattern(image, self.numPoints,
self.radius, method="uniform")
(hist, _) = np.histogram(lbp.ravel(),
bins=np.arange(0, self.numPoints + 3),
range=(0, self.numPoints + 2))
# normalize the histogram
hist = hist.astype("float")
hist /= (hist.sum() + eps)
# return the histogram of Local Binary Patterns
return hist
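# Usage sketch (illustrative): 24 sample points on a radius-8 circle, as instantiated below.
#   desc = LocalBinaryPatterns(24, 8)
#   hist = desc.describe(bgr_image)  # 26-bin normalized histogram of uniform LBP codes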
"""
color layout descriptor
"""
class DescriptorComputer:
__metaclass__ = ABCMeta
@abstractmethod
def compute(self, frame):
pass
class ColorLayoutComputer(DescriptorComputer):
def __init__(self):
self.rows = 8
self.cols = 8
self.prefix = "CLD"
def compute(self, img):
averages = np.zeros((self.rows,self.cols,3))
imgH, imgW, _ = img.shape
for row in range(self.rows):
for col in range(self.cols):
row_start = int(imgH/self.rows * row)
row_end = int(imgH/self.rows * (row+1))
col_start = int(imgW/self.cols*col)
col_end = int(imgW/self.cols*(col+1))
slice1 = img[row_start:row_end, col_start:col_end]
#slice1 = img[imgH/self.rows * row: imgH/self.rows * (row+1), imgW/self.cols*col : imgW/self.cols*(col+1)]
#print(slice)
average_color_per_row = np.mean(slice1, axis=0)
average_color = np.mean(average_color_per_row, axis=0)
average_color = np.uint8(average_color)
averages[row][col][0] = average_color[0]
averages[row][col][1] = average_color[1]
averages[row][col][2] = average_color[2]
icon = cv2.cvtColor(np.array(averages, dtype=np.uint8), cv2.COLOR_BGR2YCR_CB)
y, cr, cb = cv2.split(icon)
dct_y = cv2.dct(np.float32(y))
dct_cb = cv2.dct(np.float32(cb))
dct_cr = cv2.dct(np.float32(cr))
dct_y_zigzag = []
dct_cb_zigzag = []
dct_cr_zigzag = []
flip = True
flipped_dct_y = np.fliplr(dct_y)
flipped_dct_cb = np.fliplr(dct_cb)
flipped_dct_cr = np.fliplr(dct_cr)
for i in range(self.rows + self.cols -1):
k_diag = self.rows - 1 - i
diag_y = np.diag(flipped_dct_y, k=k_diag)
diag_cb = np.diag(flipped_dct_cb, k=k_diag)
diag_cr = np.diag(flipped_dct_cr, k=k_diag)
if flip:
diag_y = diag_y[::-1]
diag_cb = diag_cb[::-1]
diag_cr = diag_cr[::-1]
dct_y_zigzag.append(diag_y)
dct_cb_zigzag.append(diag_cb)
dct_cr_zigzag.append(diag_cr)
flip = not flip
return np.concatenate([np.concatenate(dct_y_zigzag), np.concatenate(dct_cb_zigzag), np.concatenate(dct_cr_zigzag)])
"""
Bag of Visual word
"""
device = torch.device('cpu')
def random_init(dataset, num_centers):
num_points = dataset.size(0)
dimension = dataset.size(1)
used = torch.zeros(num_points, dtype=torch.long)
indices = torch.zeros(num_centers, dtype=torch.long)
for i in range(num_centers):
while True:
cur_id = random.randint(0, num_points - 1)
if used[cur_id] > 0:
continue
used[cur_id] = 1
indices[i] = cur_id
break
indices = indices.to(device)
centers = torch.gather(dataset, 0, indices.view(-1, 1).expand(-1, dimension))
return centers
def compute_codes(dataset, centers):
num_points = dataset.size(0)
dimension = dataset.size(1)
num_centers = centers.size(0)
# 5e8 should vary depending on the free memory on the GPU
# Ideally, automatically ;)
chunk_size = int(5e8 / num_centers)
codes = torch.zeros(num_points, dtype=torch.long, device=device)
centers_t = torch.transpose(centers, 0, 1)
centers_norms = torch.sum(centers ** 2, dim=1).view(1, -1)
for i in range(0, num_points, chunk_size):
begin = i
end = min(begin + chunk_size, num_points)
dataset_piece = dataset[begin:end, :]
dataset_norms = torch.sum(dataset_piece ** 2, dim=1).view(-1, 1)
distances = torch.mm(dataset_piece, centers_t)
distances *= -2.0
distances += dataset_norms
distances += centers_norms
_, min_ind = torch.min(distances, dim=1)
codes[begin:end] = min_ind
return codes
def update_centers(dataset, codes, num_centers):
num_points = dataset.size(0)
dimension = dataset.size(1)
centers = torch.zeros(num_centers, dimension, dtype=torch.float, device=device)
cnt = torch.zeros(num_centers, dtype=torch.float, device=device)
centers.scatter_add_(0, codes.view(-1, 1).expand(-1, dimension), dataset)
cnt.scatter_add_(0, codes, torch.ones(num_points, dtype=torch.float, device=device))
# Avoiding division by zero
# Not necessary if there are no duplicates among the data points
cnt = torch.where(cnt > 0.5, cnt, torch.ones(num_centers, dtype=torch.float, device=device))
centers /= cnt.view(-1, 1)
return centers
def cluster(dataset, num_centers):
centers = random_init(dataset, num_centers)
codes = compute_codes(dataset, centers)
num_iterations = 0
while True:
num_iterations += 1
centers = update_centers(dataset, codes, num_centers)
new_codes = compute_codes(dataset, centers)
# Waiting until the clustering stops updating altogether
# This is too strict in practice
if torch.equal(codes, new_codes):
print('Converged in %d iterations' % num_iterations)
break
codes = new_codes
return centers, codes
def extract_sift_descriptors(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
keypoints, descriptors = sift.detectAndCompute(gray, None)
return descriptors
def build_codebook(X, voc_size):
"""
    Input a list of feature descriptors
voc_size is the "K" in K-means, k is also called vocabulary size
Return the codebook/dictionary
"""
    features = np.vstack([descriptor for descriptor in X]).astype(np.float32)
dataset = torch.from_numpy(features)
print('Starting clustering')
centers, codes = cluster(dataset, voc_size)
return centers
def input_vector_encoder(feature, codebook):
"""
Input all the local feature of the image
Pooling (encoding) by codebook and return
"""
code, _ = vq.vq(feature, codebook)
    word_hist, bin_edges = np.histogram(code, bins=range(codebook.shape[0] + 1), density=True)
return word_hist
def extract_surf_descriptors(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
surf = cv2.xfeatures2d.SURF_create()
keypoints, descriptors = surf.detectAndCompute(gray, None)
return descriptors
"""
Histogram features
"""
def fd_histogram(image, mask=None):
bins=8
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([image], [0, 1, 2], None, [bins, bins, bins], [0, 256, 0, 256, 0, 256])
cv2.normalize(hist, hist)
return hist.flatten()
"""
feature normalization
"""
def scale(X, x_min, x_max):
nom = (X-X.min(axis=0))*(x_max-x_min)
denom = X.max(axis=0) - X.min(axis=0)
denom[denom==0] = 1
return x_min + nom/denom
class MultinomialNBSS(_BaseDiscreteNB):
"""
Semi-supervised Naive Bayes classifier for multinomial models. Unlabeled
data must be marked with -1. In comparison to the standard scikit-learn
MultinomialNB classifier, the main differences are in the _count and fit
methods.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
beta : float, optional (default=1.0)
Weight applied to the contribution of the unlabeled data
(0 for no contribution).
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
tol : float, optional (default=1e-3)
Tolerance for convergence of EM algorithm.
max_iter : int, optional (default=1500)
Maximum number of iterations for EM algorithm.
verbose : boolean, optional (default=True)
Whether to output updates during the running of the EM algorithm.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : array, shape (n_classes, )
Mirrors ``class_log_prior_`` for interpreting MultinomialNBSS
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : array, shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting MultinomialNBSS
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
"""
def __init__(self, alpha=1.0, beta=1.0, fit_prior=True, class_prior=None,
tol=1e-3, max_iter=1500, verbose=True):
self.alpha = alpha
self.beta = beta
self.fit_prior = fit_prior
self.class_prior = class_prior
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
def _count(self, X, Y, U_X=np.array([]), U_prob=np.array([])):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ = safe_sparse_dot(Y.T, X)
self.class_count_ = Y.sum(axis=0)
if U_X.shape[0] > 0:
self.feature_count_ += self.beta*safe_sparse_dot(U_prob.T, U_X)
self.class_count_ += self.beta*U_prob.sum(axis=0)
else:
self.feature_count_ = safe_sparse_dot(Y.T, X)
self.class_count_ = Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""A semi-supervised version of this method has not been implemented.
"""
def fit(self, X, y, sample_weight=None):
"""Fit semi-supervised Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values. Unlabeled data must be marked with -1.
sample_weight : array-like, shape = [n_samples], (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
# Unlabeled data are marked with -1
unlabeled = np.flatnonzero(y == -1)
labeled = np.setdiff1d(np.arange(len(y)), unlabeled)
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y[labeled])
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64, copy=False)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
alpha = self._check_alpha()
self._count(X[labeled], Y)
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
jll = self._joint_log_likelihood(X)
sum_jll = jll.sum()
# Run EM algorithm
if len(unlabeled) > 0:
self.num_iter = 0
pred = self.predict(X)
while self.num_iter < self.max_iter:
self.num_iter += 1
prev_sum_jll = sum_jll
# First, the E-step:
prob = self.predict_proba(X[unlabeled])
# Then, the M-step:
self._count(X[labeled], Y, X[unlabeled], prob)
self._update_feature_log_prob(self.beta)
self._update_class_log_prior(class_prior=class_prior)
jll = self._joint_log_likelihood(X)
sum_jll = jll.sum()
if self.verbose:
print(
'Step {}: jll = {:f}'.format(self.num_iter, sum_jll)
)
if self.num_iter > 1 and prev_sum_jll - sum_jll < self.tol:
break
if self.verbose:
end_text = 's.' if self.num_iter > 1 else '.'
print(
'Optimization converged after {} '
'iteration'.format(self.num_iter)
+ end_text
)
return self
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data',
help='path for voc2007')
args = parser.parse_args()
path = args.data
data_load = DataLoader(data_path = path)
lst_data = []
lst_lbl = []
for i in range(0, 5000):
data_load.__getitem__(i)
test_data, test_label = data_load.__fetchdata__()
lst_data.append(test_data)
lst_lbl.append(test_label)
labels = np.hstack(lst_lbl)
data = np.concatenate(lst_data, axis=0)
print(len(data))
print("################### Data load completed #######################")
"""
color layour features
"""
computer = ColorLayoutComputer()
color_layout_features = [computer.compute(data[i]) for i in range(len(data))]
print("################### Color layout feature generated #######################")
VOC_SIZE = 128
# =============================================================================
# """
# visual bag of words using sift
# """
# bow_sift = [extract_sift_descriptors(data[i].astype('uint8')) for i in range(len(data))]
# bow_sift = [each for each in zip(bow_sift, labels) if not each[0] is None]
# bow_sift, y_train = zip(*bow_sift)
#
# codebook = build_codebook(bow_sift, voc_size=VOC_SIZE)
# bow_sift = [input_vector_encoder(x, codebook) for x in bow_sift]
# =============================================================================
"""
visual bag of words using surf
"""
bow_surf = [extract_surf_descriptors(data[i].astype('uint8')) for i in range(len(data))]
bow_surf = [each for each in zip(bow_surf, labels) if not each[0] is None]
bow_surf, y_train = zip(*bow_surf)
codebook = build_codebook(bow_surf, voc_size=VOC_SIZE)
bow_surf = [input_vector_encoder(x, codebook) for x in bow_surf]
print("################### Visual bag of words and surf generated #######################")
"""
color histogram
"""
color_hist_features = [fd_histogram(data[i].astype('uint8')) for i in range(len(data))]
print("################### Color Histogram generated #######################")
"""
local binary pattern
"""
desc = LocalBinaryPatterns(24, 8)
lbp = [desc.describe(data[i]) for i in range(len(data))]
print("################### Local Binary Pattern generated #######################")
bow_surf = np.array(bow_surf)
color_layout_features = np.array(color_layout_features)
color_hist_features = np.array(color_hist_features)
lbp = np.array(lbp)
# with open('color_layout_descriptor_64.pkl','wb') as f:
# pickle.dump(color_layout_features, f)
# with open('bow_surf_64.pkl','wb') as f:
# pickle.dump(bow_surf, f)
# with open('hist_64.pkl','wb') as f:
# pickle.dump(color_hist_features, f)
# with open('labels_64.pkl','wb') as f:
# pickle.dump(labels, f)
# with open('data_64.pkl','wb') as f:
# pickle.dump(data, f)
"""
pickle read
"""
# color_layout_features = pd.read_pickle(path + "/color_layout_descriptor_64.pkl")
# bow_surf = pd.read_pickle(path + "/bow_surf_64.pkl")
# color_hist_features = pd.read_pickle(path + "/hist_64.pkl")
# labels = pd.read_pickle(path +"/labels_64.pkl")
# data = pd.read_pickle(path +"/data_64.pkl")
"""
Normalizing color layour feature only
since other features have been normalized while feature extraction above
"""
color_layout_features_scaled = scale(color_layout_features, 0, 1)
"""
stacking all the features into one array
"""
features = np.hstack([color_layout_features_scaled, color_hist_features, lbp])
features = features.astype('float64')
"""
feature selection using Anova,
K is the hyper param that needs to be varied and tested
"""
fs = SelectKBest(score_func=f_classif, k=200)
fs.fit(features, labels)
selected_features = fs.transform(features)
print("################### Feature Selection completed #######################")
undersample = RandomUnderSampler(random_state=123)
X_over, y_over = undersample.fit_resample(selected_features, labels)
X_train, X_test, y_train, y_test = train_test_split(X_over, y_over, test_size=0.1, random_state=42)
X_train_lab, X_test_unlab, y_train_lab, y_test_unlab = train_test_split(X_train, y_train, test_size=0.1, random_state=1, stratify=y_train)
print("################### Class Balancing completed #######################")
print("Labelled features set size: %d, %d"%X_train_lab.shape)
print("Labelled lable set size: %d"%y_train_lab.shape)
print("Unlabelled features set size: %d, %d"%X_test_unlab.shape)
print("Unlabelled lable set size: %d"%y_test_unlab.shape)
X_train_mixed = concatenate((X_train_lab, X_test_unlab))
nolabel = [-1 for _ in range(len(y_test_unlab))]
y_train_mixed = concatenate((y_train_lab, nolabel))
model = MultinomialNBSS(verbose=False)
model.fit(X_train_mixed, y_train_mixed)
print("################### SSGMM model built #######################")
yhat = model.predict(X_test)
print("Test data accuracy: %.2f%%"% (accuracy_score(y_test, yhat)*100))
|
nilq/baby-python
|
python
|
# Copyright (c) 2011-2013 Kunal Mehta. All rights reserved.
# Use of this source code is governed by a BSD License found in README.md.
from django.conf import settings
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, \
HttpResponseNotFound, HttpResponseForbidden
from django.views.decorators.http import require_POST, require_GET
from huxley.accounts.forms import RegistrationForm
from huxley.accounts.models import HuxleyUser
from huxley.core.models import *
from huxley.shortcuts import render_template, render_json
def login_user(request):
""" Logs in a user or renders the login template. """
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user, error = HuxleyUser.authenticate(username, password)
if error:
return render_json({'success': False, 'error': error})
redirect = HuxleyUser.login(request, user)
return render_json({'success': True, 'redirect': redirect})
return render_template(request, 'auth.html')
def login_as_user(request, uid):
""" Logs in as a particular user (admin use only). """
try:
if not request.user.is_superuser:
return HttpResponseForbidden()
username = HuxleyUser.objects.get(id=uid).username
user = authenticate(username=username, password=settings.ADMIN_SECRET)
login(request, user)
return HttpResponseRedirect(reverse('index'))
except HuxleyUser.DoesNotExist:
return HttpResponseNotFound()
def logout_user(request):
""" Logs out the current user. Although we'll only be supporting AJAX,
we're leaving the standard logout here in case of a heinous bug that
prevents normal logout."""
logout(request)
if request.is_ajax():
return HttpResponse(reverse('login'))
else:
return HttpResponseRedirect(reverse('index'))
def register(request):
""" Registers a new user and school. """
# Registration is closed. TODO: Implement the waitlist.
#return render_template(request, 'registration_closed.html')
if request.method =='POST':
form = RegistrationForm(request.POST)
if form.is_valid():
new_school = form.create_school()
new_user = form.create_user(new_school)
form.add_country_preferences(new_school)
form.add_committee_preferences(new_school)
if not settings.DEBUG:
new_user.email_user("Thanks for registering for BMUN 62!",
"We're looking forward to seeing %s at BMUN 62. "
"You can find information on deadlines and fees at "
"http://bmun.org/bmun/timeline/. If you have any "
"more questions, please feel free to email me at "
"info@bmun.org. See you soon!\n\nBest,\n\nShrey Goel"
"\nUSG of External Relations, BMUN 62" % new_school.name,
"info@bmun.org")
Conference.auto_country_assign(new_school)
return render_template(request, 'thanks.html')
form = RegistrationForm()
context = {
'form': form,
'state': '',
'countries': Country.objects.filter(special=False).order_by('name'),
'committees': Committee.objects.filter(special=True)
}
return render_template(request, 'registration.html', context)
@require_POST
def change_password(request):
""" Attempts to change the user's password, or returns an error. """
if not request.user.is_authenticated():
return HttpResponse(status=401)
old = request.POST.get('oldpassword')
new = request.POST.get('newpassword')
new2 = request.POST.get('newpassword2')
success, error = request.user.change_password(old, new, new2)
return HttpResponse('OK') if success else HttpResponse(error)
def reset_password(request):
""" Reset a user's password. """
if request.method == 'POST':
username = request.POST.get('username')
new_password = HuxleyUser.reset_password(username)
if new_password:
if not settings.DEBUG:
                # Look up the user whose password was just reset so we can notify them.
                HuxleyUser.objects.get(username=username).email_user(
                    "Huxley Password Reset",
                    "Your password has been reset to %s.\nThank you for using Huxley!" % (new_password),
                    from_email="no-reply@bmun.org")
return render_template(request, 'password-reset-success.html')
else:
return render_template(request, 'password-reset.html', {'error': True})
return render_template(request, 'password-reset.html')
@require_GET
def validate_unique_user(request):
""" Checks that a potential username is unique. """
username = request.GET['username']
if HuxleyUser.objects.filter(username=username).exists():
return HttpResponse(status=406)
else:
return HttpResponse(status=200)
|
nilq/baby-python
|
python
|
import unittest
import torchtext.vocab as v
import han.encode.sentence as s
class SentenceEncoderTestCase(unittest.TestCase):
def test(self):
vocab = v.build_vocab_from_iterator([["apple", "is", "tasty"]])
sut = s.SentenceEncoder(vocab)
res = sut.forward(["apple is tasty", "tasty is apple"])
self.assertEqual(len(res), 2)
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
import torch
from torch import Tensor
from fairseq import checkpoint_utils, utils
from fairseq.models import (
FairseqEncoderModel,
FairseqEncoderDecoderModel,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text import S2TTransformerEncoder
from fairseq.models.speech_to_speech.modules import CTCDecoder, StackedEmbedding
from fairseq.models.text_to_speech import TTSTransformerDecoder
from fairseq.models.transformer import (
Linear,
TransformerDecoder,
TransformerModelBase,
)
logger = logging.getLogger(__name__)
class S2STransformerEncoder(S2TTransformerEncoder):
"""Based on S2T transformer encoder, with support
to incorporate target speaker embedding."""
def __init__(self, args):
super().__init__(args)
self.spk_emb_proj = None
if args.target_speaker_embed:
self.spk_emb_proj = Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
def forward(
self, src_tokens, src_lengths, tgt_speaker=None, return_all_hiddens=False
):
out = super().forward(src_tokens, src_lengths, return_all_hiddens)
if self.spk_emb_proj:
x = out["encoder_out"][0]
seq_len, bsz, _ = x.size()
tgt_speaker_emb = tgt_speaker.view(1, bsz, -1).expand(seq_len, bsz, -1)
x = self.spk_emb_proj(torch.cat([x, tgt_speaker_emb], dim=2))
out["encoder_out"][0] = x
return out
class TransformerUnitDecoder(TransformerDecoder):
"""Based on Transformer decoder, with support to decoding stacked units"""
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn, output_projection
)
self.n_frames_per_step = args.n_frames_per_step
self.out_proj_n_frames = (
Linear(
self.output_embed_dim,
self.output_embed_dim * self.n_frames_per_step,
bias=False,
)
if self.n_frames_per_step > 1
else None
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
bsz, seq_len, d = x.size()
if self.out_proj_n_frames:
x = self.out_proj_n_frames(x)
x = self.output_layer(x.view(bsz, seq_len, self.n_frames_per_step, d))
x = x.view(bsz, seq_len * self.n_frames_per_step, -1)
if (
incremental_state is None and self.n_frames_per_step > 1
): # teacher-forcing mode in training
x = x[
:, : -(self.n_frames_per_step - 1), :
] # remove extra frames after <eos>
return x, extra
def upgrade_state_dict_named(self, state_dict, name):
if self.n_frames_per_step > 1:
move_keys = [
(
f"{name}.project_in_dim.weight",
f"{name}.embed_tokens.project_in_dim.weight",
)
]
for from_k, to_k in move_keys:
if from_k in state_dict and to_k not in state_dict:
state_dict[to_k] = state_dict[from_k]
del state_dict[from_k]
class S2STransformerMultitaskModelBase(FairseqEncoderDecoderModel):
@classmethod
def build_encoder(cls, args):
encoder = S2STransformerEncoder(args)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@classmethod
def build_multitask_decoder(cls, args, tgt_dict, in_dim):
decoder_args = args.decoder_args
decoder_args.encoder_embed_dim = in_dim
if args.decoder_type == "transformer":
base_multitask_text_transformer_decoder_arch(decoder_args)
task_decoder = TransformerDecoder(
decoder_args,
tgt_dict,
embed_tokens=TransformerModelBase.build_embedding(
decoder_args,
tgt_dict,
decoder_args.decoder_embed_dim,
),
)
elif args.decoder_type == "ctc":
task_decoder = CTCDecoder(
dictionary=tgt_dict,
in_dim=in_dim,
)
else:
raise NotImplementedError(
"currently only support multitask decoder_type 'transformer', 'ctc'"
)
return task_decoder
@classmethod
def build_model(cls, args, task):
encoder = cls.build_encoder(args)
decoder = (
cls.build_decoder(args, task.target_dictionary)
if task.args.target_is_code
else cls.build_decoder(args)
)
base_model = cls(encoder, decoder)
# set up multitask decoders
base_model.multitask_decoders = {}
for task_name, task_obj in task.multitask_tasks.items():
in_dim = (
args.encoder_embed_dim
if task_obj.args.input_from == "encoder"
else args.decoder_embed_dim
)
task_decoder = cls.build_multitask_decoder(
task_obj.args, task_obj.target_dictionary, in_dim
)
setattr(base_model, f"{task_name}_decoder", task_decoder)
decoder_model_cls = (
FairseqEncoderModel
if task_obj.args.decoder_type == "ctc"
else FairseqLanguageModel
)
base_model.multitask_decoders[task_name] = decoder_model_cls(
getattr(base_model, f"{task_name}_decoder")
)
return base_model
def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs):
return self.encoder(
src_tokens, src_lengths=src_lengths, tgt_speaker=speaker, **kwargs
)
@register_model("s2ut_transformer")
class S2UTTransformerModel(S2STransformerMultitaskModelBase):
"""
Direct speech-to-speech translation model with S2T Transformer encoder + Transformer discrete unit decoder
https://arxiv.org/abs/2107.05604
"""
@staticmethod
def add_args(parser):
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
# speaker
parser.add_argument(
"--speaker-embed-dim",
type=int,
metavar="N",
help="speaker embedding dimension",
)
@classmethod
def build_decoder(cls, args, tgt_dict):
num_embeddings = len(tgt_dict)
padding_idx = tgt_dict.pad()
embed_tokens = StackedEmbedding(
num_embeddings,
args.decoder_embed_dim,
padding_idx,
num_stacked=args.n_frames_per_step,
)
return TransformerUnitDecoder(
args,
tgt_dict,
embed_tokens,
)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
tgt_speaker=None,
return_all_hiddens=False,
):
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
tgt_speaker=tgt_speaker,
return_all_hiddens=return_all_hiddens,
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"]
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
return decoder_out
@register_model("s2spect_transformer")
class S2SpecTTransformerModel(S2STransformerMultitaskModelBase):
"""
Speech-to-spectrogram model with S2T Transformer encoder + TTS Transformer decoder
"""
@staticmethod
def add_args(parser):
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
# speaker
parser.add_argument(
"--speaker-embed-dim",
type=int,
metavar="N",
help="speaker embedding dimension",
)
# decoder
parser.add_argument("--output-frame-dim", type=int)
# decoder prenet
parser.add_argument("--prenet-dropout", type=float)
parser.add_argument("--prenet-layers", type=int)
parser.add_argument("--prenet-dim", type=int)
# decoder postnet
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
# decoder transformer layers
parser.add_argument("--decoder-transformer-layers", type=int)
parser.add_argument("--decoder-embed-dim", type=int)
parser.add_argument("--decoder-ffn-embed-dim", type=int)
parser.add_argument("--decoder-normalize-before", action="store_true")
parser.add_argument("--decoder-attention-heads", type=int)
@classmethod
def build_decoder(cls, args):
return TTSTransformerDecoder(args, None, padding_idx=1)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
tgt_speaker=None,
incremental_state=None,
target_lengths=None,
speaker=None,
return_all_hiddens=False,
):
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
tgt_speaker=tgt_speaker,
return_all_hiddens=return_all_hiddens,
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
target_lengths=target_lengths,
speaker=speaker,
)
if return_all_hiddens:
decoder_out[-1]["encoder_states"] = encoder_out["encoder_states"]
decoder_out[-1]["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
]
return decoder_out
def base_multitask_text_transformer_decoder_arch(args):
args.dropout = getattr(args, "dropout", 0.3)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
# decoder layer
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
def base_s2st_transformer_encoder_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256)
@register_model_architecture(
model_name="s2ut_transformer", arch_name="s2ut_transformer"
)
def s2ut_architecture_base(args):
base_s2st_transformer_encoder_architecture(args)
# decoder
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
@register_model_architecture("s2ut_transformer", "s2ut_transformer_fisher")
def s2ut_architecture_fisher(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
s2ut_architecture_base(args)
@register_model_architecture(
model_name="s2spect_transformer", arch_name="s2spect_transformer"
)
def s2spect_architecture_base(args):
base_s2st_transformer_encoder_architecture(args)
# decoder
args.output_frame_dim = getattr(args, "output_frame_dim", 80)
# decoder prenet
args.prenet_dropout = getattr(args, "prenet_dropout", 0.5)
args.prenet_layers = getattr(args, "prenet_layers", 2)
args.prenet_dim = getattr(args, "prenet_dim", 256)
# decoder postnet
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# decoder transformer layers
args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim
)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
@register_model_architecture("s2spect_transformer", "s2spect_transformer_fisher")
def s2spect_architecture_fisher(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
# decoder
args.prenet_dim = getattr(args, "prenet_dim", 32)
s2spect_architecture_base(args)
|
nilq/baby-python
|
python
|
from .abstract_choices_factory import AbstractChoicesFactory
from .choice import Choice
from src.round import Output
from random import randrange
class PlayerVsComChoicesFactory(AbstractChoicesFactory):
def make_player1_choice():
player_choice = Choice('placeholder')
while not player_choice.is_valid():
entered_choice = input(Output.get_user_choice_header())
player_choice.set_choice(entered_choice)
return player_choice
def make_player2_choice():
valid_choices = Choice.VALID_CHOICES
number_of_valid_choices = len(valid_choices)
com_entry_index = randrange(number_of_valid_choices)
com_entry = valid_choices[com_entry_index]
com_choice = Choice(com_entry)
return com_choice
|
nilq/baby-python
|
python
|
from apple_health.util import parse_date, parse_float
DATE_COMPONENTS = "@dateComponents"
ACTIVE_ENERGY_BURNED = "@activeEnergyBurned"
ACTIVE_ENERGY_BURNED_GOAL = "@activeEnergyBurnedGoal"
ACTIVE_ENERGY_BURNED_UNIT = "@activeEnergyBurnedUnit"
APPLE_EXERCISE_TIME = "@appleExerciseTime"
APPLE_EXERCISE_TIME_GOAL = "@appleExerciseTimeGoal"
APPLE_STAND_HOURS = "@appleStandHours"
APPLE_STAND_HOURS_GOAL = "@appleStandHoursGoal"
class ActivitySummary:
# a.k.a. The Rings
def __init__(self, **data):
self.date = parse_date(data.get(DATE_COMPONENTS))
# Red
self.active_energy_burned: float = parse_float(
data.get(ACTIVE_ENERGY_BURNED)
)
self.active_energy_burned_goal: float = parse_float(
data.get(ACTIVE_ENERGY_BURNED_GOAL)
)
self.active_energy_burned_unit: str = data.get(
ACTIVE_ENERGY_BURNED_UNIT, "kcal"
)
# Green
self.exercise_time: float = parse_float(
data.get(APPLE_EXERCISE_TIME)
)
self.exercise_time_goal: float = parse_float(
data.get(APPLE_EXERCISE_TIME_GOAL)
)
# Blue
self.stand_hours: float = parse_float(
data.get(APPLE_STAND_HOURS)
)
self.stand_hours_goal: float = parse_float(
data.get(APPLE_STAND_HOURS_GOAL)
)
@property
def active_energy_percent(self) -> float:
if not self.active_energy_burned_goal:
return 0.0
return self.active_energy_burned / self.active_energy_burned_goal
@property
def exercise_time_percent(self) -> float:
if not self.exercise_time_goal:
return 0.0
return self.exercise_time / self.exercise_time_goal
@property
def stand_hours_percent(self) -> float:
if not self.stand_hours_goal:
return 0.0
return self.stand_hours / self.stand_hours_goal
def __repr__(self) -> str:
aep = int(100 * self.active_energy_percent)
etp = int(100 * self.exercise_time_percent)
shp = int(100 * self.stand_hours_percent)
return f"{aep}% / {etp}% / {shp}%"
|
nilq/baby-python
|
python
|
# Warning: Don't edit file (autogenerated from python -m dev codegen).
ROBOCODE_GET_LANGUAGE_SERVER_PYTHON = "robocode.getLanguageServerPython" # Get a python executable suitable to start the language server.
ROBOCODE_GET_PLUGINS_DIR = "robocode.getPluginsDir" # Get the directory for plugins.
ROBOCODE_CREATE_ACTIVITY = "robocode.createActivity" # Create a Robocode Activity Package.
ROBOCODE_LIST_ACTIVITY_TEMPLATES_INTERNAL = "robocode.listActivityTemplates.internal" # Provides a list with the available activity templates.
ROBOCODE_CREATE_ACTIVITY_INTERNAL = "robocode.createActivity.internal" # Actually calls rcc to create the activity.
ROBOCODE_UPLOAD_ACTIVITY_TO_CLOUD = "robocode.uploadActivityToCloud" # Upload activity package to the cloud.
ROBOCODE_LOCAL_LIST_ACTIVITIES_INTERNAL = "robocode.localListActivities.internal" # Lists the activities currently available in the workspace.
ROBOCODE_IS_LOGIN_NEEDED_INTERNAL = "robocode.isLoginNeeded.internal" # Checks if the user is already logged in.
ROBOCODE_CLOUD_LOGIN_INTERNAL = "robocode.cloudLogin.internal" # Logs into Robocloud.
ROBOCODE_CLOUD_LIST_WORKSPACES_INTERNAL = "robocode.cloudListWorkspaces.internal" # Lists the workspaces available for the user (in the cloud).
ROBOCODE_UPLOAD_TO_NEW_ACTIVITY_INTERNAL = "robocode.uploadToNewActivity.internal" # Uploads an activity package as a new activity package in the cloud.
ROBOCODE_UPLOAD_TO_EXISTING_ACTIVITY_INTERNAL = "robocode.uploadToExistingActivity.internal" # Uploads an activity package as an existing activity package in the cloud.
ALL_SERVER_COMMANDS = [
ROBOCODE_GET_PLUGINS_DIR,
ROBOCODE_LIST_ACTIVITY_TEMPLATES_INTERNAL,
ROBOCODE_CREATE_ACTIVITY_INTERNAL,
ROBOCODE_LOCAL_LIST_ACTIVITIES_INTERNAL,
ROBOCODE_IS_LOGIN_NEEDED_INTERNAL,
ROBOCODE_CLOUD_LOGIN_INTERNAL,
ROBOCODE_CLOUD_LIST_WORKSPACES_INTERNAL,
ROBOCODE_UPLOAD_TO_NEW_ACTIVITY_INTERNAL,
ROBOCODE_UPLOAD_TO_EXISTING_ACTIVITY_INTERNAL,
]
|
nilq/baby-python
|
python
|
import urllib2
import threading
from bs4 import BeautifulSoup
import re
import json
import sys
import os
import django
from stock_list import getlist, getLSEList
from extract_stock_info import get_info, getLSEInfo
from extract_stock_history import get_historical_info
from extract_sector_history import get_sector_history, get_sector_dict
from extract_stock_news import get_stock_news
from extract_NT_transactions import get_NT_transactions
import time
from pymongo import MongoClient
import warnings
import exceptions
warnings.filterwarnings("ignore", category=exceptions.RuntimeWarning, module='django.db.backends.sqlite3.base', lineno=53)
if __name__ == '__main__':
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../MADjangoProject'))
    if path not in sys.path:
sys.path.insert(1, path)
del path
os.environ['DJANGO_SETTINGS_MODULE'] = 'MADjangoProject.settings'
django.setup()
from market.models import Stock, StockHistory, SectorHistory
sec_dict = get_sector_dict()
    print 'Fetching Indices...'
ALL_Stocks = getLSEList(collection=Stock)
def get_share_info():
for share in ALL_Stocks:
print 'Fetching info of ' + share['name']
info = getLSEInfo(share['query'], share['symbol'],collection=Stock, sector_dict=sec_dict)
import threading
print 'Distributing Jobs ...'
threads = []
# callables = [get_nt]
callables = [get_share_info]
for f in callables:
t = threading.Thread(target=f)
t.setDaemon(True)
threads.append(t)
t.start()
for t in threads:
t.join()
|
nilq/baby-python
|
python
|
import Tkinter as tk
class NatnetView:
def __init__(self, parent, reader):
self.parent = parent
self.reader = reader
self.setup()
def __del__(self):
self.destroy()
def destroy(self):
self.frame.grid_forget()
def setup(self):
# container
self.frame = tk.Frame(self.parent, padx=10, pady=10)
# form elements
self.host_label = tk.Label(self.frame, text="Natnet Host IP")
self.host_entry = tk.Entry(self.frame, width=25)
self.multicast_label = tk.Label(self.frame, text="Multicast IP")
self.multicast_entry = tk.Entry(self.frame, width=25)
self.port_label = tk.Label(self.frame, text="NatNet Port")
self.port_entry = tk.Entry(self.frame, width=5)
# status element
self.connection_label = tk.Label(self.frame, text='')
self.error_label = tk.Label(self.frame, text='')
# buttons
self.connect_button = tk.Button(self.frame, text='(re-)connect', command=self.onConnectButton)
self.disconnect_button = tk.Button(self.frame, text='disconnect', command=self.onDisconnectButton)
# grid/positions
self.frame.grid()
# self.file_label.grid(column=0, row=0, columnspan=3)
# self.time_label.grid(column=1, row=1)
# self.load_button.grid(column=0, row=2)
# self.startstop_button.grid(column=1, row=2)
# self.quitButton.grid(column=2, row=2)
self.host_label.grid(column=0, row=0, sticky=tk.E)
self.host_entry.grid(column=1, row=0, sticky=tk.W)
self.multicast_label.grid(column=0, row=1, sticky=tk.E)
self.multicast_entry.grid(column=1, row=1, sticky=tk.W)
self.port_label.grid(column=0, row=2, sticky=tk.E)
self.port_entry.grid(column=1, row=2, sticky=tk.W)
self.connection_label.grid(column=0, row=3, columnspan=3, padx=10, pady=10)
self.error_label.grid(column=0, row=4, columnspan=3, padx=10, pady=10)
self.connect_button.grid(column=0, row=5, sticky=tk.E)
self.disconnect_button.grid(column=1, row=5, sticky=tk.W)
# initialize
self.host_entry.insert(0, self.reader.host)
if self.reader.multicast:
self.multicast_entry.insert(0, str(self.reader.multicast))
self.port_entry.insert(0, str(self.reader.port))
self.reader.connectEvent += self.updateConnectionStatus
self.reader.connectionLostEvent += self.updateConnectionStatus
self.reader.connectionStatusUpdateEvent += self.updateConnectionStatus
self.updateConnectionStatus(self.reader)
def onConnectButton(self):
self.reader.stop()
multicast = self.multicast_entry.get()
if multicast == '':
multicast = None
self.reader.configure(host=self.host_entry.get(), port=self.port_entry.get(), multicast=multicast)
self.reader.start()
def onDisconnectButton(self):
self.reader.stop()
def updateConnectionStatus(self, reader):
if reader.connected == False:
self.connection_label.config(text="Disconnected")
self.error_label.config(text='')
return
self.connection_label.config(text=self.connectionInfo(reader))
err = reader.connection_error if reader.connection_error else ''
self.error_label.config(text=err)
def connectionInfo(self, reader):
if reader.multicast:
return 'Connected to '+str(reader.host)+'@'+str(reader.port)+' ('+reader.multicast+')'
return 'Connected to '+str(reader.host)+'@'+str(reader.port)
|
nilq/baby-python
|
python
|
import gmpy2
from gmpy2 import (
mpz,
powmod,
mul,
invert,
)
B = 2 ** 20
p = mpz('13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084171')
g = mpz('11717829880366207009516117596335367088558084999998952205599979459063929499736583746670572176471460312928594829675428279466566527115212748467589894601965568')
h = mpz('3239475104050450443565264378728065788649097520952449527834792452971981976143292558073856937958553180532878928001494706097394108577585732452307673444020333')
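# Meet-in-the-middle search for x in h = g^x (mod p), assuming x < B^2 = 2^40.
# Writing x = x0 * B + x1 with 0 <= x0, x1 <= B, the equation rearranges to
#   h * (g^x1)^(-1) = (g^B)^x0  (mod p),
# so the first loop below tabulates the left-hand side for every x1 and the
# second loop scans x0 until (g^B)^x0 matches a table entry.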
hash_table = dict()
for x_1 in range(B + 1):
key = mul(h, invert(powmod(g, x_1, p), p)) % p
value = x_1
hash_table[key] = value
base = powmod(g, B, p)
for x_0 in range(B + 1):
target = powmod(base, x_0, p)
if target in hash_table:
break
x = x_0 * B + hash_table[target]
print('x: {}'.format(x))
|
nilq/baby-python
|
python
|
# Monostate ("single state") pattern: all instances share one state dict
class Borg:
__shared_state = {"1":"2"}
def __init__(self):
self.x = 1
self.__dict__ = self.__shared_state
b = Borg()
b1 = Borg()
b.x = 4
print("Borg Object b:",b)
print("Borg Object b1:",b1)
print("Object state b:",b.__dict__)
print("Object state b1:",b1.__dict__)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 06:10:44 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
import gc, os
from collections import defaultdict
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
#import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
from glob import glob
import utils, utils_cat
utils.start(__file__)
#==============================================================================
SEED = 71
LOOP = 3
NROUND = 4680
FEATURE_SIZE = 700
SUBMIT_FILE_PATH = '../output/725-1.csv.gz'
COMMENT = f'CV auc-mean(7 fold): 0.804265 + 0.00358 round: {NROUND} all+nejumi'
EXE_SUBMIT = True
param = {
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 63,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.9,
'subsample': 0.9,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
# 'seed': SEED
}
np.random.seed(SEED)
# =============================================================================
# load train
# =============================================================================
imp = pd.read_csv('LOG/imp_801_imp_lgb_onlyMe.py-2.csv')
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
files = ('../feature/train_' + imp.head(FEATURE_SIZE).feature + '.f').tolist()
#files = utils.get_use_files(files, True)
X = pd.concat([
pd.read_feather(f) for f in tqdm(files, mininterval=60)
], axis=1)
y = utils.read_pickles('../data/label').TARGET
if X.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')
print('no dup :) ')
print(f'X.shape {X.shape}')
gc.collect()
CAT = list( set(X.columns)&set(utils_cat.ALL))
print(f'category: {CAT}')
keys = sorted([c.split('_')[0] for c in X.columns])
di = defaultdict(int)
for k in keys:
di[k] += 1
for k,v in di.items():
print(f'{k}: {v}')
dtrain = lgb.Dataset(X, y,
categorical_feature=CAT)
COL = X.columns.tolist()
X.head().to_csv(SUBMIT_FILE_PATH.replace('.csv', '_X.csv'),
index=False, compression='gzip')
del X, y; gc.collect()
# =============================================================================
# training
# =============================================================================
models = []
for i in range(LOOP):
print(f'LOOP: {i}')
gc.collect()
param.update({'seed':np.random.randint(9999)})
model = lgb.train(param, dtrain, NROUND,
categorical_feature=CAT)
# model.save_model(f'lgb{i}.model')
models.append(model)
del dtrain; gc.collect()
"""
models = []
for i in range(LOOP):
bst = lgb.Booster(model_file=f'lgb{i}.model')
models.append(bst)
imp = ex.getImp(models)
"""
# =============================================================================
# test
# =============================================================================
files = ('../feature/test_' + imp.head(FEATURE_SIZE).feature + '.f').tolist()
dtest = pd.concat([
pd.read_feather(f) for f in tqdm(files, mininterval=60)
], axis=1)[COL]
sub = pd.read_pickle('../data/sub.p')
gc.collect()
label_name = 'TARGET'
sub[label_name] = 0
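# Rank-average ensemble: each seed's predictions are converted to ranks before
# averaging, then rescaled into (0, 1]. AUC depends only on the ordering of the
# scores, so averaging ranks is a simple way to blend models whose raw outputs
# are calibrated differently.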
for model in models:
y_pred = model.predict(dtest)
sub[label_name] += pd.Series(y_pred).rank()
sub[label_name] /= LOOP
sub[label_name] /= sub[label_name].max()
sub['SK_ID_CURR'] = sub['SK_ID_CURR'].map(int)
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
print('submit')
utils.submit(SUBMIT_FILE_PATH, COMMENT)
#==============================================================================
utils.end(__file__)
|
nilq/baby-python
|
python
|
# flake8: noqa
'''
All step-related classes and factories
'''
from .base_steps import (
BaseStep,
BaseStepFactory,
BaseValidation,
)
from .steps import TestStep
from .outputs import OutputValueStep
from .steps_aggregator import StepsAggregator
from .validations import (
Validation,
XPathValidation,
URLValidation
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import math
# Respect seigot
class Waypoint:
way_point = 0
def __init__(self, path):
self.points = []
# self.way_point = 0
with open(path) as f:
lines = csv.reader(f)
for l in lines:
point = [float(i) for i in l]
# print(point)
self.points.append(point[0:3])
def get_next_waypoint(self):
Waypoint.way_point = Waypoint.way_point + 1
        # Ideally, from the second lap onward, target the spots already taken by the opponent.
if Waypoint.way_point == len(self.points):
Waypoint.way_point = 0
print('Next Lap')
return self.points[Waypoint.way_point][0:3]
def get_current_waypoint(self):
return self.points[Waypoint.way_point][0:3]
    # Perform the following behavior only when the enemy is judged to be nearby.
    # How should we handle returning to the preset route??
    # Just head for whichever coordinate is closest??
def get_enemy_waypoints(self):
pass
|
nilq/baby-python
|
python
|
import typing
from ariadne import SchemaDirectiveVisitor
from ariadne.types import GraphQLResolveInfo
from graphql import default_field_resolver
from pytimeparse import parse as parse_duration
from .utils.rate_limit import RateLimit, TooManyRequests
class DateDirective(SchemaDirectiveVisitor):
def visit_field_definition(self, field, object_type):
date_format = self.args.get("format")
original_resolver = field.resolve or default_field_resolver
def resolve_formatted_date(obj, info, **kwargs):
result = original_resolver(obj, info, **kwargs)
if result is None:
return None
if date_format:
return result.strftime(date_format)
return result.isoformat()
field.resolve = resolve_formatted_date
return field
class AuthDirective(SchemaDirectiveVisitor):
def visit_field_definition(self, field, object_type):
original_resolver = field.resolve or default_field_resolver
        def resolve_authenticated(obj, info, **kwargs):
if not info.context["request"].scope["user"].is_authenticated:
raise Exception("unauthenticated user")
result = original_resolver(obj, info, **kwargs)
return result
        field.resolve = resolve_authenticated
return field
class PermissionsDirective(SchemaDirectiveVisitor):
def visit_field_definition(self, field, object_type):
original_resolver = field.resolve or default_field_resolver
        def resolve_protected(obj, info, **kwargs):
if not info.context["request"].scope["user"].is_authenticated:
raise Exception("unauthenticated user")
result = original_resolver(obj, info, **kwargs)
return result
        field.resolve = resolve_protected
        return field
def visit_object(self, object_type):
return object_type
class RateLimitDirective(SchemaDirectiveVisitor):
def visit_field_definition(self, field, object_type):
max_ = self.args.get("max", 10)
window = parse_duration(self.args.get("window", "10m"))
message = self.args.get("message", "You are doing that too often.")
original_resolver = field.resolve or default_field_resolver
def resolve_rate_limited(obj: typing.Any, info: GraphQLResolveInfo, **kwargs):
if info.context["request"]["user"].is_authenticated:
client = info.context["request"].scope["user"].id
else:
ip_address, port = info.context["request"]["client"]
client = ip_address
try:
with RateLimit(
resource=info.field_name,
client=client,
max_requests=max_,
expire=window,
):
result = original_resolver(obj, info, **kwargs)
return result
except TooManyRequests:
raise TooManyRequests(message)
field.resolve = resolve_rate_limited
return field
directives = {
"date": DateDirective,
"auth": AuthDirective,
"permissions": PermissionsDirective,
"rateLimit": RateLimitDirective,
}
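# A minimal binding sketch (not part of this module), assuming an SDL that
# declares the matching directives, for example:
#
#     directive @date(format: String) on FIELD_DEFINITION
#     directive @auth on FIELD_DEFINITION
#     directive @rateLimit(max: Int, window: String, message: String) on FIELD_DEFINITION
#
# The mapping above would then be handed to ariadne when building the schema,
# e.g. make_executable_schema(type_defs, query_type, directives=directives),
# where type_defs and query_type are whatever the application already defines.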
|
nilq/baby-python
|
python
|
import random
import math
import operator
from collections import Counter, defaultdict
import twokenize
import peewee
from models import database, SMS, Contact
class NaiveBayes(object):
def __init__(self):
self.doccounts = Counter()
self.classcounts = Counter()
self.wordcounts = defaultdict(lambda: Counter())
self.vocab = set()
self.priors = {}
self._condprobs = defaultdict(lambda: dict())
def calculate_probs(self):
for c in self.doccounts:
self.priors[c] = (1.0 * self.doccounts[c]) / \
sum(self.doccounts.values())
def get_condprob(self, word, class_):
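        # Add-one (Laplace) smoothing: 1 is added to the word's count in this
        # class and the vocabulary size (plus one) is added to the class's
        # total word count, so words never seen in a class still get a small,
        # non-zero probability. Results are memoized in _condprobs.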
if not self._condprobs[word].get(class_):
num = self.wordcounts[class_].get(word, 0) + 1.0
denom = len(self.vocab) + 1.0 + \
sum(self.wordcounts[class_].values())
self._condprobs[word][class_] = num / denom
return self._condprobs[word][class_]
def classify(self, words):
if not self.priors:
self.calculate_probs()
score = {}
for c in self.priors:
score[c] = math.log(self.priors[c])
for w in words:
score[c] += math.log(self.get_condprob(w, c))
return max(score.iteritems(), key=operator.itemgetter(1))[0]
def add_example(self, klass, words):
self.doccounts[klass] += 1
self.vocab.update(words)
self.classcounts[klass] += len(words)
self.wordcounts[klass].update(words)
def split_set(s, SIZE):
a = set(random.sample(s, int(SIZE * len(s))))
b = s - a
return a, b
def split_me_not_me(TRAIN_SIZE=0.9):
train, test = {}, {}
not_me = SMS.select().where(from_me=False)
me = SMS.select().where(from_me=True)
not_me = set(not_me)
me = set(me)
train['me'], test['me'] = split_set(me, TRAIN_SIZE)
train['not_me'], test['not_me'] = split_set(not_me, TRAIN_SIZE)
return train, test
def recipient_is(name, TRAIN=0.9):
#: TRAIN = percent of the data to have in training set
train = {}
test = {}
person = Contact.get(name=name)
recipient = set(SMS.select().where(contact=person).where(from_me=False))
not_recipient = set(SMS.select().where(contact__ne=person)
.where(from_me=False))
train[person.name], test[person.name] = split_set(recipient, TRAIN)
train['not_' + person.name], test['not_' + person.name] = \
split_set(not_recipient, TRAIN)
return train, test
def people_with_many_texts(n, TRAIN=0.9):
# TRAIN = percent of data to have in training set
contacts = peewee.RawQuery(Contact, '''SELECT * from sms, contact
where from_me=0 and contact.id=contact_id GROUP BY contact_id
HAVING count(*) >= ?;''', n)
data = {}
for c in contacts:
data[c.name] = set(SMS.select().where(contact=c))
train = {}
test = {}
for c in data:
train[c], test[c] = split_set(data[c], TRAIN)
print 'There are %d people with >= %d texts.' % (len(data), n)
return train, test
def tokenize(words):
return twokenize.tokenize(words)
def build_classifier(train):
n = NaiveBayes()
for klass in train:
for sms in train[klass]:
n.add_example(klass, tokenize(sms.text))
n.calculate_probs()
# print 'PRIORS ARE', n.priors
print 'EXPECTED ACCURACY:', max(n.priors.values())
return n
def run_test(classifier, test):
correct = 0
incorrect = 0
for klass in test:
for sms in test[klass]:
classification = classifier.classify(tokenize(sms.text))
if classification == klass:
correct += 1
else:
incorrect += 1
accuracy = correct / float(correct + incorrect)
print 'Classified %d correctly and %d incorrectly for an accuracy of %f.' \
% (correct, incorrect, accuracy)
return accuracy
def run_naive_bayes(train, test):
classifier = build_classifier(train)
run_test(classifier, test)
def interactive(classifier):
try:
while True:
print 'CLASSIFY YOUR MESSAGE:'
text = raw_input('enter a text: ')
print 'result:', classifier.classify(tokenize(text))
print
except KeyboardInterrupt:
database.close()
if __name__ == '__main__':
database.connect()
train, test = split_me_not_me(0.9)
print 'ME AND NOT ME:'
run_naive_bayes(train, test)
threshold = 200
print
print 'PEOPLE WITH OVER %d TEXTS:' % threshold
run_naive_bayes(*people_with_many_texts(threshold))
print
# train, test = split_me_not_me(1.0)
# train, test = people_with_many_texts(threshold)
# classifier = build_classifier(train)
# interactive(classifier)
database.close()
|
nilq/baby-python
|
python
|
from LucidDynamodb import DynamoDb
from LucidDynamodb.exceptions import (
TableNotFound
)
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
try:
db = DynamoDb()
db.delete_table(table_name='dev_jobs')
logging.info("Table deleted successfully")
table_names = db.read_all_table_names()
logging.info(f"Table names: {table_names}")
except TableNotFound as e:
logging.error(f"Table delete operation failed {e}")
"""
dineshsonachalam@macbook examples % python 14-delete-a-table.py
INFO:botocore.credentials:Found credentials in environment variables.
INFO:root:Table deleted successfully
INFO:root:Table names: ['CertMagic', 'dev_test', 'kp-config-v1', 'test-1']
"""
|
nilq/baby-python
|
python
|
from .queries import *
|
nilq/baby-python
|
python
|
import Transformation
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
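# calc_cov_ellipse: given the entries of a symmetric 2x2 covariance matrix
# [[a, b], [b, d]], return 2*sqrt of each eigenvalue (the 2-sigma extents
# along the principal axes) and the rotation angle, in degrees, of the
# leading eigenvector; the plotting code below uses these to draw
# covariance ellipses.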
def calc_cov_ellipse(a, b, d):
s = np.array([[a, b], [b, d]])
(w, v) = np.linalg.eig(s)
angle = np.degrees(np.arctan2(v[1, 0], v[0, 0]))
return 2*np.sqrt(w[0]), 2*np.sqrt(w[1]), angle
class SubPlot:
def __init__(self, range, offset, rowspan=1, colspan=1):
self.range = range
self.offset = offset
self.rowspan = rowspan
self.colspan = colspan
def plot2grid(self, pos):
plt.subplot2grid(self.range,
[self.offset[0] + pos[0], self.offset[1] + pos[1]],
rowspan=self.rowspan, colspan=self.colspan)
def _plot_covariance(sub_plot, t_mu_cvec, t_cov, ts_cvec):
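    # Lay out a 6x6 grid of pairwise scatter plots of the sample deviations
    # from the mean (when samples are provided); every off-diagonal panel also
    # gets a covariance ellipse and red 2-sigma tick marks derived from t_cov.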
std = np.sqrt(np.diag(t_cov))
if ts_cvec is not None:
tnp_dev = ts_cvec - t_mu_cvec
if ts_cvec is None:
max_abs = 2.5 * std
else:
max_abs = np.max(np.abs(tnp_dev), axis=1)
for irow in range(6):
for icol in range(6):
sub_plot.plot2grid([irow, icol])
plt.xlim([-max_abs[icol], max_abs[icol]])
plt.ylim([-max_abs[irow], max_abs[irow]])
if ts_cvec is not None:
plt.plot(tnp_dev[icol, :], tnp_dev[irow, :], '.k')
ax = plt.gca()
plt.setp(ax.get_xticklabels(), visible=(irow == 5))
plt.setp(ax.get_yticklabels(), visible=(icol == 0))
# plot covariance ellipse
if icol != irow:
plt.plot(std[icol] * 2, 0., '.r')
plt.plot(0., std[irow] * 2, '.r')
width, height, angle = calc_cov_ellipse(t_cov[icol, icol], t_cov[irow, icol], t_cov[irow, irow])
ellipse = mpl.patches.Ellipse(xy=[0., 0.], width=width * 2, height=height * 2, angle=angle)
ax = plt.gca()
ax.add_artist(ellipse)
def plot_transformation_covariance(title, corners_f_image, t_mu_cvec, t_cov, ts_cvec):
fig = plt.figure()
fig.suptitle(title)
# The image representation
plt.subplot2grid([6, 8], [2, 0], rowspan=2, colspan=2)
plt.xlim([0, 920])
plt.ylim([700, 0])
# show the corners in the image
for i in range(4):
plt.plot(corners_f_image[i*2], corners_f_image[i*2+1], '.r')
# The covariances.
tnp = ts_cvec
means = t_mu_cvec
tnp_dev = tnp - means
std = np.sqrt(np.diag(t_cov))
np_std = np.std(tnp, axis=1)
maxs = np.max(tnp, axis=1)
mins = np.min(tnp, axis=1)
max_abs = np.max(np.abs(tnp_dev), axis=1)
means_label = ("t_world_xxx mu, std:\nroll {0[0]}, {1[0]}\npitch {0[1]}, {1[1]}\nyaw {0[2]}, {1[2]}\n" + \
"x {0[3]}, {1[3]}\ny {0[4]}, {1[4]}\nz {0[5]}, {1[5]}").format(means, std)
plt.subplot2grid([6, 8], [0, 0])
plt.axis([0, 1, 0, 1])
plt.text(0, 0.75, means_label, verticalalignment='top')
ax = plt.gca()
ax.set_axis_off()
_plot_covariance(SubPlot([6, 8], [0, 2]), t_mu_cvec, t_cov, ts_cvec)
# for irow in range(6):
# for icol in range(6):
# plt.subplot2grid([6, 8], [irow, icol+2])
# if True:
# plt.xlim([-max_abs[icol], max_abs[icol]])
# plt.ylim([-max_abs[irow], max_abs[irow]])
# plt.plot(tnp_dev[icol, :], tnp_dev[irow, :], '.k')
# else:
# plt.xlim([mins[icol], maxs[icol]])
# plt.ylim([mins[irow], maxs[irow]])
# plt.plot(tnp[icol, :], tnp[irow, :], '.k')
#
# ax = plt.gca()
# plt.setp(ax.get_xticklabels(), visible=(irow == 5))
# plt.setp(ax.get_yticklabels(), visible=(icol == 0))
#
# plt.plot(std[icol], 0., '.r')
# plt.plot(0., std[irow], '.r')
#
# # plot covariance ellipse
# if icol != irow:
# ax = plt.gca()
# width = 2. * std[icol]
# height = 2. * std[irow]
# width, height, angle = calc_cov_ellipse(t_cov[icol, icol], t_cov[irow, icol], t_cov[irow, irow])
# ellipse = mpl.patches.Ellipse(xy=[0., 0.], width=width, height=height, angle=angle)
# ax.add_artist(ellipse)
plt.show()
def _plot_view(sub_plot, corners_f_images):
sub_plot.plot2grid([0, 0])
plt.xlim([0, 920])
plt.ylim([700, 0])
# show the corners in the image
for i in range(len(corners_f_images)):
for r in range(4):
plt.plot(corners_f_images[i][r*2], corners_f_images[i][r*2+1], '.r')
def _plot_std_values(sub_plot, cov):
sub_plot.plot2grid([0, 0])
std = np.sqrt(np.diag(cov))
cov_label = ("std:\nroll {:5f}\npitch {:5f}\nyaw {:5f}\n" +
"x {:5f}\ny {:5f}\nz {:5f}").format(std[0], std[1], std[2],
std[3], std[4], std[5])
plt.axis([0, 1, 0, 1])
plt.text(0, 0.75, cov_label, verticalalignment='top')
ax = plt.gca()
ax.set_axis_off()
def plot_view_and_covariance(title, corners_f_images, com, do_show=True):
fig = plt.figure()
fig.suptitle(title)
_plot_view(SubPlot([6, 8], [2, 0], rowspan=2, colspan=2), corners_f_images)
_plot_std_values(SubPlot([6, 8], [0, 0]), com.cov)
_plot_covariance(SubPlot([6, 8], [0, 2]), com.mu, com.cov, com.samples)
if do_show:
        plt.show()
|
nilq/baby-python
|
python
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from common.models import CommonData, ErrorMessages
from jobs.models import JobOffer, Profile
class Comment(CommonData):
model_name = 'Comment'
profile: Profile = models.ForeignKey(
to=Profile,
on_delete=models.PROTECT,
error_messages=ErrorMessages.get_field(
model=model_name, field='profile_id')
)
job_offer: JobOffer = models.ForeignKey(
to=JobOffer,
on_delete=models.PROTECT,
error_messages=ErrorMessages.get_field(
model=model_name, field='job_offer_id')
)
description: str = models.TextField(
verbose_name=_('Description'),
error_messages=ErrorMessages.get_field(
model=model_name, field='description')
)
def __str__(self):
name: str = self.description[:30]
return f'{self.profile} - {self.job_offer} : {name}'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-#
# MIT License
#
# Copyright (c) 2019 Pim Witlox
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from datetime import timedelta, datetime
from random import shuffle
from dateutil import parser
class Scheduler(object):
"""
Simple Scheduler Mechanism
"""
logger = logging.getLogger(__name__)
def __init__(self, storage, staleness):
"""
Our simplistic CronJob scheduler
:param storage: storage class
:param staleness: amount of seconds of non-communication to declare a node as stale
"""
self.storage = storage
self.staleness = staleness
def active_nodes(self):
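        # Yield every node known to storage; a node that has not reported
        # within `staleness` seconds is marked as disconnected before being
        # yielded, so callers still see it together with its updated state.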
for node in self.storage.cluster_state():
if datetime.utcnow() - parser.parse(node.time) < timedelta(seconds=self.staleness):
yield node
else:
node.state = 'disconnected'
yield node
def check_cluster_state(self):
"""
check cluster state
:return False if invalid otherwise True
"""
left = list(self.storage.cluster_state())
right = list(self.active_nodes())
inactive_nodes = [i for i in left + right if i not in left or i not in right]
for job in self.storage.cluster_jobs:
if not job.assigned_to:
self.logger.info("detected unassigned job ({0})".format(job.command))
self.re_balance()
return False
            if job.assigned_to in [node.ip for node in inactive_nodes]:
self.logger.warning("detected job ({0}) on inactive node".format(job.command))
self.re_balance()
return False
return True
def re_balance(self):
"""
Redistribute CronJobs over the cluster
"""
def partition(lst, keys):
"""
divide a list over a given set of keys
:param lst: list to split in roughly equals chunks
:param keys: keys for the chunks
:return: dictionary of keys with list chunks
"""
shuffle(lst)
return {keys[i]: lst[i::len(keys)] for i in range(len(keys))}
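        # For illustration: partition([j1, j2, j3], [n1, n2]) yields something
        # like {n1: [j1, j3], n2: [j2]}; which jobs land on which node varies
        # because the list is shuffled first.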
def first_key_by_value(dct, jb):
"""
find the first key in a dictionary where jb is in the values
:param dct: dictionary to analyse
:param jb: value to search for
:return: key or None
"""
for n, jbs in dct.items():
if jb in jbs:
return n
return None
nodes = [n for n in self.active_nodes()]
jobs = list(self.storage.cluster_jobs)
partitions = partition(jobs, nodes)
for job in jobs:
node = first_key_by_value(partitions, job)
if not node:
self.logger.error("could not find node assignment for job {0}".format(job))
else:
self.logger.info("assigning job {0} to node {1}".format(job, node.ip))
job.assigned_to = node.ip
self.storage.cluster_jobs = jobs
|
nilq/baby-python
|
python
|
from hello.hello import print_hello
from world.world import print_world
def main():
print_hello()
print_world()
return
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from django.contrib.sites.models import Site
from .settings import CMS_TEMPLATES
from django.contrib.auth.models import User
# Default page settings. Not used for installation.
# Content for adding a page
# Still under development
title = 'Django CMS setup'
description = 'Open Source programming at its best'
template = CMS_TEMPLATES[0][0]
language = 'en'
menu_title = title
slug = title.lower().replace(" ", "-")
meta_description = description
created_by = User.objects.get(id=1).get_full_name()
in_navigation = True
published = True
site = Site.objects.get(id=1)
xframe_options = 3
page_title = title
image_path = 'site_server/static/site-images/'
# Content can be added here for initial setup
blogs = {
'Blog1': {
'title':'Manage',
'subtitle': 'This content was added with the setup_content_server.py script',
'abstract': '<section class="features-section-8 relative background-light"><div class="container"><div class="row section-separator"><div class="col-md-12"><p class="mb-4"><img alt="" class="img-fluid" src="/static/images/background-1.jpg" width="96%" /></p><p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Reiciendis, eius mollitia suscipit, quisquam doloremque distinctio perferendis et doloribus unde architecto optio laboriosam porro adipisci sapiente officiis nemo accusamus ad praesentium? Esse minima nisi et. Dolore perferendis, enim praesentium omnis, iste doloremque quia officia optio deserunt molestiae voluptates soluta architecto tempora.</p><p>Molestiae cupiditate inventore animi, maxime sapiente optio, illo est nemo veritatis repellat sunt doloribus nesciunt! Minima laborum magni reiciendis qui voluptate quisquam voluptatem soluta illo eum ullam incidunt rem assumenda eveniet eaque sequi deleniti tenetur dolore amet fugit perspiciatis ipsa, odit. Nesciunt dolor minima esse vero ut ea, repudiandae suscipit!</p><h2 class="mb-3 mt-5">Molestiae cupiditate inventore animi, maxime sapiente optio</h2><p>Temporibus ad error suscipit exercitationem hic molestiae totam obcaecati rerum, eius aut, in. Exercitationem atque quidem tempora maiores ex architecto voluptatum aut officia doloremque. Error dolore voluptas, omnis molestias odio dignissimos culpa ex earum nisi consequatur quos odit quasi repellat qui officiis reiciendis incidunt hic non? Debitis commodi aut, adipisci.</p><p>Quisquam esse aliquam fuga distinctio, quidem delectus veritatis reiciendis. Nihil explicabo quod, est eos ipsum. Unde aut non tenetur tempore, nisi culpa voluptate maiores officiis quis vel ab consectetur suscipit veritatis nulla quos quia aspernatur perferendis, libero sint. Error, velit, porro. Deserunt minus, quibusdam iste enim veniam, modi rem maiores.</p><p>Odit voluptatibus, eveniet vel nihil cum ullam dolores laborum, quo velit commodi rerum eum quidem pariatur! Quia fuga iste tenetur, ipsa vel nisi in dolorum consequatur, veritatis porro explicabo soluta commodi libero voluptatem similique id quidem? Blanditiis voluptates aperiam non magni. Reprehenderit nobis odit inventore, quia laboriosam harum excepturi ea.</p><p>Adipisci vero culpa, eius nobis soluta. Dolore, maxime ullam ipsam quidem, dolor distinctio similique asperiores voluptas enim, exercitationem ratione aut adipisci modi quod quibusdam iusto, voluptates beatae iure nemo itaque laborum. Consequuntur et pariatur totam fuga eligendi vero dolorum provident. Voluptatibus, veritatis. Beatae numquam nam ab voluptatibus culpa, tenetur recusandae!</p><p>Voluptas dolores dignissimos dolorum temporibus, autem aliquam ducimus at officia adipisci quasi nemo a perspiciatis provident magni laboriosam repudiandae iure iusto commodi debitis est blanditiis alias laborum sint dolore. Dolores, iure, reprehenderit. Error provident, pariatur cupiditate soluta doloremque aut ratione. Harum voluptates mollitia illo minus praesentium, rerum ipsa debitis, inventore?</p><div class="tag-widget post-tag-container mb-5 mt-5"> </div></div></div></div></section>',
'image':'one.jpg',
'image_path': image_path,
},
'Blog2': {
'title': 'Django CMS',
'subtitle': 'Now point and click and edit me',
'abstract': '<section class="features-section-8 relative background-light"><div class="container"><div class="row section-separator"><div class="col-md-12"><p class="mb-4"><img alt="" class="img-fluid" src="/static/images/background-2.jpg" width="96%" /></p><p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Reiciendis, eius mollitia suscipit, quisquam doloremque distinctio perferendis et doloribus unde architecto optio laboriosam porro adipisci sapiente officiis nemo accusamus ad praesentium? Esse minima nisi et. Dolore perferendis, enim praesentium omnis, iste doloremque quia officia optio deserunt molestiae voluptates soluta architecto tempora.</p><p>Molestiae cupiditate inventore animi, maxime sapiente optio, illo est nemo veritatis repellat sunt doloribus nesciunt! Minima laborum magni reiciendis qui voluptate quisquam voluptatem soluta illo eum ullam incidunt rem assumenda eveniet eaque sequi deleniti tenetur dolore amet fugit perspiciatis ipsa, odit. Nesciunt dolor minima esse vero ut ea, repudiandae suscipit!</p><h2 class="mb-3 mt-5">Molestiae cupiditate inventore animi, maxime sapiente optio</h2><p>Temporibus ad error suscipit exercitationem hic molestiae totam obcaecati rerum, eius aut, in. Exercitationem atque quidem tempora maiores ex architecto voluptatum aut officia doloremque. Error dolore voluptas, omnis molestias odio dignissimos culpa ex earum nisi consequatur quos odit quasi repellat qui officiis reiciendis incidunt hic non? Debitis commodi aut, adipisci.</p><p>Quisquam esse aliquam fuga distinctio, quidem delectus veritatis reiciendis. Nihil explicabo quod, est eos ipsum. Unde aut non tenetur tempore, nisi culpa voluptate maiores officiis quis vel ab consectetur suscipit veritatis nulla quos quia aspernatur perferendis, libero sint. Error, velit, porro. Deserunt minus, quibusdam iste enim veniam, modi rem maiores.</p><p>Odit voluptatibus, eveniet vel nihil cum ullam dolores laborum, quo velit commodi rerum eum quidem pariatur! Quia fuga iste tenetur, ipsa vel nisi in dolorum consequatur, veritatis porro explicabo soluta commodi libero voluptatem similique id quidem? Blanditiis voluptates aperiam non magni. Reprehenderit nobis odit inventore, quia laboriosam harum excepturi ea.</p><p>Adipisci vero culpa, eius nobis soluta. Dolore, maxime ullam ipsam quidem, dolor distinctio similique asperiores voluptas enim, exercitationem ratione aut adipisci modi quod quibusdam iusto, voluptates beatae iure nemo itaque laborum. Consequuntur et pariatur totam fuga eligendi vero dolorum provident. Voluptatibus, veritatis. Beatae numquam nam ab voluptatibus culpa, tenetur recusandae!</p><p>Voluptas dolores dignissimos dolorum temporibus, autem aliquam ducimus at officia adipisci quasi nemo a perspiciatis provident magni laboriosam repudiandae iure iusto commodi debitis est blanditiis alias laborum sint dolore. Dolores, iure, reprehenderit. Error provident, pariatur cupiditate soluta doloremque aut ratione. Harum voluptates mollitia illo minus praesentium, rerum ipsa debitis, inventore?</p><div class="tag-widget post-tag-container mb-5 mt-5"> </div></div></div></div></section>',
'image': 'two.jpg',
'image_path': image_path,
},
'Blog3': {
'title': 'Django Blog',
'subtitle': 'All content is a blog post and comments can be enabled',
'abstract': '<section class="features-section-8 relative background-light"><div class="container"><div class="row section-separator"><div class="col-md-12"><p class="mb-4"><img alt="" class="img-fluid" src="/static/images/background-4.jpg" width="96%" /></p><p>Lorem ipsum dolor sit amet, consectetur adipisicing elit. Reiciendis, eius mollitia suscipit, quisquam doloremque distinctio perferendis et doloribus unde architecto optio laboriosam porro adipisci sapiente officiis nemo accusamus ad praesentium? Esse minima nisi et. Dolore perferendis, enim praesentium omnis, iste doloremque quia officia optio deserunt molestiae voluptates soluta architecto tempora.</p><p>Molestiae cupiditate inventore animi, maxime sapiente optio, illo est nemo veritatis repellat sunt doloribus nesciunt! Minima laborum magni reiciendis qui voluptate quisquam voluptatem soluta illo eum ullam incidunt rem assumenda eveniet eaque sequi deleniti tenetur dolore amet fugit perspiciatis ipsa, odit. Nesciunt dolor minima esse vero ut ea, repudiandae suscipit!</p><h2 class="mb-3 mt-5">Molestiae cupiditate inventore animi, maxime sapiente optio</h2><p>Temporibus ad error suscipit exercitationem hic molestiae totam obcaecati rerum, eius aut, in. Exercitationem atque quidem tempora maiores ex architecto voluptatum aut officia doloremque. Error dolore voluptas, omnis molestias odio dignissimos culpa ex earum nisi consequatur quos odit quasi repellat qui officiis reiciendis incidunt hic non? Debitis commodi aut, adipisci.</p><p>Quisquam esse aliquam fuga distinctio, quidem delectus veritatis reiciendis. Nihil explicabo quod, est eos ipsum. Unde aut non tenetur tempore, nisi culpa voluptate maiores officiis quis vel ab consectetur suscipit veritatis nulla quos quia aspernatur perferendis, libero sint. Error, velit, porro. Deserunt minus, quibusdam iste enim veniam, modi rem maiores.</p><p>Odit voluptatibus, eveniet vel nihil cum ullam dolores laborum, quo velit commodi rerum eum quidem pariatur! Quia fuga iste tenetur, ipsa vel nisi in dolorum consequatur, veritatis porro explicabo soluta commodi libero voluptatem similique id quidem? Blanditiis voluptates aperiam non magni. Reprehenderit nobis odit inventore, quia laboriosam harum excepturi ea.</p><p>Adipisci vero culpa, eius nobis soluta. Dolore, maxime ullam ipsam quidem, dolor distinctio similique asperiores voluptas enim, exercitationem ratione aut adipisci modi quod quibusdam iusto, voluptates beatae iure nemo itaque laborum. Consequuntur et pariatur totam fuga eligendi vero dolorum provident. Voluptatibus, veritatis. Beatae numquam nam ab voluptatibus culpa, tenetur recusandae!</p><p>Voluptas dolores dignissimos dolorum temporibus, autem aliquam ducimus at officia adipisci quasi nemo a perspiciatis provident magni laboriosam repudiandae iure iusto commodi debitis est blanditiis alias laborum sint dolore. Dolores, iure, reprehenderit. Error provident, pariatur cupiditate soluta doloremque aut ratione. Harum voluptates mollitia illo minus praesentium, rerum ipsa debitis, inventore?</p><div class="tag-widget post-tag-container mb-5 mt-5"> </div></div></div></div></section>',
'image': 'three.jpg',
'image_path': image_path,
},
}
# Useful when adding images from the front end
image_sizes = {
'Small': {
'width': '400',
'height': '300',
},
'Medium': {
'width': '800',
'height': '600',
},
'Large': {
'width': '1024',
'height': '768',
}
}
developer = {
'first_name': 'Jody',
'last_name': 'Beggs',
}
AllowedSearchDomains = {
'nationalgeographic.com': {
'class_names': '',
'id_names': 'article__body'
},
'en.wikipedia.org': {
'class_names': '',
'id_names': 'mw-content-text'
},
'spaceplace.nasa.gov': {
'class_names': '',
'id_names': 'bodyContent'
},
'www.britannica.com': {
'class_names': '',
'id_names': 'ref1'
},
'www.space.com': {
'class_names': 'content-wrapper',
'id_names': ''
},
'www.sciencealert.com': {
'class_names': 'responsive-articlepage',
'id_names': ''
},
'spacecenter.org': {
'class_names': 'single-post format-standard',
'id_names': ''
},
'www.livescience.com': {
'class_names': 'content-wrapper',
'id_names': ''
},
'phys.org': {
'class_names': 'news-article',
'id_names': ''
},
'www.dw.com': {
'class_names': '',
'id_names': 'bodyContent'
},
'www.sun.org': {
'class_names': 'white-field',
'id_names': ''
},
'lco.global': {
'class_names': 'section maincontent',
'id_names': ''
},
'edition.cnn.com': {
'class_names': 'pg-rail-tall__body',
'id_names': ''
},
'www.bbc.com': {
'class_names': 'column--primary',
'id_names': ''
},
'www.nytimes.com': {
'class_names': 'StoryBodyCompanionColumn',
'id_names': ''
},
}
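# Hedged usage sketch (not part of the original module): AllowedSearchDomains maps a
# whitelisted domain to the class or id that wraps the article body, so a scraper can
# limit extraction to that element. extract_article_body is a hypothetical helper added
# for illustration, and requests/BeautifulSoup are assumed dependencies.
from urllib.parse import urlparse

import requests
from bs4 import BeautifulSoup

def extract_article_body(url):
    domain = urlparse(url).netloc
    selectors = AllowedSearchDomains.get(domain)
    if selectors is None:
        return None  # domain is not whitelisted
    soup = BeautifulSoup(requests.get(url, timeout=10).text, 'html.parser')
    if selectors['id_names']:
        node = soup.find(id=selectors['id_names'])
    elif selectors['class_names']:
        # entries like 'single-post format-standard' hold several classes,
        # so build a CSS selector instead of using find(class_=...)
        node = soup.select_one('.' + '.'.join(selectors['class_names'].split()))
    else:
        node = None
    return node.get_text(' ', strip=True) if node else None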
|
nilq/baby-python
|
python
|
from .shared import replace_gender
#TODO At some point, I want to be able to pass only part of the subject tree
# to child snippets.
class Snippet(object):
''' The base snippet class that all snippets will extend.
Responsible for listing required and optional subject data, validating a
passed subject, and generating a piece of text whose zero or more tokens are
filled in dynamically. '''
# Data names, in xpath format, that are recognized by this snippet
supportedData = []
# Tokens that this snippet will replace
ownTokens = []
def __init__(self, subject, ancestorTokens=None, children=None):
''' Sets up a new snippet instance. '''
# Each snippet has a list of tokens that it will replace. This base class
# simply adds the parent's supported tokens to that list. Any tokens that
# will be replaced at this level should be appended to the tokens list in
# the child class's constructor.
# Use None defaults to avoid sharing mutable default arguments between instances.
self.ancestorTokens = ancestorTokens if ancestorTokens is not None else []
self.children = {}
# Each instance has a subject
self.subject = subject
# Each instance has a dict of tokens that are overridden by child snippets.
for token, child in (children or {}).items():
# If the token is actually supported, copy it to the instance
if token in self.ownTokens:
self.children[token] = child
else:
raise ValueError("Token, {}, not supported.".format(token))
#TODO this should be a class method or static method or something because it
# will be called before construction presumably.
def is_compatible(cls, subject):
''' Returns boolean whether the subject is compatible with this snippet. '''
raise NotImplementedError("Abstract method, 'is_compatible' not implemented in child class.")
def generate_text(self):
raise NotImplementedError("Abstract method, 'generate_text' not implemented in child class.")
def render(self):
''' Primary method for actually constructing the final text.
Replaces tokens with text from child snippets if they are provided, or
the default snippet handler if not. Replacement happens in the order
specified in the snippet's ownTokens list. '''
# Generate text
self.generate_text()
# Replace tokens
for token in self.ownTokens:
if '/' + token in self.text: # Only replace the token if it actually needs replacing.
if token in self.children:
# This is for nesting snippets. I don't think this line has ever actually been tested.
self.text = self.text.replace('/' + token, self.children[token].render())
else:
self.text = self.text.replace('/' + token, getattr(self, 'token_' + token)())
#TODO Figure out the best way to handle the gender pronouns. For now they
# are handled here in the base class. Idea: gender could be a separate
# snippet, and depending which token is being replaced, it will return a
# different pronoun. Or it could be a bunch of different snippets
self.text = replace_gender(self.text, self.subject['gender'])
# Return fully rendered text
return self.text
def get_all_tokens(self):
''' Returns a list of all tokens that are safe to use in this snippet. A safe token
is any token that is replaced directly by this snippet or guaranteed to be
replaced by one of its ancestors.'''
return self.ownTokens + self.ancestorTokens
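# Hedged illustration (not part of the original module): a minimal concrete subclass
# showing how supportedData, ownTokens, generate_text and a token_* handler fit
# together. AgeSnippet and the subject keys 'name', 'age' and 'gender' are assumptions
# for the example; render() requires subject['gender'] for replace_gender, so the
# example subject includes it.
class AgeSnippet(Snippet):
    supportedData = ['name', 'age', 'gender']
    ownTokens = ['age']

    def is_compatible(cls, subject):
        # Mirrors the base-class signature; the TODO above suggests this should
        # eventually become a classmethod.
        return 'age' in subject and 'name' in subject

    def generate_text(self):
        # render() later substitutes '/age' via token_age below.
        self.text = "{} is /age years old.".format(self.subject['name'])

    def token_age(self):
        return str(self.subject['age'])

# Example:
#   AgeSnippet({'name': 'Ada', 'age': 28, 'gender': 'female'}).render()
# would yield "Ada is 28 years old." once gender pronouns are substituted.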
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
###############################################################
#### Assembled by: Ian M. Pendleton ######################
#### www.pendletonian.com ######################
###############################################################
# Updated December 17, 2019
### This program is designed to take all of the bonded atoms
### to the metal center and find the sterimol parameters of those
### bonded atoms. This program assumes that the vector of interest
### is from the metal center toward the phosphine (or whatever).
############################
### Be sure to adjust the metal center label and the distance cutoff in the variables
### section below. Default is to ignore hydrides (no sterimol information)
##module list###
import sys
import os
import argparse
import numpy as np
from openbabel import pybel
from sterimoltools import *
from tqdm import tqdm
#### variables ####
directory=os.path.dirname(os.path.abspath(__file__))
### User variables ###
METAL = 27 #atomic number of metal center in question for calculating tau
HYDRO_BOOL = 0 #consider hydrogens bound to the metal center or not?
DISTANCE_THRESHHOLD = 2.8 #Sets the cutoff for considering an atom "bound" that Open Babel doesn't see bound
########File Handling - Program operates on all available XYZ files in directory#########
obatom = None
bondedlist={}
##output bonded atoms ###
def bite_angles(mol1, d_cutoff):
''' calculates bite angles for 1-M-2 and 3-M-4 ligands where atoms 1-4 are determined based on d_cutoff
rigid and ungeneralized, careful on use!
:params mol1: pybel molecule object (typically generated from readfile function)
:params d_cutoff: value of distance cutoff in angstroms
:returns: tuple (1-M-2 angle, 3-M-4 angle)
'''
y=[]
for atom in mol1:
if atom.atomicnum == METAL:
global obatom
obatom = atom.OBAtom
for atom2 in mol1:
N = atom2.OBAtom
if N.GetDistance(obatom) < d_cutoff:
if atom2.atomicnum != METAL:
if atom2.atomicnum != 1:
y.append(N)
#TODO: generalize to find relevant(tm) angles -- hard generalization...
return (y[0].GetAngle(obatom, y[1]), y[2].GetAngle(obatom, y[3]))
def atomsbonded(mol1, d_cutoff):
''' finds atoms proximal to the metal center returns as a list
:params mol1: pybel molecule object (typically generated from readfile function)
:params d_cutoff: value of distance cutoff in angstroms
:returns: list of atom numbers (from specified input file) [M, atom1, atom2,... n]
'''
y=[]
for atom in mol1:
if atom.atomicnum == METAL:
global obatom
obatom = atom.OBAtom
y.append(atom.idx)
for atom2 in mol1:
N = atom2.OBAtom
if N.GetDistance(obatom) < d_cutoff:
if atom2.atomicnum != METAL:
if atom2.atomicnum != 1:
y.append(atom2.idx)
return y
def run_sterimol(file, atom1, atom2):
radii = 'bondi'
file_Params = calcSterimol(file, radii, atom1, atom2, True)
lval = file_Params.lval; B1 = file_Params.B1; B5 = file_Params.newB5
with open('sterimol_values.csv', 'a') as f:
print(file, ', L:,', "%.2f" % lval, ", B1:,", "%.2f" % B1, ", B5:,", "%.2f" % B5, "\n", file=f)
# print >>f, file.ljust(22),"%.2f".rjust(9) % lval,", L","%.2f".rjust(9) % B1,"B1","%.2f".rjust(9) % B5,"B5"
def main_pipeline(mol_obj, d_cutoff, file):
# Generate the bite angles (angle of ligands on specified metal)
#TODO: generalize for all metal bonded angles
angle_1, angle_2 = bite_angles(mol_obj, d_cutoff) # hard coded for specific 2 angle return (add more above!)
with open('biteangle_values.csv', 'a') as myfile:
print(file, ", 1-M-2:,", angle_1, ", 3-M-4:,", angle_2, "\n", file=myfile)
bonded_atom_list = atomsbonded(mol_obj, d_cutoff)
count = 1
while count < len(bonded_atom_list):
run_sterimol(file, bonded_atom_list[0], bonded_atom_list[count])
count+=1
# generate list of pairs from original metal center
#for atom in bonded_atom_list
if __name__ == "__main__":
lst=os.listdir(directory)
lst.sort()
xyz_list = []
for file in lst:
if file.endswith(".xyz"):
xyz_list.append(file)
for file in xyz_list:
if os.stat(file).st_size == 0:
print(file, "0 0")
else:
molecule_obj = next(pybel.readfile("xyz", file))
main_pipeline(molecule_obj, DISTANCE_THRESHHOLD, file)
print("Operation completed successfully, please check output files")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from setuptools import setup
from setuptools.command.install import install as _install
class install(_install):
def pre_install_script(self):
pass
def post_install_script(self):
pass
def run(self):
self.pre_install_script()
_install.run(self)
self.post_install_script()
if __name__ == '__main__':
setup(
name = 'task-mapper',
version = '1.0',
description = 'Task Mapper',
long_description = 'distribute tasks performed on many files on either threads or processes',
author = "Pontus Pohl",
author_email = "pontus.pohl@gmail.com",
license = '',
url = '',
scripts=['task-mapper/scripts/task-mapper'],
py_modules = [],
classifiers = [
'Development Status :: Alpha',
'Environment :: Python',
'Intended Audience :: Envac',
'Programming Language :: Python',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Monitoring',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
],
entry_points = {},
package_data = {},
install_requires = [],
dependency_links = [],
zip_safe=True,
cmdclass={'install': install},
)
|
nilq/baby-python
|
python
|
bg_black = "\u001b[48;5;0m"
bg_gray = "\u001b[48;5;8m"
bg_red = "\u001b[48;5;9m"
bg_green = "\u001b[48;5;10m"
bg_yellow = "\u001b[48;5;11m"
bg_blue = "\u001b[48;5;12m"
bg_purple = "\u001b[48;5;13m"
bg_cyan = "\u001b[48;5;14m"
bg_white = "\u001b[48;5;15m"
def customColor(number):
    # end="" keeps the escape sequence from emitting a stray newline
    print(f"\u001b[48;5;{number}m", end="")
|
nilq/baby-python
|
python
|