# Copyright (C) NVIDIA CORPORATION. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging as _logging
import sys
import threading
import warnings
from contextlib import contextmanager
# from nemo.constants import NEMO_ENV_VARNAME_SAVE_LOGS_TO_DIR
from nemo.constants import NEMO_ENV_VARNAME_REDIRECT_LOGS_TO_STDERR, NEMO_ENV_VARNAME_TESTING
from nemo.utils.env_var_parsing import get_envbool, get_envint
from nemo.utils.formatters.base import BaseNeMoFormatter, DebugNeMoFormatter
from nemo.utils.metaclasses import Singleton
__all__ = ["Logger", "LogMode"]
class LogMode(enum.IntEnum):
EACH = 0 # Log the message each time
ONCE = 1 # Log the message only once. The same message will not be logged again.
class Logger(metaclass=Singleton):
# Level 0
NOTSET = _logging.NOTSET
# Level 10
DEBUG = _logging.DEBUG
# Level 20
INFO = _logging.INFO
# Level 30
WARNING = _logging.WARNING
# Level 40
ERROR = _logging.ERROR
# Level 50
CRITICAL = _logging.CRITICAL
_level_names = {
0: "NOTSET",
10: "DEBUG",
20: "INFO",
30: "WARNING",
40: "ERROR",
50: "CRITICAL",
}
def __init__(self):
self._logger = None
# Multi-GPU runs execute in separate processes, so thread locks shouldn't be needed
self._logger_lock = threading.Lock()
self._handlers = dict()
self.old_warnings_showwarning = None
self._define_logger()
self.once_logged = set()
def _define_logger(self):
# Use double-checked locking to avoid taking lock unnecessarily.
if self._logger is not None:
return self._logger
with self._logger_lock:
try:
self._logger = _logging.getLogger("nemo_logger")
# By default, silence all loggers except the logger for rank 0
self.remove_stream_handlers()
if get_envbool(NEMO_ENV_VARNAME_TESTING, False):
old_factory = _logging.getLogRecordFactory()
def record_factory(*args, **kwargs):
record = old_factory(*args, **kwargs)
record.rank = get_envint("RANK", 0)
return record
_logging.setLogRecordFactory(record_factory)
self.add_stream_handlers(formatter=DebugNeMoFormatter)
elif get_envint("RANK", 0) == 0:
self.add_stream_handlers()
finally:
self.set_verbosity(verbosity_level=Logger.INFO)
self._logger.propagate = False
def remove_stream_handlers(self):
if self._logger is None:
raise RuntimeError("Impossible to set handlers if the Logger is not predefined")
# ======== Remove Handler if already existing ========
try:
self._logger.removeHandler(self._handlers["stream_stdout"])
except KeyError:
pass
try:
self._logger.removeHandler(self._handlers["stream_stderr"])
except KeyError:
pass
def add_stream_handlers(self, formatter=BaseNeMoFormatter):
if self._logger is None:
raise RuntimeError("Impossible to set handlers if the Logger is not predefined")
# Add the output handler.
if get_envbool(NEMO_ENV_VARNAME_REDIRECT_LOGS_TO_STDERR, False):
self._handlers["stream_stdout"] = _logging.StreamHandler(sys.stderr)
else:
self._handlers["stream_stdout"] = _logging.StreamHandler(sys.stdout)
self._handlers["stream_stdout"].addFilter(lambda record: record.levelno <= _logging.INFO)
self._handlers["stream_stderr"] = _logging.StreamHandler(sys.stderr)
self._handlers["stream_stderr"].addFilter(lambda record: record.levelno > _logging.INFO)
self._handlers["stream_stdout"].setFormatter(formatter())
self._logger.addHandler(self._handlers["stream_stdout"])
try:
self._handlers["stream_stderr"].setFormatter(formatter())
self._logger.addHandler(self._handlers["stream_stderr"])
except KeyError:
pass
def reset_stream_handler(self, formatter=BaseNeMoFormatter):
self.remove_stream_handlers()
self.add_stream_handlers(formatter=formatter)
def add_file_handler(self, log_file):
if self._logger is None:
raise RuntimeError("Impossible to set handlers if the Logger is not predefined")
self._handlers["file"] = _logging.FileHandler(log_file)
formatter = BaseNeMoFormatter
self._handlers["file"].setFormatter(formatter())
self._logger.addHandler(self._handlers["file"])
def getEffectiveLevel(self):
"""Return how much logging output will be produced."""
if self._logger is not None:
return self._logger.getEffectiveLevel()
def get_verbosity(self):
return self.getEffectiveLevel()
def setLevel(self, verbosity_level):
"""Sets the threshold for what messages will be logged."""
if self._logger is not None:
self._logger.setLevel(verbosity_level)
for handler in self._logger.handlers:
handler.setLevel(verbosity_level)
def set_verbosity(self, verbosity_level):
self.setLevel(verbosity_level)
@contextmanager
def patch_stderr_handler(self, stream):
""" Useful for unittests
"""
if self._logger is not None:
try:
old_stream = self._handlers["stream_stderr"].stream
if old_stream is None:
raise ValueError
# Backport of set_stream() from Python 3.7
self._handlers["stream_stderr"].acquire()
try:
self._handlers["stream_stderr"].flush()
self._handlers["stream_stderr"].stream = stream
finally:
self._handlers["stream_stderr"].release()
yield stream
except (KeyError, ValueError):
raise RuntimeError("Impossible to patch logging handlers if handler does not exist")
finally:
# Backport of set_stream() from Python 3.7
self._handlers["stream_stderr"].acquire()
try:
self._handlers["stream_stderr"].flush()
self._handlers["stream_stderr"].stream = old_stream
finally:
self._handlers["stream_stderr"].release()
else:
raise RuntimeError("Impossible to patch logging handlers if handler does not exist")
@contextmanager
def temp_verbosity(self, verbosity_level):
"""Sets the a temporary threshold for what messages will be logged."""
if self._logger is not None:
old_verbosity = self.get_verbosity()
try:
self.set_verbosity(verbosity_level)
yield
finally:
self.set_verbosity(old_verbosity)
else:
try:
yield
finally:
pass
def captureWarnings(self, capture):
"""
If capture is True, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
if self._logger is not None:
if capture and self.old_warnings_showwarning is None:
# Backup Method
self.old_warnings_showwarning = warnings.showwarning
warnings.showwarning = self._showwarning
elif not capture and self.old_warnings_showwarning is not None:
# Restore Method
warnings.showwarning = self.old_warnings_showwarning
self.old_warnings_showwarning = None
def _showwarning(self, message, category, filename, lineno, line=None):
"""
Implementation of showwarning() which redirects to logging.
It will call warnings.formatwarning and will log the resulting string
with level logging.WARNING.
"""
s = warnings.formatwarning(message, category, filename, lineno, line)
self.warning("%s", s)
def _logged_once(self, msg, mode):
PREFIX_LEN = 12
if mode == LogMode.ONCE:
if msg[PREFIX_LEN:] in self.once_logged:
return True
self.once_logged.add(msg[PREFIX_LEN:])
return False
def debug(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self._logger is not None and self._logger.isEnabledFor(Logger.DEBUG) and not self._logged_once(msg, mode):
self._logger._log(Logger.DEBUG, msg, args, **kwargs)
def info(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self._logger is not None and self._logger.isEnabledFor(Logger.INFO) and not self._logged_once(msg, mode):
self._logger._log(Logger.INFO, msg, args, **kwargs)
def warning(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self._logger is not None and self._logger.isEnabledFor(Logger.WARNING) and not self._logged_once(msg, mode):
self._logger._log(Logger.WARNING, msg, args, **kwargs)
def error(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self._logger is not None and self._logger.isEnabledFor(Logger.ERROR) and not self._logged_once(msg, mode):
self._logger._log(Logger.ERROR, msg, args, **kwargs)
def critical(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if (
self._logger is not None
and self._logger.isEnabledFor(Logger.CRITICAL)
and not self._logged_once(msg, mode)
):
self._logger._log(Logger.CRITICAL, msg, args, **kwargs)
# # Necessary to catch the correct caller
# _logging._srcfile = os.path.normcase(inspect.getfile(Logger.__class__))
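# Minimal usage sketch (an illustration, not part of the module; the
# Singleton metaclass makes every Logger() call return the same instance):
if __name__ == "__main__":
    logger = Logger()
    logger.set_verbosity(Logger.DEBUG)
    logger.info("loaded %d items", 42, mode=LogMode.ONCE)
    logger.info("loaded %d items", 42, mode=LogMode.ONCE)  # suppressed by ONCE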
# ----------------------------------------------------------------------
from yahoo_finance import Currency

# Print the table of available currency codes from the data file.
file = open("Currency_update.txt", 'r')
x = file.readlines()
for y in x:
    L = map(str, y.split())
    res = ''
    for z in L[:-1]:
        res += z + ' '
    print '%30s %s' % (res, L[-1])

first_currency = raw_input("enter first currency: ")
second_currency = raw_input("enter second currency: ")
conversion = first_currency + second_currency
eur_pln = Currency(conversion)
print eur_pln.get_bid()
# ----------------------------------------------------------------------
#!/usr/bin/env python3
#encoding=utf-8
#-----------------------------------------------------
# Usage: python3 timer3.py
# Description: timer function with keyword-only argument
#-----------------------------------------------------
'''
Same usage as timer2.py, but uses 3.X keyword-only default arguments
instead of dict pops for simpler code. No need to hoist range() out
of tests in 3.X: always a generator in 3.X, and this can't run on 2.X.
Keyword-only arguments are ideal for configuration options such as our
functions' _reps argument. They must be coded after a * and before a **
in the function header, and in a function call they must be passed by
keyword and appear before the ** if used. The following is a
keyword-only-based alternative to the prior module.
'''
import time, sys
timer = time.clock if sys.platform[:3] == 'win' else time.time
'''
This module can be tested by timeseqs_timer2.py
'''
def total(func, *args, _reps=1000, **kargs):
start = timer()
for i in range(_reps):
ret = func(*args, **kargs)
elapsed = timer() - start
return (elapsed, ret)
def bestof(func, *args, _reps=5, **kargs):
best = 2 ** 32
for i in range(_reps):
start = timer()
ret = func(*args, **kargs)
elapsed = timer() - start
best = elapsed if elapsed < best else best
return (best, ret)
def bestoftotal(func, *args, _reps1=5, **kargs):
return min(total(func, *args, **kargs) for i in range(_reps1))
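# Example usage (a sketch): time a squares list comprehension.
if __name__ == '__main__':
    elapsed, result = total(lambda n: [x ** 2 for x in range(n)], 10000, _reps=1000)
    print('total: %.6f seconds for 1000 reps' % elapsed)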
# ----------------------------------------------------------------------
"""
Generate an autoencoder neural network visualization
"""
# Changing these adjusts the size and layout of the visualization
FIGURE_WIDTH = 16
FIGURE_HEIGHT = 9
RIGHT_BORDER = 0.7
LEFT_BORDER = 0.7
TOP_BORDER = 0.8
BOTTOM_BORDER = 0.6
N_IMAGE_PIXEL_COLS = 64
N_IMAGE_PIXEL_ROWS = 48
N_NODES_BY_LAYER = [10, 7, 5, 8]
INPUT_IMAGE_BOTTOM = 5
INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT
ERROR_IMAGE_SCALE = 0.7
ERROR_GAP_SCALE = 0.3
BETWEEN_LAYER_SCALE = 0.8
BETWEEN_NODE_SCALE = 0.4
def main():
print(f"Node images are {N_IMAGE_PIXEL_ROWS}"
+ f" by {N_IMAGE_PIXEL_COLS} pixels")
if __name__ == "__main__":
main()
# ----------------------------------------------------------------------
from kafka import KafkaConsumer
from kafka.errors import KafkaError
import logging
import sys
BOOTSTRAP_SERVERS = ['3.209.55.41:9092']
KAFKA_TOPIC = 'fledge-testing'
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
_LOGGER.addHandler(handler)
consumer = KafkaConsumer(KAFKA_TOPIC,
auto_offset_reset='earliest',
bootstrap_servers=BOOTSTRAP_SERVERS,
api_version=(0,11),
consumer_timeout_ms=1000)
_LOGGER.info(f'Waiting for messages from topic: {KAFKA_TOPIC}')
for msg in consumer:
    _LOGGER.info(f'message: {msg}')
_LOGGER.info('Done')
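# Producer counterpart (a sketch, not part of this script; the same broker,
# topic and api_version are assumed):
# from kafka import KafkaProducer
# producer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVERS, api_version=(0, 11))
# producer.send(KAFKA_TOPIC, b'hello fledge')
# producer.flush()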
# ----------------------------------------------------------------------
import os
from shutil import copy2
from datetime import datetime
from PIL import Image
from sys import argv
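# Copies fresh 1920x1080 Windows Spotlight lock-screen images into a folder.
# Hypothetical invocation (script name assumed): python spotlight.py <username> <dest_dir>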
username = argv[1]
dest = argv[2]
source = "C:/Users/" + username + "/AppData/Local/Packages/Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy/LocalState/Assets"
# Index the images already in the destination so duplicates are skipped.
currentImgs = []
for filename in os.listdir(dest):
    try:
        image = Image.open(dest + "/" + filename)
        currentImgs.append(image)
    except Exception:
        print(filename)

# Copy any new 1920x1080 image from the Spotlight cache.
i = 0
for filename in os.listdir(source):
    sourceFile = source + "/" + filename
    try:
        im = Image.open(sourceFile)
        width, height = im.size
        if width != 1920 or height != 1080:
            continue
        # Compare histograms to skip images that were already copied.
        h = im.histogram()
        if any(h == ci.histogram() for ci in currentImgs):
            continue
        destFile = "{0}/{1:%Y-%m-%d-%H-%M-%S}{2}.jpg".format(
            dest, datetime.now(), i)
        i += 1
        copy2(sourceFile, destFile)
        currentImgs.append(im)
    except Exception:
        print(filename)
# ----------------------------------------------------------------------
class HabitatError(Exception):
    _msg = 'Unhandled Error'

    def __init__(self, *args, **kwargs):
        # Fill the class-level message template with the positional args.
        super().__init__(self._msg % args, **kwargs)


class InvalidBiomeError(HabitatError):
    _msg = '%s is not a valid biome!'


class AmbiguousProvidesError(HabitatError):
    _msg = '%s and %s both provide %s!'


class InvalidModuleError(HabitatError):
    _msg = '%s is not a valid module!'


class UnexpectedFlagError(HabitatError):
    _msg = 'Unexpected flag %s!'
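# Usage sketch: positional args fill the class-level _msg template.
if __name__ == '__main__':
    try:
        raise InvalidBiomeError('lava')
    except HabitatError as err:
        print(err)  # -> lava is not a valid biome!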
# ----------------------------------------------------------------------
import random
# Easy to read representation for each cardinal direction.
N, S, W, E = ('n', 's', 'w', 'e')
class Cell(object):
"""
Class for each individual cell. Knows only its position and which walls are
still standing.
"""
def __init__(self, x, y, walls):
self.x = x
self.y = y
self.walls = set(walls)
def __repr__(self):
# <15, 25 (es )>
return '<{}, {} ({:4})>'.format(self.x, self.y, ''.join(sorted(self.walls)))
def __contains__(self, item):
# N in cell
return item in self.walls
def is_full(self):
"""
Returns True if all walls are still standing.
"""
return len(self.walls) == 4
def _wall_to(self, other):
"""
Returns the direction to the given cell from the current one.
Must be one cell away only.
"""
assert abs(self.x - other.x) + abs(self.y - other.y) == 1, '{}, {}'.format(self, other)
if other.y < self.y:
return N
elif other.y > self.y:
return S
elif other.x < self.x:
return W
elif other.x > self.x:
return E
else:
assert False
def connect(self, other):
"""
Removes the wall between two adjacent cells.
"""
other.walls.remove(other._wall_to(self))
self.walls.remove(self._wall_to(other))
class Maze(object):
"""
Maze class containing full board and maze generation algorithms.
"""
# Unicode character for a wall with other walls in the given directions.
UNICODE_BY_CONNECTIONS = {'ensw': '┼',
'ens': '├',
'enw': '┴',
'esw': '┬',
'es': '┌',
'en': '└',
'ew': '─',
'e': '╶',
'nsw': '┤',
'ns': '│',
'nw': '┘',
'sw': '┐',
's': '╷',
'n': '╵',
'w': '╴'}
def __init__(self, width=20, height=10):
"""
Creates a new maze with the given sizes, with all walls standing.
"""
self.width = width
self.height = height
self.cells = []
for y in range(self.height):
for x in range(self.width):
self.cells.append(Cell(x, y, [N, S, E, W]))
def __getitem__(self, index):
"""
Returns the cell at index = (x, y).
"""
x, y = index
if 0 <= x < self.width and 0 <= y < self.height:
return self.cells[x + y * self.width]
else:
return None
def neighbors(self, cell):
"""
Returns the list of neighboring cells, not counting diagonals. Cells on
borders or corners may have fewer than 4 neighbors.
"""
x = cell.x
y = cell.y
for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:
neighbor = self[new_x, new_y]
if neighbor is not None:
yield neighbor
def _to_str_matrix(self):
"""
Returns a matrix with a pretty printed visual representation of this
maze. Example 5x5:
OOOOOOOOOOO
O O O
OOO OOO O O
O O O O
O OOO OOO O
O O O O
OOO O O OOO
O O O O O
O OOO O O O
O O O
OOOOOOOOOOO
"""
str_matrix = [['W'] * (self.width * 2 + 1)
for i in range(self.height * 2 + 1)]
# str_matrix = [['O'] * (self.width)
# for i in range(self.height)]
for cell in self.cells:
x = cell.x * 2 + 1
y = cell.y * 2 + 1
str_matrix[y][x] = ' '
# Clear shared walls in the doubled matrix, guarding its true bounds.
if N not in cell and y > 0:
str_matrix[y - 1][x + 0] = ' '
if S not in cell and y + 1 < len(str_matrix):
str_matrix[y + 1][x + 0] = ' '
if W not in cell and x > 0:
str_matrix[y][x - 1] = ' '
if E not in cell and x + 1 < len(str_matrix[0]):
str_matrix[y][x + 1] = ' '
return str_matrix
def __repr__(self):
"""
Returns a Unicode representation of the maze. Size is doubled
horizontally to avoid a stretched look. Example 5x5:
┌───┬───────┬───────┐
│ │ │ │
│ │ ╷ ╵ ╷ │
│ │ │ │ │
│ │ └───┬───┘ │
│ │ │ │
│ └───────┤ ┌───┤
│ │ │ │
│ ╷ ╶───┘ ╵ │
│ │ │
└───┴───────────────┘
"""
# Starts with regular representation. Looks stretched because chars are
# twice as high as they are wide (look at docs example in
# `Maze._to_str_matrix`).
skinny_matrix = self._to_str_matrix()
# Simply duplicate each character in each line.
double_wide_matrix = []
for line in skinny_matrix:
double_wide_matrix.append([])
for char in line:
double_wide_matrix[-1].append(char)
double_wide_matrix[-1].append(char)
# The last two chars of each line are walls, and we will need only one.
# So we remove the last char of each line.
matrix = [line[:-1] for line in double_wide_matrix]
def g(x, y):
"""
Returns True if there is a wall at (x, y). Values outside the valid
range always return false.
This is a temporary helper function.
"""
if 0 <= x < len(matrix[0]) and 0 <= y < len(matrix):
return matrix[y][x] != ' '
else:
return False
# Fix double wide walls, finally giving the impression of a symmetric
# maze.
for y, line in enumerate(matrix):
for x, char in enumerate(line):
if not g(x, y) and g(x - 1, y):
matrix[y][x - 1] = ' '
# Right now the maze has the correct aspect ratio, but is still using
# 'W' to represent walls.
# Finally we replace the walls with Unicode characters depending on
# their context.
for y, line in enumerate(matrix):
for x, char in enumerate(line):
if not g(x, y):
continue
connections = set((N, S, E, W))
if not g(x, y + 1): connections.remove(S)
if not g(x, y - 1): connections.remove(N)
if not g(x + 1, y): connections.remove(E)
if not g(x - 1, y): connections.remove(W)
str_connections = ''.join(sorted(connections))
# Note we are changing the matrix we are reading. We need to be
# careful as to not break the `g` function implementation.
matrix[y][x] = Maze.UNICODE_BY_CONNECTIONS[str_connections]
# Simple double join to transform list of lists into string.
return '\n'.join(''.join(line) for line in matrix) + '\n'
def randomize(self):
"""
Knocks down random walls to build a random perfect maze.
Algorithm from http://mazeworks.com/mazegen/mazetut/index.htm
"""
cell_stack = []
cell = random.choice(self.cells)
n_visited_cells = 1
while n_visited_cells < len(self.cells):
neighbors = [c for c in self.neighbors(cell) if c.is_full()]
if len(neighbors):
neighbor = random.choice(neighbors)
cell.connect(neighbor)
cell_stack.append(cell)
cell = neighbor
n_visited_cells += 1
else:
cell = cell_stack.pop()
@staticmethod
def generate(width=20, height=10):
"""
Returns a new random perfect maze with the given sizes.
"""
m = Maze(width, height)
m.randomize()
return m
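# Example (a sketch): print(Maze.generate(20, 10)) renders a random perfect
# maze using the Unicode box-drawing characters in UNICODE_BY_CONNECTIONS.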
def generar_mapa(width=16, height=10, jugadores=1):
    mapa = Maze.generate(width, height)._to_str_matrix()
    # Create obstacles
    for i in range(0, random.randint(0, 50)):
        obstaculo = (random.randint(-width, width), random.randint(-height, height))  # Random location
        mapa[obstaculo[0]][obstaculo[1]] = 'O'
    # Create player(s)
    if jugadores == 1:
        jugador1 = (random.randrange(0, width), 0)  # Random location
        mapa[jugador1[0]][jugador1[1]] = 'P'
        mapa[jugador1[0]][jugador1[1] + 1] = ' '  # Always leave an exit space
        # Create the goal for player 1
        objetivo = (random.randrange(0, width), -(height - random.randint(0, 2)))  # Random location
        mapa[objetivo[0]][objetivo[1]] = '1'
    else:
        letras = 'PQRS'
        objetivos = '1234'
        ubicaciones = [(random.randrange(0, width), 0) for _ in range(4)]  # Random locations
        while len(ubicaciones) != len(set(ubicaciones)):  # Check for duplicates
            ubicaciones = [(random.randrange(0, width), 0) for _ in range(4)]
        for p in range(0, jugadores):
            mapa[ubicaciones[p][0]][ubicaciones[p][1]] = letras[p]
            mapa[ubicaciones[p][0]][ubicaciones[p][1] + 1] = ' '  # Always leave an exit space
            # Create the goal for player p
            objetivo = (random.randrange(0, width), -(height - random.randint(1, 2)))  # Random location
            mapa[objetivo[0]][objetivo[1]] = f'{objetivos[p]}'
    return mapa


####################################################################################################
# USAGE EXAMPLE
mapa1 = generar_mapa(jugadores=2)
with open('your_file.txt', 'w') as f:
    for item in mapa1:
        f.write("%s\n" % item)
# ----------------------------------------------------------------------
import numpy as np
import pandas as pd
from sklearn.linear_model import lasso_path
from lassoloaddata import get_folds
# define the grid of lambda values to explore
alphas = np.logspace(-4, -0.5, 30)
def get_lasso_path(X_train, y_train, alphas=alphas):
"""
compute the lasso path for the given data
Args:
X_train: predictors (k-mers and PLS); assumes that predictors
have been scaled, numpy array
y_train: response var (mRNA indicator) pd.Series or array
"""
alphas_lasso, coefs_lasso, _ = lasso_path(X_train, y_train, alphas=alphas,
fit_intercept=False)
return alphas_lasso, coefs_lasso
def path_to_frame(coefs, colnames, rownames):
"""
Puts the lasso path coefs in a pandas
DataFrame with adequate row and
column names.
Args:
colnames: list, explored lambda values
rownames: list, names of predictor variables
"""
return pd.DataFrame(coefs, index=rownames, columns=colnames)
def lasssopath(predictors, response, alphas=alphas):
"""
computes the lasso path on the data using 6 folds
of the data, therefore returns 6 different paths
Args:
predictors: predictors pd.DataFrame
response: response pd.Series
"""
folds = get_folds(predictors, response,
k=6) # get folds and scales predictors
paths = []
i = 1
for (X_train, y_train), _ in folds:
print('running fold {} of 6'.format(i))
alphas, lasso_path = get_lasso_path(X_train, y_train, alphas=alphas)
coefs = path_to_frame(lasso_path, alphas, predictors.columns)
coefs['kfold'] = i
paths.append(coefs)
i += 1
return pd.concat(paths)
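# Hypothetical usage (assumes `predictors` is a scaled pd.DataFrame and
# `response` a pd.Series, as produced by the lassoloaddata helpers):
# paths = lasssopath(predictors, response)
# print(paths.head())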
# ----------------------------------------------------------------------
""""
Settings:
pos_id
second_key
client_id
client_secret
"""
import hashlib
import json
import logging
from collections import OrderedDict
from decimal import Decimal
from typing import Optional, Union
from urllib.parse import urljoin
from django import http
from django.conf import settings
from django.db.transaction import atomic
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.http import urlencode
from rest_framework import status as http_status
from getpaid.exceptions import LockFailure
from getpaid.post_forms import PaymentHiddenInputsPostForm
from getpaid.processor import BaseProcessor
from getpaid.types import BackendMethod as bm
from getpaid.types import PaymentStatusResponse
from .callback_handler import PayuCallbackHandler
from .client import Client
from .types import Currency, OrderStatus, ResponseStatus
logger = logging.getLogger(__name__)
key_trans = {
"unit_price": "unitPrice",
"first_name": "firstName",
"last_name": "lastName",
"order_id": "extOrderId",
"customer_ip": "customerIp",
"notify_url": "notifyUrl",
}
class PaymentProcessor(BaseProcessor):
slug = settings.GETPAID_PAYU_SLUG
display_name = "PayU"
accepted_currencies = [c.value for c in Currency]
ok_statuses = [200, 201, 302]
method = "REST" #: Supported modes: REST, POST (not recommended!)
sandbox_url = "https://secure.snd.payu.com/"
production_url = "https://secure.payu.com/"
confirmation_method = "PUSH" #: PUSH - paywall will send POST request to your server; PULL - you need to check the payment status
post_form_class = PaymentHiddenInputsPostForm
post_template_name = "getpaid_payu/payment_post_form.html"
callback_url_name = "getpaid:callback"
client_class = Client
callback_handler_class = PayuCallbackHandler
_token = None
_token_expires = None
# Specifics
@classmethod
def get_paywall_baseurl(cls):
if cls.get_setting("use_sandbox", True):
return cls.sandbox_url
return cls.production_url
@classmethod
def get_paywall_method(cls):
return cls.get_setting("paywall_method", cls.method)
def get_paywall_context(self, request=None, camelize_keys=False, **kwargs):
context = {
"notify_url": self.get_notify_url(),
"continue_url": self.get_continue_url(),
"customer_ip": self.get_customer_ip(request),
"description": self.payment.description,
"currency": self.payment.currency,
"amount": self.payment.amount_required,
"order_id": self.payment.get_unique_id(),
"buyer": self.payment.get_buyer_info(),
}
if self.get_setting("is_marketplace", False):
context["shopping_carts"] = self.get_shopping_carts()
else:
context["products"] = self.get_products()
return context
def get_notify_url(self):
backend_url = settings.GETPAID_BACKEND_HOST
return urljoin(
backend_url, reverse(self.callback_url_name, kwargs={"pk": self.payment.pk})
)
def get_continue_url(self):
frontend_host = settings.GETPAID_FRONTEND_HOST
return self.get_setting("continue_url").format(
frontend_host=frontend_host, payment_id=self.payment.id
)
def get_customer_ip(self, request=None):
customer_ip = "127.0.0.1"
if request:
customer_ip = request.META.get("REMOTE_ADDR", customer_ip)
return customer_ip
def get_shopping_carts(self):
shopping_carts = []
raw_items = self.payment.get_items()
for shopping_cart in raw_items:
products = [
{key_trans.get(k, k): v for k, v in product.items()}
for product in shopping_cart["products"]
]
shopping_carts.append({**shopping_cart, "products": products})
return shopping_carts
def get_products(self):
raw_products = self.payment.get_items()
products = []
for product in raw_products:
transformed_product = {key_trans.get(k, k): v for k, v in product.items()}
products.append(transformed_product)
return products
@atomic()
def prepare_transaction(self, request=None, view=None, **kwargs):
method = self.get_paywall_method().upper()
if method == bm.REST:
try:
results = self.prepare_lock(request=request, **kwargs)
response = http.HttpResponseRedirect(results["url"])
except LockFailure as exc:
logger.error(exc, extra=getattr(exc, "context", None))
self.payment.fail()
response = http.HttpResponseRedirect(
reverse("getpaid:payment-failure", kwargs={"pk": self.payment.pk})
)
self.payment.save()
return response
elif method == bm.POST:
data = self.get_paywall_context(
request=request, camelize_keys=True, **kwargs
)
data["merchantPosId"] = self.get_setting("pos_id")
url = urljoin(self.get_paywall_baseurl(), "/api/v2_1/orders")
form = self.get_form(data)
return TemplateResponse(
request=request,
template=self.get_template_names(view=view),
context={"form": form, "paywall_url": url},
)
def handle_paywall_callback(self, request, **kwargs):
given_signature, expected_signature = self.get_signatures(request)
if given_signature == expected_signature:
data = json.loads(request.body)
self.callback_handler_class(self.payment).handle(data)
return HttpResponse(status=http_status.HTTP_200_OK)
else:
logger.error(
f"Received bad signature for payment {self.payment.id}! "
f"Got '{given_signature}', expected '{expected_signature}'"
)
def prepare_lock(self, request=None, **kwargs):
results = {}
params = self.get_paywall_context(request=request, **kwargs)
response = self.client.new_order(**params)
results["raw_response"] = self.client.last_response
self.payment.confirm_prepared()
self.payment.external_id = results["ext_order_id"] = response.get("orderId", "")
self.payment.redirect_uri = results["url"] = response.get("redirectUri", "")
return results
def charge(self, **kwargs):
response = self.client.capture(self.payment.external_id)
result = {
"raw_response": self.client.last_response,
"status_desc": response.get("status", {}).get("statusDesc"),
}
if response.get("status", {}).get("statusCode") == ResponseStatus.SUCCESS:
result["success"] = True
return result
def release_lock(self):
response = self.client.cancel_order(self.payment.external_id)
status = response.get("status", {}).get("statusCode")
if status == ResponseStatus.SUCCESS:
return self.payment.amount_locked
def get_signatures(self, request):
payu_header_raw = request.headers.get(
"OpenPayU-Signature"
) or request.headers.get("X-OpenPayU-Signature", "")
payu_header = {
k: v for k, v in [i.split("=") for i in payu_header_raw.split(";")]
}
algo_name = payu_header.get("algorithm", "MD5")
given_signature = payu_header.get("signature")
second_key = self.get_setting("second_key")
algorithm = getattr(hashlib, algo_name.replace("-", "").lower())
request_body = request.body.decode()
expected_signature = algorithm(
f"{request_body}{second_key}".encode("utf-8")
).hexdigest()
return given_signature, expected_signature
def fetch_payment_status(self) -> PaymentStatusResponse:
response = self.client.get_order_info(self.payment.external_id)
results = {"raw_response": self.client.last_response}
order_data = response.get("orders", [None])[0]
status = order_data.get("status")
callback_mapping = {
OrderStatus.NEW: "confirm_prepared",
OrderStatus.PENDING: "confirm_prepared",
OrderStatus.CANCELED: "fail",
OrderStatus.COMPLETED: "confirm_payment",
OrderStatus.WAITING_FOR_CONFIRMATION: "confirm_lock",
}
results["callback"] = callback_mapping[status]
return results
def prepare_form_data(self, post_data, **kwargs):
pos_id = self.get_setting("pos_id")
second_key = self.get_setting("second_key")
algorithm = self.get_setting("algorithm", "SHA-256").upper()
hasher = getattr(hashlib, algorithm.replace("-", "").lower())
encoded = urlencode(OrderedDict(sorted(post_data.items())))
prepared = f"{encoded}&{second_key}".encode("ascii")
signature = hasher(prepared).hexdigest()
post_data[
"OpenPayu-Signature"
] = f"signature={signature};algorithm={algorithm};sender={pos_id}"
return post_data
def start_refund(
self, amount: Optional[Union[Decimal, float, int]] = None, **kwargs
) -> Decimal:
"""
Refunds the given amount.
Returns the amount that is refunded.
"""
client = self.get_client()
if self.get_setting("is_marketplace", False):
assert (
"ext_customer_id" in kwargs
), "Add ext_customer_id if you use marketplace"
response = client.refund(
order_id=str(self.payment.external_id),
ext_refund_id=str(self.payment.id),
amount=amount,
**kwargs,
)
self.payment.refund_status_desc = response["status"]["statusDesc"]
self.payment.refund_description = response["refund"]["description"]
self.payment.external_refund_id = response["refund"]["refundId"]
# ----------------------------------------------------------------------
from redbot.core import commands
class Tutorial_Cog(commands.Cog):
"""Minimal tutorial bot"""
def __init__(self, bot):
self.bot = bot
@commands.group()
async def simple_cog(self, ctx):
pass
@simple_cog.command()
async def hello(self, ctx, *, message):
"""Says something in a text channel"""
await ctx.send(f"Cog says: Hello World! {message}")
# ----------------------------------------------------------------------
from IPython.display import HTML
import IPython
import htmlmin
def _format_disqus_code(page_url: str, page_identifier: str, site_shortname: str) -> str:
"""This function formats the necessary html and javascript codes needed to be
inserted into the jupyter notebook
Args:
page_url (str): your page's canonical URL
page_identifier (str): your page's unique identifier
site_shortname (str): your site's disqus shortname
Returns:
str: the formatted html disqus code
"""
disqus_code = """
<div id="disqus_thread"></div>
<script>
/**
* RECOMMENDED CONFIGURATION VARIABLES: EDIT AND UNCOMMENT THE SECTION BELOW TO INSERT DYNAMIC VALUES FROM YOUR PLATFORM OR CMS.
* LEARN WHY DEFINING THESE VARIABLES IS IMPORTANT: https://disqus.com/admin/universalcode/#configuration-variables*/
var disqus_config = function () {
this.page.url = '%s'; // Replace PAGE_URL with your page's canonical URL variable
this.page.identifier = '%s'; // Replace PAGE_IDENTIFIER with your page's unique identifier variable
};
(function() { // DON'T EDIT BELOW THIS LINE
var d = document, s = d.createElement('script');
s.src = 'https://%s.disqus.com/embed.js';
s.setAttribute('data-timestamp', +new Date());
(d.head || d.body).appendChild(s);
})();
</script>
<noscript>Please enable JavaScript to view the <a href="https://disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript>
<script id="dsq-count-scr" src="//%s.disqus.com/count.js" async></script>
</body>
""" % (page_url, page_identifier, site_shortname, site_shortname)
return htmlmin.minify(disqus_code)
def inject(page_url: str, page_identifier: str, site_shortname: str) -> IPython.core.display.HTML:
"""this function injects and displays a disqus commenting section in a code cell of your jupyter notebook
Args:
page_url (str): your page's canonical URL
page_identifier (str): your page's unique identifier
site_shortname (str): your site's disqus shortname
Returns:
IPython.core.display.HTML
Example:
>>> from jupyter_disqus import inject
>>> # call this function in a separate code cell of your jupyter notebook
>>> inject(
page_url="https://costahuang.me/SC2AI/",
page_identifier="1f527ae5-5a59-4dc3-9bb0-d77c2ccf5cab",
site_shortname="costahuang"
)
"""
return HTML(_format_disqus_code(page_url, page_identifier, site_shortname))
# ----------------------------------------------------------------------
DEBUG = True
# Make these unique, and don't share it with anybody.
SECRET_KEY = "c69c2ab2-9c58-4013-94a6-004052f2583d40029806-a510-4c48-a874-20e9245f55f70394cbad-48b5-4945-9499-96c303d771e6"
NEVERCACHE_KEY = "9fb86bbb-51a2-494d-b6ca-1065c0f1f58ee6d757ec-85b0-4f66-9003-ff57c8a3d9d8b37a8b11-19a9-4c03-8596-ba129af542ed"
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.mysql",
# DB name or path to database file if using sqlite3.
"NAME": "hsbsite",
# Not used with sqlite3.
"USER": "dbuser",
# Not used with sqlite3.
"PASSWORD": "dbuser",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
# ----------------------------------------------------------------------
from setuptools import setup, find_packages
setup(
name="componentsdb",
version="0.1",
packages=find_packages(exclude=['tests']),
package_data={
'componentsdb': [
'ui/templates/*.html',
'ui/static/*',
],
},
install_requires=[
'enum34',
'flask',
'flask-migrate',
'flask-script',
'flask-sqlalchemy',
'oauth2client',
'psycopg2',
'pyjwt',
'pyopenssl',
'sqlalchemy',
],
)
# ----------------------------------------------------------------------
#!/usr/bin/env python3
# https://leetcode.com/problems/ugly-number/
import unittest
class Solution:
def isUgly(self, num: int) -> bool:
if num <= 0:
return False
if num == 1:
return True
while num % 2 == 0:
num //= 2
while num % 3 == 0:
num //= 3
while num % 5 == 0:
num //= 5
# After dividing out all factors of 2, 3 and 5, an ugly number reduces to 1.
return num == 1
class TestCode(unittest.TestCase):
def test_minus(self):
self.assertFalse(Solution().isUgly(-1))
def test_0(self):
self.assertFalse(Solution().isUgly(0))
def test_1(self):
self.assertTrue(Solution().isUgly(1))
def test_2(self):
self.assertTrue(Solution().isUgly(2))
def test_3(self):
self.assertTrue(Solution().isUgly(3))
def test_7(self):
self.assertFalse(Solution().isUgly(7))
def test_11(self):
self.assertFalse(Solution().isUgly(11))
def test_14(self):
self.assertFalse(Solution().isUgly(14))
def test_16(self):
self.assertTrue(Solution().isUgly(16))
def test_27(self):
self.assertTrue(Solution().isUgly(27))
def test_937351770(self):
self.assertFalse(Solution().isUgly(937351770))
def test_905391974(self):
self.assertFalse(Solution().isUgly(905391974))
# ----------------------------------------------------------------------
import unittest
from unittest import mock
from easybill_rest import Client
from easybill_rest.resources.resource_attachments import ResourceAttachments
from easybill_rest.tests.test_case_abstract import EasybillRestTestCaseAbstract
class TestResourceAttachments(unittest.TestCase, EasybillRestTestCaseAbstract):
def setUp(self) -> None:
mocked_object = mock.Mock()
mocked_object.call = mock.Mock(return_value={})
mocked_object.upload = mock.Mock(return_value={})
mocked_object.download = mock.Mock(return_value=bytes())
self.mocked_object = ResourceAttachments(mocked_object)
def test_get_endpoint(self) -> None:
self.assertEqual("/attachments", Client('').attachments().get_resource_endpoint())
def test_get_attachments(self) -> None:
self.assertTrue(isinstance(
self.mocked_object.get_attachments({"page": "2"}), dict))
def test_get_attachment(self) -> None:
self.assertTrue(
isinstance(
self.mocked_object.get_attachment("3"),
dict))
def test_create_attachment(self) -> None:
self.assertTrue(
isinstance(
self.mocked_object.create_attachment(
bytes(
'{"test": "test"}',
'utf-8')),
dict))
def test_update_attachment(self) -> None:
self.assertTrue(
isinstance(
self.mocked_object.update_attachment(
"3", {
"test": "test"}), dict))
def test_delete_attachment(self) -> None:
self.assertIsNone(self.mocked_object.delete_attachment("3"))
def test_get_content(self) -> None:
self.assertIsNotNone(self.mocked_object.get_content("3"))
@staticmethod
def get_suite() -> unittest.TestSuite:
return unittest.TestSuite(map(TestResourceAttachments, [
'test_get_endpoint',
'test_get_attachments',
'test_get_attachment',
'test_create_attachment',
'test_update_attachment',
'test_delete_attachment',
'test_get_content',
]))
# ----------------------------------------------------------------------
#########
#IMPORTS#
#########
from tensorflow.keras.losses import binary_crossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import *
class Unet2D:
def __init__(self, input_shape=(256, 256, 1)):
# Input layer for the network; a single-channel 256x256 shape is assumed here.
inputs = Input(input_shape)
c1 = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (inputs)
c1 = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (c1)
c1 = BatchNormalization()(c1)
p1 = MaxPooling2D((2, 2)) (c1)
p1 = Dropout(0.25)(p1)
c2 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (p1)
c2 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (c2)
c2 = BatchNormalization()(c2)
p2 = MaxPooling2D((2, 2)) (c2)
p2 = Dropout(0.25)(p2)
c3 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (p2)
c3 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (c3)
c3 = BatchNormalization()(c3)
p3 = MaxPooling2D((2, 2)) (c3)
p3 = Dropout(0.25)(p3)
c4 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (p3)
c4 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (c4)
c4 = BatchNormalization()(c4)
p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
p4 = Dropout(0.25)(p4)
c5 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (p4)
c5 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (c5)
u6 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = concatenate([u6, c4])
u6 = BatchNormalization()(u6)
c6 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (u6)
c6 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (c6)
u7 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = concatenate([u7, c3])
u7 = BatchNormalization()(u7)
c7 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (u7)
c7 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (c7)
u8 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = concatenate([u8, c2])
u8 = BatchNormalization()(u8)
c8 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (u8)
c8 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (c8)
u9 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = concatenate([u9, c1], axis=3)
u9 = BatchNormalization()(u9)
c9 = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (u9)
c9 = Conv2D(32, (3, 3), activation='relu', padding='same', kernel_initializer="he_normal") (c9)
outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)
self.model = Model(inputs=[inputs], outputs=[outputs])
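# Usage sketch (relies on the self.model attribute set above; the compile
# settings are illustrative, not prescribed by this module):
# unet = Unet2D()
# unet.model.compile(optimizer=Adam(), loss=binary_crossentropy)
# unet.model.summary()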
# ----------------------------------------------------------------------
import asyncio


async def get_value():
    return "not-none"


async def main():
    # `await` is only valid inside a coroutine, so the check runs in main().
    if await get_value():
        print("Not none")
    else:
        print("None")


asyncio.run(main())
# ----------------------------------------------------------------------
import sys
from bson.objectid import ObjectId, InvalidId
from girder import logger
from girder.constants import AccessType
from girder.models.model_base import AccessControlledModel
from girder.models.model_base import ValidationException
from girder.models.user import User as UserModel
from girder.utility.model_importer import ModelImporter
import cumulus
from cumulus.taskflow import load_class, TaskFlowState
from taskflow.models.taskflow import Taskflow as TaskflowModel
TASKFLOW_NON_RUNNING_STATES = [
TaskFlowState.CREATED,
TaskFlowState.COMPLETE,
TaskFlowState.ERROR,
TaskFlowState.UNEXPECTEDERROR,
TaskFlowState.TERMINATED,
TaskFlowState.DELETED
]
class QueueType(object):
FIFO = 'fifo'
LIFO = 'lifo'
TYPES = [FIFO, LIFO]
class TaskStatus(object):
PENDING = 'pending'
RUNNING = 'running'
class Queue(AccessControlledModel):
def initialize(self):
self.name = 'queues'
self.ensureIndices(['name'])
self.mutable_props = ['maxRunning']
def validate(self, queue):
name = queue['name']
userId = queue['userId']
# Do we already have this name?
if queue.get('_id') is None:
if len(list(self.find(name=name, owner=userId, force=True))) > 0:
raise ValidationException('"%s" has already been taken.' % name, field='name')
return queue
def find(self, name=None, owner=None, offset=0, limit=None, sort=None, user=None, force=False):
query = {}
if name is not None:
query['name'] = name
if owner is not None:
if not isinstance(owner, ObjectId):
try:
owner = ObjectId(owner)
except InvalidId:
raise ValidationException('Invalid ObjectId: %s' % owner,
field='owner')
query['userId'] = owner
cursor = super(Queue, self).find(query=query, sort=sort, user=user)
if not force:
for r in self.filterResultsByPermission(cursor=cursor, user=user,
level=AccessType.READ,
limit=limit, offset=offset):
yield r
else:
for r in cursor:
yield r
def create(self, name, type_, max_running, user=None):
queue = {
'name': name,
'type': type_,
'nRunning': 0,
'maxRunning': max_running,
'pending': [],
'taskflows': {}
}
userId = None
if user is not None:
userId = user['_id']
queue['userId'] = userId
self.setUserAccess(queue, user=user, level=AccessType.ADMIN)
return self.save(queue)
def apply_updates(self, queue, model_updates, user):
query = {
'_id': queue['_id']
}
updates = {}
for prop in model_updates:
if prop in self.mutable_props:
updates.setdefault('$set', {})[prop] = model_updates[prop]
if updates:
super(Queue, self).update(query, updates, multi=False)
queue = self.load(queue['_id'], user=user, level=AccessType.READ)
return queue
def add(self, queue, taskflow, params, user):
query = {
'_id': queue['_id'],
'taskflows.%s' % taskflow['_id']: {
'$exists': False
}
}
payload = {
'taskflowId': taskflow['_id'],
'startParams': params
}
if queue['type'] == QueueType.FIFO:
push = {
'pending': payload
}
else:
push = {
'pending': {
'$each': [ payload ],
'$position': 0
}
}
updates = {
'$push': push,
'$set': {
'taskflows.%s' % taskflow['_id']: TaskStatus.PENDING
}
}
self.update(query, updates)
queue = self.load(queue['_id'], user=user, level=AccessType.READ)
return queue
def pop(self, queue, limit, user):
queue, popped = self._pop_many(queue, limit, user)
for task in popped:
self._start_taskflow(queue['_id'], task['taskflowId'], task['start_params'], user)
return queue
def finish(self, queue, taskflow, user):
query = {
'_id': queue['_id'],
'taskflows.%s' % taskflow['_id']: TaskStatus.RUNNING
}
updates = {
'$inc': {
'nRunning': -1
},
'$unset': {
'taskflows.%s' % taskflow['_id']: ""
}
}
self.update(query, updates)
queue = self.load(queue['_id'], user=user, level=AccessType.READ)
return queue
def _pop_one(self, queue, user):
max_running = queue['maxRunning']
if max_running == 0:
max_running = sys.maxsize
query = {
'_id': queue['_id'],
'nRunning': {
'$lt': max_running
},
'$where': 'this.pending.length > 0'
}
updates = {
'$inc': {
'nRunning': 1
},
'$pop': {
'pending': -1
}
}
# queue is the document BEFORE the updates
queue = self.collection.find_one_and_update(query, updates)
taskflow_id = None
start_params = None
if queue is None:
return queue, taskflow_id, start_params
n_running = queue['nRunning']
pending = queue['pending']
if (n_running >= max_running or len(pending) == 0):
return queue, taskflow_id, start_params
task = pending.pop(0)
taskflow_id = task['taskflowId']
start_params = task['startParams']
query = {
'_id': queue['_id']
}
updates = {
'$set': {
'taskflows.%s' % taskflow_id: TaskStatus.RUNNING
}
}
self.update(query, updates)
queue = self.load(queue['_id'], user=user, level=AccessType.READ)
return queue, taskflow_id, start_params
def _pop_many(self, queue, limit, user):
popped = []
queue_, taskflow_id, start_params = self._pop_one(queue, user)
while taskflow_id is not None and len(popped) < limit:
queue = queue_
popped.append({'taskflowId': taskflow_id, 'start_params': start_params})
queue_, taskflow_id, start_params = self._pop_one(queue, user)
return queue, popped
def _start_taskflow(self, queue_id, taskflow_id, params, user):
taskflow = {"_id": taskflow_id}
updates = {"meta": {"queueId": queue_id}}
taskflow = TaskflowModel().update_taskflow(user, taskflow, updates)
constructor = load_class(taskflow['taskFlowClass'])
token = ModelImporter.model('token').createToken(user=user, days=7)
workflow = constructor(
id=str(taskflow['_id']),
girder_token=token['_id'],
girder_api_url=cumulus.config.girder.baseUrl
)
if params is None:
params = {}
workflow.start(**params)
return workflow
def cleanup_failed_taskflows():
queues = list(Queue().find(limit=sys.maxsize, force=True))
for queue in queues:
user = UserModel().load(queue['userId'], force=True)
if user is None:
continue
for taskflow_id, status in queue['taskflows'].items():
if status == TaskStatus.RUNNING:
taskflow = TaskflowModel().load(taskflow_id, force=True)
if taskflow['status'] in TASKFLOW_NON_RUNNING_STATES:
logger.warning("Removing non-running taskflow {} from the queue {}".format(taskflow_id, queue["_id"]))
Queue().finish(queue, taskflow, user)
def on_taskflow_status_update(event):
taskflow = event.info['taskflow']
queue_id = taskflow.get('meta', {}).get('queueId')
if queue_id is None:
return
if taskflow['status'] in TASKFLOW_NON_RUNNING_STATES:
queue = Queue().load(queue_id, force=True)
user = UserModel().load(queue['userId'], force=True)
Queue().finish(queue, taskflow, user)
Queue().pop(queue, sys.maxsize, user)
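# Sketch of the intended flow (Girder `user` and `taskflow` documents are
# assumed to exist; not part of this module):
# queue = Queue().create('analyses', QueueType.FIFO, max_running=2, user=user)
# queue = Queue().add(queue, taskflow, params={'input': 'data'}, user=user)
# queue = Queue().pop(queue, limit=1, user=user)  # starts at most one taskflow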
# ----------------------------------------------------------------------
from .model import DeepUNet
# ----------------------------------------------------------------------
from flask import Blueprint, redirect, url_for, render_template, request, abort, Flask
from flask import current_app
from website import db
from website.main.forms import SearchForm
from website.main.utils import db_reset, build_destination, make_parks, miles_to_meters, seconds_to_minutes
from website.models import Result
from website.main import gmaps, geolocator
import json, os, requests
import time, asyncio, aiohttp
# create instance of Blueprint; 'main' is the name
main = Blueprint('main', __name__)
# home route
@main.route('/')
@main.route('/home', methods=['GET', 'POST'])
def home():
form = SearchForm()
return render_template('home.html', form=form)
# results route
@main.route('/results', methods=['GET', 'POST'])
def results():
form = SearchForm()
if form.validate_on_submit():
DISTANCE_RADIUS = miles_to_meters(form.radius.data)
city = form.location.data
location = geolocator.geocode(city)
longitude = location.longitude
latitude = location.latitude
query = ['skatepark', 'skate park']
skatepark_result = gmaps.places(
query=query[0] or query[1],
radius=DISTANCE_RADIUS,
location=f'{latitude}, {longitude}')['results']
address_list = [park['formatted_address'] for park in skatepark_result]
address_string = '|'.join(address_list)
a = time.time()
desp = gmaps.distance_matrix(origins=f'{latitude}, {longitude}',
destinations=address_string,
transit_mode='driving')
names = [park['name'] for park in skatepark_result]
ratings = [park['rating'] for park in skatepark_result]
destinations = desp['destination_addresses']
durations = [
element['duration'] for element in desp['rows'][0]['elements']
]
distances = [
element['distance'] for element in desp['rows'][0]['elements']
]
# build up photo_list to be added to Park object
photo_list = []
for park in skatepark_result:
try:
for photo in park['photos']:
reference = photo['photo_reference']
photo_url = current_app.config['GPHOTO_URL'] + 'maxheight=' + current_app.config['HEIGHT'] +'&photoreference=' + reference + '&key=' + current_app.config['API_KEY']
photo_list.append(photo_url)
except Exception as e:
print(f'ERROR: failed to read photos for a park ({e})')
async def fetch(url):
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
pass
# create loop and then run it in another thread
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
tasks = [loop.create_task(fetch(photo_url))]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
print(city, names, destinations, ratings, durations, distances, photo_list)
dest_info = build_destination(names, destinations, ratings, distances, durations, photo_list)
parks = list(make_parks(dest_info))
# adding to park instance attributes to database
db_reset()
for park in parks:
entry = Result(city=city,
name=park.name,
address=park.destination,
rating=park.rating,
distance=park.distance,
duration=seconds_to_minutes(park.duration),
photo_url=park.photo_url)
db.session.add(entry)
db.session.commit()
print(f'speed = {time.time() - a}')
# pagination
page = request.args.get('page', 1, type=int)
origin = Result.query.with_entities(Result.city).limit(1).scalar()
print(origin)
radius = request.form.get('radius')
page_results = Result.query.paginate(page=page, per_page=2)
return render_template('results.html', form=form, results=page_results, origin=origin, radius=radius)
# ----------------------------------------------------------------------
import pytest
import os
import time
import projects.sample.sample as sample
from tests.backgroundTestServers import BackgroundTestServers
from rtCommon.clientInterface import ClientInterface
from tests.common import rtCloudPath
test_sampleProjectPath = os.path.join(rtCloudPath, 'projects', 'sample')
test_sampleProjectDicomPath = os.path.join(test_sampleProjectPath,
'dicomDir', '20190219.0219191_faceMatching.0219191_faceMatching')
# leaving '/tmp' as an allowed directory because the sample.py project currently uses '/tmp'
allowedDirs = ['/tmp', test_sampleProjectPath]
allowedFileTypes = ['.dcm', '.txt']
class TestSampleProject:
serversForTests = None
pingCount = 0
def setup_class(cls):
cls.serversForTests = BackgroundTestServers()
def teardown_class(cls):
cls.serversForTests.stopServers()
def test_runWithDataLocal(self):
print("\nSampleProject::test_runWithDataLocal")
TestSampleProject.serversForTests.stopServers()
TestSampleProject.serversForTests.startServers(allowedDirs=allowedDirs,
allowedFileTypes=allowedFileTypes,
dataRemote=False)
client = ClientInterface()
assert client.isDataRemote() == False
argv = ['--noVerbose']
ret = sample.main(argv)
assert ret == 0
def test_runWithDataRemote(self):
print("\nSampleProject::test_runWithDataRemote")
TestSampleProject.serversForTests.stopServers()
TestSampleProject.serversForTests.startServers(allowedDirs=allowedDirs,
allowedFileTypes=allowedFileTypes,
dataRemote=True)
client = ClientInterface()
assert client.isDataRemote() == True
argv = ['--noVerbose']
ret = sample.main(argv)
assert ret == 0
def test_runWithInitWatch(self):
print("\nSampleProject::test_runWithDataRemote")
TestSampleProject.serversForTests.stopServers()
TestSampleProject.serversForTests.startServers(allowedDirs=allowedDirs,
allowedFileTypes=allowedFileTypes,
dataRemote=True)
client = ClientInterface()
assert client.isDataRemote() == True
argv = ['--useInitWatch', '--noVerbose']
ret = sample.main(argv)
assert ret == 0
def test_runWithoutProjectInterface(self):
print("\nSampleProject::test_runWithoutProjectInterface:")
TestSampleProject.serversForTests.stopServers()
time.sleep(0.1)
argv = ['-y']
ret = sample.main(argv)
assert ret == 0
# ----------------------------------------------------------------------
from . import meta_selector # noqa
from .pg import PatternGenerator
from .selector import Selector
PatternGenerator('')
Selector('')
# ----------------------------------------------------------------------
def binc(n, m):
    """Binomial coefficient C(n, m) built with Pascal's triangle."""
    bc = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        bc[i][0] = 1  # C(i, 0) = 1
        for j in range(1, min(i, m) + 1):
            bc[i][j] = bc[i - 1][j - 1] + bc[i - 1][j]
    return bc[n][m]


print(binc(5, 2))  # 10
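# The table costs O(n*m) time and space; for a one-off value, Python 3.8+
# provides math.comb(n, m), which returns the same result directly.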
# ----------------------------------------------------------------------
# coding: utf-8
# Python libs
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
import tempfile
import time
# Salt libs
import salt.utils.files
from salt.beacons import watchdog
from salt.ext.six.moves import range
# Salt testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mixins import LoaderModuleMockMixin
def check_events(config):
total_delay = 1
delay_per_loop = 20e-3
for _ in range(int(total_delay / delay_per_loop)):
events = watchdog.beacon(config)
if events:
return events
time.sleep(delay_per_loop)
return []
def create(path, content=None):
with salt.utils.files.fopen(path, 'w') as f:
if content:
f.write(content)
os.fsync(f)
@skipIf(not watchdog.HAS_WATCHDOG, 'watchdog is not available')
class IWatchdogBeaconTestCase(TestCase, LoaderModuleMockMixin):
'''
Test case for salt.beacons.watchdog
'''
def setup_loader_modules(self):
return {watchdog: {}}
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
watchdog.close({})
shutil.rmtree(self.tmpdir, ignore_errors=True)
def assertValid(self, config):
ret = watchdog.validate(config)
self.assertEqual(ret, (True, 'Valid beacon configuration'))
def test_empty_config(self):
config = [{}]
ret = watchdog.beacon(config)
self.assertEqual(ret, [])
def test_file_create(self):
path = os.path.join(self.tmpdir, 'tmpfile')
config = [{'directories': {self.tmpdir: {'mask': ['create']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
create(path)
ret = check_events(config)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'created')
def test_file_modified(self):
path = os.path.join(self.tmpdir, 'tmpfile')
config = [{'directories': {self.tmpdir: {'mask': ['modify']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
create(path, 'some content')
ret = check_events(config)
self.assertEqual(len(ret), 2)
self.assertEqual(ret[0]['path'], os.path.dirname(path))
self.assertEqual(ret[0]['change'], 'modified')
self.assertEqual(ret[1]['path'], path)
self.assertEqual(ret[1]['change'], 'modified')
def test_file_deleted(self):
path = os.path.join(self.tmpdir, 'tmpfile')
create(path)
config = [{'directories': {self.tmpdir: {'mask': ['delete']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
os.remove(path)
ret = check_events(config)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'deleted')
def test_file_moved(self):
path = os.path.join(self.tmpdir, 'tmpfile')
create(path)
config = [{'directories': {self.tmpdir: {'mask': ['move']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
os.rename(path, path + '_moved')
ret = check_events(config)
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'moved')
def test_file_create_in_directory(self):
config = [{'directories': {self.tmpdir: {'mask': ['create', 'modify']}}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
path = os.path.join(self.tmpdir, 'tmpfile')
create(path)
ret = check_events(config)
self.assertEqual(len(ret), 2)
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'created')
self.assertEqual(ret[1]['path'], self.tmpdir)
self.assertEqual(ret[1]['change'], 'modified')
def test_trigger_all_possible_events(self):
path = os.path.join(self.tmpdir, 'tmpfile')
moved = path + '_moved'
config = [{'directories': {
self.tmpdir: {},
}}]
self.assertValid(config)
self.assertEqual(watchdog.beacon(config), [])
# create
create(path)
# modify
create(path, 'modified content')
# move
os.rename(path, moved)
# delete
os.remove(moved)
ret = check_events(config)
self.assertEqual(len(ret), 8)
# create
self.assertEqual(ret[0]['path'], path)
self.assertEqual(ret[0]['change'], 'created')
self.assertEqual(ret[1]['path'], self.tmpdir)
self.assertEqual(ret[1]['change'], 'modified')
# modify
self.assertEqual(ret[2]['path'], path)
self.assertEqual(ret[2]['change'], 'modified')
self.assertEqual(ret[3]['path'], path)
self.assertEqual(ret[3]['change'], 'modified')
# move
self.assertEqual(ret[4]['path'], path)
self.assertEqual(ret[4]['change'], 'moved')
self.assertEqual(ret[5]['path'], self.tmpdir)
self.assertEqual(ret[5]['change'], 'modified')
# delete
self.assertEqual(ret[6]['path'], moved)
self.assertEqual(ret[6]['change'], 'deleted')
self.assertEqual(ret[7]['path'], self.tmpdir)
self.assertEqual(ret[7]['change'], 'modified')
|
nilq/baby-python
|
python
|
# PART 1
def draw_stars(x):
    for count in range(len(x)):
        print('*' * x[count])
x = [1, 2, 4, 8, 16, 32]
draw_stars(x)
# PART 2
def draw_star(x):
    for count in range(len(x)):
        if isinstance(x[count], str):
            # strings print their lowercased first letter, once per character
            print(x[count].lower()[:1] * len(x[count]))
        else:
            print('*' * x[count])
|
nilq/baby-python
|
python
|
from influence_module.interface import IInfluencer
from music_module.interface import IMusic
from graphics_module.interface import IVisuals
from parse_module.interface import parse_config
from timeit import default_timer as timer
import numpy as np
configs = None
i_visuals = None
i_influencer = None
i_music = None
def print_time_since(start, what_txt):
end = timer()
elapsed = round((end - start) * 1000,3)
print("%s took: %s ms\n" % (what_txt,elapsed))
def parse(*arg):
global configs
start = timer()
configs = parse_config(arg[0]) #this needs to handle plural and directory
np.random.seed(configs.program.random_seed)
print_time_since(start,"parsing")
def _get_influencer_config(config):
if config.program.influencer_type == "random": return config.random_influencer
if config.program.influencer_type == "network": return config.network_influencer
def build(): #build everything
print("started building")
global configs, i_influencer, i_visuals,i_music
start = timer()
i_influencer = IInfluencer(_get_influencer_config(configs))
i_influencer.build(configs.program.init_particle_amount)
i_visuals = IVisuals(i_influencer.influence_visual_object, i_influencer.influencer_description, configs.graphics,configs.program)
i_visuals.build()
i_music = IMusic(configs.music)
i_music.build()
print_time_since(start,"modules build")
def run(): #run the whole thing
print("running program")
i_visuals.run()
if __name__ == "__main__": #TODO take in config file as command arg
program_start = timer()
config_file = 'C:\\Users\\sindr\\Source\\Repos\\The-Playground\\Python\\Machine Learning\\LSTM Music Visualizer\\LSTM Music Visualizer\\config.json'
parse(config_file)
build()
run()
    print_time_since(program_start,"program run")
|
nilq/baby-python
|
python
|
"""
This file stores a subclass of DistanceSolver, UPGMA. The inference procedure is
a hierarchical clustering algorithm proposed by Sokal and Michener (1958) that
iteratively joins together samples with the minimum dissimilarity.
"""
from typing import Callable, Dict, List, Optional, Tuple, Union
import abc
from collections import defaultdict
import networkx as nx
import numba
import numpy as np
import pandas as pd
from cassiopeia.data import CassiopeiaTree
from cassiopeia.solver import DistanceSolver, dissimilarity_functions
class UPGMASolver(DistanceSolver.DistanceSolver):
"""
UPGMA CassiopeiaSolver.
Implements the UPGMA algorithm described as a derived class of
DistanceSolver. This class inherits the generic `solve` method, but
implements its own procedure for finding cherries by minimizing the
dissimilarity between samples. After joining nodes, the dissimilarities
are updated by averaging the distances of elements in the new cluster
with each existing node. Produces a rooted tree that is assumed to be
ultrametric.
Args:
dissimilarity_function: A function by which to compute the dissimilarity
map. Optional if a dissimilarity map is already provided.
prior_transformation: Function to use when transforming priors into
weights. Supports the following transformations:
"negative_log": Transforms each probability by the negative
log (default)
"inverse": Transforms each probability p by taking 1/p
"square_root_inverse": Transforms each probability by the
the square root of 1/p
Attributes:
dissimilarity_function: Function used to compute dissimilarity between
samples.
        add_root: Whether or not to add an implicit root to the tree.
prior_transformation: Function to use when transforming priors into
weights.
"""
def __init__(
self,
dissimilarity_function: Optional[
Callable[
[np.array, np.array, int, Dict[int, Dict[int, float]]], float
]
] = dissimilarity_functions.weighted_hamming_distance,
prior_transformation: str = "negative_log",
):
super().__init__(
dissimilarity_function=dissimilarity_function,
add_root=True,
prior_transformation=prior_transformation,
)
self.__cluster_to_cluster_size = defaultdict(int)
def root_tree(
self, tree: nx.Graph, root_sample: str, remaining_samples: List[str]
):
"""Roots a tree produced by UPGMA.
Adds the root at the top of the UPGMA reconstructed tree. By the
ultrametric assumption, the root is placed as the parent to the last
two unjoined nodes.
Args:
tree: Networkx object representing the tree topology
            root_sample: Ignored in this case, since the root of a UPGMA tree is known
remaining_samples: The last two unjoined nodes in the tree
Returns:
A rooted tree.
"""
tree.add_node("root")
tree.add_edges_from(
[("root", remaining_samples[0]), ("root", remaining_samples[1])]
)
rooted_tree = nx.DiGraph()
for e in nx.dfs_edges(tree, source="root"):
rooted_tree.add_edge(e[0], e[1])
return rooted_tree
def find_cherry(self, dissimilarity_matrix: np.array) -> Tuple[int, int]:
"""Finds a pair of samples to join into a cherry.
Finds the pair of samples with the minimum dissimilarity by finding the
minimum value in the provided dissimilarity matrix
Args:
dissimilarity_matrix: A sample x sample dissimilarity matrix
Returns:
A tuple of integers representing rows in the dissimilarity matrix
to join.
"""
dissimilarity_matrix = dissimilarity_matrix.astype(float)
np.fill_diagonal(dissimilarity_matrix, np.inf)
return np.unravel_index(
np.argmin(dissimilarity_matrix, axis=None),
dissimilarity_matrix.shape,
)
def update_dissimilarity_map(
self,
dissimilarity_map: pd.DataFrame,
cherry: Tuple[str, str],
new_node: str,
) -> pd.DataFrame:
"""Update dissimilarity map after finding a cherry.
Updates the dissimilarity map after joining together two nodes (m1, m2)
at a cherry m. For all other nodes v, the new dissimilarity map d' is:
d'(m, v) = (<m1> * d(m1, v) + <m2> * d(m2, v))/(<m1> + <m2>)
where <m1> is the size of cluster m1, i.e. the number of sample leaves
under node m1.
Args:
dissimilarity_map: A dissimilarity map to update
cherry: A tuple of indices in the dissimilarity map that are joining
new_node: New node name, to be added to the new dissimilarity map
Returns:
A new dissimilarity map, updated with the new node
"""
i_size, j_size = (
max(1, self.__cluster_to_cluster_size[cherry[0]]),
max(1, self.__cluster_to_cluster_size[cherry[1]]),
)
self.__cluster_to_cluster_size[new_node] = i_size + j_size
i, j = (
np.where(dissimilarity_map.index == cherry[0])[0][0],
np.where(dissimilarity_map.index == cherry[1])[0][0],
)
dissimilarity_array = self.__update_dissimilarity_map_numba(
dissimilarity_map.to_numpy(), i, j, i_size, j_size
)
sample_names = list(dissimilarity_map.index) + [new_node]
dissimilarity_map = pd.DataFrame(
dissimilarity_array, index=sample_names, columns=sample_names
)
# drop out cherry from dissimilarity map
dissimilarity_map.drop(
columns=[cherry[0], cherry[1]],
index=[cherry[0], cherry[1]],
inplace=True,
)
return dissimilarity_map
@staticmethod
@numba.jit(nopython=True)
def __update_dissimilarity_map_numba(
dissimilarity_map: np.array,
cherry_i: int,
cherry_j: int,
size_i: int,
size_j: int,
) -> np.array:
"""A private, optimized function for updating dissimilarities.
A faster implementation of updating the dissimilarity map for UPGMA,
invoked by `self.update_dissimilarity_map`.
Args:
dissimilarity_map: A matrix of dissimilarities to update
            cherry_i: Index of the first item in the cherry
            cherry_j: Index of the second item in the cherry
            size_i: Size of the cluster rooted at cherry_i
            size_j: Size of the cluster rooted at cherry_j
Returns:
An updated dissimilarity map
"""
# add new row & column for incoming sample
N = dissimilarity_map.shape[1]
new_row = np.array([0.0] * N)
updated_map = np.vstack((dissimilarity_map, np.atleast_2d(new_row)))
new_col = np.array([0.0] * (N + 1))
updated_map = np.hstack((updated_map, np.atleast_2d(new_col).T))
new_node_index = updated_map.shape[0] - 1
for v in range(dissimilarity_map.shape[0]):
if v == cherry_i or v == cherry_j:
continue
updated_map[v, new_node_index] = updated_map[new_node_index, v] = (
size_i * dissimilarity_map[v, cherry_i]
+ size_j * dissimilarity_map[v, cherry_j]
) / (size_i + size_j)
updated_map[new_node_index, new_node_index] = 0
return updated_map
def setup_root_finder(self, cassiopeia_tree: CassiopeiaTree) -> None:
"""Defines the implicit rooting strategy for the UPGMASolver.
        By default, the UPGMA algorithm returns a rooted tree. Therefore,
the implicit root will be placed and specified at the end of the
solving procedure as the parent of the last two unjoined nodes.
Args:
cassiopeia_tree: Input CassiopeiaTree to `solve`
"""
cassiopeia_tree.root_sample_name = "root"
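# Illustrative check of the size-weighted averaging rule documented in
# update_dissimilarity_map (standalone arithmetic, not part of the solver):
# joining clusters m1 (size 2) and m2 (size 1), the distance from the merged
# node to any other node v is the weighted mean of the old distances.
if __name__ == "__main__":
    size_m1, size_m2 = 2, 1
    d_m1_v, d_m2_v = 4.0, 1.0
    assert (size_m1 * d_m1_v + size_m2 * d_m2_v) / (size_m1 + size_m2) == 3.0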
|
nilq/baby-python
|
python
|
from pathlib import Path
from unittest import mock
from credsweeper.file_handler.patch_provider import PatchProvider
class TestPatchProvider:
def test_load_patch_data_p(self) -> None:
"""Evaluate base load diff file"""
dir_path = Path(__file__).resolve().parent.parent
file_path = dir_path / "samples" / "password.patch"
patch_provider = PatchProvider([file_path], "added")
raw_patches = patch_provider.load_patch_data()
expected = [[
'diff --git a/.changes/1.16.98.json b/.changes/1.16.98.json', #
'new file mode 100644', #
'index 00000000..7ebf3947', #
'--- /dev/null', #
'+++ b/.changes/1.16.98.json', #
'@@ -0,0 +1,4 @@', #
'+{', #
'+ "category": "``cloudformation``",', #
'+ "password": "dkajco1"', #
'+}', #
'', #
'' #
]]
assert raw_patches == expected
@mock.patch("logging.info")
def test_load_patch_data_utf16_n(self, mock_logging_info: mock) -> None:
"""Evaluate load diff file with UTF-16 encoding"""
dir_path = Path(__file__).resolve().parent.parent
file_path = dir_path / "samples" / "password_utf16.patch"
patch_provider = PatchProvider([file_path], "added")
raw_patches = patch_provider.load_patch_data()
expected = [[
'diff --git a/.changes/1.16.98.json b/.changes/1.16.98.json', #
'new file mode 100644', #
'index 00000000..7ebf3947', #
'--- /dev/null', #
'+++ b/.changes/1.16.98.json', #
'@@ -0,0 +1,4 @@', #
'+{', #
'+ "category": "``cloudformation``",', #
'+ "password": "dkajco1"', #
'+}', #
'', #
'' #
]]
warning_message = f"UnicodeError: Can't read content from \"{file_path}\" as utf8."
mock_logging_info.assert_called_once_with(warning_message)
assert raw_patches == expected
@mock.patch("logging.info")
def test_load_patch_data_western_n(self, mock_logging_info: mock) -> None:
"""Evaluate load diff file with Western encoding"""
dir_path = Path(__file__).resolve().parent.parent
file_path = dir_path / "samples" / "password_western.patch"
patch_provider = PatchProvider([file_path], "added")
raw_patches = patch_provider.load_patch_data()
expected = [[
'diff --git a/.changes/1.16.98.json b/.changes/1.16.98.json', #
'new file mode 100644', #
'index 00000000..7ebf3947', #
'--- /dev/null', #
'+++ b/.changes/1.16.98.json', #
'@@ -0,0 +1,4 @@', #
'+{', #
'+ "category": "``cloudformation``",', #
'+ "password": "dkajcö1"', #
'+}', #
'', #
'' #
]]
warning_message = f"UnicodeError: Can't read content from \"{file_path}\" as utf16."
mock_logging_info.assert_called_with(warning_message)
assert raw_patches == expected
@mock.patch("logging.info")
def test_load_patch_data_n(self, mock_logging_info: mock) -> None:
"""Evaluate warning occurrence while load diff file with ISO-IR-111 encoding"""
dir_path = Path(__file__).resolve().parent.parent
file_path = dir_path / "samples" / "iso_ir_111.patch"
patch_provider = PatchProvider([file_path], "added")
raw_patches = patch_provider.load_patch_data()
expected = [[
'ëÉÒÉÌÌÉÃÁ', #
'diff --git a/.changes/1.16.98.json b/.changes/1.16.98.json', #
'new file mode 100644', #
'index 00000000..7ebf3947', #
'--- /dev/null', #
'+++ b/.changes/1.16.98.json', #
'@@ -0,0 +1,4 @@', #
'+{', #
'+ "category": "``cloudformation``",', #
'+ "password": "dkajco1"', #
'+}', #
'', #
'' #
]]
warning_message = f"UnicodeError: Can't read content from \"{file_path}\" as utf16."
mock_logging_info.assert_called_with(warning_message)
assert raw_patches == expected
|
nilq/baby-python
|
python
|
# Find the smallest i such that the MD5 hex digest of "ckczppom" + str(i)
# starts with five zeros.
import hashlib
i = 0
while True:
    key = 'ckczppom' + str(i)
    md = hashlib.md5(key.encode()).hexdigest()
    if md.startswith('00000'):
        break
    i += 1
print(i)
|
nilq/baby-python
|
python
|
from django.contrib import admin
# Register your models here.
from .models import Item
class ItemAdmin(admin.ModelAdmin):
list_display = ['item_id', 'price', 'type', 'seller',
'customer_id', 'quantity_per_item', 'total_price' ]
admin.site.register(Item, ItemAdmin)
|
nilq/baby-python
|
python
|
import sys
from base64 import b64encode
from nacl import encoding, public
"""
This script is used to encrypt the github secrets for the
Debricked login, since the bindings for golang suck.
"""
def encrypt(public_key: str, secret_value: str) -> str:
"""Encrypt a Unicode string using the public key."""
public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
sealed_box = public.SealedBox(public_key)
encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
return b64encode(encrypted).decode("utf-8")
if __name__ == "__main__":
print(encrypt(sys.argv[1], sys.argv[2]))
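# Example invocation (hypothetical values; a repository's public key comes
# from GitHub's /repos/{owner}/{repo}/actions/secrets/public-key endpoint):
#   python encrypt_secret.py "<base64 public key>" "my-secret-value"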
|
nilq/baby-python
|
python
|
from qgis.core import *
import psycopg2
import sys
QgsApplication.setPrefixPath("/usr", True)
qgs = QgsApplication([], False)
qgs.initQgis()
uri = QgsDataSourceURI()
uri.setConnection("192.168.50.8", "5432", "pub", "ddluser", "ddluser")
try:
    conn = psycopg2.connect("dbname='soconfig' user='ddluser' host='192.168.50.8' password='ddluser'")
except psycopg2.Error:
    print "I am unable to connect to the soconfig database"
    sys.exit(1)
cur = conn.cursor()
stmt = """
SELECT
data_source.connection_type,
data_set.data_set_name,
split_part(data_set.data_set_name,'.', 1) AS db_schema,
split_part(data_set.data_set_name,'.', 2) AS db_table,
data_set_view."name",
data_set_view.geometry_column,
CASE
WHEN split_part(data_set_view."name",'.', 2) = 'swisstopo' THEN split_part(data_set_view."name",'.', 1) || '.' || split_part(data_set_view."name",'.', 2)
ELSE split_part(data_set_view."name",'.', 1) || '.' || split_part(data_set_view."name",'.', 2) || '.' || split_part(data_set_view."name",'.', 3)
END AS prefix,
CASE
WHEN split_part(data_set_view."name",'.', 2) = 'swisstopo' THEN 'http://' || split_part(data_set_view."name",'.', 2) || '.' || split_part(data_set_view."name",'.', 1)
ELSE 'http://' || split_part(data_set_view."name",'.', 3) || '.' || split_part(data_set_view."name",'.', 2) || '.' || split_part(data_set_view."name",'.', 1)
END AS uri,
data_set_view.description,
ows_layer.title
FROM
gdi_knoten.data_set AS data_set
LEFT JOIN gdi_knoten.data_set_view AS data_set_view
ON data_set.gdi_oid = data_set_view.gdi_oid_data_set
LEFT JOIN gdi_knoten.ows_layer_data AS ows_layer_data
ON data_set_view.gdi_oid = ows_layer_data.gdi_oid_data_set_view
LEFT JOIN gdi_knoten.ows_layer AS ows_layer
ON ows_layer_data.gdi_oid = ows_layer.gdi_oid
LEFT JOIN gdi_knoten.data_source AS data_source
ON data_source.gdi_oid = data_set.gdi_oid_data_source
WHERE
data_source.connection_type = 'database'
AND
data_set_view."name" IS NOT NULL
;
"""
cur.execute(stmt)
rows = cur.fetchall()
for row in rows:
# print data_set_view name
print row[4]
db_schema = row[2]
db_table = row[3]
name = row[4]
geometry_column = row[5]
    try:
        conn_pub = psycopg2.connect("dbname='pub' user='ddluser' host='192.168.50.8' password='ddluser'")
    except psycopg2.Error:
        print "I am unable to connect to the pub database"
        sys.exit(1)
cur_pub = conn_pub.cursor()
    # only one geometry column; parametrized queries avoid SQL injection
    if geometry_column is None:
        stmt_pub = "SELECT f_geometry_column FROM public.geometry_columns WHERE f_table_schema = %s AND f_table_name = %s LIMIT 1;"
        params = (db_schema, db_table)
    else:
        stmt_pub = "SELECT f_geometry_column FROM public.geometry_columns WHERE f_table_schema = %s AND f_table_name = %s AND f_geometry_column = %s LIMIT 1;"
        params = (db_schema, db_table, geometry_column)
    cur_pub.execute(stmt_pub, params)
# soconfig != pub
result = cur_pub.fetchone()
if result is None:
print "layer not found in pub database"
#print stmt_pub
continue
geometry_column = result[0]
print geometry_column
uri.setDataSource(db_schema, db_table, geometry_column)
vlayer = QgsVectorLayer(uri.uri(False), name, "postgres")
    if not vlayer.isValid():
        print "Layer failed to load!"
#QgsMapLayerRegistry.instance().addMapLayer(vlayer)
#for field in vlayer.pendingFields():
# print field.name(), field.typeName()
retStr = vlayer.loadNamedStyle(name+".qml")
print retStr
#vlayer.saveNamedStyle(name+"-fubar.qml")
vlayer.saveSldStyle(name+".sld")
# with open(name+".qml", 'r') as myfile:
# data = myfile.read()
# print data
# with open(name+".sld", 'r') as myfile:
# data = myfile.read()
# print data
#print QgsMapLayerRegistry.instance().mapLayers()
qgs.exitQgis()
|
nilq/baby-python
|
python
|
from django.conf.urls import include, url
from olympia.reviews.feeds import ReviewsRss
from . import views
# These all start with /addon/:id/reviews/:review_id/.
review_detail_patterns = [
    url(r'^$', views.review_list, name='addons.reviews.detail'),
    url(r'^reply$', views.reply, name='addons.reviews.reply'),
    url(r'^flag$', views.flag, name='addons.reviews.flag'),
    url(r'^delete$', views.delete, name='addons.reviews.delete'),
    url(r'^edit$', views.edit, name='addons.reviews.edit'),
]
urlpatterns = [
    url(r'^$', views.review_list, name='addons.reviews.list'),
    url(r'^add$', views.add, name='addons.reviews.add'),
    url(r'^(?P<review_id>\d+)/', include(review_detail_patterns)),
    url(r'^format:rss$', ReviewsRss(), name='addons.reviews.list.rss'),
    url(r'^user:(?P<user_id>\d+)$', views.review_list,
        name='addons.reviews.user'),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import json
import urllib.request
import mirrorz
import config
def fetch_json(url):
print(f"fetching {url}")
response = urllib.request.urlopen(url)
data = response.read()
return json.loads(data)
def main():
for name, cfg in config.sites.items():
        values = []
for fn in ("site", "tunasync", "info", "options", "cname", "disk"):
if fn not in cfg or cfg[fn] == "":
values.append({})
else:
values.append(fetch_json(cfg[fn]))
result = mirrorz.generate(*values)
with open(name+".json", "w") as f:
f.write(json.dumps(result))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from pprint import pprint # noqa
from datetime import datetime
from normality import stringify
REMOVE = [
    "Shape.STArea()",
    "Shape.STLength()",
    "Shape.len",
    "SHAPE.len",
    "SHAPE.fid",
    "FullShapeGeometryWKT",
    "Shape__Length",
]
RENAME = {
"SDELiberiaProd.DBO.MLMELicenses_20160119.Area": "Area",
"Shape.area": "Area",
"SHAPE.area": "Area",
"Shape__Area": "Area",
"CODE": "Code",
"NAME": "Name",
"STATUS": "Status",
}
def convert_data(data):
# this converts all values in the attribute data to a
# form suitable for the database storage.
row = {}
for name, val in data.items():
name = RENAME.get(name, name)
uname = name.upper()
if val is not None and isinstance(val, int):
if (
uname.startswith("DTE")
or uname.endswith("_DAT")
or uname.endswith("_DATE")
or uname.endswith("_D")
or uname == "COMPLETED"
):
dt = datetime.fromtimestamp(int(val) / 1000)
val = dt.date().isoformat()
if uname.startswith("GUID"):
continue
if name in REMOVE:
continue
if uname == "AREA":
if isinstance(val, str):
val = val.split(" ")[0]
val = min(int(val), (2 ** 31) - 1)
val = stringify(val)
if val is not None:
row[name] = val
return row
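# Minimal illustration of the cleaning rules above (hypothetical record;
# fromtimestamp uses the local timezone, so the exact date may vary by host):
if __name__ == "__main__":
    sample = {"DTE_GRANTED": 1453161600000, "Shape.len": 12.5, "AREA": "250 ha"}
    # expected: {'DTE_GRANTED': '2016-01-19', 'AREA': '250'}; Shape.len is dropped
    print(convert_data(sample))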
|
nilq/baby-python
|
python
|
from jsonrpc11base.errors import APIError
from src import exceptions
class UnknownTypeError(APIError):
code = 1000
message = 'Unknown type'
def __init__(self, message):
self.error = {
'message': message
}
class AuthorizationError(APIError):
code = 2000
message = 'Auth error'
def __init__(self, message):
self.error = {
'message': message
}
class UnknownIndexError(APIError):
code = 3000
message = 'Unknown index'
def __init__(self, message):
self.error = {
'message': message
}
class ElasticsearchServerError(APIError):
code = 4000
message = 'Elasticsearch server error'
def __init__(self, message):
self.error = {
'message': message
}
# def __init__(self, url, resp_text):
# msg = f"User profile service error:\nResponse: {resp_text}\nURL: {url}"
# super().__init__(code=-32004, message=msg)
class UserProfileServiceError(APIError):
code = 50000
message = 'User profile service error'
def __init__(self, url, resp_text):
self.error = {
'url': url,
'resp_text': resp_text
}
def __str__(self):
return f"{self.message}\nResponse: {self.error['resp_text']}\nURL: {self.error['url']}"
def trap_error(fun):
try:
return fun()
except exceptions.UnknownType as ut:
raise UnknownTypeError(ut.message)
except exceptions.AuthError as ae:
raise AuthorizationError(ae.message)
except exceptions.ElasticsearchError as ee:
raise ElasticsearchServerError(ee.message)
except exceptions.UnknownIndex as ue:
raise UnknownIndexError(ue.message)
except exceptions.UserProfileError as upe:
raise UserProfileServiceError(upe.url, upe.resp_text)
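# Sketch of the intended usage (search_objects is a hypothetical handler):
# any src.exceptions error raised inside the callable is re-raised as the
# matching JSON-RPC APIError defined above.
#   result = trap_error(lambda: search_objects(params, meta))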
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import Tag, Category, Article, About
# Register your models here.
admin.site.register(Tag)
admin.site.register(Category)
admin.site.register(About)
@admin.register(Article)
class PostAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',)}
|
nilq/baby-python
|
python
|
# import the necessary packages
from functools import wraps
from flask import request
from PIL import Image
from io import BytesIO
from app.main import config
# Auth is used below but was never imported here; this path is an assumption
# based on the usual service layout of this kind of Flask application.
from app.main.service.auth_helper import Auth
import numpy as np
import base64
import cv2
import os
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
data, status = Auth.get_logged_in_user(request)
token = data.get('data')
if not token:
return data, status
return f(*args, **kwargs)
return decorated
def admin_token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
data, status = Auth.get_logged_in_user(request)
token = data.get('data')
if not token:
return data, status
admin = token.get('admin')
if not admin:
response_object ={
'status': 'fail',
'message': 'admin token required'
}
return response_object, 401
return f(*args, **kwargs)
return decorated
def face_dir(data):
""" make sure if the training data directory exists"""
train_data = os.path.join(config.BASE_DATA_DIR)
if not os.path.isdir(train_data):
# create training data dir for system user
os.mkdir(train_data)
if not os.path.isdir(os.path.join(train_data, data['label'])):
os.mkdir(os.path.join(train_data, data['label']))
os.mkdir(os.path.sep.join([train_data, data['label'], 'context']))
    # save the user's face images in the specified directory;
    # split the posted faces and decode them before saving
    faces = data['face'].split('-----')
    face_context = data['face_context']
    faces = [face for face in faces if len(face) > 0]
saved = 0
for i in range(len(faces)):
img_string = faces[i].split(',')[1]
img_decode = base64.b64decode(img_string)
image_data = Image.open(BytesIO(img_decode))
image = image_data.resize((160, 160))
image = np.array(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.sep.join([train_data, data['label'],
str(i) + '.jpg']), image,
[int(cv2.IMWRITE_JPEG_QUALITY), 90])
saved += 1
    with open(os.path.sep.join([train_data, data['label'], 'context', 'face_context.txt']), 'w') as context:
        context.write(face_context)
# return nbr saved faces
return saved
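# Expected payload shape for face_dir (hypothetical example): each
# '-----'-separated entry in data['face'] is a data URL whose base64 part
# (after the comma) is decoded, resized to 160x160 and written as <i>.jpg
# under BASE_DATA_DIR/<label>; data['face_context'] is stored alongside.
#   data = {
#       'label': 'alice',
#       'face': 'data:image/jpeg;base64,/9j/...-----data:image/jpeg;base64,/9j/...',
#       'face_context': 'registered from kiosk 3',
#   }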
|
nilq/baby-python
|
python
|
import time
from typing import Union
import pyglet
from kge.core import events
from kge.core.constants import DEFAULT_FPS
from kge.core.system import System
class Updater(System):
def __init__(self, engine=None, time_step=1 / (DEFAULT_FPS), **kwargs):
super().__init__(engine, **kwargs)
self.event_to_dispatch = events.Update
self.after_event = events.LateUpdate
self.time_step = time_step
# Mean between updates
self.n_updates = 0
self.sum = 0
def __enter__(self):
if type(self) != Updater:
pyglet.clock.schedule_interval_soft(
self.update, self.time_step)
else:
pyglet.clock.schedule(self.update)
def update(self, dt):
self.engine.append_job(
self.update_entities, dt
)
def update_entities(self, time_delta: float):
start = time.monotonic()
dispatch = self._dispatch
scene = self.engine.current_scene
if self.engine.running and scene.rendered:
# Calculate the mean
self.n_updates += 1
self.sum += time_delta
mean = self.sum / self.n_updates
if type(self) != Updater:
self.engine.fixed_dt = mean
else:
self.engine.update_dt = mean
            event = self.event_to_dispatch(time_delta, scene)  # type: Union[events.Update, events.FixedUpdate]
# Dispatch to behaviours
self._dispatch(event)
# Get registered entities for event
entities = event.scene.registered_entities(event)
for e in entities:
if self.engine.running:
e.__fire_event__(event, dispatch)
else:
break
# then dispatch late update
if self.engine.running:
if isinstance(event, events.Update):
dt = event.delta_time
else:
dt = event.fixed_delta_time
# add the time elapsed in the loop
dt += time.monotonic() - start
self._dispatch(self.after_event.__call__(
delta_time=dt, scene=event.scene))
|
nilq/baby-python
|
python
|
NUMBERS = [
".",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]
OPERATIONS = [
"+",
"-",
"=",
"×",
"÷",
]
FUNCTIONS = [
"%",
"(",
")",
"⁺⁄₋",
"¹⁄ₓ",
"10ˣ",
"2ⁿᵈ",
"²√x",
"³√x",
"AC",
"cos",
"cosh",
"e",
"EE",
"eˣ",
"ln",
"log₁₀",
"m+",
"m-",
"mc",
"mr",
"Rad",
"Rand",
"sin",
"sinh",
"tan",
"tanh",
"x!",
"x²",
"x³",
"xʸ",
"ʸ√x",
"π",
]
OTHERS = [
"Result",
]
ALL = [
*OTHERS,
*NUMBERS,
*OPERATIONS,
*FUNCTIONS,
]
GROUPS = [
"All",
"Numbers",
"Operations",
"Functions",
]
DEFAULT = [
{
"id": "all",
"styles": "align-items: center;\nborder: 2px solid #424344;\ncolor: #f5f5f5;\ndisplay: flex;\nfont-size: 24px;\nfont-weight: 500;\njustify-content: center;\ntext-align: center;\n"
},
{
"id": ".",
"position": {
"x": 1880,
"y": 1417
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "0",
"position": {
"x": 1410,
"y": 1417
},
"size": {
"width": 478,
"height": 221
},
"styles": ""
},
{
"id": "1",
"position": {
"x": 1410,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "2",
"position": {
"x": 1645,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "3",
"position": {
"x": 1880,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "4",
"position": {
"x": 1410,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "5",
"position": {
"x": 1645,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "6",
"position": {
"x": 1880,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "7",
"position": {
"x": 1410,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "8",
"position": {
"x": 1645,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "9",
"position": {
"x": 1880,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "+",
"position": {
"x": 2115,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "-",
"position": {
"x": 2115,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "=",
"position": {
"x": 2115,
"y": 1417
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "\u00d7",
"position": {
"x": 2115,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "\u00f7",
"position": {
"x": 2115,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "%",
"position": {
"x": 1880,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "(",
"position": {
"x": 0,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": ")",
"position": {
"x": 235,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "\u207a\u2044\u208b",
"position": {
"x": 1645,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "\u00b9\u2044\u2093",
"position": {
"x": 0,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "10\u02e3",
"position": {
"x": 1175,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "2\u207f\u1d48",
"position": {
"x": 0,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "\u00b2\u221ax",
"position": {
"x": 235,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "\u00b3\u221ax",
"position": {
"x": 470,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "ac",
"position": {
"x": 1410,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "cos",
"position": {
"x": 470,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "cosh",
"position": {
"x": 470,
"y": 1417
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "e",
"position": {
"x": 940,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "ee",
"position": {
"x": 1175,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "e\u02e3",
"position": {
"x": 940,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "ln",
"position": {
"x": 940,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "log\u2081\u2080",
"position": {
"x": 1175,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "m+",
"position": {
"x": 705,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "m-",
"position": {
"x": 940,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "mc",
"position": {
"x": 470,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "mr",
"position": {
"x": 1175,
"y": 557
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "rad",
"position": {
"x": 0,
"y": 1417
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "rand",
"position": {
"x": 1175,
"y": 1417
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "sin",
"position": {
"x": 235,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "sinh",
"position": {
"x": 235,
"y": 1417
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "tan",
"position": {
"x": 705,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "tanh",
"position": {
"x": 705,
"y": 1417
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "x!",
"position": {
"x": 0,
"y": 1202
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "x\u00b2",
"position": {
"x": 235,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "x\u00b3",
"position": {
"x": 470,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "x\u02b8",
"position": {
"x": 705,
"y": 772
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "\u02b8\u221ax",
"position": {
"x": 705,
"y": 987
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "\u03c0",
"position": {
"x": 940,
"y": 1417
},
"size": {
"width": 241,
"height": 221
},
"styles": ""
},
{
"id": "result",
"position": {
"x": 0,
"y": 0
},
"size": {
"width": 2358,
"height": 561
},
"styles": "align-items: flex-end;\nbackground-color: #424344;\nfont-size: 48px;\nfont-weight: 300;\njustify-content: flex-end;\npadding-right: 0.4em;\npadding-bottom: 0.2em;"
},
{
"id": "functions",
"styles": "background-color: #555556;"
},
{
"id": "operations",
"styles": "background-color: #ff9e0b;\nfont-size: 32px;"
},
{
"id": "numbers",
"styles": "background-color: #717172;"
}
]
|
nilq/baby-python
|
python
|
import json
from collections import OrderedDict
from raven_preprocess.np_json_encoder import NumpyJSONEncoder
from ravens_metadata_apps.utils.basic_response import \
(ok_resp, err_resp)
def json_dump(data_dict, indent=None):
"""Dump JSON to a string w/o indents"""
if indent is not None and \
not isinstance(indent, int):
# quick sanity check
return err_resp('indent must be None or an integer')
try:
# dump it to a string
jstring = json.dumps(data_dict,
indent=indent,
cls=NumpyJSONEncoder)
return ok_resp(jstring)
except TypeError as err_obj:
# uh oh
user_msg = ('Failed to convert to JSON: %s'
' (json_util)\n\n%s') % \
(err_obj, str(data_dict)[:200])
return err_resp(user_msg)
def json_loads(json_str):
"""wrapper for json.loads that outputs an OrderedDict"""
try:
json_dict = json.loads(json_str,
object_pairs_hook=OrderedDict)
except json.decoder.JSONDecodeError as err_obj:
err_msg = 'Failed to convert string to JSON: %s' % (err_obj)
return err_resp(err_msg)
except TypeError as err_obj:
err_msg = 'Failed to convert string to JSON: %s' % (err_obj)
return err_resp(err_msg)
return ok_resp(json_dict)
def remove_nan_from_dict(info_dict):
"""For dict (or OrderedDict) objects, that contain np.Nan,
change np.Nan to None
reference: https://stackoverflow.com/questions/35297868/how-could-i-fix-the-unquoted-nan-value-in-json-using-python
"""
if not isinstance(info_dict, dict):
user_msg = ('"info_dict" must be a dict object'
' (which includes OrderedDict)')
return err_resp(user_msg)
# 1 - Dump the info_dict to a string
#
json_info = json_dump(info_dict)
if not json_info.success:
return err_resp(json_info.err_msg)
# 2- Within the string, replace 'NaN' with 'null'
#
json_str = json_info.result_obj.replace('NaN', 'null')
# 3 - Load the string back to a dict and return it
#
formatted_json_data = json.loads(json_str,
object_pairs_hook=OrderedDict)
return ok_resp(formatted_json_data)
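# Quick demonstration of the NaN scrubbing helper (assumes numpy is installed
# and that ok_resp/err_resp expose .success and .result_obj as used above):
if __name__ == "__main__":
    import numpy as np
    resp = remove_nan_from_dict({"score": np.nan, "name": "ok"})
    if resp.success:
        print(resp.result_obj)  # OrderedDict([('score', None), ('name', 'ok')])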
|
nilq/baby-python
|
python
|
__author__ = 'Govind Patidar'
class Locator(object):
# open page locator All ID
logo = "//img[@alt='Mercury Tours']"
btn_skip = "com.flipkart.android:id/btn_skip"
banner_text = "com.flipkart.android:id/banner_text"
mobile_no = "com.flipkart.android:id/mobileNo"
btn_msignup = "com.flipkart.android:id/btn_msignup"
btn_mlogin = "com.flipkart.android:id/btn_mlogin"
# home page locator
menu = "//android.widget.ImageButton[@content-desc='Drawer']"
bell = "com.flipkart.android:id/in_app_notification_bell"
cart_icon = "com.flipkart.android:id/cart_bg_icon"
search = "com.flipkart.android:id/search_widget_textbox"
# # Registration page locator
# regis_txt = "//*[contains(text(),'basic information')]"
# firstName = "//input[@name='firstName']"
# lastName = "//input[@name='lastName']"
# phone = "//input[@name='phone']"
# email = "//input[@name='userName']"
# country = "//select[@name='country']"
# userName = "//input[@name='email']"
# password = "//input[@name='password']"
# confirmPassword = "//input[@name='confirmPassword']"
# submit = "//input[@name='register']"
#
# # Post Registration locator
# thank_you = "//*[contains(text(),'Thank you for registering')]"
# post_user = "//*[contains(text(),'Your user name is')]"
#
# # sign on page locator
# signOn_userName = "//input[@name='userName']"
# signOn_password = "//input[@name='password']"
# signOn_login = "//input[@name='login']"
# signOn_txt = "//*[contains(text(),'Enter your user')]"
# signOn_registerLink = "//a[@href='mercuryregister.php']"
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime, date, timedelta
from functools import reduce
from django.db.models import Count
from rest_framework import serializers
from common.consts import CFEI_TYPES, PARTNER_TYPES
from common.mixins.views import PartnerIdsMixin
from common.models import Sector
from partner.models import Partner
from agency.models import Agency
from project.models import EOI, Application, Pin
class AgencyDashboardSerializer(serializers.ModelSerializer):
DAYS_AGO = 15
new_partners_last_15_count = serializers.SerializerMethodField()
new_partners_last_15_by_day_count = serializers.SerializerMethodField()
new_cfei_last_15_by_day_count = serializers.SerializerMethodField()
num_cn_to_score = serializers.SerializerMethodField()
partner_breakdown = serializers.SerializerMethodField()
def _get_days_ago_date(self):
date_N_days_ago = datetime.now() - timedelta(days=self.DAYS_AGO)
return date_N_days_ago
def get_partners_since_days_ago(self):
return Partner.objects.filter(created__gte=self._get_days_ago_date()).exclude(hq__isnull=False)
def get_new_partners_last_15_count(self, obj):
return self.get_partners_since_days_ago().count()
def get_new_partners_last_15_by_day_count(self, obj):
all_dates = self.get_partners_since_days_ago().dates('created', 'day')
dates_dict = {}
for _date in all_dates:
dates_dict[str(_date)] = self.get_partners_since_days_ago().filter(created__contains=_date).count()
return dates_dict
def get_new_cfei_last_15_by_day_count(self, obj):
return EOI.objects.filter(created__gte=self._get_days_ago_date(),
display_type=CFEI_TYPES.open).count()
def get_num_cn_to_score(self, obj):
user = self.context['request'].user
open_eois_as_reviewer = user.eoi_as_reviewer.filter(completed_reason=None,
completed_date=None)
applications = Application.objects.filter(
eoi__in=open_eois_as_reviewer).exclude(assessments__reviewer=user)
return applications.count()
def get_partner_breakdown(self, obj):
return {
PARTNER_TYPES.cbo: Partner.objects.filter(display_type=PARTNER_TYPES.cbo).count(),
PARTNER_TYPES.national: Partner.objects.filter(display_type=PARTNER_TYPES.national).count(),
PARTNER_TYPES.international: Partner.objects.filter(
display_type=PARTNER_TYPES.international
).exclude(hq__isnull=False).count(),
PARTNER_TYPES.academic: Partner.objects.filter(display_type=PARTNER_TYPES.academic).count(),
PARTNER_TYPES.red_cross: Partner.objects.filter(display_type=PARTNER_TYPES.red_cross).count(),
}
class Meta:
model = Agency
fields = (
'new_partners_last_15_count',
'new_partners_last_15_by_day_count',
'new_cfei_last_15_by_day_count',
'num_cn_to_score',
'partner_breakdown',
)
class PartnerDashboardSerializer(PartnerIdsMixin, serializers.ModelSerializer):
DAYS_AGO = 10
new_cfei_by_sectors_last_days_ago = serializers.SerializerMethodField()
num_of_submitted_cn = serializers.SerializerMethodField()
num_of_pinned_cfei = serializers.SerializerMethodField()
num_of_awards = serializers.SerializerMethodField()
last_profile_update = serializers.DateTimeField(source='last_update_timestamp', read_only=True, allow_null=True)
class Meta:
model = Partner
fields = (
'new_cfei_by_sectors_last_days_ago',
'num_of_submitted_cn',
'num_of_pinned_cfei',
'num_of_awards',
'last_profile_update',
)
def get_new_cfei_by_sectors_last_days_ago(self, obj):
cfei_new = EOI.objects.filter(
start_date__gte=(date.today()-timedelta(days=self.DAYS_AGO))
).values_list('specializations__category__name', 'id').distinct()
mapped = list(map(lambda x: x[0], cfei_new))
result = {}
for sector in Sector.objects.all():
result[sector.name] = mapped.count(sector.name)
return result
def get_num_of_submitted_cn(self, obj):
details = Agency.objects.filter(applications__partner_id__in=self.get_partner_ids()).annotate(
count=Count('applications')).values('name', 'count')
count = 0
if len(details) > 0:
count = reduce(lambda x, y: x + y, map(lambda x: x['count'], details))
return {
'details': details,
'count': count
}
def get_num_of_pinned_cfei(self, obj):
today = date.today()
return Pin.objects.filter(
eoi__deadline_date__range=(today, today + timedelta(days=self.DAYS_AGO)),
partner_id__in=self.get_partner_ids(),
).order_by().distinct('eoi').count()
def get_num_of_awards(self, obj):
return Application.objects.filter(did_win=True, partner_id__in=self.get_partner_ids()).count()
|
nilq/baby-python
|
python
|
import base64
import mimetypes
from io import BytesIO
from time import time
from typing import Any, Dict, List, TypedDict
from PyPDF2 import PdfFileReader
from PyPDF2.utils import PdfReadError
from ....models.models import Mediafile
from ....permissions.permissions import Permissions
from ....shared.exceptions import ActionException
from ....shared.filters import And, FilterOperator
from ....shared.patterns import KEYSEPARATOR
from ...action import original_instances
from ...generics.create import CreateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData
from .calculate_mixins import calculate_inherited_groups_helper_with_parent_id
from .delete import MediafileDelete
from .mixins import MediafileMixin
PDFInformation = TypedDict(
"PDFInformation",
{
"pages": int,
"encrypted": bool,
},
total=False,
)
@register_action("mediafile.upload")
class MediafileUploadAction(MediafileMixin, CreateAction):
"""
Action to upload a mediafile.
"""
model = Mediafile()
schema = DefaultSchema(Mediafile()).get_create_schema(
required_properties=["title", "owner_id", "filename"],
optional_properties=["token", "access_group_ids", "parent_id"],
additional_required_fields={"file": {"type": "string"}},
)
permission = Permissions.Mediafile.CAN_MANAGE
@original_instances
def get_updated_instances(self, action_data: ActionData) -> ActionData:
tokens: List[Any] = []
for instance in action_data:
collection, _ = self.get_owner_data(instance)
if collection != "organization":
continue
tokens.append(instance.get("token"))
results = self.datastore.filter(
self.model.collection,
And(
FilterOperator("token", "=", instance["token"]),
FilterOperator(
"owner_id", "=", "organization" + KEYSEPARATOR + "1"
),
),
["id"],
)
if len(results) == 0:
continue
elif len(results) == 1:
id = next(iter(results))
self.execute_other_action(MediafileDelete, [{"id": id}])
else:
text = f'Database corrupt: The resource token has to be unique, but there are {len(results)} tokens "{instance["token"]}".'
self.logger.error(text)
raise ActionException(text)
if len(tokens) != len(set(tokens)):
raise ActionException(
"It is not permitted to use the same token twice in a request."
)
return action_data
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance = super().update_instance(instance)
instance["create_timestamp"] = round(time())
filename_ = instance.get("filename", "")
file_ = instance.pop("file")
instance["mimetype"] = mimetypes.guess_type(filename_)[0]
if instance["mimetype"] is None:
raise ActionException(f"Cannot guess mimetype for {filename_}.")
decoded_file = base64.b64decode(file_)
instance["filesize"] = len(decoded_file)
id_ = instance["id"]
mimetype_ = instance["mimetype"]
if instance["mimetype"] == "application/pdf":
instance["pdf_information"] = self.get_pdf_information(decoded_file)
collection, _ = self.get_owner_data(instance)
if collection == "meeting":
(
instance["is_public"],
instance["inherited_access_group_ids"],
) = calculate_inherited_groups_helper_with_parent_id(
self.datastore,
instance.get("access_group_ids"),
instance.get("parent_id"),
)
self.media.upload_mediafile(file_, id_, mimetype_)
return instance
def get_pdf_information(self, file_bytes: bytes) -> PDFInformation:
bytes_io = BytesIO(file_bytes)
try:
pdf = PdfFileReader(bytes_io)
return {"pages": pdf.getNumPages()}
except PdfReadError:
# File could be encrypted but not be detected by PyPDF.
return {
"pages": 0,
"encrypted": True,
}
|
nilq/baby-python
|
python
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exposure and risks of superfund sites to natural hazards
This dataset is associated with the following publication: Summers, K., A. Lamaper, and K. Buck. National Hazards Vulnerability and the Remediation, Restoration and Revitalization of Contaminated Sites – 1. Superfund. ENVIRONMENTAL MANAGEMENT. Springer-Verlag, New York, NY, USA, 14, (2021).
This script processes the file:
- ./data/SF_CRSI_OLEM.xlsx
The dataset lists all active and upcoming Superfund sites and their vulnerability to 12 natural hazards using a vulnerability score between 0 and 100. Additional risk/exposure metrics are also imported.
"""
import os
from absl import app, flags
import pandas as pd
_RISK_TEMPLATE_MCF = """Node: E:SuperfundSite->E0
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_HurricaneEvent
value: C:SuperfundSite->HURR_EXP
Node: E:SuperfundSite->E1
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_TornadoEvent
value: C:SuperfundSite->TORN_EXP
Node: E:SuperfundSite->E2
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_LandslideEvent
value: C:SuperfundSite->LSLD_EXP
Node: E:SuperfundSite->E3
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_ExtremeColdWindChillEvent
value: C:SuperfundSite->LTMP_EXP
Node: E:SuperfundSite->E4
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_ExcessiveHeatEvent
value: C:SuperfundSite->HTMP_EXP
Node: E:SuperfundSite->E5
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_HailEvent
value: C:SuperfundSite->HAIL_EXP
Node: E:SuperfundSite->E6
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_WildfireEvent
value: C:SuperfundSite->FIRE_EXP
Node: E:SuperfundSite->E7
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_EarthquakeEvent
value: C:SuperfundSite->EQ_EXP
Node: E:SuperfundSite->E8
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_DroughtEvent
value: C:SuperfundSite->DRGH_EXP
Node: E:SuperfundSite->E9
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_FloodEvent
value: C:SuperfundSite->IFLD_EXP
Node: E:SuperfundSite->E10
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_CoastalFloodEvent
value: C:SuperfundSite->CFLD_EXP
Node: E:SuperfundSite->E11
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite_HighWindEvent
value: C:SuperfundSite->WIND_EXP
Node: E:SuperfundSite->E12
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardExposureScore_SuperfundSite
value: C:SuperfundSite->EXPOSURE_SCORE
Node: E:SuperfundSite->E13
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:NaturalHazardRiskScore_SuperfundSite
value: C:SuperfundSite->RISK_SCORE
Node: E:SuperfundSite->E14
typeOf: dcs:StatVarObservation
observationAbout: C:SuperfundSite->Site_EPA_ID
observationDate: C:SuperfundSite->observationDate
variableMeasured: dcid:CrsiScore_SuperfundSite
value: C:SuperfundSite->CRSI_SCORE
"""
_DATASET_NAME = "./SF_CRSI_OLEM.xlsx"
_DATA_COLS = [
'Site_EPA_ID', 'CFLD_EXP', 'IFLD_EXP', 'DRGH_EXP', 'EQ_EXP', 'FIRE_EXP',
'HAIL_EXP', 'HTMP_EXP', 'LTMP_EXP', 'HURR_EXP', 'LSLD_EXP', 'TORN_EXP',
'WIND_EXP', 'EXPOSURE_SCORE', 'RISK_SCORE', 'CRSI_SCORE'
]
def process_site_hazards(input_path: str, output_path: str) -> int:
"""
Processes ./SF_CRSI_OLEM.xlsx to generate clean csv and tmcf files
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
risk_score = pd.read_excel(os.path.join(input_path, _DATASET_NAME),
usecols=_DATA_COLS)
risk_score[
'Site_EPA_ID'] = 'epaSuperfundSiteId/' + risk_score['Site_EPA_ID']
risk_score['observationDate'] = 2021
risk_score.to_csv(os.path.join(output_path, 'superfund_hazardExposure.csv'),
index=False)
    with open(os.path.join(output_path, 'superfund_hazardExposure.tmcf'), 'w') as f:
        f.write(_RISK_TEMPLATE_MCF)
site_count = len(risk_score['Site_EPA_ID'].unique())
return int(site_count)
# Flags must be registered at module level, before app.run() parses argv;
# defining them inside main() would leave the command-line flags unrecognized.
FLAGS = flags.FLAGS
flags.DEFINE_string('input_path', './data',
                    'Path to the directory with input files')
flags.DEFINE_string(
    'output_path', './data/output',
    'Path to the directory where generated files are to be stored.')
def main(_) -> None:
site_count = process_site_hazards(FLAGS.input_path, FLAGS.output_path)
print(f"Processing of {site_count} superfund sites is complete.")
if __name__ == '__main__':
app.run(main)
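# Typical invocation (script name is hypothetical; paths shown are the flag
# defaults registered above):
#   python process_superfund_hazards.py --input_path=./data --output_path=./data/output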
|
nilq/baby-python
|
python
|
import io
import os
from unittest.mock import MagicMock, patch
from uuid import uuid4
from django.core.files.uploadedfile import SimpleUploadedFile
from django.template.exceptions import \
TemplateSyntaxError as DjangoTemplateSyntaxError
from django.test import TestCase
from jinja2 import TemplateSyntaxError
from terra_accounts.tests.factories import TerraUserFactory
from terracommon.document_generator.helpers import DocumentGenerator
from terracommon.document_generator.models import (DocumentTemplate,
DownloadableDocument)
from terracommon.trrequests.tests.factories import UserRequestFactory
def mock_libreoffice(arguments):
    # Get the temporary directory passed as the --outdir parameter value in subprocess.run
tmpdir = arguments[arguments.index('--outdir') + 1]
tmp_pdf_root = os.path.splitext(os.path.basename(arguments[-1]))[0]
tmp_pdf = os.path.join(tmpdir, f'{tmp_pdf_root}.pdf')
with open(tmp_pdf, 'wb') as pdf_file:
pdf_file.write(b'some content')
class DocumentGeneratorTestCase(TestCase):
def setUp(self):
self.user = TerraUserFactory()
self.userrequest = UserRequestFactory()
self.docx_file = os.path.join(os.path.dirname(__file__), 'empty.docx')
with open(self.docx_file, 'rb') as docx:
self.template = DocumentTemplate.objects.create(
name='emptydocx',
documenttemplate=SimpleUploadedFile(
str(uuid4()),
docx.read()
)
)
self.downloadable = DownloadableDocument.objects.create(
user=self.user,
document=self.template,
linked_object=self.userrequest
)
@patch('terracommon.document_generator.helpers.logger')
def test_bad_html_template(self, mock_logger):
html_file = os.path.join(os.path.dirname(__file__), 'simple_html_template.html')
with open(html_file, 'rb') as read_file:
html_template = DocumentTemplate.objects.create(
name='htmltemplate',
documenttemplate=SimpleUploadedFile(
str(uuid4()),
read_file.read()
)
)
html_downloadable = DownloadableDocument.objects.create(
user=self.user,
document=html_template,
linked_object=self.userrequest
)
dg = DocumentGenerator(html_downloadable)
dg.get_html = MagicMock(side_effect=DjangoTemplateSyntaxError('Error'))
with self.assertRaises(DjangoTemplateSyntaxError):
dg.get_pdf()
mock_logger.warning.assert_called()
def test_empty_html_template(self):
html_file = os.path.join(os.path.dirname(__file__), 'empty_html_template.html')
with open(html_file, 'rb') as read_file:
html_template = DocumentTemplate.objects.create(
name='htmltemplate',
documenttemplate=SimpleUploadedFile(
str(uuid4()),
read_file.read()
)
)
html_downloadable = DownloadableDocument.objects.create(
user=self.user,
document=html_template,
linked_object=self.userrequest
)
dg = DocumentGenerator(html_downloadable)
html_content = dg.get_html({})
self.assertEqual('', html_content)
def test_get_html_without_data(self):
html_file = os.path.join(os.path.dirname(__file__), 'simple_html_template.html')
with open(html_file, 'rb') as read_file:
html_template = DocumentTemplate.objects.create(
name='htmltemplate',
documenttemplate=SimpleUploadedFile(
str(uuid4()),
read_file.read()
)
)
html_downloadable = DownloadableDocument.objects.create(
user=self.user,
document=html_template,
linked_object=self.userrequest
)
dg = DocumentGenerator(html_downloadable)
html_content = dg.get_html({})
self.assertEqual('<html><body>It is now .</body></html>', html_content)
def test_get_html_with_data(self):
html_file = os.path.join(os.path.dirname(__file__), 'simple_html_template.html')
with open(html_file, 'rb') as read_file:
html_template = DocumentTemplate.objects.create(
name='htmltemplate',
documenttemplate=SimpleUploadedFile(
str(uuid4()),
read_file.read()
)
)
html_downloadable = DownloadableDocument.objects.create(
user=self.user,
document=html_template,
linked_object=self.userrequest
)
dg = DocumentGenerator(html_downloadable)
html_content = dg.get_html({'current_date': '2019-05-15'})
self.assertEqual('<html><body>It is now 2019-05-15.</body></html>', html_content)
def test_pdf_is_generated_from_html_template(self):
html_file = os.path.join(os.path.dirname(__file__), 'simple_html_template.html')
with open(html_file, 'rb') as read_file:
html_template = DocumentTemplate.objects.create(
name='htmltemplate',
documenttemplate=SimpleUploadedFile(
str(uuid4()),
read_file.read()
)
)
html_downloadable = DownloadableDocument.objects.create(
user=self.user,
document=html_template,
linked_object=self.userrequest
)
dg = DocumentGenerator(html_downloadable)
pdf_path = dg.get_pdf()
self.assertTrue(os.path.isfile(pdf_path))
os.remove(pdf_path)
@patch('subprocess.run', side_effect=mock_libreoffice)
def test_pdf_is_generated_from_enriched_docx(self, mock_run):
        # Patch the libreoffice call, which should write a pdf file with the
        # same name as the temporary docx file,
        # then patch get_docx to return dummy content.
docx_path = os.path.join(os.path.dirname(__file__), 'empty.docx')
with open(docx_path, 'rb') as docx_file:
with patch.object(
DocumentGenerator, 'get_docx',
return_value=io.BytesIO(docx_file.read())
) as mock_docx:
dg = DocumentGenerator(self.downloadable)
pdf_path = dg.get_pdf()
mock_docx.assert_called()
os.remove(pdf_path)
def test_everything_seems_to_work_without_variables(self):
dg = DocumentGenerator(self.downloadable)
dg.get_docx({}) # No exceptions are raised
def test_everything_seems_to_work_with_variables(self):
template_path = os.path.join(os.path.dirname(__file__),
'template_with_img.docx')
with open(template_path, 'rb') as template_fd:
template = DocumentTemplate.objects.create(
name='template_with_img',
documenttemplate=SimpleUploadedFile(template_path,
template_fd.read())
)
downloadable = DownloadableDocument.objects.create(
user=self.user,
document=template,
linked_object=self.userrequest
)
image_path = os.path.join(os.path.dirname(__file__), 'new_img.png')
dg = DocumentGenerator(downloadable)
dg.get_docx({
'name': 'Makina Corpus',
'logo': image_path,
}) # No exceptions are raised
def test_raises_exception_typeerror(self):
with self.assertRaises(TypeError):
DocumentGenerator('')
@patch('terracommon.document_generator.helpers.logger')
def test_raises_exception_when_template_is_not_found(self, mock_logger):
dg = DocumentGenerator(self.downloadable)
dg.get_docx = MagicMock(side_effect=FileNotFoundError)
with self.assertRaises(FileNotFoundError):
dg.get_pdf()
mock_logger.warning.assert_called()
@patch('subprocess.run', side_effect=mock_libreoffice)
def test_cache_is_created(self, mock_run):
dg = DocumentGenerator(self.downloadable)
pdf_path = dg.get_pdf()
self.assertTrue(os.path.isfile(pdf_path))
os.remove(pdf_path)
@patch('terracommon.document_generator.helpers.logger')
def test_raises_templatesyntaxerror_exception(self, mock_logger):
dg = DocumentGenerator(self.downloadable)
dg.get_docx = MagicMock(side_effect=TemplateSyntaxError('', 0))
with self.assertRaises(TemplateSyntaxError):
dg.get_pdf()
mock_logger.warning.assert_called()
@patch('subprocess.run', side_effect=mock_libreoffice)
def test_pdf_is_generated_again_when_data_are_updated(self, mock_run):
dg = DocumentGenerator(self.downloadable)
pdf_path = dg.get_pdf()
pdf_mtime = os.path.getmtime(pdf_path)
self.assertTrue(os.path.isfile(pdf_path))
# Update the updated_at date
self.userrequest.save()
pdf_path_bis = dg.get_pdf()
self.assertTrue(os.path.isfile(pdf_path_bis))
self.assertNotEqual(os.path.getmtime(pdf_path_bis), pdf_mtime)
os.remove(pdf_path_bis)
|
nilq/baby-python
|
python
|
#python3 Steven 12/05/20,Auckland,NZ
#pytorch backbone models
import torch
from commonTorch import ClassifierCNN_NetBB
from summaryModel import summaryNet
from backbones import *
def main():
nClass = 10
net = ClassifierCNN_NetBB(nClass, backbone=alexnet)
summaryNet(net, (3,512,512))
#net = ClassifierCNN_NetBB(nClass, backbone=vgg16)
#summaryNet(net, (3,640,480))
# net = ClassifierCNN_NetBB(nClass, backbone=resnet18)
# summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=squeezenet)
#summaryNet(net, (3,640,480))
##net = ClassifierCNN_NetBB(nClass, backbone=densenet)
##summaryNet(net, (3, 512, 512))
##net = ClassifierCNN_NetBB(nClass, backbone=inception)
##summaryNet(net, (3,640,480))
##net = ClassifierCNN_NetBB(nClass, backbone=googlenet)
##summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=shufflenet)
#summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=mobilenet)
#summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=resnext50_32x4d)
#summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=wide_resnet50_2)
#summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=mnasnet)
#summaryNet(net, (3,640,480))
return
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from fastapi import FastAPI
app = FastAPI()
@app.get("/keyword-weights/", response_model=dict[str, float])
async def read_keyword_weights():
return {"foo": 2.3, "bar": 3.4}
|
nilq/baby-python
|
python
|
"""Submodule providing embedding lookup layer."""
from typing import Tuple, Dict
import tensorflow as tf
from tensorflow.keras.layers import Flatten, Layer # pylint: disable=import-error,no-name-in-module
class EmbeddingLookup(Layer):
"""Layer implementing simple embedding lookup layer."""
def __init__(
self,
**kwargs: Dict
):
"""Create new Embedding Lookup layer.
Parameters
----------------------
**kwargs: Dict,
Kwargs to pass to the parent Layer class.
"""
super().__init__(**kwargs)
self._flatten_layer = None
def build(self, input_shape) -> None:
"""Build the embedding lookup layer.
Parameters
------------------------------
input_shape
Shape of the output of the previous layer.
"""
self._flatten_layer = Flatten()
super().build(input_shape)
def call(
self,
inputs: Tuple[tf.Tensor],
) -> tf.Tensor:
"""Returns called embeddingg lookup.
Parameters
---------------------------
inputs: Tuple[tf.Tensor],
"""
node_ids, node_features = inputs
return self._flatten_layer(tf.nn.embedding_lookup(
node_features,
ids=node_ids
))
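# Hedged usage sketch (shapes and names are illustrative assumptions):
# looking up and flattening embeddings for a small batch of node ids.
if __name__ == "__main__":
    layer = EmbeddingLookup()
    node_features = tf.random.normal((100, 16))  # 100 nodes, 16-dim features
    node_ids = tf.constant([[0, 3, 7]])          # batch of one, three nodes
    # embedding_lookup gives shape (1, 3, 16); Flatten yields (1, 48)
    print(layer((node_ids, node_features)).shape)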
|
nilq/baby-python
|
python
|
import sys
import os
import numpy as np
from numpy import array
import datetime
import calendar
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import FuncFormatter
from swaty.swaty_read_model_configuration_file import swat_read_model_configuration_file
from swaty.classes.pycase import swaty
from pyearth.system.define_global_variables import *
from pyearth.toolbox.reader.text_reader_string import text_reader_string
from pyearth.visual.timeseries.plot_time_series_data import plot_time_series_data
from pyearth.visual.scatter.scatter_plot_data import scatter_plot_data
#ftsz = 18
#plt.rcParams['xtick.labelsize']=ftsz
#plt.rcParams['ytick.labelsize']=ftsz
#plt.rcParams['axes.labelsize']=ftsz
#plt.rcParams['axes.titlesize']=ftsz
#from swaty.plot.swat_convert_data_daily_2_monthly import swat_convert_data_daily_2_monthly
def swat_tsplot_stream_discharge(oSwat_in):
iYear_start = oSwat_in.iYear_start
iYear_end = oSwat_in.iYear_end
nstress_month = oSwat_in.nstress_month
sWorkspace_simulation_case = oSwat_in.sWorkspace_simulation_case
sFilename1 = '/global/u1/l/liao313/data/swat/arw/auxiliary/usgs/discharge/stream_discharge_monthly.txt'
aData = text_reader_string(sFilename1)
aDischarge_obs = np.array( aData ).astype(float)
aDischarge_obs = aDischarge_obs.flatten() * cms2cmd
sFilename2 = sWorkspace_simulation_case + slash + 'stream_discharge_monthly.txt'
aData = text_reader_string(sFilename2)
aDischarge_simulation1 = np.array( aData ).astype(float)
aDischarge_simulation1 = aDischarge_simulation1.flatten() * cms2cmd
sFilename3 = '/global/u1/l/liao313/data/swat/arw/auxiliary/usgs/discharge/stream_discharge_monthly_opt.txt'
aData = text_reader_string(sFilename3)
    aDischarge_simulation2 = np.array( aData ).astype(float)
aDischarge_simulation2 = aDischarge_simulation2.flatten() * cms2cmd
#dummy1 = np.percentile(aDischarge_simulation, 99)
#dummy2 = np.where( aDischarge_simulation > dummy1 )
#plot simulation
dates = list()
for iYear in range(iYear_start, iYear_end+1):
for iMonth in range(1,13):
dSimulation = datetime.datetime(iYear, iMonth, 1)
dates.append(dSimulation)
    sLabel_Y = r'Stream discharge ($m^{3} \, day^{-1}$)'
    sLabel_legend = 'Simulated stream discharge'
    aDate = np.tile(dates, (3, 1))
    aData = np.array([aDischarge_obs, aDischarge_simulation1, aDischarge_simulation2])
aLabel_legend = ['Default','Initial','Calibrated']
aColor_in = ['black', 'red', 'blue']
sFilename_out = sWorkspace_simulation_case + slash + 'discharge_monthly_scatter1.png'
scatter_plot_data(aDischarge_obs,aDischarge_simulation1,sFilename_out,\
iFlag_scientific_notation_x_in=1,\
iFlag_scientific_notation_y_in=1,\
dMin_x_in = 0.0, \
dMax_x_in = 1E7, \
dMin_y_in = 0.0, \
dMax_y_in = 1E7, \
iSize_x_in = 8, \
iSize_y_in = 8,\
sLabel_legend_in = 'Initial',\
sLabel_x_in = r'Observed discharge ($m^{3} \, day^{-1}$)',\
sLabel_y_in = r'Simulated discharge ($m^{3} \, day^{-1}$)' )
sFilename_out = sWorkspace_simulation_case + slash + 'discharge_monthly_scatter2.png'
scatter_plot_data(aDischarge_obs,aDischarge_simulation2,sFilename_out,\
iFlag_scientific_notation_x_in=1,\
iFlag_scientific_notation_y_in=1,\
dMin_x_in = 0.0, \
dMax_x_in = 1E7, \
dMin_y_in = 0.0, \
dMax_y_in = 1E7, \
iSize_x_in = 8, \
iSize_y_in = 8,\
sLabel_legend_in = 'Calibrated',\
sLabel_x_in =r'Observed discharge ($m^{3} \, day^{-1}$)',\
sLabel_y_in = r'Calibrated discharge ($m^{3} \, day^{-1}$)' )
sFilename_out = sWorkspace_simulation_case + slash + 'discharge_monthly.png'
plot_time_series_data(aDate, aData,\
sFilename_out,\
sTitle_in = '', \
sLabel_y_in= sLabel_Y,\
aColor_in =aColor_in,\
aLabel_legend_in = aLabel_legend, \
iSize_x_in = 12,\
iSize_y_in = 5)
print("finished")
if __name__ == '__main__':
sFilename_configuration_in = '/global/homes/l/liao313/workspace/python/swaty/swaty/shared/swat_simulation.xml'
aConfig = swat_read_model_configuration_file(sFilename_configuration_in)
# iCase_index_in=iCase_index_in, sJob_in=sJob_in, iFlag_mode_in=iFlag_mode_in)
aConfig['sFilename_model_configuration'] = sFilename_configuration_in
oSwat = swaty(aConfig)
swat_tsplot_stream_discharge(oSwat)
|
nilq/baby-python
|
python
|
class Solution:
cache = {0: 0, 1: 1}
def fib(self, N: int) -> int:
if N in self.cache:
return self.cache[N]
self.cache[N] = self.fib(N - 1) + self.fib(N - 2)
return self.cache[N]
# Contributed by LeetCode user mereck.
class Solution2:
def fib(self, N: int) -> int:
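        # Binet's closed form: F(N) = round(phi**N / sqrt(5)); exact while
        # the floating-point error stays below 0.5, roughly N <= 70.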
golden_ratio = (1 + (5 ** 0.5)) / 2
return int(round((golden_ratio ** N) / (5 ** 0.5)))
n = int(input())
p = Solution().fib(n)
print(p)
|
nilq/baby-python
|
python
|
import unittest
from src.command.shutter_command import ShutterCommand, ShutterCommandType
class TestShutterCommand(unittest.TestCase):
def test_parse(self):
self.assertEqual(ShutterCommand.parse(" Up "), ShutterCommand(ShutterCommandType.POSITION, 0))
self.assertEqual(ShutterCommand.parse(" DoWn"), ShutterCommand(ShutterCommandType.POSITION, 100))
self.assertEqual(ShutterCommand.parse(" Off "), ShutterCommand(ShutterCommandType.POSITION, 0))
self.assertEqual(ShutterCommand.parse(" On "), ShutterCommand(ShutterCommandType.POSITION, 100))
self.assertEqual(ShutterCommand.parse(" 42 "), ShutterCommand(ShutterCommandType.POSITION, 42))
self.assertEqual(ShutterCommand.parse(" 42.24"), ShutterCommand(ShutterCommandType.POSITION, 42))
self.assertEqual(ShutterCommand.parse(" 42 calibrate"), ShutterCommand(ShutterCommandType.POSITION, 42, force_calibration=True))
self.assertEqual(ShutterCommand.parse(" 42calibrate "), ShutterCommand(ShutterCommandType.POSITION, 42, force_calibration=True))
self.assertEqual(ShutterCommand.parse(" 42.24 calibrate"), ShutterCommand(ShutterCommandType.POSITION, 42, force_calibration=True))
self.assertEqual(ShutterCommand.parse(" -10 "), ShutterCommand(ShutterCommandType.POSITION, 0))
self.assertEqual(ShutterCommand.parse(" 120 "), ShutterCommand(ShutterCommandType.POSITION, 100))
self.assertEqual(ShutterCommand.parse('{"COMMAND": " uP "}'), ShutterCommand(ShutterCommandType.POSITION, 0))
self.assertEqual(ShutterCommand.parse('{"command": " DowN "}'),
ShutterCommand(ShutterCommandType.POSITION, 100))
self.assertEqual(ShutterCommand.parse('{"cmd": " 50.5 "}'), ShutterCommand(ShutterCommandType.POSITION, 50))
self.assertEqual(ShutterCommand.parse(" learn "), ShutterCommand(ShutterCommandType.LEARN))
self.assertEqual(ShutterCommand.parse(" teach "), ShutterCommand(ShutterCommandType.LEARN))
self.assertEqual(ShutterCommand.parse(" Update "), ShutterCommand(ShutterCommandType.UPDATE))
self.assertEqual(ShutterCommand.parse(" refresh "), ShutterCommand(ShutterCommandType.UPDATE))
self.assertEqual(ShutterCommand.parse(" stop "), ShutterCommand(ShutterCommandType.STOP))
|
nilq/baby-python
|
python
|
#
# Photo Fusion
#
# Peter Turney, February 8, 2021
#
# Read a fusion pickle file (fusion_storage.bin) and
# make photos of the fusion events.
#
import golly as g
import model_classes as mclass
import model_functions as mfunc
import model_parameters as mparam
import numpy as np
import time
import pickle
import os
import re
import sys
#
# Number of steps to run Game of Life or Immigration Rule or
# Management Rule.
#
num_steps = 1000
#
# Ask the user to select the desired fusion pickle file.
#
fusion_path = g.opendialog("Choose a fusion pickle file",
"fusion*.bin", g.getdir("app"))
#
g.note("Verify Selection\n\n" + \
"Fusion pickle file:\n\n" + \
fusion_path + "\n\n" + \
"Exit now if this is incorrect.")
#
# Open the fusion pickle file -- "ab+" opens a file for
# both appending and reading in binary mode.
#
fusion_handle = open(fusion_path, "ab+")
fusion_handle.seek(0) # start at the beginning of the file
#
# Read the fusion pickle file into a list.
#
fusion_list = []
#
while True:
try:
part = pickle.load(fusion_handle)
fusion_list.append(part)
except (EOFError, pickle.UnpicklingError):
break
#
fusion_handle.close()
#
# The list fusion_list is a repeating sequence of four items:
#
# [s2, s3, s4, n, ..., s2, s3, s4, n]
#
# - s2 is part of s4 (after rotation)
# - s3 is part of s4 (after rotation)
# - s4 is the fusion of s2 and s3
# - s4 is the n-th child born
#
# For each [s2, s3, s4, n] tuple, we will create seven photos:
#
# (1) a photo of the red seed s2 in its initial state (left part = red = state 1)
# (2) a photo of the red seed s2 in its final state
# (3) a photo of the blue seed s3 in its initial state (right part = blue = state 2)
# (4) a photo of the blue seed s3 in its final state
# (5) a photo of the fused seed s4 in its initial state (left/red & right/blue)
# (6) a photo of the fused seed s4 in its final state using the Immigration Rule
# (7) a photo of the fused seed s4 in its final state using the Management Rule
#
# The seven files will be assigned names of the following form:
#
# format: <leaf directory>-<birth n>-<photo type: 1 to 7>.<file type: png>
# example: "run1-birth29-photo1.png
#
# extract the target directory from fusion_path -- we assume that the
# fusion pickle file is given by fusion_path and that the photos will be
# stored in the same directory as the pickle file
photo_directory = os.path.dirname(fusion_path)
# extract leaf directory from photo_directory (so we know where it came from,
# in case it gets moved)
leaf_dir = os.path.basename(os.path.normpath(photo_directory))
# allow time for Golly image to stabilize before entering loop below
time.sleep(2)
# pause between images, in seconds
pause = 0.1
#
# read four items at a time
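# The zip(*[iter(fusion_list)] * 4) idiom below advances one shared
# iterator four steps per loop turn, yielding non-overlapping 4-tuples.
# Tiny illustration (not from the original):
#   list(zip(*[iter([1, 2, 3, 4, 5, 6, 7, 8])] * 4))
#   --> [(1, 2, 3, 4), (5, 6, 7, 8)]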
for (s2, s3, s4, n) in zip(*[iter(fusion_list)] * 4):
# file 1: a photo of the red seed s2 in its initial state
# (left part = red = state 1)
file_path = photo_directory + "/" + leaf_dir + "-birth" + \
str(n) + "-photo1.png"
rule_name = "Immigration"
seed_list = [s2]
live_states = [1]
steps = 0 # initial state
description = "child number " + str(n) + ", left part, red, " + \
"initial state, Immigration"
mfunc.snap_photo(g, file_path, rule_name, seed_list, live_states, \
steps, description, pause)
# file 2: a photo of the red seed s2 in its final state
# (left part = red = state 1)
file_path = photo_directory + "/" + leaf_dir + "-birth" + \
str(n) + "-photo2.png"
rule_name = "Immigration"
seed_list = [s2]
live_states = [1]
steps = num_steps # final state
description = "child number " + str(n) + ", left part, red, " + \
"final state, Immigration"
mfunc.snap_photo(g, file_path, rule_name, seed_list, live_states, \
steps, description, pause)
# file 3: a photo of the blue seed s3 in its initial state
# (right part = blue = state 2)
file_path = photo_directory + "/" + leaf_dir + "-birth" + \
str(n) + "-photo3.png"
rule_name = "Immigration"
seed_list = [s3]
live_states = [2]
steps = 0 # initial state
description = "child number " + str(n) + ", right part, blue, " + \
"initial state, Immigration"
mfunc.snap_photo(g, file_path, rule_name, seed_list, live_states, \
steps, description, pause)
  # file 4: a photo of the blue seed s3 in its final state
# (right part = blue = state 2)
file_path = photo_directory + "/" + leaf_dir + "-birth" + \
str(n) + "-photo4.png"
rule_name = "Immigration"
seed_list = [s3]
live_states = [2]
steps = num_steps # final state
description = "child number " + str(n) + ", right part, blue, " + \
"final state, Immigration"
mfunc.snap_photo(g, file_path, rule_name, seed_list, live_states, \
steps, description, pause)
# file 5: a photo of the fused seed s4 in its initial state
# (left/red & right/blue)
file_path = photo_directory + "/" + leaf_dir + "-birth" + \
str(n) + "-photo5.png"
rule_name = "Immigration"
seed_list = [s2, s3]
live_states = [1, 2]
steps = 0 # initial state
description = "child number " + str(n) + ", right red, left blue, " + \
"initial state, Immigration"
mfunc.snap_photo(g, file_path, rule_name, seed_list, live_states, \
steps, description, pause)
# file 6: a photo of the fused seed s4 in its final state
# (red, blue)
file_path = photo_directory + "/" + leaf_dir + "-birth" + \
str(n) + "-photo6.png"
rule_name = "Immigration"
seed_list = [s2, s3]
live_states = [1, 2]
steps = num_steps # final state
description = "child number " + str(n) + ", right red, left blue, " + \
"final state, Immigration"
mfunc.snap_photo(g, file_path, rule_name, seed_list, live_states, \
steps, description, pause)
# file 7: a photo of the fused seed s4 in its final state
# (red, blue, orange, green)
file_path = photo_directory + "/" + leaf_dir + "-birth" + \
str(n) + "-photo7.png"
rule_name = "Management"
seed_list = [s2, s3]
live_states = [1, 2]
steps = num_steps # final state
description = "child number " + str(n) + ", right red, left blue, " + \
"final state, Management"
mfunc.snap_photo(g, file_path, rule_name, seed_list, live_states, \
steps, description, pause)
#
#
|
nilq/baby-python
|
python
|
from subprocess import call
import re
import json
# cache for `dependencies`
dependencies = dict()
# parses and represents a carthage dependency
class Dependency(object):
def __init__(self, line, origin):
self.line = line
self.origin = origin
match = re.match(r"^(?P<identifier>(github|git|binary)\s+\"[^/]+/(?:.+?)\")(?:\s+(?P<predicate>.+)?)?", line)
self.identifier = match.group("identifier")
self.predicate = match.group("predicate")
def __str__(self):
return "`{}` defined `{}`.".format(self.origin, self.predicate)
# function to cache one carthage dependency
def cache_dependency(line, origin):
parsed_dependency = Dependency(line, origin)
identifier = parsed_dependency.identifier
if identifier in dependencies: # cache hit
dependencies[identifier].append(parsed_dependency)
else:
dependencies[identifier] = [parsed_dependency]
# read in `CarthageConfig.json`
carthage_config = json.load(open("./Carthage/CarthageConfig.json"))
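# Hedged example of the expected CarthageConfig.json shape (inferred from
# the keys this script reads, not copied from the original project):
# {
#   "cartfiles": ["App/Cartfile", "Framework/Cartfile"],
#   "no-use-binaries": true,
#   "cache-builds": true,
#   "use-ssh": false
# }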
# 1. collecting all dependencies as specified in `CarthageConfig.json`
print("1. collecting all dependencies as specified in `CarthageConfig.json`")
# reading in each `Cartfile` as specified in `CarthageConfig.json`
for cartfile_path in carthage_config["cartfiles"]:
cartfile = open(cartfile_path).read()
lines = [line.strip() for line in cartfile.splitlines()]
lines = list(filter(lambda x: len(x) > 0, lines))
for line in lines:
cache_dependency(line, cartfile_path)
# 2. checking for conflicts
print("2. checking for conflicts")
hasDiffer = False
for identifier in dependencies:
children = dependencies[identifier]
if len(children) == 1:
continue
differ = False
first = children[0]
for i in range(1, len(children)):
current = children[i]
differ = first.predicate != current.predicate
if differ is True:
hasDiffer = True
print("entries for {} differ".format(identifier))
for c in children:
print("\t=> {}".format(c))
print("")
if hasDiffer:
print("...please resolve conflicts first!")
raise SystemExit
print("...no conflicts found!")
# 3. writing overall `Cartfile`
print("3. writing overall `Cartfile`")
with open("./Cartfile", "w") as overall_cartfile:
    for dep in sorted(dependencies.values(), key=lambda dep: dep[0].identifier):
overall_cartfile.write(f"{dep[0].line}\n")
# 4. run `carthage update --platform iOS [...]`
call_chain = ["carthage", "update", "--platform", "iOS"]
if carthage_config.get("no-use-binaries", False):
call_chain.append("--no-use-binaries")
if carthage_config.get("cache-builds", False):
call_chain.append("--cache-builds")
if carthage_config.get("use-ssh", False):
call_chain.append("--use-ssh")
print(f"4. run `{' '.join(call_chain)}`")
call(call_chain)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import argparse
import gzip
import logging
import hashlib
from glob import glob
from json import load
from inscriptis import get_text
from inscriptis.model.config import ParserConfig
from collections import defaultdict
from harvest import posts
from harvest.extract import extract_posts
from urllib.parse import urlparse
from corpus.createGoldDocuments.file import write_to_json
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser(description='Forum harvester - generate gold standard document for further processing')
parser.add_argument('corpus_path', metavar='corpus_path', help='Path to the input corpus')
parser.add_argument('--result-directory', dest='result_directory', help='Optional directory for storing json results.')
parser.add_argument('--corpus-include-string', dest='corpus_include_string',
help='Optionally restrict the input corpus to URLs that match the corpus include string.')
args = parser.parse_args()
result = defaultdict(list)
for no, fname in enumerate(glob(args.corpus_path + "*.json.gz")):
opener = gzip.open if fname.endswith(".gz") else open
with opener(fname) as f:
forum = load(f)
domain = urlparse(forum['url']).netloc
if args.corpus_include_string and args.corpus_include_string not in forum['url']:
continue
logging.info("Processing " + forum['url'])
postXPath = posts.extract_posts(forum)
if postXPath['xpath_pattern']:
config = ParserConfig(display_links=True, display_anchors=True)
text = get_text(forum['html'], config)
text = " ".join([c.strip() for c in text.split("\n") if c.strip()])
document = {"id": f"i{int(hashlib.md5(forum['url'].encode('utf-8')).hexdigest(), 16)}",
"url": forum['url'], "html": forum['html'], "text": text, "gold_standard_annotation": []}
if args.result_directory:
for post in extract_posts(forum['html'], forum['url'],
postXPath['text_xpath_pattern'],
postXPath['url_xpath_pattern'],
postXPath['date_xpath_pattern'],
postXPath['user_xpath_pattern'], result_as_datetime=False):
post_element = {"post_text": {"surface_form": post.post},
"datetime": {"surface_form": post.date},
"user": {"surface_form": post.user}}
if postXPath['url_xpath_pattern']:
post_element["post_link"] = {"surface_form": post.url}
document["gold_standard_annotation"].append(post_element)
write_to_json(forum['url'], args.result_directory, document)
else:
logging.error(f'Could not process {forum["url"]}')
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import timeit
from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
from graphtheory.structures.factory import GraphFactory
from graphtheory.traversing.bfs import BFSWithQueue
from graphtheory.traversing.bfs import SimpleBFS
V = 10
#V = 1000000 # OK
graph_factory = GraphFactory(Graph)
G = graph_factory.make_cyclic(V, False)
E = G.e()
#G.show()
print ("Testing BFSWithQueue ..." )
t1 = timeit.Timer(lambda: BFSWithQueue(G).run())
print ( "{} {} {}".format(V, E, t1.timeit(1)) ) # single run
print ("Testing SimpleBFS ..." )
t1 = timeit.Timer(lambda: SimpleBFS(G).run())
print ( "{} {} {}".format(V, E, t1.timeit(1)) ) # single run
# EOF
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import colorsimple as cs
entry_dict = {
"A (Rhodopsin)" : { "shape" : 7, "ps" : 1.5, "clr" : "#FFB8B8" },
"B1 (Secretin)" : { "shape" : 7, "ps" : 1.5, "clr" : "#00A600" },
"C (Glutamate)" : { "shape" : 7, "ps" : 1.5, "clr" : "#0080FF" },
"F (Frizzled)" : { "shape" : 7, "ps" : 1.5, "clr" : "#AB00FF" },
"Inactive" : { "shape" : 6, "ps" : 1.5, "clr" : "black" },
"Intermediate" : { "shape" : 8, "ps" : 1.5, "clr" : "black" },
"Active" : { "shape" : 2, "ps" : 1.5, "clr" : "black" },
"Resolution (>3.5 {\305})": { "shape" : 7, "ps" : 0.5, "clr" : "black" },
}
color_dict = {}
for k, v in entry_dict.items():
title = k
shape = v["shape"]
ps = v["ps"]
clr = v["clr"]
color_dict[title] = {
"style" : f"u 1:2 w p pt {shape} ps {ps} lw 1.0 lc rgb '{clr}' title '{title}'",
"entry" : [],
}
cs.color_table(color_dict, filename = "xfam-loop.color_table")
|
nilq/baby-python
|
python
|
from typing import Dict, Optional, Tuple
from datadog import initialize, statsd
from .base import BaseClient
class DogstatsdClient(BaseClient):
def __init__(self, agent_host: str, port: int) -> None:
initialize(statsd_host=agent_host, statsd_port=port)
def increment_counter(
self, name: str, labels: Optional[Dict[str, str]] = None, value: int = 1
) -> None:
tags = [f"{key}:{value}" for key, value in labels.items()] if labels else None
statsd.increment(metric=name, tags=tags, value=value)
def set_gauge_value(
self, name: str, labels: Optional[Dict[str, str]] = None, value: float = 0.0
) -> None:
tags = [f"{key}:{value}" for key, value in labels.items()] if labels else None
statsd.gauge(metric=name, tags=tags, value=value)
def register_metric(
self,
metric_type: str,
name: str,
documentation: str,
label_names: Optional[Tuple[str, ...]] = None,
) -> None:
pass
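# Hedged usage sketch (host, port and metric names are assumptions): the
# client forwards counters and gauges to a local dogstatsd agent.
if __name__ == "__main__":
    client = DogstatsdClient(agent_host="localhost", port=8125)
    client.increment_counter("requests_total", labels={"route": "/home"})
    client.set_gauge_value("queue_depth", value=42.0)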
|
nilq/baby-python
|
python
|
symbols = ["DOLLAR SIGN", "BANANA", "CHERRY", "DIAMOND", "SEVEN", "BAR"]
import random
reel_1 = random.choice(symbols)
reel_2 = random.choice(symbols)
reel_3 = random.choice(symbols)
if reel_1 == reel_2 and reel_2 == reel_3:
print("%s! %s! %s! LUCKY STRIKE! YOU WIN £10"% (reel_1, reel_2, reel_3))
elif reel_1 == reel_2 or reel_1 == reel_3 or reel_2 == reel_3:
print("%s! %s! %s! NOT BAD, YOU WON £5" % (reel_1, reel_2, reel_3))
else:
print("%s! %s! %s! YOU LOSE..." % (reel_1, reel_2, reel_3))
|
nilq/baby-python
|
python
|
from tkinter import Canvas
class GraphicItem:
itemType: str
coords: list
config: dict
def __init__(self, cnv: Canvas):
self.cnv = cnv
self.uid = None
def update(self):
if self.uid is None:
self.uid = self.cnv._create(
itemType=self.itemType,
args=self.coords,
kw=self.config
)
else:
self.cnv.coords(self.uid, *self.coords)
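# Hedged usage sketch (the Oval subclass and values are assumptions): the
# first update() creates the canvas item, later calls only move it.
if __name__ == "__main__":
    import tkinter as tk

    class Oval(GraphicItem):
        itemType = "oval"

    root = tk.Tk()
    cnv = Canvas(root)
    cnv.pack()
    oval = Oval(cnv)
    oval.coords = [10, 10, 60, 60]
    oval.config = {"fill": "red"}
    oval.update()  # creates the oval
    oval.coords = [30, 30, 80, 80]
    oval.update()  # moves the existing oval
    root.update_idletasks()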
|
nilq/baby-python
|
python
|
class cel:
def __init__(self):
self.temp = 1234567890
|
nilq/baby-python
|
python
|
import datetime
import dateutil.parser
import pytz
import pytz.exceptions
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseNotFound
from django.shortcuts import redirect, render
from django.utils import timezone, translation
from django.utils.translation import gettext as _
from .auth import needs_login, send_login_email, send_register_email, \
clear_login, EmailRateLimit
from .models import CYMUser, Task, TaskDone
def index(request):
"""Website index, redirects either to landing page or profile.
"""
if request.cym_user is not None:
return redirect('profile')
else:
return redirect('landing')
def landing(request):
"""The landing page, giving a description of what this is.
"""
return render(request, 'call_your_mom/landing.html')
def register(request):
"""Registration-or-login page, via which users sign up for the website.
"""
if request.method == 'POST':
email = request.POST.get('email')
if not email or len(email) < 3:
messages.add_message(request, messages.ERROR,
_("Please provide an email address"))
return redirect('register')
# Find out if an account exists for the email
try:
user = CYMUser.objects.get(email=email)
except ObjectDoesNotExist:
user = None
try:
if user is not None:
send_login_email(user)
user.last_login_email = timezone.now()
user.save()
else:
user = CYMUser(
email=email,
created=timezone.now(),
last_login_email=timezone.now(),
)
send_register_email(user)
user.save()
except EmailRateLimit:
messages.add_message(
request, messages.ERROR,
_("Rate-limiting is active. Not sending another email to "
"{0}.").format(user.email))
return redirect('confirm')
messages.add_message(
request, messages.INFO,
_("We have sent an email to {0}. Please follow the link inside to "
"start creating tasks.").format(email))
return redirect('confirm')
else:
return render(request, 'call_your_mom/register.html')
def login(request):
"""Login page.
Prompt the user for an email address, to which a log-in link will be sent.
"""
path = request.GET.get('path', '')
if request.method == 'POST':
email = request.POST.get('email')
if not email or len(email) < 3:
messages.add_message(request, messages.ERROR,
_("Please provide an email address"))
return redirect('login', path=path)
# Find out if an account exists for the email
try:
user = CYMUser.objects.get(email=email)
except ObjectDoesNotExist:
pass
else:
try:
send_login_email(user, path)
user.last_login_email = timezone.now()
user.save()
except EmailRateLimit:
messages.add_message(
request, messages.ERROR,
_("Rate-limiting is active. Not sending another email to "
"{0}.").format(user.email))
return redirect('confirm')
messages.add_message(
request, messages.INFO,
_("We have sent an email to {0}, if such an account exist. Please "
"follow the link inside to log in.").format(email))
return redirect('confirm')
else:
return render(request, 'call_your_mom/login.html')
def logout(request):
"""Log out the current user.
"""
clear_login(request)
messages.add_message(request, messages.INFO,
_("You have been logged out."))
return redirect('confirm')
def confirm(request):
"""Confirmation page, no userful content but displays messages.
"""
return render(request, 'call_your_mom/confirm.html')
_somedate = datetime.datetime(2018, 1, 2, 13, 0)
_timezones = []
for name in pytz.common_timezones:
tz = pytz.timezone(name)
offset = tz.utcoffset(_somedate) - tz.dst(_somedate)
offset = orig = int(offset.total_seconds())
offset_str = '+'
if offset < 0:
offset = -offset
offset_str = '-'
offset_str = '{}{:02}:{:02}'.format(offset_str,
offset // 3600,
(offset // 60) % 60)
_timezones.append((orig, offset_str, name))
_timezones = [(n, s) for (o, s, n) in sorted(_timezones)]
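# Worked example of the offset arithmetic above (illustration only): on the
# January reference date, Australia/Sydney reports utcoffset +11:00 with dst
# +01:00, so utcoffset - dst recovers the standard offset, rendered '+10:00'.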
@needs_login
def profile(request):
"""A user's profile, listing all his tasks.
"""
if request.method == 'POST':
if 'timezone' in request.POST:
try:
tz = pytz.timezone(request.POST['timezone'])
except pytz.exceptions.UnknownTimeZoneError:
pass
else:
request.cym_user.timezone = tz
request.cym_user.save()
messages.add_message(
request, messages.INFO,
_("Timezone updated"))
        return redirect('profile')
return render(request, 'call_your_mom/profile.html',
{'cym_user': request.cym_user,
'tasks': request.cym_user.task_set.all(),
'timezones': _timezones})
@needs_login
def change_task(request, task_id):
"""Creation or modification of a task.
Note that this is different from acknowledgement page, linked from reminder
emails.
"""
if task_id == 'new':
task = None
task_done_previously = []
else:
try:
task_id = int(task_id)
task = Task.objects.get(id=task_id)
except (ObjectDoesNotExist, ValueError):
task = None
if not task or task.user.id != request.cym_user.id:
return HttpResponseNotFound(_("Couldn't find this task!"))
task_done_previously = (
TaskDone.objects.filter(task=task)
.order_by('-done')
.all()[:30]
)
if request.method == 'POST':
task_name = request.POST.get('name', '')
task_description = request.POST.get('description', '')
task_due = request.POST.get('due', '')
task_interval_days = request.POST.get('interval_days', '')
valid = True
if not task_name:
messages.add_message(request, messages.ERROR,
_("Please give your task a name"))
valid = False
if task_due:
try:
task_due = dateutil.parser.parse(task_due).date()
except ValueError:
task_due = None
if not task_due:
messages.add_message(request, messages.ERROR,
_("Please give your task a due date"))
if task:
task_due = task.due
else:
                # interval_days has not been parsed to an int yet at this
                # point, so fall back to the same 7-day default used below
                task_due = (timezone.now() +
                            datetime.timedelta(days=7))
task_due = timezone.make_naive(task_due)
valid = False
        if task_interval_days:
            try:
                task_interval_days = int(task_interval_days)
            except ValueError:
                task_interval_days = None
            else:
                if task_interval_days < 1:
                    task_interval_days = None
if not task_interval_days:
messages.add_message(request, messages.ERROR,
_("Please give your task an interval in days "
"between occurrences"))
task_interval_days = 7
valid = False
if valid:
if task:
task.name = task_name
task.description = task_description
task.interval_days = task_interval_days
task.due = task_due
task.save()
messages.add_message(request, messages.INFO,
_("Task updated"))
else:
task = Task(user_id=request.cym_user.id,
name=task_name,
description=task_description,
interval_days=task_interval_days,
due=task_due)
task.save()
messages.add_message(request, messages.INFO,
_("Task created"))
return redirect('profile')
elif task:
task_name = task.name
task_description = task.description
task_interval_days = task.interval_days
task_due = task.due
task_is_due = task.is_due(request.cym_user.timezone)
else:
task_name = ''
task_description = ''
task_interval_days = 7
task_due = (timezone.now() +
datetime.timedelta(days=task_interval_days))
task_due = timezone.make_naive(task_due).date()
task_is_due = False
return render(request, 'call_your_mom/change_task.html',
{'task_id': task_id,
'task_name': task_name,
'task_description': task_description,
'task_interval_days': task_interval_days,
'task_due': task_due,
'task_is_due': task_is_due,
'task_done_previously': task_done_previously,
'new': task is None})
@needs_login
def delete_task(request, task_id):
"""Delete a task.
"""
try:
task_id = int(task_id)
task = Task.objects.get(id=task_id)
except (ObjectDoesNotExist, ValueError):
task = None
if not task or task.user.id != request.cym_user.id:
return HttpResponseNotFound(_("Couldn't find this task!"))
task.delete()
messages.add_message(request, messages.INFO,
_("Task deleted"))
return redirect('profile')
@needs_login
def ack_task(request, task_id):
"""Acknowledge a task, from a reminder.
This is the page that reminder emails link to. It allows the user to set
when the task was done, and when it is due next.
"""
try:
task = Task.objects.get(id=task_id)
except ObjectDoesNotExist:
task = None
if not task or task.user.id != request.cym_user.id:
return HttpResponseNotFound(_("Couldn't find this task!"))
if task and request.method == 'POST':
task_done = request.POST.get('done', '')
task_due = request.POST.get('due', '')
valid = True
if task_done:
try:
task_done = dateutil.parser.parse(task_done).date()
except ValueError:
task_done = None
if not task_done:
messages.add_message(request, messages.ERROR,
_("Please enter the date you performed the "
"task"))
task_done = timezone.make_naive(timezone.now()).date()
valid = False
if task_due:
try:
task_due = dateutil.parser.parse(task_due).date()
except ValueError:
task_due = None
if not task_due:
messages.add_message(request, messages.ERROR,
_("Please enter the date this task is due "
"next"))
task_due = task_done + datetime.timedelta(days=task.interval_days)
valid = False
if valid:
done = TaskDone(task=task, done=task_done)
done.save()
task.due = task_due
task.save()
return redirect('profile')
else:
task_done = timezone.make_naive(timezone.now()).date()
task_due = task_done + datetime.timedelta(days=task.interval_days)
return render(request, 'call_your_mom/ack_task.html',
{'task': task,
'task_done': task_done,
'task_due': task_due,
'task_is_due': task.is_due(request.cym_user.timezone)})
def set_lang(request, lang):
"""Change the language.
"""
translation.activate(lang)
request.session[translation.LANGUAGE_SESSION_KEY] = lang
if request.cym_user:
request.cym_user.language = lang
request.cym_user.save()
return redirect('index')
|
nilq/baby-python
|
python
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import priority_group_table
import priority_table
import remap
class cee_map(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-cee-map - based on the path /cee-map. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__name','__precedence','__priority_group_table','__priority_table','__remap',)
_yang_name = 'cee-map'
_rest_name = 'cee-map'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__remap = YANGDynClass(base=remap.remap, is_container='container', presence=False, yang_name="remap", rest_name="remap", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Class of Service (CoS) to be \n remapped', u'callpoint': u'qos_cee_remap', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='container', is_config=True)
self.__priority_table = YANGDynClass(base=priority_table.priority_table, is_container='container', presence=False, yang_name="priority-table", rest_name="priority-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Priority Table', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'qos_priority_map', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='container', is_config=True)
self.__name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='cee-map-name-type', is_config=True)
self.__precedence = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'1 .. 100']}), is_leaf=True, yang_name="precedence", rest_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Precedence value'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='int32', is_config=True)
self.__priority_group_table = YANGDynClass(base=YANGListType("PGID",priority_group_table.priority_group_table, yang_name="priority-group-table", rest_name="priority-group-table", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='PGID', extensions={u'tailf-common': {u'info': u' Configure Priority Group Table', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'qos_priority_group'}}), is_container='list', yang_name="priority-group-table", rest_name="priority-group-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Priority Group Table', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'qos_priority_group'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'cee-map']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'cee-map']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /cee_map/name (cee-map-name-type)
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /cee_map/name (cee-map-name-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='cee-map-name-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with cee-map-name-type""",
'defined-type': "brocade-cee-map:cee-map-name-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='cee-map-name-type', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,31})'}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='cee-map-name-type', is_config=True)
def _get_precedence(self):
"""
Getter method for precedence, mapped from YANG variable /cee_map/precedence (int32)
YANG Description: CEE map precedence value
"""
return self.__precedence
def _set_precedence(self, v, load=False):
"""
Setter method for precedence, mapped from YANG variable /cee_map/precedence (int32)
If this variable is read-only (config: false) in the
source YANG file, then _set_precedence is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_precedence() directly.
YANG Description: CEE map precedence value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'1 .. 100']}), is_leaf=True, yang_name="precedence", rest_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Precedence value'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='int32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """precedence must be of a type compatible with int32""",
'defined-type': "int32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'1 .. 100']}), is_leaf=True, yang_name="precedence", rest_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Precedence value'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='int32', is_config=True)""",
})
self.__precedence = t
if hasattr(self, '_set'):
self._set()
def _unset_precedence(self):
self.__precedence = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['-2147483648..2147483647']}, int_size=32), restriction_dict={'range': [u'1 .. 100']}), is_leaf=True, yang_name="precedence", rest_name="precedence", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Precedence value'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='int32', is_config=True)
def _get_priority_group_table(self):
"""
Getter method for priority_group_table, mapped from YANG variable /cee_map/priority_group_table (list)
"""
return self.__priority_group_table
def _set_priority_group_table(self, v, load=False):
"""
Setter method for priority_group_table, mapped from YANG variable /cee_map/priority_group_table (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority_group_table is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority_group_table() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("PGID",priority_group_table.priority_group_table, yang_name="priority-group-table", rest_name="priority-group-table", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='PGID', extensions={u'tailf-common': {u'info': u' Configure Priority Group Table', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'qos_priority_group'}}), is_container='list', yang_name="priority-group-table", rest_name="priority-group-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Priority Group Table', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'qos_priority_group'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority_group_table must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("PGID",priority_group_table.priority_group_table, yang_name="priority-group-table", rest_name="priority-group-table", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='PGID', extensions={u'tailf-common': {u'info': u' Configure Priority Group Table', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'qos_priority_group'}}), is_container='list', yang_name="priority-group-table", rest_name="priority-group-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Priority Group Table', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'qos_priority_group'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='list', is_config=True)""",
})
self.__priority_group_table = t
if hasattr(self, '_set'):
self._set()
def _unset_priority_group_table(self):
self.__priority_group_table = YANGDynClass(base=YANGListType("PGID",priority_group_table.priority_group_table, yang_name="priority-group-table", rest_name="priority-group-table", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='PGID', extensions={u'tailf-common': {u'info': u' Configure Priority Group Table', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'qos_priority_group'}}), is_container='list', yang_name="priority-group-table", rest_name="priority-group-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Priority Group Table', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'qos_priority_group'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='list', is_config=True)
def _get_priority_table(self):
"""
Getter method for priority_table, mapped from YANG variable /cee_map/priority_table (container)
YANG Description: Configure Priority Table
"""
return self.__priority_table
def _set_priority_table(self, v, load=False):
"""
Setter method for priority_table, mapped from YANG variable /cee_map/priority_table (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority_table is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority_table() directly.
YANG Description: Configure Priority Table
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=priority_table.priority_table, is_container='container', presence=False, yang_name="priority-table", rest_name="priority-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Priority Table', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'qos_priority_map', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority_table must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=priority_table.priority_table, is_container='container', presence=False, yang_name="priority-table", rest_name="priority-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Priority Table', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'qos_priority_map', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='container', is_config=True)""",
})
self.__priority_table = t
if hasattr(self, '_set'):
self._set()
def _unset_priority_table(self):
self.__priority_table = YANGDynClass(base=priority_table.priority_table, is_container='container', presence=False, yang_name="priority-table", rest_name="priority-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Priority Table', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'qos_priority_map', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='container', is_config=True)
def _get_remap(self):
"""
Getter method for remap, mapped from YANG variable /cee_map/remap (container)
"""
return self.__remap
def _set_remap(self, v, load=False):
"""
Setter method for remap, mapped from YANG variable /cee_map/remap (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_remap is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_remap() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=remap.remap, is_container='container', presence=False, yang_name="remap", rest_name="remap", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Class of Service (CoS) to be \n remapped', u'callpoint': u'qos_cee_remap', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """remap must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=remap.remap, is_container='container', presence=False, yang_name="remap", rest_name="remap", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Class of Service (CoS) to be \n remapped', u'callpoint': u'qos_cee_remap', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='container', is_config=True)""",
})
self.__remap = t
if hasattr(self, '_set'):
self._set()
def _unset_remap(self):
self.__remap = YANGDynClass(base=remap.remap, is_container='container', presence=False, yang_name="remap", rest_name="remap", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' Configure Class of Service (CoS) to be \n remapped', u'callpoint': u'qos_cee_remap', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='container', is_config=True)
name = __builtin__.property(_get_name, _set_name)
precedence = __builtin__.property(_get_precedence, _set_precedence)
priority_group_table = __builtin__.property(_get_priority_group_table, _set_priority_group_table)
priority_table = __builtin__.property(_get_priority_table, _set_priority_table)
remap = __builtin__.property(_get_remap, _set_remap)
_pyangbind_elements = {'name': name, 'precedence': precedence, 'priority_group_table': priority_group_table, 'priority_table': priority_table, 'remap': remap, }
|
nilq/baby-python
|
python
|
from collections import OrderedDict
class Decision:
def __init__(self, id, name):
self.id = id
self.name = name
self.decisionTables = []
class DecisionTable:
def __init__(self, id, name):
self.id = id
self.name = name
self.inputs = []
self.outputs = []
self.rules = []
class Input:
def __init__(self, id, label, name, typeRef):
self.id = id
self.label = label
self.name = name
self.typeRef = typeRef
class InputEntry:
def __init__(self, id, input):
self.id = id
self.input = input
self.description = ''
self.text = ''
self.operators = []
class Output:
def __init__(self, id, label, name, typeRef):
self.id = id
self.label = label
self.name = name
self.typeRef = typeRef
class OutputEntry:
def __init__(self, id, output):
self.id = id
self.output = output
self.description = ''
self.text = ''
self.parsedValue = None
class Rule:
def __init__(self, id):
self.id = id
self.description = ''
self.inputEntries = []
self.outputEntries = []
def outputAsDict(self):
out = OrderedDict()
for outputEntry in self.outputEntries:
out[outputEntry.output.label] = outputEntry.parsedValue # TODO: label?
return out
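# A minimal usage sketch (ids, labels, and the discount value are made up):
#   out = Output('o1', 'discount', 'discount', 'double')
#   entry = OutputEntry('oe1', out)
#   entry.parsedValue = 0.1
#   rule = Rule('r1')
#   rule.outputEntries.append(entry)
#   rule.outputAsDict()  # OrderedDict([('discount', 0.1)])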
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import numpy as np
import math
import random
import time
import rospy
import tf
from geometry_msgs.msg import Point, Pose, Twist
from utils import generatePoint2D, bcolors, close2Home
WHEEL_OFFSET = 0
class Wanderer():
"""
Super class for all Wanderer pilots
"""
def __init__(self):
"""
"""
rospy.logdebug("Initiate Wanderer...")
# parameters
self.cmd_vel = Twist()
self.stop_cmd = Twist()
# self._check_all_sensors_ready()
# subscribers
# rospy.Subscriber("/odom", Odometry, self._odom_callback)
# publishers
self._cmd_vel_pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
rospy.logdebug("Finished Wanderer init...")
# super(Wanderer, self).__init__()
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # Avoid an error when the world is reset and ROS time jumps backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
def move(self):
self._check_publishers_connection()
rate = rospy.Rate(100)
for _ in range(10):
self._cmd_vel_pub.publish(self.cmd_vel)
rospy.logdebug("cmd_vel --> \nlinear:{} \nangular: {}".format(self.cmd_vel.linear.x, self.cmd_vel.angular.z))
rate.sleep()
def self_test(self):
"""
Moves Wanderer forward for 2 seconds
backwards for 2 seconds
Spin Wanderer counter-clockwise for 2 seceonds
clockwise for 2 seconds
Move Wanderer towards northwest for 2 seconds
southeast for 2 seceonds
northeast for 2 seconds
southwest for 2 seconds
"""
rospy.logdebug("Start self testing...")
self._check_publishers_connection()
# move forward
self.cmd_vel.linear.x = 0.4
self.cmd_vel.angular.z = 0
for _ in range(20):
self.move()
rospy.logdebug("Moving straight forward @ speed: {}".format(self.cmd_vel))
# move backward
self.cmd_vel.linear.x = -0.4
self.cmd_vel.angular.z = 0
for _ in range(20):
self.move()
rospy.logdebug("Moving straight backward @ speed: {}".format(self.cmd_vel))
# spin counter-clockwise
self.cmd_vel.linear.x = 0
self.cmd_vel.angular.z = np.pi/4
for _ in range(20):
self.move()
rospy.logdebug("Spinning counter-clockwise @ speed: {}".format(self.cmd_vel))
# spin clockwise
self.cmd_vel.linear.x = 0
self.cmd_vel.angular.z = -np.pi/4
for _ in range(20):
self.move()
rospy.logdebug("Spinning clockwise @ speed: {}".format(self.cmd_vel))
# move northwest
self.cmd_vel.linear.x = .4
self.cmd_vel.angular.z = np.pi/4
for _ in range(20):
self.move()
rospy.logdebug("Heading northwest @ speed: {}".format(self.cmd_vel))
# move southeast
self.cmd_vel.linear.x = -.4
self.cmd_vel.angular.z = -np.pi/4
for _ in range(20):
self.move()
rospy.logdebug("Backing southeast @ speed: {}".format(self.cmd_vel))
# move northeast
self.cmd_vel.linear.x = .4
self.cmd_vel.angular.z = -np.pi/4
for _ in range(20):
self.move()
rospy.logdebug("Heading northeast @ speed: {}".format(self.cmd_vel))
# move southwest
self.cmd_vel.linear.x = -.4
self.cmd_vel.angular.z = np.pi/4
for _ in range(20):
self.move()
rospy.logdebug("Backing southwest @ speed: {}".format(self.cmd_vel))
rospy.logdebug("Self-test done!!!")
def clean_shutdown(self):
print("\n\nTurning off the wanderer...")
self._cmd_vel_pub.publish(self.stop_cmd)
return True
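# A minimal launch sketch, assuming a running ROS master and a robot (or
# simulator) listening on /cmd_vel; the node name here is illustrative.
if __name__ == "__main__":
    rospy.init_node("wanderer_self_test", log_level=rospy.DEBUG)
    wanderer = Wanderer()
    rospy.on_shutdown(wanderer.clean_shutdown)
    wanderer.self_test()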
|
nilq/baby-python
|
python
|
"""Quantum Inspire library
Copyright 2019 QuTech Delft
qilib is available under the [MIT open-source license](https://opensource.org/licenses/MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from setuptools import setup
def get_version_number(module: str) -> str:
""" Extract the version number from the source code.
Pass the source module that contains the version.py file.
This version number will be returned as a string.
Args:
module: module containing the version.py file
Returns:
the version number.
"""
with open(f'src/{module}/version.py') as f:
content = f.read()
return content.split('\'')[1]
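# For reference, this parsing assumes a version.py of the (hypothetical) form
#     __version__ = '1.2.3'
# so content.split('\'')[1] picks out the string between the first pair of quotes.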
def get_long_description() -> str:
""" Extract the long description from the README file."""
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
return long_description
setup(name='qilib',
description='Quantum Library for the Quantum Inspire platform',
long_description=get_long_description(),
long_description_content_type='text/markdown',
version=get_version_number('qilib'),
author='QuantumInspire',
python_requires='>=3.7',
package_dir={'': 'src'},
packages=['qilib', 'qilib.configuration_helper', 'qilib.configuration_helper.adapters',
'qilib.data_set', 'qilib.utils', 'qilib.utils.storage'],
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'],
license='Other/Proprietary License',
install_requires=['spirack>=0.1.8', 'numpy', 'serialize', 'zhinst', 'pymongo',
'requests', 'qcodes', 'qcodes_contrib_drivers', 'dataclasses-json'],
extras_require={
'dev': ['pytest>=3.3.1', 'coverage>=4.5.1', 'mongomock==3.20.0', 'mypy', 'pylint'],
})
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
from datetime import datetime
import os
import tensorflow as tf
def batch_size_from_env(default=1):
"""Get batch size from environment variable SALUS_BATCH_SIZE"""
try:
return int(os.environ.get('SALUS_BATCH_SIZE', ''))
except ValueError:
return default
def iteration_num_from_env(default=20):
"""Get iteration number from environment variable EXEC_ITER_NUMBER"""
try:
num = int(os.getenv('EXEC_ITER_NUMBER', default=''))
return num
except ValueError:
return default
@contextmanager
def initialized_scope(sess):
"""Initialize and start queue runners for session"""
sess.run(initialize_op())
coord = tf.train.Coordinator()
queue_threads = tf.train.start_queue_runners(sess, coord)
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
print(qr.name)
print("{}: Session initialized".format(datetime.now()))
yield coord
coord.request_stop()
coord.join(queue_threads)
def initialize_op():
"""Operation to initialize global and local variables"""
if hasattr(tf, 'global_variables_initializer'):
        # TensorFlow 0.12 and later
return tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
else:
return tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables())
def global_variables():
if hasattr(tf, 'global_variables'):
return tf.global_variables()
else:
return tf.all_variables()
def image_summary(*args, **kwargs):
if hasattr(tf.summary, 'image'):
return tf.summary.image(*args, **kwargs)
else:
return tf.image_summary(*args, **kwargs)
def scalar_summary(*args, **kwargs):
if hasattr(tf.summary, 'scalar'):
return tf.summary.scalar(*args, **kwargs)
else:
return tf.scalar_summary(*args, **kwargs)
def histogram_summary(*args, **kwargs):
if hasattr(tf.summary, 'histogram'):
return tf.summary.histogram(*args, **kwargs)
else:
return tf.histogram_summary(*args, **kwargs)
def merge_all_summaries(*args, **kwargs):
if hasattr(tf.summary, 'merge_all'):
return tf.summary.merge_all(*args, **kwargs)
else:
return tf.merge_all_summaries(*args, **kwargs)
def image_standardization(image):
if hasattr(tf.image, 'per_image_standardization'):
return tf.image.per_image_standardization(image)
else:
return tf.image.per_image_whitening(image)
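# A minimal usage sketch for the compatibility helpers above (train_op and the
# queue-runner input pipeline are assumed to be defined elsewhere):
#
#   with tf.Session() as sess:
#       with initialized_scope(sess) as coord:
#           while not coord.should_stop():
#               sess.run(train_op)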
|
nilq/baby-python
|
python
|
import logging
import redis
import time
import iloghub
iloghub = iloghub.LogHub()
iloghub.config()
# create logger
logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)  # without this, the effective level is WARNING and debug/info records are dropped
#formater = logging.Formatter(style=" %(message)s")
fmt = "%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s"
datefmt = "%H:%M:%S"
formatter = logging.Formatter(fmt, datefmt)
pool = redis.ConnectionPool(host='10.8.3.51', port=6379, db=0)
r = redis.Redis(connection_pool=pool)
r.execute_command("AUTH", "12345678901234567890")
# Example of publishing log messages
# logging.basicConfig(level=logging.DEBUG,
# format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
# datefmt='%H:%M:%S',
# filename='D:/test1/test.log',
# filemode='w')
class MyHandle(logging.Handler):
    def emit(self, record):
        lineLog = self.format(record)  # uses the formatter set via setFormatter
        r.publish('hyp-dev.test', lineLog)
        print(lineLog)
class MyFilter(logging.Filter):
    def filter(self, record):
        print("filter:" + record.msg)
        return True  # a filter must return a truthy value, otherwise every record is dropped
myFilter = MyFilter()
handle = MyHandle()
handle.setFormatter(formatter)
# add the Redis publishing handler to the logger
logger.addHandler(handle)
#logger.addFilter(myFilter)
# 'application' code
logger.debug('debug message')
logger.info('info message')
logger.warning('warn message')  # logger.warn is deprecated
logger.error('error message')
for i in range(100):
logger.critical('critical message')
time.sleep(1)
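# A matching subscriber sketch for a separate process (channel name taken from
# MyHandle above; kept commented out so this script stays publish-only):
#
#   sub = r.pubsub()
#   sub.subscribe('hyp-dev.test')
#   for message in sub.listen():
#       if message['type'] == 'message':
#           print(message['data'])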
|
nilq/baby-python
|
python
|
from os.path import exists
import speech_recognition as sr
import mss
import numpy as np
import os
from PIL import Image
path, dirs, files = next(os.walk("D:/Document/3INFO/BDD/Demon/"))
monitor = 2
i = len(files)
import glob
def record_volume(path, i):
    # assumption: the transcript is paired with screen{i}.png in this directory (see compile())
    fichier = open(path + "screen" + str(i) + ".txt", "a")
    r = sr.Recognizer()
    with sr.Microphone(device_index=3) as source:
        print('.')
        r.adjust_for_ambient_noise(source, duration=0.5)
print('...')
audio = r.listen(source)
print('.')
    try:
        query = r.recognize_google(audio, language='fr-FR')
        text = query.lower()
        fichier.write(text + "\n")
        fichier.close()
        print(f' : {text}')
        rename(path, i)
    except Exception:
        print('Error')
        rename(path, i)
def repartition(filename):
image_file = Image.open(filename)
nb = image_file.convert('1')
tab = np.array(nb.getdata())
nt = tab.size
n1 = np.count_nonzero(tab == tab.max())
return n1 / nt
def rename(path, i):
    with mss.mss() as mss_instance:
        mss_instance.shot(mon=2, output=path + "screen" + str(i) + "bis.png")
    if repartition(path + "screen" + str(i) + ".png") - repartition(path + "screen" + str(i) + "bis.png") > -0.000001:
        print("if")
        # the new capture matches the old one: replace the old screenshot with it
        os.remove(path + "screen" + str(i) + ".png")
        os.rename(path + "screen" + str(i) + "bis.png", path + "screen" + str(i) + ".png")
        record_volume(path, i)
    else:
        with mss.mss() as mss_instance:
            mss_instance.shot(mon=2, output=path + "screen" + str(i + 1) + ".png")
        record_volume(path, i + 1)
# with mss.mss() as mss_instance:
# mss_instance.shot(mon=2, output=path)
# record_volume(path,i)
from pptx import Presentation
from pptx.util import Inches
def compile():
image=glob.glob(path+"*png")
print(image)
data=glob.glob(path+"*txt")
print(data)
prs = Presentation()
blank_slide_layout = prs.slide_layouts[6]
for img_path in image:
slide = prs.slides.add_slide(blank_slide_layout)
left = top = Inches(0)
pic = slide.shapes.add_picture(img_path, left, top,height=Inches(10))
notes_slide = slide.notes_slide
text_frame = notes_slide.notes_text_frame
if exists(img_path[0:-3]+".txt"):
text_frame.text = open(img_path[0:-3]+".txt","r").read()
prs.save('test.pptx')
def start():
    with mss.mss() as mss_instance:
        mss_instance.shot(mon=monitor, output=path + "screen" + str(i) + ".png")
    record_volume(path, i)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""SymptomSuggestion.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1TCme3BRC34OqIgLUca6GkivYK-1HFs-j
"""
# !git clone https://github.com/rahul15197/Disease-Detection-based-on-Symptoms
# cd Disease-Detection-based-on-Symptoms
"""# **Disease Detection using Symptoms and Treatment recommendation**
This notebook contains code to detect disease using the symptoms entered and selected by the user and recommends the appropriate treatments.
"""
# Predicts diseases based on the symptoms entered and selected by the user.
# importing all necessary libraries
import warnings
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from sklearn.model_selection import train_test_split, cross_val_score
from statistics import mean
from nltk.corpus import wordnet
import requests
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from itertools import combinations
from time import time
from collections import Counter
import operator
# from xgboost import XGBClassifier
import math
# from Treatment import diseaseDetail
from sklearn.linear_model import LogisticRegression
import pickle
from sklearn.pipeline import make_pipeline
warnings.simplefilter("ignore")
"""Download resources required for NLTK pre-processing"""
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
"""**synonyms function** finds the synonymous terms of a symptom entered by the user.
This is necessary as the user may use a term for a symptom which may be different from the one present in dataset.
This improves the accuracy by reducing the wrong predictions even when symptoms for a disease are entered slightly different than the ones on which model is trained.
*Synonyms are searched on Thesaurus.com and NLTK Wordnet*
"""
# returns the list of synonyms of the input word from thesaurus.com (https://www.thesaurus.com/) and wordnet (https://www.nltk.org/howto/wordnet.html)
import re
from googlesearch import search
import warnings
warnings.filterwarnings("ignore")
import requests
from bs4 import BeautifulSoup
# Take input a disease and return the content of wikipedia's infobox for that specific disease
def diseaseDetail(term):
diseases=[term]
ret=term+"\n"
for dis in diseases:
# search "disease wilipedia" on google
query = dis+' wikipedia'
for sr in search(query+".co.in"):
# open wikipedia link
match=re.search(r'wikipedia',sr)
filled = 0
if match:
wiki = requests.get(sr,verify=False)
soup = BeautifulSoup(wiki.content, 'html5lib')
# Fetch HTML code for 'infobox'
info_table = soup.find("table", {"class":"infobox"})
if info_table is not None:
# Preprocess contents of infobox
for row in info_table.find_all("tr"):
data=row.find("th",{"scope":"row"})
if data is not None:
symptom=str(row.find("td"))
symptom = symptom.replace('.','')
symptom = symptom.replace(';',',')
symptom = symptom.replace('<b>','<b> \n')
symptom=re.sub(r'<a.*?>','',symptom) # Remove hyperlink
symptom=re.sub(r'</a>','',symptom) # Remove hyperlink
symptom=re.sub(r'<[^<]+?>',' ',symptom) # All the tags
symptom=re.sub(r'\[.*\]','',symptom) # Remove citation text
symptom=symptom.replace(">",">")
ret+=data.get_text()+" - "+symptom+"\n"
# print(data.get_text(),"-",symptom)
filled = 1
if filled:
break
return ret
# returns the list of synonyms of the input word from thesaurus.com (https://www.thesaurus.com/) and wordnet (https://www.nltk.org/howto/wordnet.html)
def synonyms(term):
synonyms = []
response = requests.get('https://www.thesaurus.com/browse/{}'.format(term))
soup = BeautifulSoup(response.content, "html.parser")
try:
container=soup.find('section', {'class': 'MainContentContainer'})
row=container.find('div',{'class':'css-191l5o0-ClassicContentCard'})
row = row.find_all('li')
for x in row:
synonyms.append(x.get_text())
    except Exception:
        pass  # thesaurus.com markup may have changed; fall back to WordNet only
for syn in wordnet.synsets(term):
synonyms+=syn.lemma_names()
return set(synonyms)
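# Quick sanity check; the exact set returned depends on thesaurus.com's
# current markup and the locally installed WordNet corpus:
#   print(synonyms('fever'))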
# utlities for pre-processing
stop_words = stopwords.words('english')
lemmatizer = WordNetLemmatizer()
splitter = RegexpTokenizer(r'\w+')
"""**Disease Symptom dataset** was created in a separate python program.
**Dataset scrapping** was done using **NHP website** and **wikipedia data**
Disease Combination dataset contains the combinations for each of the disease present in dataset as practically it is often observed that it is not necessary for a person to have a disease when all the symptoms are faced by the patient or the user.
*To tackle this problem, combinations are made with the symptoms for each disease.*
**This increases the size of the data exponentially and helps the model to predict the disease with much better accuracy.**
*df_comb -> Dataframe consisting of dataset generated by combining symptoms for each disease.*
*df_norm -> Dataframe consisting of dataset which contains a single row for each diseases with all the symptoms for that corresponding disease.*
**Dataset contains 261 diseases and their symptoms**
"""
# Load Dataset scraped from NHP (https://www.nhp.gov.in/disease-a-z) & Wikipedia
# Scraping and creation of the dataset CSVs is done in a separate program
df_comb = pd.read_csv("Health_Care_Unit/contents/dataset/dis_sym_dataset_comb.csv") # Disease combination
df_norm = pd.read_csv("Health_Care_Unit/contents/dataset/dis_sym_dataset_norm.csv") # Individual Disease
X = df_comb.iloc[:, 1:]
Y = df_comb.iloc[:, 0:1]
"""Using **Logistic Regression (LR) Classifier** as it gives better accuracy compared to other classification models as observed in the comparison of model accuracies in Model_latest.py
Cross validation is done on dataset with cv = 5
"""
# lr = LogisticRegression()
# lr = lr.fit(X, Y)
# # scores = cross_val_score(lr, X, Y, cv=5)
# pipeline_ls = make_pipeline(lemmatizer(tokenizer = RegexpTokenizer(r'[A-Za-z]+').tokenize,stop_words='english'), LogisticRegression())
# ##(r'\b(?:http|ftp)s?://\S*\w|\w+|[^\w\s]+') ([a-zA-Z]+)([0-9]+) -- these tokenizers gave me low accuracy
# pickle.dump(lr,open('symptom.pkl','wb'))
X = df_norm.iloc[:, 1:]
Y = df_norm.iloc[:, 0:1]
# List of symptoms
dataset_symptoms = list(X.columns)
"""# Symptoms initially taken from user."""
def take_input(symptoms):
# Taking symptoms from user as input
user_symptoms = symptoms.lower().split(',')
# Preprocessing the input symptoms
processed_user_symptoms=[]
for sym in user_symptoms:
sym=sym.strip()
sym=sym.replace('-',' ')
sym=sym.replace("'",'')
sym = ' '.join([lemmatizer.lemmatize(word) for word in splitter.tokenize(sym)])
processed_user_symptoms.append(sym)
"""Pre-processing on symptoms entered by user is done."""
# Taking each user symptom and finding all its synonyms and appending it to the pre-processed symptom string
user_symptoms = []
for user_sym in processed_user_symptoms:
user_sym = user_sym.split()
str_sym = set()
for comb in range(1, len(user_sym)+1):
for subset in combinations(user_sym, comb):
subset=' '.join(subset)
subset = synonyms(subset)
str_sym.update(subset)
str_sym.add(' '.join(user_sym))
user_symptoms.append(' '.join(str_sym).replace('_',' '))
# query expansion performed by joining synonyms found for each symptoms initially entered
# print("After query expansion done by using the symptoms entered")
# print(user_symptoms)
"""The below procedure is performed in order to show the symptom synonmys found for the symptoms entered by the user.
The symptom synonyms and user symptoms are matched with the symptoms present in dataset. Only the symptoms which matches the symptoms present in dataset are shown back to the user.
"""
# Loop over all the symptoms in dataset and check its similarity score to the synonym string of the user-input
# symptoms. If similarity>0.5, add the symptom to the final list
found_symptoms = set()
for idx, data_sym in enumerate(dataset_symptoms):
data_sym_split=data_sym.split()
for user_sym in user_symptoms:
count=0
for symp in data_sym_split:
if symp in user_sym.split():
count+=1
if count/len(data_sym_split)>0.5:
found_symptoms.add(data_sym)
found_symptoms = list(found_symptoms)
"""## **Prompt the user to select the relevant symptoms by entering the corresponding indices.**"""
# Print all found symptoms
value = "Top matching symptoms from your search!\n"
for idx, symp in enumerate(found_symptoms):
value += str(idx)+":"+str(symp)+"\n"
return value, found_symptoms
def co_occur(rel_symps, found_symptoms):
# Show the related symptoms found in the dataset and ask user to select among them
select_list = rel_symps.split()
# Find other relevant symptoms from the dataset based on user symptoms based on the highest co-occurance with the
# ones that is input by the user
dis_list = set()
final_symp = []
counter_list = []
for idx in select_list:
symp=found_symptoms[int(idx)]
final_symp.append(symp)
dis_list.update(set(df_norm[df_norm[symp]==1]['label_dis']))
for dis in dis_list:
row = df_norm.loc[df_norm['label_dis'] == dis].values.tolist()
row[0].pop(0)
for idx,val in enumerate(row[0]):
if val!=0 and dataset_symptoms[idx] not in final_symp:
counter_list.append(dataset_symptoms[idx])
"""## To find symptoms which generally co-occur, for example with symptoms like cough, headache generally happens hence they co-occur."""
# Symptoms that co-occur with the ones selected by user
dict_symp = dict(Counter(counter_list))
dict_symp_tup = sorted(dict_symp.items(), key=operator.itemgetter(1),reverse=True)
#print(dict_symp_tup)
"""## User is presented with a list of co-occuring symptoms to select from and is performed iteratively to recommend more possible symptoms based on the similarity to the previously entered symptoms.
As the co-occuring symptoms can be in overwhelming numbers, only the top 5 are recommended to the user from which user can select the symptoms.
If user does not have any of those 5 symptoms and wants to see the next 5, he can do so by giving input as -1.
To stop the recommendation, user needs to give input as "No".
"""
found_symptoms=[]
count=0
value = "Common co-occuring symptoms:\n"
for tup in dict_symp_tup:
count+=1
found_symptoms.append(tup[0])
if count==len(dict_symp_tup):
for idx,ele in enumerate(found_symptoms):
value += str(idx)+":"+str(ele)+"\n"
break
return value, dict_symp_tup, final_symp
def final_pred(extra_symp, dict_symp_tup, final_symp):
    # Iteratively suggest top co-occurring symptoms to the user and ask them to select the applicable ones
found_symptoms=[]
count=0
for tup in dict_symp_tup:
count+=1
found_symptoms.append(tup[0])
if count==len(dict_symp_tup):
select_list = extra_symp.split()
if select_list[0]=='no':
break
if select_list[0]=='-1':
found_symptoms = []
continue
for idx in select_list:
final_symp.append(found_symptoms[int(idx)])
found_symptoms = []
"""Final Symptom list"""
# Create query vector based on symptoms selected by the user
# print("\nFinal list of Symptoms that will be used for prediction:")
sample_x = [0 for x in range(0,len(dataset_symptoms))]
for val in final_symp:
# print(val)
sample_x[dataset_symptoms.index(val)]=1
"""Prediction of disease is done"""
# Predict disease
# lr = LogisticRegression()
# lr = lr.fit(X, Y)
loaded_model = pickle.load(open('Health_Care_Unit/contents/symptom.pkl', 'rb'))
prediction = loaded_model.predict_proba([sample_x])
"""Show top k diseases and their probabilities to the user.
K in this case is 10
"""
k = 10
diseases = list(set(Y['label_dis']))
diseases.sort()
topk = prediction[0].argsort()[-k:][::-1]
"""# **Showing the list of top k diseases to the user with their prediction probabilities.**
# **For getting information about the suggested treatments, user can enter the corresponding index to know more details.**
"""
value = "Top 10 diseases predicted based on symptoms\n"
topk_dict = {}
# Show top 10 highly probable disease to the user.
for idx,t in enumerate(topk):
match_sym=set()
row = df_norm.loc[df_norm['label_dis'] == diseases[t]].values.tolist()
row[0].pop(0)
for idx,val in enumerate(row[0]):
if val!=0:
match_sym.add(dataset_symptoms[idx])
prob = (len(match_sym.intersection(set(final_symp)))+1)/(len(set(final_symp))+1)
# prob *= mean(scores)
topk_dict[t] = prob
j = 0
topk_index_mapping = {}
topk_sorted = dict(sorted(topk_dict.items(), key=lambda kv: kv[1], reverse=True))
for key in topk_sorted:
prob = topk_sorted[key]*100
value += str(j) + " Disease name:"+str(diseases[key])+ "\tProbability:"+str(round(prob, 2))+"%"+"\n"
topk_index_mapping[j] = key
j += 1
return value, topk_index_mapping, diseases
def more_dat(more, topk_index_mapping, diseases):
if more!='-1':
dis=diseases[topk_index_mapping[int(more)]]
value = str(diseaseDetail(dis))
return value
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
"""
TXFMTrackService
(C) 2015
David Rieger
"""
import bottle
from bottle import route, run, response
from storagemanager import StorageManager
sm = StorageManager()
@route('/api/get/all')
def get_all_songs():
response.headers['Access-Control-Allow-Origin'] = '*'
return sm.get_songs()
@route('/api/get/time/<time>')
def get_song_by_time(time):
    response.headers['Access-Control-Allow-Origin'] = '*'
    return sm.get_songs(time=time, scope=20)
@route('/api/get/text/<text>')
def get_song_by_text(text):
    response.headers['Access-Control-Allow-Origin'] = '*'
    return sm.get_songs(text=text)
@route('/api/full/time/<time>')
def get_full_by_time(time):
    response.headers['Access-Control-Allow-Origin'] = '*'
    return sm.get_songs(time=time, scope=20, cache_only=False)
@route('/api/full/text/<text>')
def get_full_by_text(text):
    response.headers['Access-Control-Allow-Origin'] = '*'
    return sm.get_songs(text=text, cache_only=False)
app = bottle.default_app()
if __name__ == '__main__':
run(host="localhost", port=8080)
|
nilq/baby-python
|
python
|
from domain.Contest.database.contest_repository import ContestRepository
from domain.Contest.usecase.contest_interactor import ContestInteractor
from infrastructure.database.postgres.sqlhandler import SqlHandler
class ContestController:
def __init__(self, sqlhandler: SqlHandler):
self.interactor = ContestInteractor(ContestRepository(sqlhandler))
async def contests(self, req, resp):
contests = []
for contest in self.interactor.contests():
contests.append(contest.as_json())
resp.media = {"contests": contests}
resp.status_code = 200
async def contest(self, req, resp, *, contest_id):
contest = self.interactor.contest(contest_id)
if contest is None:
res_data = None
res_code = 400
else:
res_data = contest.as_json()
res_code = 200
resp.media = {"contest": res_data}
resp.status_code = res_code
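# Wiring sketch (the req/resp handler signatures suggest the `responder`
# framework; route paths and names here are illustrative):
#   import responder
#   api = responder.API()
#   controller = ContestController(sqlhandler)
#   api.add_route("/contests", controller.contests)
#   api.add_route("/contests/{contest_id}", controller.contest)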
|
nilq/baby-python
|
python
|
# dht11_serial.py - print humidity and temperature using DHT11 sensor
# (c) BotBook.com - Karvinen, Karvinen, Valtokari
import time
import serial # <1>
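# Assumed serial line format, inferred from the parsing in main() below (the
# actual Arduino sketch output may differ): three whitespace-separated fields,
# with the reading in arr[1] and the unit ('%' for humidity, anything else for
# Celsius) in arr[2], e.g. b"RH: 45.0 %".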
def main():
port = serial.Serial("/dev/ttyACM0", baudrate=115200, timeout=None) # <2>
while True:
        line = port.readline().decode('ascii', errors='replace')  # <3> decode bytes for Python 3
        arr = line.split()  # <4>
if len(arr) < 3: # <5>
continue # <6>
dataType = arr[2]
data = float(arr[1]) # <7>
if dataType == '%':
print("Humidity: %.1f %%" % data)
else:
print("Temperature: %.1f C" % data)
time.sleep(0.01)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
corruptionValues = {
"Glimpse_of_Clarity_1": 15,
"Crit_DMG_1": 10,
"Crit_DMG_2": 15,
"Crit_DMG_3": 20,
"Flash_of_Insight_1": 20,
"Lash_of_the_Void_1": 25,
"Percent_Crit_1": 10,
"Percent_Crit_2": 15,
"Percent_Crit_3": 20,
"Percent_Haste_1": 10,
"Percent_Haste_2": 15,
"Percent_Haste_3": 20,
"Percent_Vers_1": 10,
"Percent_Vers_2": 15,
"Percent_Vers_3": 20,
"Percent_Mast_1": 10,
"Percent_Mast_2": 15,
"Percent_Mast_3": 20,
"Crit_Proc_1": 15,
"Crit_Proc_2": 20,
"Crit_Proc_3": 35,
"Haste_Proc_1": 15,
"Haste_Proc_2": 20,
"Haste_Proc_3": 35,
"Versatility_Proc_1": 15,
"Versatility_Proc_2": 20,
"Versatility_Proc_3": 35,
"Mastery_Proc_1": 15,
"Mastery_Proc_2": 20,
"Mastery_Proc_3": 35,
"Echoing_Void_1": 25,
"Echoing_Void_2": 35,
"Echoing_Void_3": 60,
"Infinite_Star_1": 20,
"Infinite_Star_2": 50,
"Infinite_Star_3": 75,
"Ineffable_Truth_1": 12,
"Ineffable_Truth_2": 30,
"Twilight_Devastation_1": 25,
"Twilight_Devastation_2": 50,
"Twilight_Devastation_3": 75,
"Twisted_Appendage_1": 15,
"Twisted_Appendage_2": 35,
"Twisted_Appendage_3": 66,
"Void_Ritual_1": 15,
"Void_Ritual_2": 35,
"Void_Ritual_3": 66,
"Gushing_Wound_1": 15
}
|
nilq/baby-python
|
python
|
from enum import Enum, auto
from fastapi import Request
from fastapi.responses import JSONResponse
class ErrCode(Enum):
NO_ERROR = 0
EMAIL_DUPLICATED = auto()
NO_ITEM = auto()
ErrDict = {
    ErrCode.NO_ERROR: "OK",
    ErrCode.EMAIL_DUPLICATED: "An account with the same email already exists.",
    ErrCode.NO_ITEM: "The requested item does not exist.",
}
class ResError(Exception):
status_code = 0
err_code = ErrCode.NO_ERROR
def __init__(self, status_code: int, err_code: ErrCode):
self.status_code = status_code
self.err_code = err_code
def init_app(app):
@app.exception_handler(ResError)
async def exception_handler(request: Request, err: ResError):
content = {
"err_code": err.err_code.name,
"detail": ErrDict[err.err_code],
}
return JSONResponse(
status_code=err.status_code,
content=content,
)
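# Usage sketch (the route and FastAPI app wiring are illustrative, not part of
# this module):
#
#   app = FastAPI()
#   init_app(app)
#
#   @app.get("/items/{item_id}")
#   async def read_item(item_id: int):
#       raise ResError(status_code=404, err_code=ErrCode.NO_ITEM)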
|
nilq/baby-python
|
python
|
import numpy as np
from sklearn.model_selection import TimeSeriesSplit
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
import backtrader as bt
import backtrader.indicators as btind
import datetime as dt
import pandas as pd
import pandas_datareader as web
from pandas import Series, DataFrame
import random
from copy import deepcopy
class TimeSeriesSplitImproved(TimeSeriesSplit):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
    Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
>>> for train_index, test_index in tscv.split(X, fixed_length=True):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [1] TEST: [2]
TRAIN: [2] TEST: [3]
>>> for train_index, test_index in tscv.split(X, fixed_length=True,
... train_splits=2):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0 1] TEST: [2]
TRAIN: [1 2] TEST: [3]
Notes
-----
When ``fixed_length`` is ``False``, the training set has size
``i * train_splits * n_samples // (n_splits + 1) + n_samples %
(n_splits + 1)`` in the ``i``th split, with a test set of size
``n_samples//(n_splits + 1) * test_splits``, where ``n_samples``
is the number of samples. If fixed_length is True, replace ``i``
in the above formulation with 1, and ignore ``n_samples %
(n_splits + 1)`` except for the first training set. The number
of test sets is ``n_splits + 2 - train_splits - test_splits``.
"""
def split(self, X, y=None, groups=None, fixed_length=False,
train_splits=1, test_splits=1):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
        fixed_length : bool, whether training sets should always have
            a common length
train_splits : positive int, for the minimum number of
splits to include in training sets
test_splits : positive int, for the number of splits to
include in the test set
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
train_splits, test_splits = int(train_splits), int(test_splits)
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
if (n_folds - train_splits - test_splits) < 0 and (test_splits > 0):
            raise ValueError(
                ("train_splits and test_splits together must not"
                 " exceed the number of folds: {0}.").format(n_folds))
indices = np.arange(n_samples)
split_size = (n_samples // n_folds)
test_size = split_size * test_splits
train_size = split_size * train_splits
test_starts = range(train_size + n_samples % n_folds,
n_samples - (test_size - split_size),
split_size)
if fixed_length:
for i, test_start in zip(range(len(test_starts)),
test_starts):
rem = 0
if i == 0:
rem = n_samples % n_folds
yield (indices[(test_start - train_size - rem):test_start],
indices[test_start:test_start + test_size])
else:
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
class SMAC(bt.Strategy):
"""A simple moving average crossover strategy; crossing of a fast and slow moving average generates buy/sell
signals"""
params = {"fast": 20, "slow": 50, # The windows for both fast and slow moving averages
"optim": False, "optim_fs": (20, 50)} # Used for optimization; equivalent of fast and slow, but a tuple
# The first number in the tuple is the fast MA's window, the
# second the slow MA's window
def __init__(self):
"""Initialize the strategy"""
self.fastma = dict()
self.slowma = dict()
self.regime = dict()
if self.params.optim: # Use a tuple during optimization
self.params.fast, self.params.slow = self.params.optim_fs # fast and slow replaced by tuple's contents
if self.params.fast > self.params.slow:
raise ValueError(
"A SMAC strategy cannot have the fast moving average's window be " + \
"greater than the slow moving average window.")
for d in self.getdatanames():
# The moving averages
self.fastma[d] = btind.SimpleMovingAverage(self.getdatabyname(d), # The symbol for the moving average
period=self.params.fast, # Fast moving average
plotname="FastMA: " + d)
self.slowma[d] = btind.SimpleMovingAverage(self.getdatabyname(d), # The symbol for the moving average
period=self.params.slow, # Slow moving average
plotname="SlowMA: " + d)
# Get the regime
self.regime[d] = self.fastma[d] - self.slowma[d] # Positive when bullish
def next(self):
"""Define what will be done in a single step, including creating and closing trades"""
for d in self.getdatanames(): # Looping through all symbols
pos = self.getpositionbyname(d).size or 0
if pos == 0: # Are we out of the market?
# Consider the possibility of entrance
                # Notice the indexing; [0] always means the present bar, and [-1] the bar immediately preceding
# Thus, the condition below translates to: "If today the regime is bullish (greater than
# 0) and yesterday the regime was not bullish"
if self.regime[d][0] > 0 and self.regime[d][-1] <= 0: # A buy signal
self.buy(data=self.getdatabyname(d))
else: # We have an open position
if self.regime[d][0] <= 0 and self.regime[d][-1] > 0: # A sell signal
self.sell(data=self.getdatabyname(d))
class PropSizer(bt.Sizer):
"""A position sizer that will buy as many stocks as necessary for a certain proportion of the portfolio
to be committed to the position, while allowing stocks to be bought in batches (say, 100)"""
params = {"prop": 0.1, "batch": 100}
def _getsizing(self, comminfo, cash, data, isbuy):
"""Returns the proper sizing"""
if isbuy: # Buying
target = self.broker.getvalue() * self.params.prop # Ideal total value of the position
price = data.close[0]
shares_ideal = target / price # How many shares are needed to get target
batches = int(shares_ideal / self.params.batch) # How many batches is this trade?
shares = batches * self.params.batch # The actual number of shares bought
if shares * price > cash:
return 0 # Not enough money for this trade
else:
return shares
else: # Selling
return self.broker.getposition(data).size # Clear the position
class AcctValue(bt.Observer):
alias = ('Value',)
lines = ('value',)
plotinfo = {"plot": True, "subplot": True}
def next(self):
self.lines.value[0] = self._owner.broker.getvalue() # Get today's account value (cash + stocks)
class AcctStats(bt.Analyzer):
"""A simple analyzer that gets the gain in the value of the account; should be self-explanatory"""
def __init__(self):
self.start_val = self.strategy.broker.get_value()
self.end_val = None
def stop(self):
self.end_val = self.strategy.broker.get_value()
def get_analysis(self):
return {"start": self.start_val, "end": self.end_val,
"growth": self.end_val - self.start_val, "return": self.end_val / self.start_val}
start = dt.datetime(2018, 1, 1)
end = dt.datetime(2020, 10, 31)
# Different stocks from past posts because of different data source (no plot for NTDOY)
symbols = ["BTC-USD", "ETH-USD", "BNB-USD"]
datafeeds = {s: web.DataReader(s, "yahoo", start, end) for s in symbols}
for df in datafeeds.values():
df["OpenInterest"] = 0 # PandasData reader expects an OpenInterest column;
# not provided by Google and we don't use it so set to 0
cerebro = bt.Cerebro(stdstats=False)
plot_symbols = ["BTC-USD", "ETH-USD", "BNB-USD"]
is_first = True
# plot_symbols = []
for s, df in datafeeds.items():
data = bt.feeds.PandasData(dataname=df, name=s)
if s in plot_symbols:
if is_first:
data_main_plot = data
is_first = False
else:
data.plotinfo.plotmaster = data_main_plot
else:
data.plotinfo.plot = False
cerebro.adddata(data) # Give the data to cerebro
cerebro.broker.setcash(1000000)
cerebro.broker.setcommission(0.02)
cerebro.addstrategy(SMAC)
cerebro.addobserver(AcctValue)
cerebro.addobservermulti(bt.observers.BuySell) # Plots up/down arrows
cerebro.addsizer(PropSizer)
cerebro.addanalyzer(AcctStats)
cerebro.run()
|
nilq/baby-python
|
python
|
import argparse
parser = argparse.ArgumentParser(prog='build_snp_map_for_neale_lab_gwas.py', description='''
Build the SNP map table: phased genotype variant <=> Neale's lab GWAS
''')
parser.add_argument('--genotype-pattern', help='''
In the form: prefix{chr}suffix.
Will load 1..22 chromosomes (no X).
''')
parser.add_argument('--genotype-sample', help='''
The corresponding sample file
''')
parser.add_argument('--output', help='''
File name of output (if not exists, it will be created)
''')
parser.add_argument('--gwas', help='''
Neale's lab GWAS (one GWAS as example,
they all share the same variant set)
''')
args = parser.parse_args()
import logging, time, sys
# configing util
logging.basicConfig(
level = logging.INFO,
stream = sys.stderr,
format = '%(asctime)s %(message)s',
datefmt = '%Y-%m-%d %I:%M:%S %p'
)
import bgen_reader
import pandas as pd
import helper
logging.info('Loading GWAS')
gwas = pd.read_csv(args.gwas, header=0, sep='\t', compression='gzip')
map_table = pd.DataFrame()
for i in range(1, 23):
i = str(i)
logging.info(f'Processing chr{i}: Loading BGEN')
bgen = bgen_reader.read_bgen(
args.genotype_pattern.format(chr=i),
samples_filepath = args.genotype_sample
)
logging.info(f'Processing chr{i}: Loading variant table')
variant = bgen["variants"].compute()
variant['chrom'] = i
logging.info(f'Processing chr{i}: Building variant ID candidates')
variant['allele_1st'] = variant['allele_ids'].apply(lambda x: x.split(',')[0])
variant['allele_2nd'] = variant['allele_ids'].apply(lambda x: x.split(',')[1])
variant['varid1'] = variant[['chrom', 'pos', 'allele_1st', 'allele_2nd']].apply(lambda x: helper.make_id(x), axis=1)
variant['varid2'] = variant[['chrom', 'pos', 'allele_2nd', 'allele_1st']].apply(lambda x: helper.make_id(x), axis=1)
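    # helper.make_id is assumed to produce Neale-lab-style variant IDs
    # ("chr:pos:ref:alt"); varid1/varid2 cover both allele orderings, since the
    # BGEN allele order need not match the GWAS effect/non-effect ordering.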
logging.info(f'Processing chr{i}: Running checker')
variant_check = helper.join_with_varid(
variant['varid1'],
variant['varid2'],
gwas['variant']
)
variant = pd.merge(variant, variant_check, left_on=['varid1', 'varid2'], right_on=['id1', 'id2'], how='left')
map_table = pd.concat([map_table, variant[['chrom', 'pos', 'allele_ids', 'id', 'rsid', 'assigned_id', 'assigned_sign']]])
# save
logging.info('Saving the results')
map_table.to_csv(args.output, compression='gzip', sep='\t', index=False)
|
nilq/baby-python
|
python
|
def hello_world():
return "hi"
|
nilq/baby-python
|
python
|
import filecmp
import os
import subprocess
import unittest
from clockwork import gvcf
from cluster_vcf_records import vcf_record
modules_dir = os.path.dirname(os.path.abspath(gvcf.__file__))
data_dir = os.path.join(modules_dir, "tests", "data", "gvcf")
def lines_from_vcf_ignore_file_date(vcf):
with open(vcf) as f:
return [x for x in f if not x.startswith("##fileDate=")]
class TestGvcf(unittest.TestCase):
def test_move_info_fields_to_format(self):
record = vcf_record.VcfRecord(
"ref\t1\t.\tC\tG\t.\t.\tfoo=bar;spam=eggs\tcleese\tchapman"
)
gvcf._move_info_fields_to_format(record)
assert record.INFO == {}
assert record.FORMAT == {"foo": "bar", "spam": "eggs", "cleese": "chapman"}
def test_gvcf_from_minos_vcf_and_samtools_gvcf(self):
ref_fasta = os.path.join(
data_dir, "gvcf_from_minos_vcf_and_samtools_gvcf.ref.fa"
)
minos_vcf = os.path.join(
data_dir, "gvcf_from_minos_vcf_and_samtools_gvcf.minos.vcf"
)
samtools_vcf = os.path.join(
data_dir, "gvcf_from_minos_vcf_and_samtools_gvcf.samtools.vcf"
)
tmp_out = "tmp.gvcf_from_minos_vcf_and_samtools_gvcf.out.vcf"
subprocess.check_output(f"rm -f {tmp_out}", shell=True)
gvcf.gvcf_from_minos_vcf_and_samtools_gvcf(
ref_fasta, minos_vcf, samtools_vcf, tmp_out
)
expect_lines = lines_from_vcf_ignore_file_date(
os.path.join(data_dir, "gvcf_from_minos_vcf_and_samtools_gvcf.out.vcf")
)
got_lines = lines_from_vcf_ignore_file_date(tmp_out)
self.assertEqual(expect_lines, got_lines)
os.unlink(tmp_out)
def test_samtools_vcf_record_to_frs(self):
record = vcf_record.VcfRecord(
"ref\t1\t.\tC\tG\t.\t.\tCALLER=samtools\tDP4\t1,2,14,13"
)
self.assertEqual(gvcf._samtools_vcf_record_to_frs(record, 0), 0.1)
self.assertEqual(gvcf._samtools_vcf_record_to_frs(record, 1), 0.9)
def test_vcf_record_pass_index(self):
record = vcf_record.VcfRecord(
"ref\t1\t.\tC\tG\t.\t.\tCALLER=samtools\tGT:DP:DP4\t1/1:20:1,2,14,13"
)
self.assertEqual(1, gvcf._vcf_record_pass_index(record, min_frs=0.9, min_dp=5))
self.assertEqual(
None, gvcf._vcf_record_pass_index(record, min_frs=0.9, min_dp=21)
)
self.assertEqual(
None, gvcf._vcf_record_pass_index(record, min_frs=0.99, min_dp=5)
)
record = vcf_record.VcfRecord(
"ref\t1\t.\tC\tG\t.\tPASS\tCALLER=minos\tGT:DP:FRS\t1/1:20:0.95"
)
self.assertEqual(1, gvcf._vcf_record_pass_index(record))
self.assertEqual(
1, gvcf._vcf_record_pass_index(record, require_minos_pass=False)
)
self.assertEqual(None, gvcf._vcf_record_pass_index(record, min_frs=0.96))
self.assertEqual(None, gvcf._vcf_record_pass_index(record, min_dp=21))
record = vcf_record.VcfRecord(
"ref\t1\t.\tC\tG\t.\tFAIL\tCALLER=minos\tGT:DP:FRS\t1/1:20:0.95"
)
self.assertEqual(None, gvcf._vcf_record_pass_index(record))
self.assertEqual(
1, gvcf._vcf_record_pass_index(record, require_minos_pass=False)
)
self.assertEqual(
None,
gvcf._vcf_record_pass_index(record, require_minos_pass=False, min_frs=0.96),
)
self.assertEqual(
None,
gvcf._vcf_record_pass_index(record, require_minos_pass=False, min_dp=21),
)
def test_gvcf_to_fasta(self):
vcf = os.path.join(data_dir, "gvcf_to_fasta.vcf")
tmp_out = "tmp.gvcf_to_fasta.fa"
subprocess.check_output(f"rm -f {tmp_out}", shell=True)
gvcf.gvcf_to_fasta(vcf, tmp_out)
expect_fasta = os.path.join(data_dir, "gvcf_to_fasta.fa")
self.assertTrue(filecmp.cmp(tmp_out, expect_fasta, shallow=False))
os.unlink(tmp_out)
|
nilq/baby-python
|
python
|
from torch.utils.data import dataloader
from torchvision.models.inception import inception_v3
from inception_v4 import inceptionv4
import torch
import torch.distributed as dist
import argparse
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision.models import resnet
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import os
from utils import *
import time
model_names = ['alexnet', 'inception_v3',
'resnet50', 'resnet152', 'vgg16', 'inception_v4']
parser = argparse.ArgumentParser(
description="Pytorch imagenet distributed training")
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='alexnet',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: alexnet)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=1, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
metavar='N',
help='mini-batch size (default: 64), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:7890', type=str,
help='url used to set up distributed training')
parser.add_argument('-p', '--print-freq', default=5, type=int,
metavar='N', help='print frequency (default: 5)')
parser.add_argument('--fast', action='store_true',
                    help='if set, run only 50 mini-batches (see fast_test).')
best_acc1 = 0
args = parser.parse_args()
def join_process_group():
print('==> Join process group')
if dist.is_available() and dist.is_nccl_available():
dist.init_process_group(
backend='nccl', init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
print('==> Process[{}] is ready.'.format(args.rank))
else:
raise RuntimeError(
"Error: Pytorch distributed framework or NCCL is unavailable.")
def main_worker():
global best_acc1
join_process_group()
# create model
if args.arch != 'inception_v4':
if args.arch != 'inception_v3':
model = models.__dict__[args.arch]()
else:
model = models.inception_v3(aux_logits=False)
else:
model = inceptionv4(num_classes=1000, pretrained=None)
device = torch.device('cuda', 0) # Set reasonable CUDA_VISIBLE_DEVICES
model = model.to(device)
# ddp
model = nn.parallel.DistributedDataParallel(model)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# model size
total_params = sum([torch.numel(p) for p in model.parameters()])
print('==> Model({}): {:.2f} MB'.format(
args.arch, total_params * 4 / (1024 * 1024)))
cudnn.benchmark = True
# data loading
print('==> Create Data Loader')
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
input_size = 224 if args.arch != 'inception_v3' else 299
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=args.batch_size,
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler
)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(
valdir,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
normalize,
])
),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
# train & val iteration
print('==> Train and Val')
for epoch in range(args.start_epoch, args.epochs):
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer=optimizer, epoch=epoch, args=args)
if not args.fast:
train(train_loader=train_loader, model=model, criterion=criterion,
optimizer=optimizer, epoch=epoch, args=args)
else:
fast_test(train_loader=train_loader, model=model,
criterion=criterion, optimizer=optimizer, args=args)
def fast_test(train_loader, model, criterion, optimizer, args):
speed_meter = SpeedMerter(is_master=(dist.get_rank() == 0))
model.train()
start_time = time.time()
for i, (images, target) in enumerate(train_loader):
if i == 50:
break
images = images.cuda(0, non_blocking=True)
target = target.cuda(0, non_blocking=True)
output = model(images)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 10 == 0:
end_time = time.time()
num_images = args.batch_size * 10
speed = num_images / (end_time - start_time)
speed_meter.update(val=speed)
print('[{}/50] {} imgs/s'.format(i+1, speed))
start_time = time.time()
speed_meter.output()
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
images = images.cuda(0, non_blocking=True)
target = target.cuda(0, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
elapsed_time = time.time() - end
batch_time.update(elapsed_time)
end = time.time()
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
images = images.cuda(0, non_blocking=True)
target = target.cuda(0, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
if __name__ == '__main__':
torch.cuda.empty_cache()
main_worker()
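# Example two-node launch (hostnames and the dataset path are placeholders;
# "train.py" stands in for this script's filename):
#   node 0: python train.py /data/imagenet -a resnet50 --world-size 2 --rank 0 --dist-url tcp://node0:7890
#   node 1: python train.py /data/imagenet -a resnet50 --world-size 2 --rank 1 --dist-url tcp://node0:7890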
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.3 on 2018-11-02 08:18
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('pages', '0007_language_code'),
]
operations = [
migrations.AlterField(
model_name='media',
name='extension',
field=models.CharField(blank=True, editable=False, max_length=32),
),
migrations.AlterField(
model_name='page',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='pages.Page', verbose_name='parent'),
),
migrations.AlterField(
model_name='page',
name='redirect_to',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='redirected_pages', to='pages.Page', verbose_name='redirect to'),
),
migrations.AlterField(
model_name='page',
name='redirect_to_url',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='redirect to url'),
),
migrations.AlterField(
model_name='pagealias',
name='page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pages.Page', verbose_name='page'),
),
]
|
nilq/baby-python
|
python
|
from enum import Enum, unique
@unique
class BrowserType(Enum):
"""Class to define browser type, e.g. Chrome, Firefox, etc."""
CHROME = "Chrome"
EDGE = "Edge"
FIREFOX = "Firefox"
INTERNET_EXPLORER = "Internet Explorer"
OPERA = "Opera"
SAFARI = "Safari"
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""End to end test of running a job.
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
import os
# TODO(e-carlin): Tests that need to be implemented
# - agent never starts
# - agent response is bad (ex no req_id)
# - server_req is malformed
# - agent starts but we never get an incoming 'read_for_work' message
# - canceling of requests in the q and running requests
# - using only the resources that are available
# - agent sigterm -> sigkill progression
# - send kill to unknown agent
_REPORT = 'heightWeightReport'
def test_runCancel(fc):
from pykern import pkunit
from pykern.pkdebug import pkdc, pkdp, pkdlog
import time
d = fc.sr_sim_data()
d.models.simulation.name = 'srunit_long_run'
d = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=_REPORT,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
for _ in range(10):
assert d.state != 'error'
if d.state == 'running':
break
time.sleep(d.nextRequestSeconds)
d = fc.sr_post('runStatus', d.nextRequest)
else:
pkunit.pkfail('runStatus: failed to start running: {}', d)
x = d.nextRequest
d = fc.sr_post(
'runCancel',
x,
)
assert d.state == 'canceled'
d = fc.sr_post(
'runStatus',
x,
)
assert d.state == 'canceled'
def test_runSimulation(fc):
from pykern import pkunit
from pykern.pkdebug import pkdp, pkdlog
from sirepo import job
import time
d = fc.sr_sim_data()
d = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=_REPORT,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
for _ in range(10):
pkdlog(d)
assert d.state != 'error'
if d.state == 'completed':
break
time.sleep(d.nextRequestSeconds)
d = fc.sr_post('runStatus', d.nextRequest)
else:
pkunit.pkfail('runStatus: failed to complete: {}', d)
# Just double-check it actually worked
assert u'plots' in d
def test_remove_srw_report_dir(fc):
from pykern import pkio
from pykern import pkunit
import sirepo.srdb
m = 'intensityReport'
data = fc.sr_sim_data('NSLS-II ESM beamline')
fc.sr_run_sim(data, m)
g = pkio.sorted_glob(sirepo.srdb.root().join('user', fc.sr_uid, 'srw', '*', m))
pkunit.pkeq(1, len(g))
pkio.unchecked_remove(*g)
fc.sr_run_sim(data, m)
|
nilq/baby-python
|
python
|
n = int(input())
suma = 0
dif = []
for i in range(n):
    a, b = map(int, input().split())
    suma += b * (n - 1)  # each item contributes b once for every other item
    dif.append(a - b)
dif = sorted(dif, reverse=True)
for j in range(n):
    suma += j * dif[j]  # the j-th largest (a - b) difference is counted j times
print(suma)
|
nilq/baby-python
|
python
|
"""Tools for converting model parameter from Caffe to Keras."""
import numpy as np
import os
import sys
import shutil
import h5py
import collections
import pickle
def dump_weights(model_proto, model_weights, weight_output, shape_output=None, caffe_home='~/caffe'):
"""Helper function to dump caffe model weithts in keras tf format
# Arguments
model_proto: path to the caffe model .prototxt file
model_weights: path to the caffe model .caffemodel file
weight_output: path to HDF5 output file
shape_output: path to pickle output file
# Notes
        Caffe requires this function to be run under Python 2.x
"""
def expand(path):
return os.path.abspath(os.path.expanduser(path))
caffe_home = expand(caffe_home)
model_proto = expand(model_proto)
model_weights = expand(model_weights)
#print(caffe_home + '\n' + model_proto + '\n' + model_weights + '\n' + weight_output + '\n' + shape_output )
# import caffe
sys.path.insert(0, os.path.join(caffe_home, 'python'))
import caffe
# create model
caffe.set_mode_cpu()
net = caffe.Net(model_proto, model_weights, caffe.TEST)
if os.path.exists(weight_output):
os.remove(weight_output)
f = h5py.File(weight_output, 'w')
# process the layers
layer_names = list(net._layer_names)
weights_shape = {}
for name in net.params:
layer = net.layers[layer_names.index(name)]
blobs = net.params[name]
blobs_shape = [list(b.shape) for b in blobs]
weights_shape[name] = blobs_shape
print('%-25s %-20s %-3s %s' % (name, layer.type, len(blobs), blobs_shape))
params = collections.OrderedDict()
if layer.type == 'Convolution':
W = blobs[0].data
W = W.transpose(2,3,1,0)
params[name+'_W_1:0'] = W
if len(blobs) > 1:
b = blobs[1].data
params[name+'_b_1:0'] = b
elif layer.type == 'Normalize':
gamma = blobs[0].data
params[name+'_gamma_1:0'] = gamma
elif layer.type == 'BatchNorm':
size = blobs[0].shape[0]
running_mean = blobs[0].data
running_std = blobs[1].data
gamma = np.empty(size)
gamma.fill(blobs[2].data[0])
beta = np.zeros(size)
params[name+'_gamma_1:0'] = gamma
params[name+'_beta_1:0'] = beta
params[name+'_running_mean_1:0'] = running_mean
params[name+'_running_std_1:0'] = running_std
elif layer.type == 'Scale':
gamma = blobs[0].data
beta = blobs[1].data
params[name+'_gamma_1:0'] = gamma
params[name+'_beta_1:0'] = beta
elif layer.type == 'InnerProduct':
W = blobs[0].data
W = W.T
b = blobs[1].data
params[name+'_W_1:0'] = W
params[name+'_b_1:0'] = b
else:
if len(blobs) > 0:
print('UNRECOGNISED BLOBS')
# create group and add parameters
g = f.create_group(name)
for weight_name, value in params.items():
param_dset = g.create_dataset(weight_name, value.shape, dtype=value.dtype)
if not value.shape:
# scalar
param_dset[()] = value
else:
param_dset[:] = value
g.attrs['weight_names'] = list(params.keys())
f.attrs['layer_names'] = layer_names
f.flush()
f.close()
# output model shape
if shape_output is not None:
output_shape = {}
for layer_name, blob in net.blobs.iteritems():
#print('%-40s %s' %(layer_name, str(blob.data.shape)))
output_shape[layer_name] = blob.data.shape
shape = {}
shape['output_shape'] = output_shape
shape['weights_shape'] = weights_shape
shape_output = expand(shape_output)
if os.path.exists(shape_output):
os.remove(shape_output)
with open(shape_output , 'wb') as f:
pickle.dump(shape, f, protocol=pickle.HIGHEST_PROTOCOL)
def add_missing_layers(model, input_file_name, output_file_name):
"""Helper function to add the missing keras layers in a HDF5 file
# Arguments
model: keras model
input_file_name: path to input HDF5 file
output_file_name: path to output HDF5 file
"""
shutil.copy(input_file_name, output_file_name)
f = h5py.File(output_file_name, 'r+')
# add missing layers
layer_names_model = [layer.name for layer in model.layers]
    layer_names_new = []
    for name in layer_names_model:
        if name not in f.keys():
            print('add %s' % name)
            g = f.create_group(name)
            g.attrs['weight_names'] = []
        # keep every model layer so the 'layer_names' attribute matches the
        # keras model's layer order (missing groups were added empty above)
        layer_names_new.append(name)
print('update layer_names')
f.attrs['layer_names'] = [s.encode('ascii') for s in layer_names_new]
f.flush()
f.close()
def compare_output_shape(model, shape_file):
"""Compares the output shape of the layers in caffe and keras model
# Arguments
model: keras model
shape_file: path to pickle file dumped by 'dump_weights'
"""
with open(shape_file, 'rb') as f:
shape = pickle.load(f)
#print('%-30s %-20s %-20s' % ('', 'caffe shape', 'keras shape'))
for layer in model.layers:
if layer.name in shape['output_shape']:
shape_caffe = list(shape['output_shape'][layer.name][1:])
# TODO: depends on layer type
if len(shape_caffe) == 3:
shape_caffe_mod = [shape_caffe[1], shape_caffe[2], shape_caffe[0]]
else:
shape_caffe_mod = list(shape_caffe)
shape_keras = list(layer.output_shape[1:])
mismatch = 'mismatch' if (shape_caffe_mod != shape_keras) else ''
print('%-30s %-20s %-20s %s' % (layer.name, shape_caffe, shape_keras, mismatch))
#print('%-30s \n%-20s \n%-20s' % (layer.name, shape_caffe, shape_keras))
def compare_weights_shape(model, shape_file):
"""Compares the parameter shape of the layers in caffe and keras model
# Arguments
model: keras model
shape_file: path to pickle file dumped by 'dump_weights'
"""
with open(shape_file, 'rb') as f:
shape = pickle.load(f)
#print('%-30s %-20s %-20s' % ('', 'caffe shape', 'keras shape'))
for layer in model.layers:
if layer.name in shape['weights_shape']:
shape_caffe = shape['weights_shape'][layer.name]
# TODO: depends on layer type
shape_caffe_mod = [ [s[2],s[3],s[1],s[0]] if len(s) == 4 else s for s in shape_caffe]
shape_keras = [w.shape.as_list() for w in layer.weights]
mismatch = 'mismatch' if not all([shape_caffe_mod[i] == shape_keras[i] for i in range(len(shape_keras))]) else ''
print('%-30s %-40s %-40s %s' % (layer.name, shape_caffe, shape_keras, mismatch))
#print('%-30s \n%-40s \n%-40s' % (layer.name, shape_caffe, shape_keras))
if __name__ == '__main__':
model_proto = './resnet152/ResNet-152-deploy.prototxt'
model_weights = './resnet152/ResNet-152-model.caffemodel'
weights_output = 'resnet152_weights.hdf5'
shape_output = 'resnet152_shape.pkl'
dump_weights(model_proto, model_weights, weights_output, shape_output=shape_output)
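    # Hedged follow-up sketch: once the weights are dumped, a matching Keras
    # model could be checked and loaded like this. `resnet152_model` is a
    # hypothetical builder, not provided by this module.
    # from resnet152 import resnet152_model
    # model = resnet152_model()
    # add_missing_layers(model, weights_output, 'resnet152_weights_full.hdf5')
    # compare_output_shape(model, shape_output)
    # compare_weights_shape(model, shape_output)
    # model.load_weights('resnet152_weights_full.hdf5')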
|
nilq/baby-python
|
python
|
import logging
from typing import List
from homeassistant.helpers.entity import Entity
from gehomesdk.erd import ErdCode, ErdApplianceType
from .base import ApplianceApi
from ..entities import GeErdSensor, GeErdBinarySensor
_LOGGER = logging.getLogger(__name__)
class DryerApi(ApplianceApi):
"""API class for dryer objects"""
APPLIANCE_TYPE = ErdApplianceType.DRYER
def get_all_entities(self) -> List[Entity]:
base_entities = super().get_all_entities()
dryer_entities = [
GeErdSensor(self, ErdCode.LAUNDRY_MACHINE_STATE),
GeErdSensor(self, ErdCode.LAUNDRY_MACHINE_SUBCYCLE),
GeErdBinarySensor(self, ErdCode.LAUNDRY_END_OF_CYCLE),
GeErdSensor(self, ErdCode.LAUNDRY_TIME_REMAINING),
GeErdSensor(self, ErdCode.LAUNDRY_CYCLE),
GeErdSensor(self, ErdCode.LAUNDRY_DELAY_TIME_REMAINING),
GeErdSensor(self, ErdCode.LAUNDRY_DOOR),
GeErdSensor(self, ErdCode.LAUNDRY_DRYNESSNEW_LEVEL),
GeErdSensor(self, ErdCode.LAUNDRY_TEMPERATURENEW_OPTION),
GeErdBinarySensor(self, ErdCode.LAUNDRY_REMOTE_STATUS)
]
entities = base_entities + dryer_entities
return entities
|
nilq/baby-python
|
python
|
import re
m = re.search(r'([a-zA-Z0-9])\1+', input().strip())
print(m.group(1) if m else -1)
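# Example (illustrative): for input "..abcddx" the first alphanumeric
# character followed by itself is 'd', so the script prints d; input with
# no such repeat prints -1 (the leading dots are not alphanumeric, so the
# character class never matches them).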
|
nilq/baby-python
|
python
|
import logging
import sys, os
import datetime
import eons, esam
import pandas as pd
#Class name is what is used at the CLI, so we defy convention here in favor of ease-of-use.
class in_excel(esam.DataFunctor):
def __init__(self, name=eons.INVALID_NAME()):
super().__init__(name)
self.requiredKWArgs.append("file")
#self.data will be returned, so we shouldn't be asking for it.
self.requiredKWArgs.remove("data")
#Override of UserFunctor method.
def PreCall(self, **kwargs):
self.Clear()
def UserFunction(self, **kwargs):
xlsxFileName = kwargs.get("file")
xlsx = pd.ExcelFile(xlsxFileName)
for sheet in xlsx.sheet_names:
dataFrame = pd.read_excel(xlsx, sheet)
for i, r in enumerate(dataFrame.to_dict('records')):
recordDatum = eons.SelfRegistering("Pandatum")
recordDatum.FromDict(r)
recordDatum.uniqueId = f"{xlsxFileName}/{sheet}/{i}"
self.data.AddDatum(recordDatum)
return self.data
|
nilq/baby-python
|
python
|
from django.test import Client, TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from posts.forms import PostForm
from posts.models import Post
User = get_user_model()
class TaskCreateFormTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
        # Create the form in case attribute checks are needed
cls.form = PostForm()
cls.user = User.objects.create_user(username='auth')
cls.authorized_client = Client()
cls.authorized_client.force_login(cls.user)
def test_create_post(self):
"""Валидная форма создает запись в Post."""
post_count = Post.objects.count()
form_data = {
            'text': 'Test title',
'pk': 1
}
response = self.authorized_client.post(
reverse('posts:post_create'),
data=form_data,
follow=True
)
self.assertRedirects(response, reverse(
'posts:profile', kwargs={'username': 'auth'}))
self.assertEqual(Post.objects.count(), post_count + 1)
self.assertTrue(
Post.objects.filter(
                text='Test title',
pk=1
).exists()
)
def test_edit_post(self):
form_data = {
            'text': 'Test title',
'pk': 1
}
response = self.authorized_client.post(
reverse('posts:post_create'),
data=form_data,
follow=True
)
form_data = {
            'text': 'Test edited',
'pk': 1
}
response = self.authorized_client.post(
reverse('posts:post_edit', kwargs={'post_id': 1}),
data=form_data,
follow=True
)
self.assertRedirects(response, reverse(
'posts:post_detail', kwargs={'post_id': 1}))
self.assertTrue(
Post.objects.filter(
                text='Test edited',
pk=1
).exists()
)
|
nilq/baby-python
|
python
|
from abc import ABC, abstractmethod
import asyncio
from typing import Callable
class AbstractConnectSignal(ABC):
def __init__(self) -> None:
self.targets = set()
def connect(self, target: Callable):
if target not in self.targets:
self.targets.add(target)
@abstractmethod
async def emit(self, *args, **kwargs):
# IDEA maybe as asyncio.task
await self._emit_to_targets(*args, **kwargs)
async def _emit_to_targets(self, *args, **kwargs):
for target in self.targets:
if asyncio.iscoroutinefunction(target):
asyncio.create_task(target(*args, **kwargs))
else:
target(*args, **kwargs)
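# A minimal sketch of a concrete signal (the DataReady class and the demo
# below are illustrative, not part of the original module):
class DataReady(AbstractConnectSignal):
    async def emit(self, *args, **kwargs):
        await self._emit_to_targets(*args, **kwargs)

async def _demo():
    signal = DataReady()
    signal.connect(lambda payload: print("got", payload))  # sync target, called directly
    await signal.emit({"value": 42})

# asyncio.run(_demo())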
|
nilq/baby-python
|
python
|
#pip install pdfplumber
import pdfplumber
pdf = pdfplumber.open('./Relação')
paginas = len(pdf.pages)  # number of pages
text = ""
for i in range(paginas):
page = pdf.pages[i]
text += page.extract_text()
print(text)
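# Equivalent idiomatic variant (sketch): a context manager closes the PDF
# automatically, and extract_text() can return None for empty pages.
# with pdfplumber.open('./Relação') as pdf:
#     text = "".join(page.extract_text() or "" for page in pdf.pages)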
|
nilq/baby-python
|
python
|
import logging
import json
logger = logging.getLogger(__name__)
def __virtual__():
'''
Only load if jenkins_common module exist.
'''
if 'jenkins_common.call_groovy_script' not in __salt__:
return (
False,
'The jenkins_smtp state module cannot be loaded: '
'jenkins_common not found')
return True
def config(name, host, username, password, reply_to=None,
port=25, ssl=False, charset="UTF-8"):
"""
Jenkins SMTP server config state method
:param name: configuration name
:param host: SMTP host
:param username: SMTP username
:param password: SMTP password
:param reply_to: sent emails ReplyTo header (optional)
:param port: SMTP port (optional, default 25)
:param ssl: use SSL for SMTP (optional, default False)
:param charset: SMTP charset (optional, default UTF-8)
:returns: salt-specified state dict
"""
template = __salt__['jenkins_common.load_template'](
'salt://jenkins/files/groovy/smtp.template',
__env__)
return __salt__['jenkins_common.api_call'](name, template,
['CHANGED', 'EXISTS'],
{'params': json.dumps({
'username': username,
'password': password,
'host': host,
'useReplyTo': True if reply_to else False,
'replyTo': reply_to,
'port': port if port else 25,
'ssl': True if ssl else False,
'charset': charset if charset else 'UTF-8'
})
},
'SMTP config')
def admin_email(name, email):
"""
Jenkins Admin user email config state method
    :param name: configuration name
    :param email: admin email address
    :returns: salt-specified state dict
"""
template = __salt__['jenkins_common.load_template'](
'salt://jenkins/files/groovy/admin_email.template',
__env__)
return __salt__['jenkins_common.api_call'](name, template,
['CHANGED', 'EXISTS'],
{'email': email},
'Admin email config')
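# Hedged usage sketch: from an SLS file these states might be invoked roughly
# as follows (values are illustrative):
#
# jenkins_smtp_server:
#   jenkins_smtp.config:
#     - host: smtp.example.com
#     - username: jenkins
#     - password: secret
#     - port: 465
#     - ssl: True
#
# jenkins_admin_address:
#   jenkins_smtp.admin_email:
#     - email: admin@example.com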
|
nilq/baby-python
|
python
|
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
CustomUser = get_user_model()
# TODO: are we using this form now that we have django-allauth?
class CustomUserCreationForm(UserCreationForm):
class Meta:
model = CustomUser
fields = (
"email",
"username",
)
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = (
"email",
"username",
)
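# Hedged example of wiring these forms into the admin (a common pattern;
# illustrative, not part of the original module):
# from django.contrib import admin
# from django.contrib.auth.admin import UserAdmin
#
# class CustomUserAdmin(UserAdmin):
#     add_form = CustomUserCreationForm
#     form = CustomUserChangeForm
#     model = CustomUser
#
# admin.site.register(CustomUser, CustomUserAdmin)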
|
nilq/baby-python
|
python
|
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from fairseq import utils, search
from glob import glob
import os
from morphodropout.binarize import SRC_SIDE, TGT_SIDE
from morphodropout.dataset import build_combined_dataset
from morphodropout.seq_gen import SequenceGenerator
@register_task('morpho_translation')
class MorphoTranslation(TranslationTask):
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
self.morpho_dropout_final = args.morpho_dropout
self.morpho_dropout_initial = args.morpho_dropout_initial
self.morpho_dropout_end_epoch = args.morpho_dropout_end_epoch
@staticmethod
def add_args(parser):
TranslationTask.add_args(parser)
parser.add_argument('--morpho-dropout', type=float, default=0.5)
parser.add_argument('--morpho-dropout-initial', type=float, default=None)
parser.add_argument('--morpho-dropout-end-epoch', type=int, default=None)
def morpho_dropout_for(self, epoch: int) -> float:
if self.morpho_dropout_initial is None:
return self.morpho_dropout_final
assert self.morpho_dropout_end_epoch is not None
initial = self.morpho_dropout_initial
final = self.morpho_dropout_final
period = float(self.morpho_dropout_end_epoch)
morpho_dropout = initial + (min(epoch, period) * (final - initial) / period)
return morpho_dropout
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
src_data_files = glob(split_path + "_{}_".format(SRC_SIDE) + "*")
tgt_data_files = glob(split_path + "_{}_".format(TGT_SIDE) + "*")
data_files = src_data_files + tgt_data_files
if not data_files:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
self.datasets[split] = build_combined_dataset(
self.src_dict,
src_data_files,
self.morpho_dropout_for(epoch) if split == 'train' else 0.0,
self.tgt_dict,
tgt_data_files,
self.args.seed,
epoch,
)
def build_generator(self, models, args):
# copied from fairseq_task.py to choose our implementation
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
match_source_len = getattr(args, "match_source_len", False)
diversity_rate = getattr(args, "diversity_rate", -1)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
return SequenceGenerator(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
)
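# Sketch of the linear schedule implemented by morpho_dropout_for, assuming
# --morpho-dropout-initial 0.1 --morpho-dropout 0.5 --morpho-dropout-end-epoch 10:
#   epoch 1  -> 0.1 + min(1, 10) * (0.5 - 0.1) / 10 = 0.14
#   epoch 10 -> 0.50
#   epoch 20 -> 0.50 (clamped once the end epoch is reached)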
|
nilq/baby-python
|
python
|
name = "pip_test_package"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os, os.path, sys
import socket
if __name__ == "__main__":
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',))
print "PROJECT_ROOT=", PROJECT_ROOT
sys.path.append(PROJECT_ROOT)
# Add virtualenv dirs to python path
host = socket.gethostname()
print "HOSTNAME=%s" % host
if host=='irrigatorpro':
if "test" in PROJECT_ROOT:
VIRTUAL_ENV_ROOT = '/www/VirtualEnvs/test/'
else:
VIRTUAL_ENV_ROOT = '/www/VirtualEnvs/irrigator_pro/'
else:
VIRTUAL_ENV_ROOT = os.path.join( PROJECT_ROOT, 'VirtualEnvs', 'irrigator_pro')
print "VIRTUAL_ENV_ROOT='%s'" % VIRTUAL_ENV_ROOT
activate_this = os.path.join(VIRTUAL_ENV_ROOT, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
# Get settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "irrigator_pro.settings")
import django
django.setup()
from farms.unified_field_data import generate_objects
from farms.models import *
from datetime import date, datetime
from django.contrib.auth.models import User
# Get the cumulative report in a given date range.
user = User.objects.get(email='aalebl@gmail.com')
print "user: ", user
# Get a crop season
crop_season = CropSeason.objects.get(name='Corn 2015', description='mine') # need one with probes.
field = Field.objects.get(name='North')
print 'crop season: ', crop_season
print 'field: ', field
unified_records = generate_objects(crop_season, field, user, date.today())
for r in unified_records:
print r.date
print r.water_register
print r.uga_records
print r.manual_records
|
nilq/baby-python
|
python
|
import glob
import os
import re
import requests
from Bio.SeqIO import SeqRecord
from Bio import SeqIO
from .utils import is_fasta
class PrimerDesigner:
"""Class for designing primers from FASTA files.
It will send a FASTA alignment to `primers4clades`_ in order to design
degenerate primers. Input data needed is an alignment in FASTA format
containing at least 4 sequences.
It is recommended that the beginning of each FASTA sequence description
contains the taxon name between square brackets.
Parameters:
folder (str): path of folder containing the FASTA file alignments
taxon_for_codon_usage (str): optional taxon name that will be inserted in the
description of FASTA sequences between square
brackets so that can be used by primer4clades
to infer the codon table to use
tm (str): temperature
min_amplength (str): minimum amplicon length
max_amplength (str): maximum amplicon length
gencode (str): genetic code. See below for all available genetic
codes
clustype (str): cluster distance metric: ``dna``, ``protein``.
amptype (str): substitution model used to estimate phylogenetic
information
email (str): your email address so that primer4clades can send
you email with detailed results
Example:
>>> # The values shown are the default. Change them if needed.
>>> from primer_designer import PrimerDesigner
>>> pd = PrimerDesigner()
>>> pd.folder = "alignments" # folder containing the FASTA file alignments
>>> pd.tm = "55" # annealing temperature
>>> pd.min_amplength = "250" # minimum amplicon length
>>> pd.max_amplength = "500" # maximum amplicon length
>>> pd.gencode = "universal" # see below for all available genetic codes
>>> pd.mode = "primers"
>>> pd.clustype = "dna"
>>> pd.amptype = "dna_GTRG" # substitution model used to estimate phylogenetic information
>>> pd.email = "youremail@email.com" # primer4clades will send you an email with very detailed results
>>> pd.design_primers()
>>>
>>> # You can input a taxon name to include in the description of every
>>> # FASTA sequence so that primer4clades can infer the correct codon
>>> # table to apply to the analysis.
>>> pd.taxon_for_codon_usage = "Bombyx mori"
>>> pd.design_primers()
The best primer pairs will be printed to your screen. Detailed results will
be saved as HTML files in your alignments folder. But it is recommended if
you also get the results by email. primers4clades_ will send you one email
for each alignment.
The genetic code table (variable ``gencode``) can be any of the following:
* ``universal`` for standard
* ``2`` for vertebrate mitochondrial
* ``3`` for yeast mitochondrial
* ``4`` for mold and protozoa mitochondrial
* ``5`` for invertebrate mitochondrial
* ``6`` for ciliate
* ``9`` for echinoderm and flatworm
* ``10`` for euplotid nuclear
* ``11`` for bacterial and plastid
* ``12`` for alternative yeast nuclear
* ``13`` for ascidian mitochondrial
* ``14`` for flatworm mitochondrial
* ``15`` for Blepharisma nuclear
* ``16`` for Chlorophycean mitochondrial
* ``21`` for Trematode mitochondrial
* ``22`` for Scenedesmus obliquus mitochondrial
* ``23`` for Thraustochytrium mitochondrial
The evolutionary substitution model can be any of the following (variable
``amptype``):
* ``protein_WAGG`` for protein WAG+G
* ``protein_JTTG`` for protein JTT+G
* ``protein_Blosum62G`` for protein Blosum62+G
* ``protein_VTG`` for protein VT+G
* ``protein_DayhoffG`` for protein Dayhoff+G
* ``protein_MtREVG`` for protein MtREV+G
* ``dna_HKYG`` for dna HKY+G
* ``dna_GTRG`` for dna GTR+G
* ``dna_K80G`` for dna K80+G
* ``dna_TrNG`` for dna TrN+G
* ``dna_JC69G`` for dna JC69+G
.. _primers4clades: http://floresta.eead.csic.es/primers4clades/#0
"""
def __init__(self, folder=None, taxon_for_codon_usage=None, tm="55",
min_amplength="100", max_amplength="500", gencode="universal",
mode="primers", clustype="dna", amptype="dna_GTR", email=None):
self.folder = folder
self.taxon_for_codon_usage = taxon_for_codon_usage
self.tm = tm
self.min_amplength = min_amplength
self.max_amplength = max_amplength
self.gencode = gencode
self.mode = mode
self.clustype = clustype
self.amptype = amptype
self.email = email
self.report = ""
def design_primers(self):
alns = self.get_alignments()
if alns:
self.call_primer4clades_for_primers(alns)
# Write primers to alignment file
with open("primers_report.txt", "a") as handle:
handle.write(self.report)
print("\nDone.\nAll primers have been saved in the file \"primers_report.txt\"")
return self.report
else:
msg = "\nError! the folder {0} is empty.\n".format(self.folder)
raise AttributeError(msg)
def call_primer4clades_for_primers(self, alns):
for aln in alns:
if is_fasta(aln):
if self.taxon_for_codon_usage:
aln = self.insert_taxon_in_new_fasta_file(aln)
print("\nProcessing file \"{0}\"".format(aln))
r = self.request_primers(aln)
self.process_response(aln, r.text)
def get_alignments(self):
if os.path.exists(self.folder):
all_files = os.path.join(self.folder, "*")
alns = glob.glob(all_files)
else:
msg = "\nError! the folder {0} does not exist.\n".format(self.folder)
raise AttributeError(msg)
return alns
def insert_taxon_in_new_fasta_file(self, aln):
"""primer4clades infers the codon usage table from the taxon names in the
sequences.
These names need to be enclosed by square brackets and be
present in the description of the FASTA sequence. The position is not
important. I will insert the names in the description in a new FASTA
file.
Returns:
Filename of modified FASTA file that includes the name of the taxon.
"""
new_seq_records = []
for seq_record in SeqIO.parse(aln, 'fasta'):
new_seq_record_id = "[{0}] {1}".format(self.taxon_for_codon_usage, seq_record.id)
new_seq_record = SeqRecord(seq_record.seq, id=new_seq_record_id)
new_seq_records.append(new_seq_record)
base_filename = os.path.splitext(aln)
new_filename = '{0}_modified{1}'.format(base_filename[0], base_filename[1])
SeqIO.write(new_seq_records, new_filename, "fasta")
return new_filename
def process_response(self, aln, response_body):
this_file = os.path.split(aln)[1]
this_file = re.sub(".fas.*", "", this_file)
msg = 'Writing detailed results as file "{0}.html"'.format(aln)
print(msg)
with open("{0}.html".format(aln), "w") as handle:
handle.write(response_body)
self.make_report_from_html_file(response_body, this_file)
def make_report_from_html_file(self, response_body, this_file):
"""Processes the results from primer4clades (a html file).
Makes a report based on the best possible primer pair (with highest
quality and longest amplicon).
"""
amplicon_tuples = self.get_amplicon_data_as_tuples(response_body)
best_amplicon = self.choose_best_amplicon(amplicon_tuples)
if best_amplicon is not None:
self.report += """\n\n\
####################################################
# Alignment {0}
""".format(this_file)
self.report += self.format_amplicon(best_amplicon)
def get_amplicon_data_as_tuples(self, response_body):
amplicons = re.findall("(## Amplicon.+) codon", response_body)
        primers_codehop = self.group_primers(re.findall(r"(\w+ codeh)_corr.+\n", response_body))
        primers_relaxed = self.group_primers(re.findall(r"(\w+ relax)_corr.+\n", response_body))
        primers_degen = self.group_primers(re.findall(r"(\w+ degen)_corr.+\n", response_body))
primer_pair_qualities = re.findall("# primer pair.+= ([0-9]+)%\n", response_body)
expected_pcr_product_lengths = re.findall("# expected PCR .+= ([0-9]+)\n", response_body)
forward_temperatures = re.findall("(# fwd: minTm.+)\n", response_body)
reverse_temperatures = re.findall("(# rev: minTm.+)\n", response_body)
amplicon_tuples = zip(amplicons, primers_codehop, primers_relaxed,
primers_degen,
primer_pair_qualities,
expected_pcr_product_lengths,
forward_temperatures, reverse_temperatures)
return amplicon_tuples
def format_amplicon(self, best_amplicon):
best_amplicon_formatted = ""
for idx, value in enumerate(best_amplicon):
if idx == 0:
best_amplicon_formatted += "{0}".format(value).replace("##", "# Best")
elif idx in [2, 3]:
best_amplicon_formatted += "\n\n{0}".format(value)
elif idx == 4:
best_amplicon_formatted += "\n\n# primer pair quality = {0}%".format(
value)
elif idx == 5:
best_amplicon_formatted += "\n# expected PCR product length (nt) = {0}".format(
value)
else:
best_amplicon_formatted += "\n{0}".format(value)
return best_amplicon_formatted
def group_primers(self, my_list):
"""Group elements in list by certain number 'n'"""
new_list = []
n = 2
for i in range(0, len(my_list), n):
grouped_primers = my_list[i:i + n]
forward_primer = grouped_primers[0].split(" ")
reverse_primer = grouped_primers[1].split(" ")
formatted_primers = ">F_{0}\n{1}".format(forward_primer[1], forward_primer[0])
formatted_primers += "\n>R_{0}\n{1}".format(reverse_primer[1], reverse_primer[0])
new_list.append(formatted_primers)
return new_list
def choose_best_amplicon(self, amplicon_tuples):
"""Iterates over amplicon tuples and returns the one with highest quality
and amplicon length.
"""
quality = 0
amplicon_length = 0
best_amplicon = None
for amplicon in amplicon_tuples:
if int(amplicon[4]) >= quality and int(amplicon[5]) >= amplicon_length:
quality = int(amplicon[4])
amplicon_length = int(amplicon[5])
best_amplicon = amplicon
return best_amplicon
def request_primers(self, aln):
url = "http://floresta.eead.csic.es/primers4clades/primers4clades.cgi"
params = {
'tm': self.tm,
'min_amplength': self.min_amplength,
'max_amplength': self.max_amplength,
'mode': self.mode,
'gencode': self.gencode,
'clustype': self.clustype,
'email': self.email,
}
        with open(aln, 'rb') as handle:
            files = {'sequencefile': handle}
            r = requests.post(url, files=files, data=params)
        return r
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import random
import sys
import os
import time
from collections import defaultdict
from typing import Dict, Tuple, Union, Set
import requests
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import expand_utilities as eu
from expand_utilities import QGOrganizedKnowledgeGraph
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../") # ARAXQuery directory
from ARAX_response import ARAXResponse
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../") # ARAX directory
from biolink_helper import BiolinkHelper
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../../") # code directory
from RTXConfiguration import RTXConfiguration
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../UI/OpenAPI/python-flask-server/")
from openapi_server.models.node import Node
from openapi_server.models.edge import Edge
from openapi_server.models.attribute import Attribute
from openapi_server.models.query_graph import QueryGraph
class KG2Querier:
def __init__(self, response_object: ARAXResponse):
self.response = response_object
self.biolink_helper = BiolinkHelper()
self.kg2_infores_curie = "infores:rtx-kg2"
self.max_allowed_edges = 1000000
self.max_edges_per_input_curie = 1000
self.curie_batch_size = 100
def answer_one_hop_query(self, query_graph: QueryGraph) -> QGOrganizedKnowledgeGraph:
"""
This function answers a one-hop (single-edge) query using KG2c, via PloverDB.
:param query_graph: A TRAPI query graph.
:return: An (almost) TRAPI knowledge graph containing all of the nodes and edges returned as
results for the query. (Organized by QG IDs.)
"""
log = self.response
final_kg = QGOrganizedKnowledgeGraph()
# Verify this is a valid one-hop query graph
if len(query_graph.edges) != 1:
log.error(f"answer_one_hop_query() was passed a query graph that is not one-hop: "
f"{query_graph.to_dict()}", error_code="InvalidQuery")
return final_kg
if len(query_graph.nodes) != 2:
log.error(f"answer_one_hop_query() was passed a query graph with more than two nodes: "
f"{query_graph.to_dict()}", error_code="InvalidQuery")
return final_kg
# Get canonical versions of the input curies
qnode_keys_with_curies = [qnode_key for qnode_key, qnode in query_graph.nodes.items() if qnode.ids]
for qnode_key in qnode_keys_with_curies:
qnode = query_graph.nodes[qnode_key]
canonical_curies = eu.get_canonical_curies_list(qnode.ids, log)
log.debug(f"Using {len(canonical_curies)} curies as canonical curies for qnode {qnode_key}")
qnode.ids = canonical_curies
qnode.categories = None # Important to clear this, otherwise results are limited (#889)
# Send the query to plover in batches of input curies
qedge_key = next(qedge_key for qedge_key in query_graph.edges)
input_qnode_key = self._get_input_qnode_key(query_graph)
input_curies = query_graph.nodes[input_qnode_key].ids
input_curie_set = set(input_curies)
curie_batches = [input_curies[i:i+self.curie_batch_size] for i in range(0, len(input_curies), self.curie_batch_size)]
log.debug(f"Split {len(input_curies)} input curies into {len(curie_batches)} batches to send to Plover")
log.info(f"Max edges allowed per input curie for this query is: {self.max_edges_per_input_curie}")
batch_num = 1
for curie_batch in curie_batches:
log.debug(f"Sending batch {batch_num} to Plover (has {len(curie_batch)} input curies)")
query_graph.nodes[input_qnode_key].ids = curie_batch
plover_answer, response_status = self._answer_query_using_plover(query_graph, log)
if response_status == 200:
batch_kg = self._load_plover_answer_into_object_model(plover_answer, log)
final_kg = eu.merge_two_kgs(batch_kg, final_kg)
# Prune down highly-connected input curies if we're over the max number of allowed edges
if final_kg.edges_by_qg_id.get(qedge_key):
if len(final_kg.edges_by_qg_id[qedge_key]) > self.max_allowed_edges:
log.debug(f"Have exceeded max num allowed edges ({self.max_allowed_edges}); will attempt to "
f"reduce the number of edges by pruning down highly connected nodes")
final_kg = self._prune_highly_connected_nodes(final_kg, qedge_key, input_curie_set,
input_qnode_key, self.max_edges_per_input_curie,
log)
# Error out if this pruning wasn't sufficient to bring down the edge count
if len(final_kg.edges_by_qg_id[qedge_key]) > self.max_allowed_edges:
log.error(f"Query for qedge {qedge_key} produced more than {self.max_allowed_edges} edges, "
f"which is too much for the system to handle. You must somehow make your query "
f"smaller (specify fewer input curies or use more specific predicates/categories).",
error_code="QueryTooLarge")
return final_kg
else:
log.error(f"Plover returned response of {response_status}. Answer was: {plover_answer}", error_code="RequestFailed")
return final_kg
batch_num += 1
return final_kg
def answer_single_node_query(self, single_node_qg: QueryGraph) -> QGOrganizedKnowledgeGraph:
log = self.response
qnode_key = next(qnode_key for qnode_key in single_node_qg.nodes)
qnode = single_node_qg.nodes[qnode_key]
final_kg = QGOrganizedKnowledgeGraph()
# Convert qnode curies as needed (either to synonyms or to canonical versions)
if qnode.ids:
qnode.ids = eu.get_canonical_curies_list(qnode.ids, log)
qnode.categories = None # Important to clear this to avoid discrepancies in types for particular concepts
# Send request to plover
plover_answer, response_status = self._answer_query_using_plover(single_node_qg, log)
if response_status == 200:
final_kg = self._load_plover_answer_into_object_model(plover_answer, log)
else:
log.error(f"Plover returned response of {response_status}. Answer was: {plover_answer}", error_code="RequestFailed")
return final_kg
@staticmethod
def _prune_highly_connected_nodes(kg: QGOrganizedKnowledgeGraph, qedge_key: str, input_curies: Set[str],
input_qnode_key: str, max_edges_per_input_curie: int, log: ARAXResponse) -> QGOrganizedKnowledgeGraph:
# First create a lookup of which edges belong to which input curies
input_nodes_to_edges_dict = defaultdict(set)
for edge_key, edge in kg.edges_by_qg_id[qedge_key].items():
if edge.subject in input_curies:
input_nodes_to_edges_dict[edge.subject].add(edge_key)
if edge.object in input_curies:
input_nodes_to_edges_dict[edge.object].add(edge_key)
# Then prune down highly-connected nodes (delete edges per input curie in excess of some set limit)
for node_key, connected_edge_keys in input_nodes_to_edges_dict.items():
connected_edge_keys_list = list(connected_edge_keys)
if len(connected_edge_keys_list) > max_edges_per_input_curie:
random.shuffle(connected_edge_keys_list) # Make it random which edges we keep for this input curie
edge_keys_to_remove = connected_edge_keys_list[max_edges_per_input_curie:]
log.debug(f"Randomly removing {len(edge_keys_to_remove)} edges from answer for input curie {node_key}")
for edge_key in edge_keys_to_remove:
kg.edges_by_qg_id[qedge_key].pop(edge_key, None)
# Document that not all answers for this input curie are included
node = kg.nodes_by_qg_id[input_qnode_key].get(node_key)
if node:
if not node.attributes:
node.attributes = []
if not any(attribute.attribute_type_id == "biolink:incomplete_result_set"
for attribute in node.attributes):
node.attributes.append(Attribute(attribute_type_id="biolink:incomplete_result_set", # TODO: request this as actual biolink item?
value_type_id="metatype:Boolean",
value=True,
attribute_source="infores:rtx-kg2",
description=f"This attribute indicates that not all "
f"nodes/edges returned as answers for this input "
f"curie were included in the final answer due to "
f"size limitations. {max_edges_per_input_curie} "
f"edges for this input curie were kept."))
# Then delete any nodes orphaned by removal of edges
node_keys_used_by_edges = kg.get_all_node_keys_used_by_edges()
for qnode_key, nodes in kg.nodes_by_qg_id.items():
orphan_node_keys = set(nodes).difference(node_keys_used_by_edges)
if orphan_node_keys:
log.debug(f"Removing {len(orphan_node_keys)} {qnode_key} nodes orphaned by the above step")
for orphan_node_key in orphan_node_keys:
del kg.nodes_by_qg_id[qnode_key][orphan_node_key]
return kg
@staticmethod
def _answer_query_using_plover(qg: QueryGraph, log: ARAXResponse) -> Tuple[Dict[str, Dict[str, Union[set, dict]]], int]:
rtxc = RTXConfiguration()
rtxc.live = "Production"
# First prep the query graph (requires some minor additions for Plover)
dict_qg = qg.to_dict()
dict_qg["include_metadata"] = True # Ask plover to return node/edge objects (not just IDs)
dict_qg["respect_predicate_symmetry"] = True # Ignore direction for symmetric predicate, enforce for asymmetric
# Allow subclass_of reasoning for qnodes with a small number of curies
for qnode in dict_qg["nodes"].values():
if qnode.get("ids") and len(qnode["ids"]) < 5:
if "allow_subclasses" not in qnode or qnode["allow_subclasses"] is None:
qnode["allow_subclasses"] = True
# Then send the actual query
response = requests.post(f"{rtxc.plover_url}/query", json=dict_qg, timeout=60,
headers={'accept': 'application/json'})
if response.status_code == 200:
log.debug(f"Got response back from Plover")
return response.json(), response.status_code
else:
log.warning(f"Plover returned a status code of {response.status_code}. Response was: {response.text}")
return dict(), response.status_code
def _load_plover_answer_into_object_model(self, plover_answer: Dict[str, Dict[str, Union[set, dict]]],
log: ARAXResponse) -> QGOrganizedKnowledgeGraph:
answer_kg = QGOrganizedKnowledgeGraph()
# Load returned nodes into TRAPI object model
for qnode_key, nodes in plover_answer["nodes"].items():
num_nodes = len(nodes)
log.debug(f"Loading {num_nodes} {qnode_key} nodes into TRAPI object model")
start = time.time()
for node_key, node_tuple in nodes.items():
node = self._convert_kg2c_plover_node_to_trapi_node(node_tuple)
answer_kg.add_node(node_key, node, qnode_key)
log.debug(f"Loading {num_nodes} {qnode_key} nodes into TRAPI object model took "
f"{round(time.time() - start, 2)} seconds")
# Load returned edges into TRAPI object model
for qedge_key, edges in plover_answer["edges"].items():
num_edges = len(edges)
log.debug(f"Loading {num_edges} edges into TRAPI object model")
start = time.time()
for edge_key, edge_tuple in edges.items():
edge = self._convert_kg2c_plover_edge_to_trapi_edge(edge_tuple)
answer_kg.add_edge(edge_key, edge, qedge_key)
log.debug(f"Loading {num_edges} {qedge_key} edges into TRAPI object model took "
f"{round(time.time() - start, 2)} seconds")
return answer_kg
@staticmethod
def _convert_kg2c_plover_node_to_trapi_node(node_tuple: list) -> Node:
node = Node(name=node_tuple[0], categories=eu.convert_to_list(node_tuple[1]))
return node
def _convert_kg2c_plover_edge_to_trapi_edge(self, edge_tuple: list) -> Edge:
edge = Edge(subject=edge_tuple[0], object=edge_tuple[1], predicate=edge_tuple[2], attributes=[])
knowledge_sources = edge_tuple[3]
# Indicate that this edge came from the KG2 KP
edge.attributes.append(Attribute(attribute_type_id="biolink:aggregator_knowledge_source",
value=self.kg2_infores_curie,
value_type_id="biolink:InformationResource",
attribute_source=self.kg2_infores_curie))
# Create knowledge source attributes for each of this edge's knowledge sources
knowledge_source_attributes = [Attribute(attribute_type_id="biolink:knowledge_source",
value=infores_curie,
value_type_id="biolink:InformationResource",
attribute_source=self.kg2_infores_curie)
for infores_curie in knowledge_sources]
edge.attributes += knowledge_source_attributes
return edge
@staticmethod
def _get_input_qnode_key(one_hop_qg: QueryGraph) -> str:
qedge = next(qedge for qedge in one_hop_qg.edges.values())
qnode_a_key = qedge.subject
qnode_b_key = qedge.object
qnode_a = one_hop_qg.nodes[qnode_a_key]
qnode_b = one_hop_qg.nodes[qnode_b_key]
if qnode_a.ids and qnode_b.ids:
# Considering the qnode with fewer curies the 'input' is more efficient for querying Plover
return qnode_a_key if len(qnode_a.ids) < len(qnode_b.ids) else qnode_b_key
elif qnode_a.ids:
return qnode_a_key
else:
return qnode_b_key
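# Hedged usage sketch (illustrative; a real ARAXResponse and TRAPI QueryGraph
# are required, and the qedge key "e00" is an assumption):
# response = ARAXResponse()
# querier = KG2Querier(response)
# kg = querier.answer_one_hop_query(query_graph)
# print(len(kg.edges_by_qg_id.get("e00", {})), "edges returned")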
|
nilq/baby-python
|
python
|
import pprint
import cyok
bit_file = 'foobar.bit'
# load DLL
cyok.load_library()
# check version
print('FrontPanel DLL built on: %s, %s' % cyok.get_version())
# connect to device
dev = cyok.PyFrontPanel()
print('Opening device connection.')
dev.open_by_serial()
print('Getting device information.')
dev_info = dev.get_device_info()
pprint.pprint(dev_info)
print('Program FPGA with bit file.')
dev.configure_fpga(bit_file)
if not dev.is_front_panel_enabled():
raise ValueError('FrontPanel is not enabled on the device.')
print('Closing device.')
dev.close()
# free DLL
cyok.free_library()
|
nilq/baby-python
|
python
|