content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from django.conf.urls import url
from ..views.admin import SubmissionRejudgeAPI, ClassSubmissionListAPI

urlpatterns = [
    # Bug fix: the pattern previously read r"^submission/rejudge?$", where the
    # "?" made the final "e" optional (so "submission/rejudg" also matched).
    # The intent -- consistent with the class_submission route below -- is an
    # optional trailing slash.
    url(r"^submission/rejudge/?$", SubmissionRejudgeAPI.as_view(), name="submission_rejudge_api"),
    url(r"^class_submission/?$", ClassSubmissionListAPI.as_view(), name="class_submission_api"),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Core settings and configuration."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from os import getcwd
from os.path import join

# Global debug/test switches; both default to off.
DEBUG = False
TESTING = False

# General
MUD_NAME = "Clockwork"
MUD_NAME_FULL = "Clockwork MUD Server"

# Networking
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 4000
# NOTE(review): the distinction between IDLE_TIME and IDLE_TIME_MAX is not
# visible here -- presumably warn vs. disconnect thresholds; confirm against
# the server's idle handling.
IDLE_TIME = 180  # seconds
IDLE_TIME_MAX = 600  # seconds

# Logging -- paths are relative to the current working directory at import time.
LOG_PATH = join(getcwd(), "logs", "mud.log")
# NOTE(review): "%F" is not a standard strftime code in this position --
# presumably interpreted by the project's log formatter (e.g. milliseconds);
# verify before changing.
LOG_TIME_FORMAT_CONSOLE = "%H:%M:%S,%F"
LOG_TIME_FORMAT_FILE = "%Y-%m-%d %a %H:%M:%S,%F"
LOG_ROTATE_WHEN = "midnight"
LOG_ROTATE_INTERVAL = 1
LOG_UTC_TIMES = False

# Storage
DATA_DIR = join(getcwd(), "data")

# Optional modules
CONTRIB_MODULES = [
    # These should be import paths relative to the `contrib` package.
    # ".my_contrib_module",
]
GAME_MODULES = [
    # These should be import paths relative to the `game` package.
    # ".my_game_module",
]

# Advanced
FORCE_GC_COLLECT = False
|
nilq/baby-python
|
python
|
from FSMConfig import FSMConfig
class GraphicsMouseManager:
    """Tracks mouse-button state and drives drag/hover highlighting for the
    graphics of the states held in a local FSMConfig.

    Button numbers follow the Tk convention: 1=left, 2=middle, 3=right.
    """

    def __init__(self):
        # Per-button pressed flags.
        self.leftDown = False
        self.middleDown = False
        self.rightDown = False
        self.gcLocal = FSMConfig()
        # Last pointer position observed while the left button is held;
        # None whenever no drag is in progress.
        self.prevDragX = None
        self.prevDragY = None
        self.draggedObject = None

    def downHandler(self, event):
        """Record a button press; on left-click, pick up the graphic under the pointer."""
        if event.num == 1:
            self.leftDown = True
            self.prevDragX, self.prevDragY = event.x, event.y
            # First state whose body contains the click becomes the drag target.
            hit = next(
                (state.graphic
                 for state in self.gcLocal.allStates.values()
                 if state.graphic.checkBodyIntersect(event.x, event.y)),
                None,
            )
            if hit is not None:
                self.draggedObject = hit
        elif event.num == 2:
            self.middleDown = True
        elif event.num == 3:
            self.rightDown = True

    def upHandler(self, event):
        """Record a button release; releasing the left button ends any drag."""
        if event.num == 1:
            self.leftDown = False
            self.prevDragX = self.prevDragY = None
            self.draggedObject = None
        elif event.num == 2:
            self.middleDown = False
        elif event.num == 3:
            self.rightDown = False

    def motionHandler(self, event):
        """Move the dragged graphic, or highlight whatever is under the pointer."""
        states = self.gcLocal.allStates.values()
        # Clear all highlights, then re-apply below for the current target.
        for state in states:
            state.graphic.unhighlightBody()
            state.graphic.unhighlightEdge()
        if self.leftDown:
            if self.draggedObject is not None:
                dx = event.x - self.prevDragX
                dy = event.y - self.prevDragY
                self.draggedObject.moveBy(dx, dy)
                self.draggedObject.highlightBody()
            # Keep the reference point fresh even when nothing is being dragged.
            self.prevDragX, self.prevDragY = event.x, event.y
        else:
            # Hover highlighting: first body hit, then first edge hit.
            for state in states:
                if state.graphic.checkBodyIntersect(event.x, event.y):
                    state.graphic.highlightBody()
                    break
            for state in states:
                if state.graphic.checkEdgeIntersect(event.x, event.y):
                    state.graphic.highlightEdge()
                    break
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3.8
# Copyright 2021 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
def main():
    """List every file shipped alongside a host-tool binary and emit metadata.

    Writes two files:
      * --output: one ``dest=source`` mapping per line, main binary first.
      * --meta_out: JSON metadata describing the companion host tool.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--binary',
        required=True,
        # Bug fix: the implicitly concatenated help string was missing a
        # space and rendered as "...This fileis not included...".
        help=(
            'The path to the binary in the base directory to list files for. '
            'This file is not included in the output'))
    parser.add_argument(
        '--dest_root', required=True, help="destination path root.")
    parser.add_argument(
        '--output', required=True, help='The path to the output file.')
    parser.add_argument(
        '--meta_out', required=True, help='path to metadata for tool.')
    parser.add_argument(
        '--name', required=True, help='name of host tool in metadata.')
    args = parser.parse_args()

    directory = os.path.dirname(args.binary)
    binary_path = os.path.join(
        args.dest_root, os.path.relpath(args.binary, directory))

    # The main binary should be first in the list.
    dest_files = [binary_path]

    with open(args.output, 'w') as f:
        print(f'{binary_path}={args.binary}', file=f)
        # Every other file under the binary's directory is a companion file.
        for path, dirs, files in os.walk(os.path.abspath(directory)):
            for filename in files:
                source_filepath = os.path.join(path, filename)
                filepath = os.path.join(
                    args.dest_root, os.path.relpath(source_filepath, directory))
                if binary_path != filepath:
                    dest_files += [filepath]
                    print(f'{filepath}={source_filepath}', file=f)

    metadata = {
        'files': dest_files,
        'name': args.name,
        'root': 'tools',
        'type': 'companion_host_tool'
    }
    with open(args.meta_out, 'w') as f:
        print(json.dumps(metadata, sort_keys=True, indent=2), file=f)


if __name__ == "__main__":
    sys.exit(main())
|
nilq/baby-python
|
python
|
# Angus Dempster, Francois Petitjean, Geoff Webb
#
# @article{dempster_etal_2020,
# author = {Dempster, Angus and Petitjean, Fran\c{c}ois and Webb, Geoffrey I},
# title = {ROCKET: Exceptionally fast and accurate time classification using random convolutional kernels},
# year = {2020},
# journal = {Data Mining and Knowledge Discovery},
# doi = {https://doi.org/10.1007/s10618-020-00701-z}
# }
#
# https://arxiv.org/abs/1910.13051 (preprint)
import os
import argparse
import numpy as np
import pandas as pd
import time
import torch, torch.nn as nn, torch.optim as optim
from rocket_functions import apply_kernels, generate_kernels
# == notes =====================================================================
# Reproduce the scalability experiments.
#
# Arguments:
# -tr --training_path : training dataset (npy)
# -te --test_path : test dataset (npy)
# -o --output_path : path for results
# -k --num_kernels : number of kernels
# == parse arguments ===========================================================
# Command-line interface (described in the notes block above).
parser = argparse.ArgumentParser()
parser.add_argument("-path", "--data_path", required = True)  # dataset directory
parser.add_argument("-o", "--output_path", required = True)   # results directory
parser.add_argument("-k", "--num_kernels", type = int)        # number of ROCKET kernels
parser.add_argument("-seed", "--seed", type=int)              # RNG seed for numpy/torch
arguments = parser.parse_args()
# == training function =========================================================
def train(X,
          Y,
          X_validation,
          Y_validation,
          kernels,
          num_features,
          num_classes,
          minibatch_size = 256,
          max_epochs = 100,
          patience = 2, # x10 minibatches; reset if loss improves
          tranche_size = 2 ** 11,
          cache_size = 2 ** 14): # as much as possible
    """Train a logistic/softmax linear classifier on ROCKET features.

    The training set is processed in tranches: each tranche is normalised,
    transformed with the random kernels, feature-normalised, and (while it
    fits) cached so later epochs can skip the transform.  Early stopping is
    driven by validation loss, checked every 10 minibatches.

    Returns ``(model, f_mean, f_std)`` where *f_mean*/*f_std* are the
    per-feature normalisation statistics estimated on the first tranche.
    """

    # -- init ------------------------------------------------------------------

    def init(layer):
        # Zero-initialise the linear layer (weights and bias).
        if isinstance(layer, nn.Linear):
            nn.init.constant_(layer.weight.data, 0)
            nn.init.constant_(layer.bias.data, 0)

    # -- model -----------------------------------------------------------------

    model = nn.Sequential(nn.Linear(num_features, num_classes)) # logistic / softmax regression
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters())
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor = 0.5, min_lr = 1e-8)
    model.apply(init)

    # -- run -------------------------------------------------------------------

    minibatch_count = 0
    best_validation_loss = np.inf
    stall_count = 0
    stop = False

    num_examples = len(X)
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    num_tranches = int(np.ceil(num_examples / tranche_size))

    cache = np.zeros((min(cache_size, num_examples), num_features))
    cache_count = 0

    for epoch in range(max_epochs):

        if epoch > 0 and stop:
            break

        for tranche_index in range(num_tranches):

            if epoch > 0 and stop:
                break

            a = tranche_size * tranche_index
            b = a + tranche_size

            Y_tranche = Y[a:b]

            # if cached, use cached transform; else transform and cache the result
            if b <= cache_count:

                X_tranche_transform = cache[a:b]

            else:

                X_tranche = X[a:b]
                X_tranche = (X_tranche - X_tranche.mean(axis = 1, keepdims = True)) / X_tranche.std(axis = 1, keepdims = True) # normalise time series
                X_tranche_transform = apply_kernels(X_tranche, kernels)

                if epoch == 0 and tranche_index == 0:

                    # per-feature mean and standard deviation (estimated on first tranche)
                    f_mean = X_tranche_transform.mean(0)
                    f_std = X_tranche_transform.std(0) + 1e-8

                    # normalise and transform validation data
                    X_validation = (X_validation - X_validation.mean(axis = 1, keepdims = True)) / X_validation.std(axis = 1, keepdims = True) # normalise time series
                    X_validation_transform = apply_kernels(X_validation, kernels)
                    X_validation_transform = (X_validation_transform - f_mean) / f_std # normalise transformed features
                    X_validation_transform = torch.FloatTensor(X_validation_transform)
                    Y_validation = torch.LongTensor(Y_validation)

                X_tranche_transform = (X_tranche_transform - f_mean) / f_std # normalise transformed features

                if b <= cache_size:
                    cache[a:b] = X_tranche_transform
                    cache_count = b

            X_tranche_transform = torch.FloatTensor(X_tranche_transform)
            Y_tranche = torch.LongTensor(Y_tranche)

            minibatches = torch.randperm(len(X_tranche_transform)).split(minibatch_size)

            for minibatch_index, minibatch in enumerate(minibatches):

                if epoch > 0 and stop:
                    break

                # abandon undersized minibatches
                if minibatch_index > 0 and len(minibatch) < minibatch_size:
                    break

                # -- (optional) minimal lr search ------------------------------

                # default lr for Adam may cause training loss to diverge for a
                # large number of kernels; lr minimising training loss on first
                # update should ensure training loss converges

                if epoch == 0 and tranche_index == 0 and minibatch_index == 0:

                    candidate_lr = 10 ** np.linspace(-1, -6, 6)

                    best_lr = None
                    best_training_loss = np.inf

                    for lr in candidate_lr:

                        lr_model = nn.Sequential(nn.Linear(num_features, num_classes))
                        lr_optimizer = optim.Adam(lr_model.parameters())
                        lr_model.apply(init)

                        for param_group in lr_optimizer.param_groups:
                            param_group["lr"] = lr

                        # perform a single update
                        lr_optimizer.zero_grad()
                        Y_tranche_predictions = lr_model(X_tranche_transform[minibatch])
                        training_loss = loss_function(Y_tranche_predictions, Y_tranche[minibatch])
                        training_loss.backward()
                        lr_optimizer.step()

                        Y_tranche_predictions = lr_model(X_tranche_transform)
                        training_loss = loss_function(Y_tranche_predictions, Y_tranche).item()

                        if training_loss < best_training_loss:
                            best_training_loss = training_loss
                            best_lr = lr

                    for param_group in optimizer.param_groups:
                        param_group["lr"] = best_lr

                # -- training --------------------------------------------------

                optimizer.zero_grad()
                Y_tranche_predictions = model(X_tranche_transform[minibatch])
                training_loss = loss_function(Y_tranche_predictions, Y_tranche[minibatch])
                training_loss.backward()
                optimizer.step()

                minibatch_count += 1

                if minibatch_count % 10 == 0:

                    Y_validation_predictions = model(X_validation_transform)
                    validation_loss = loss_function(Y_validation_predictions, Y_validation)

                    scheduler.step(validation_loss)

                    # Early stopping: count consecutive non-improving checks.
                    if validation_loss.item() >= best_validation_loss:
                        stall_count += 1
                        if stall_count >= patience:
                            stop = True
                    else:
                        best_validation_loss = validation_loss.item()
                        if not stop:
                            stall_count = 0

    return model, f_mean, f_std
# == run =======================================================================
# -- run through dataset sizes -------------------------------------------------
# Reproducibility: seed NumPy and torch from the CLI seed.
np.random.seed(arguments.seed)
torch.manual_seed(arguments.seed)

# Dataset sizes to sweep (a single fixed size here).
all_num_training_examples = [900000]

results = pd.DataFrame(index = all_num_training_examples,
                       columns = ["accuracy", "time_training_seconds"],
                       data = 0)
results.index.name = "num_training_examples"

print(f" {arguments.num_kernels:,} Kernels ".center(80, "="))

for num_training_examples in all_num_training_examples:

    # Progress banner, right-aligned to an 80-column layout.
    if num_training_examples == all_num_training_examples[0]:
        print("Number of training examples:" + f"{num_training_examples:,}".rjust(75 - 28 - 5, " ") + ".....", end = "", flush = True)
    else:
        print(f"{num_training_examples:,}".rjust(75 - 5, " ") + ".....", end = "", flush = True)

    # -- read training and validation data -------------------------------------

    # if training data does not fit in memory, it is possible to load the
    # training data inside the train(...) function, using the *chunksize*
    # argument for pandas.read_csv(...) (and roughly substituting chunks for
    # tranches); similarly, if the cache does not fit in memory, consider
    # caching the transformed features on disk

    path = arguments.data_path
    train_file = os.path.join(path, 'satellite_train.npy')
    test_file = os.path.join(path, 'satellite_test.npy')

    # NOTE(review): each .npy file is loaded twice (once for 'data', once for
    # 'label'); loading once into a variable would halve the I/O.
    X_training, Y_training = np.load(train_file, allow_pickle=True)[()]['data'], np.load(train_file,allow_pickle=True)[()]['label']
    X_validation, Y_validation = np.load(test_file, allow_pickle=True)[()]['data'], np.load(test_file, allow_pickle=True)[()]['label']

    # Labels are stored 1-based; shift to 0-based for CrossEntropyLoss.
    Y_training = Y_training - 1
    Y_validation = Y_validation - 1
    print(np.unique(Y_training))

    # -- generate kernels ------------------------------------------------------

    kernels = generate_kernels(X_training.shape[1], arguments.num_kernels)

    # -- train -----------------------------------------------------------------

    # Each kernel yields 2 features, hence num_features = num_kernels * 2.
    time_a = time.perf_counter()
    model, f_mean, f_std = train(X_training,
                                 Y_training,
                                 X_validation,
                                 Y_validation,
                                 kernels,
                                 arguments.num_kernels * 2,
                                 num_classes = 24)
    time_b = time.perf_counter()
    results.loc[num_training_examples, "time_training_seconds"] = time_b - time_a

    # -- test ------------------------------------------------------------------

    # read test data (here, we test on a subset of the full test data)
    X_test, Y_test = X_validation, Y_validation

    # normalise and transform test data
    X_test = (X_test - X_test.mean(axis = 1, keepdims = True)) / X_test.std(axis = 1, keepdims = True) # normalise time series
    X_test_transform = apply_kernels(X_test, kernels)
    X_test_transform = (X_test_transform - f_mean) / f_std # normalise transformed features

    # predict
    model.eval()
    Y_test_predictions = model(torch.FloatTensor(X_test_transform))
    results.loc[num_training_examples, "accuracy"] = (Y_test_predictions.max(1)[1].numpy() == Y_test).mean()

    print("Done.")

print(f" FINISHED ".center(80, "="))

results.to_csv(f"{arguments.output_path}/results_scalability_k={arguments.num_kernels}.csv")
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.urls import path
from . import views
# URL namespace used for reversing (e.g. "sbadmin:table").
app_name = 'sbadmin'

urlpatterns = [
    path('table/', views.TableView.as_view(), name='table'),  # data table page
    path('chart/', views.ChartView.as_view(), name='chart'),  # chart page
    path('', views.IndexView.as_view(), name='index'),        # dashboard index
    path('home/', views.home, name='home')                    # function-based home view
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# import bibliotek
import os
import datetime
# Counter of scanned directories.  (The original also initialised an unused
# "czysazdjecia" variable, removed here.)
countope = 0
# Known performer entries loaded from the reference list.
lines_seen = set()

# Start timestamp for the run.
czasstart = datetime.datetime.now()
print("~~~~~~START~~~~~~\t" + str(czasstart).split(".")[0])

# Remove these prompts if a hard-coded rootdir is used instead
# (and pass 'rootdir' to os.walk()).
# Bug fix: the prompt below said "ddo" instead of "do".
print("\nPodaj ścieżkę do sprawdzania wykonawców:")
sprwyk = input()
print("\nPodaj ścieżkę dla ew. pliku z błędami:")
sciezka = input()

# Error-report file: <chosen dir>\<dir name>_<YYYY-MM-DD>.txt
bledny = (
    sciezka
    + "\\"
    + os.path.basename(os.path.normpath(sciezka))
    + "_"
    + czasstart.strftime("%Y-%m-%d")
    + ".txt"
)
print("\nPlik zostanie umieszczony w:\n" + bledny)
input("\nWciśnij ENTER aby kontynuować...")

# Load the reference list of performers, one entry per line.
with open(
    r"V:\Dane robocze\maciej\regexy_formuly_skrypty_polecenia\spis_wykonawcow_zambrowski.txt",  # noqa
    "r",
) as spiswyk:
    for line in spiswyk:
        lines_seen.add(line.rstrip("\n"))

# for _, dirnames, _ in os.walk(sprwyk):
#     countope += len(dirnames)

for subdir, dirs, files in os.walk(sprwyk):
    print(countope)
    countope += 1
    for file in files:
        if file == "opis.txt":
            opisek = os.path.join(subdir, file)
            with open(opisek, "r") as opis:
                for line in opis:
                    # "X:" lines carry the performer name; any name missing
                    # from the reference list is appended to the report.
                    if line.startswith("X:"):
                        if line.rstrip("\n") not in lines_seen:
                            with open(bledny, "a") as bl:
                                bl.write(line)

# Total script runtime, reported in minutes.
czaskoniec = datetime.datetime.now()
roznicaczas = czaskoniec - czasstart
czastrwania = roznicaczas.total_seconds() / 60
print("\nCałość zajęła (minuty):")
print("%.2f" % czastrwania)
print("\n~~~~~~KONIEC~~~~~~\t" + str(czaskoniec).split(".")[0])
input("Wciśnij ENTER aby wyjść...")
|
nilq/baby-python
|
python
|
import os
import sys
import requests
import ConnectWindow, ConnectedWindow, Driver
from PySide2 import QtCore
from PySide2.QtUiTools import QUiLoader
from PySide2.QtWidgets import QApplication, QLineEdit, QPushButton, QTabWidget, QWidget
class LoginWindow(QtCore.QObject):
    """Login/sign-up window loaded from a Qt Designer .ui file.

    Talks to a local auth service over HTTP and notifies *driver_window*
    once a login succeeds.
    """

    def __init__(self, ui_file, driver_window, parent=None):
        super(LoginWindow, self).__init__(parent)
        self.driver_window = driver_window
        # Load the window definition from the .ui file at runtime.
        ui = QtCore.QFile(ui_file)
        ui.open(QtCore.QFile.ReadOnly)
        self.window = QUiLoader().load(ui)
        ui.close()
        self.tab_controller = self.window.findChild(QTabWidget)
        # Wire up the buttons on both the login and sign-up tabs.
        for cancel_name in ('cancel_button', 'cancel_button_su'):
            self.tab_controller.findChild(QPushButton, cancel_name).clicked.connect(
                self.cancel_button_clicked)
        self.tab_controller.findChild(QPushButton, 'login_button').clicked.connect(
            self.login_button_clicked)
        self.tab_controller.findChild(QPushButton, 'sign_up_button').clicked.connect(
            self.sign_up_button_clicked)
        self.window.show()

    def login_button_clicked(self):
        """POST the entered credentials; re-enable the button on failure."""
        button = self.tab_controller.findChild(QPushButton, 'login_button')
        button.setEnabled(False)
        user_field = self.tab_controller.findChild(QLineEdit, 'username_field')
        pass_field = self.tab_controller.findChild(QLineEdit, 'password_field')
        payload = {
            'username': user_field.text(),
            'password': pass_field.text()
        }
        reply = requests.post(url='http://127.0.0.1:5000/login', data=payload)
        # The service reports the outcome as plain text in the response body.
        self.window.statusBar().showMessage(reply.text)
        if reply.text in ('Invalid Password', 'Username Does Not Exist'):
            # Failed: let the user retry.
            button.setEnabled(True)
        else:
            # Success: hand control back to the driver window.
            self.driver_window.LoginSignal()

    def sign_up_button_clicked(self):
        """POST the registration form; re-enable the button on failure."""
        button = self.tab_controller.findChild(QPushButton, 'sign_up_button')
        button.setEnabled(False)
        fields = {
            'username': self.tab_controller.findChild(QLineEdit, 'username_input_su').text(),
            'email': self.tab_controller.findChild(QLineEdit, 'email_input_su').text(),
            'password': self.tab_controller.findChild(QLineEdit, 'password_input_su').text(),
            'password_conf': self.tab_controller.findChild(QLineEdit, 'confirm_password_su').text()
        }
        reply = requests.post(url='http://127.0.0.1:5000/signup', data=fields)
        self.window.statusBar().showMessage(reply.text)
        if reply.text in ('A User Already Exists With That Email Address',
                          'A User Already Exists With That Username',
                          'One or More Fields Were Left Blank'):
            # Failed: let the user retry.
            button.setEnabled(True)

    def cancel_button_clicked(self):
        """Close the window without logging in."""
        self.window.close()
def get_mainwindow(driver_window):
    """Construct and return the login window bound to *driver_window*."""
    return LoginWindow('secureshellinterface.ui', driver_window)
if __name__ == '__main__':
    # Running this module directly just launches the application driver.
    # NOTE(review): os.system with a bare "python" depends on PATH; consider
    # subprocess with sys.executable -- confirm the intended interpreter.
    os.system('python Driver.py')
|
nilq/baby-python
|
python
|
__author__ = 'joon'
import sys
sys.path.insert(0, 'ResearchTools')
from util.construct_filenames import create_token
from util.construct_controls import subcontrol
from util.ios import mkdir_if_missing, save_to_cache, load_from_cache
from util.maths import Jsoftmax, proj_lp, proj_lf, compute_percentiles
from util.dict_with_dot import Map
from util.time_debugging import debug_show_time_elapsed
from util.images import load_image_PIL
from util.construct_args import control2list
from util.parallel import apply_async_wrapper, Sum
from vis.imshow import fpim, vis_seg
from image.mask_box import mask2bbox, bbox_area, bbox_ratio, carve_bbox_to_im
from image.cc import compute_cc
from image.bw_to_rgb import bw_to_rgb
from image.crop import random_crop, random_translation
from image.iou import compute_iou
|
nilq/baby-python
|
python
|
"""Backend for rendering multi-frame images using PIL.
These are internal APIs and subject to change at any time.
"""
try:
    # Bug fix: "import PIL" alone does not load the Image submodule, but
    # __init__ below calls PIL.Image.open(); import the submodule explicitly
    # (this still binds the top-level name "PIL").
    import PIL.Image
except ImportError:
    PIL = None
from .shared import Backend, BackendError, check_output
class PILMultiframeBackend(Backend):
    """Backend for rendering multi-frame images (e.g. GIF and TIFF).

    For performance reasons, single-frame rendering is built into the
    DocViewer widget; this backend only handles formats that can store
    several frames in one file.  Requires the PIL module.
    """

    __slots__ = ["im"]

    def __init__(self, input_path, **kw):
        """Open *input_path* with PIL, or raise BackendError if PIL is absent."""
        Backend.__init__(self, input_path, **kw)
        if not PIL:
            raise BackendError(
                "Could not render {0} because PIL is not available "
                "on your system."
                .format(input_path)
            )
        self.im = PIL.Image.open(input_path)

    def page_count(self):
        """Return the number of frames (pages) in the input file."""
        # Some formats, like TIFF, expose the frame count directly.
        if hasattr(self.im, "num_frames"):
            return self.im.num_frames
        # Otherwise, step through frames until PIL signals the end.
        count = 1
        self.im.seek(0)
        try:
            while True:
                self.im.seek(self.im.tell() + 1)
                count += 1
        except EOFError:
            # We've seen every frame in the image.
            return count

    def render_page(self, page_num):
        """Return a copy of the (1-based) page *page_num*."""
        self.im.seek(page_num - 1)
        return self.im.copy()
|
nilq/baby-python
|
python
|
import jwt.exceptions
import pytest
from okay.jwt import main, decode
__author__ = "Cesar Alvernaz"
__copyright__ = "Cesar Alvernaz"
__license__ = "MIT"
from fixtures.jwt_fixtures import VALID_TOKEN, SECRET, \
EXPECTED_TOKEN_PAYLOAD, INVALID_SECRET, VALID_RS256_TOKEN, \
EXPECTED_TOKEN_RS256_PAYLOAD
def test_decode_without_signature_verification():
    """By default decode() skips signature checks and returns the payload."""
    payload = decode(VALID_TOKEN, SECRET)
    assert payload == EXPECTED_TOKEN_PAYLOAD
def test_decode_with_signature_verification():
    """A valid token decodes to the same payload when the signature is verified."""
    payload = decode(VALID_TOKEN, SECRET, verify_signature=True)
    assert payload == EXPECTED_TOKEN_PAYLOAD
def test_decode_with_invalid_secret():
    """Verifying with the wrong secret must raise InvalidSignatureError."""
    with pytest.raises(jwt.exceptions.InvalidSignatureError):
        decode(VALID_TOKEN, INVALID_SECRET, verify_signature=True)
def test_decode_with_invalid_secret_no_validation():
    """Without signature verification, the secret value is irrelevant."""
    payload = decode(VALID_TOKEN, INVALID_SECRET, verify_signature=False)
    assert payload == EXPECTED_TOKEN_PAYLOAD
def test_decode_with_invalid_token_type_with_validation():
    """Verifying an RS256 token against an HS secret raises InvalidAlgorithmError."""
    with pytest.raises(jwt.exceptions.InvalidAlgorithmError):
        decode(VALID_RS256_TOKEN, SECRET, verify_signature=True)
def test_decode_with_invalid_token_type_with_no_validation():
    """An RS256 token still decodes when verification is disabled."""
    payload = decode(VALID_RS256_TOKEN, SECRET, verify_signature=False)
    assert payload == EXPECTED_TOKEN_RS256_PAYLOAD
# def test_main(capsys):
# """CLI Tests"""
# # capsys is a pytest fixture that allows asserts agains stdout/stderr
# # https://docs.pytest.org/en/stable/capture.html
# main(["7"])
# captured = capsys.readouterr()
# assert "The 7-th Fibonacci number is 13" in captured.out
|
nilq/baby-python
|
python
|
import os,re, sys
from byo.track import Track, load_track
from byo.io.genome_accessor import GenomeCache, RemoteCache
from byo.io.annotation import AnnotationAccessor
#from byo.io.lazytables import NamedTupleImporter as Importer
import byo.config
import logging
class LazyTranscriptLoader(object):
    """Dict-like lazy proxy for a model system's transcript catalogs.

    Catalog parsing is deferred until the first item lookup because loading
    the UCSC files is expensive.
    """

    def __init__(self, system=None):
        # None means "not loaded yet"; an empty dict means "loaded, empty".
        self.transcripts = None
        self.system = system
        # Bug fix: formatting "{system.name}" with system=None raised
        # AttributeError for the default argument; fall back to "None".
        system_name = system.name if system is not None else "None"
        self.logger = logging.getLogger(
            "LazyTranscriptLoader(system={0})".format(system_name))

    def __getitem__(self, txname):
        # Bug fix: the previous truthiness test ("if not self.transcripts")
        # re-ran the expensive catalog load on EVERY lookup whenever the
        # load found no sources and returned {}.  Test for the None
        # sentinel instead.
        if self.transcripts is None:
            self.transcripts = self.load_transcript_catalogs()
        return self.transcripts[txname]

    def load_transcript_catalogs(self):
        """Load every *.ucsc.gz catalog for this system into one model set.

        Returns the merged transcript models, or {} when no catalog files
        are found under the system's annotation directory.
        """
        from byo.gene_model import transcripts_from_UCSC
        import glob

        path = os.path.join(self.system.root, "annotation", self.system.name, "*.ucsc.gz")
        sources = glob.glob(path)
        if sources:
            self.logger.debug('loading {0}'.format(sources[0]))
            T = transcripts_from_UCSC(sources[0], system=self.system)
            # Merge any additional catalogs into the first one.
            for s in sources[1:]:
                self.logger.debug('loading {0}'.format(s))
                T.load(s)
            self.logger.info("loaded {0} transcript models from {1} source(s)".format(len(T), len(sources)))
            return T
        else:
            self.logger.error("no transcript models found in path '{0}'".format(path))
            return {}
class ModelSystem(object):
    """Bundle of a genome accessor and transcript models for one model system."""

    def __init__(self, name, genome = None, transcript_models = None, root = byo.config.system_root):
        self.name = name
        self.root = root
        if genome is None:
            # Prefer a remote genome server when one is configured.
            if getattr(byo.config, "genome_server", None):
                self.genome = RemoteCache(byo.config.genome_server)[name]
            else:
                self.genome = GenomeCache(os.path.join(root, "genomes"))[name]
        else:
            self.genome = genome
        if transcript_models is None:
            # Defer the expensive catalog load until first access.
            self.transcript_models = LazyTranscriptLoader(system=self)
        else:
            self.transcript_models = transcript_models
        # Fails before first access due to lazy loading of genome
        # self.chr_sizes = self.genome.data.chrom_stats

    def get_annotations_track(self, path="", accessor=AnnotationAccessor, **kwargs):
        """Return the compiled annotation track for this system.

        Bug fix: this method was missing its ``self`` parameter while still
        referencing self.root/self.name, so every call raised a TypeError /
        produced a bogus path.
        """
        if not path:
            path = os.path.join(self.root, "annotation", self.name, "compiled")
        return Track(path, accessor, **kwargs)

    def load_track(self, path, **kwargs):
        """Load *path* as a track bound to this system."""
        return load_track(path, system=self, **kwargs)

    def get_refGenes(self):
        """Return the (lazily loaded) transcript model catalog."""
        return self.transcript_models
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Time : 2018/5/28 22:03
# @Author : ddvv
# @Site : http://ddvv.life
# @File : xiaomistorespider.py
# @Software: PyCharm
"""
第三方依赖库: Crypto
功能:
1. 获取小米商店应用评论
消息说明:
1. "AppSpider-0010-001" : 应用评论
"""
import scrapy
from appspider.commonapis import *
# Static metadata attached to every item this spider emits.
CONST_INFO = {
    'app_name': 'com.xiaomi.market',
    'app_version': 'R.1.4.5',
    'spider_author': 'ddvv'
}
class xiaomistorecommentsspider(scrapy.Spider):
    """Scrape app comments from the Xiaomi app store (app.market.xiaomi.com).

    (The original docstring labelled this the "China Judgements" app spider,
    which was a copy/paste leftover; the URL below targets the Xiaomi store.)
    """
    # Spider name used by scrapy to select this crawler.
    name = 'xiaomistorecommentsspider'

    def __init__(self, appid, **kwargs):
        super().__init__(**kwargs)
        # Request headers mimicking a real Android market client.
        self.header = {
            "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 7.0; HUAWEI NXT-AL10 Build/HUAWEINXT-AL10)",
            "Host": "app.market.xiaomi.com",
            "Accept-Encoding": "gzip, deflate"
        }
        # Target application id whose comments are fetched.
        self.appid = appid

    # Spider entry point: build and issue the requests.
    def start_requests(self):
        """Yield GET requests for comment pages 0-9 of the target app.
        """
        appid = self.appid
        header = self.header
        burl = "https://app.market.xiaomi.com/apm/comment/list/{" \
               "appid}?channel=market_100_1_android&clientId=70a40c54102b9be2da4664cd819bbc32&co=CN" \
               "&densityScaleFactor=3.0&imei=6066eb90c6d80f6e8eaa7afd48256483&la=zh&marketVersion=147&model=HUAWEI" \
               "+NXT-AL10&os=C00B577&page={page}&resolution=1080*1812&sdk=24&session=2jmj7l5rSw0yVb_v"
        for page in range(0, 10):
            url = burl.format(appid=appid, page=page)
            yield scrapy.Request(url=url,
                                 headers=header,
                                 method='GET',
                                 callback=self.parse)

    # Parse the response and push the item to the pipeline.
    def parse(self, response):
        """Decode the JSON comment payload and emit it as a spider item.

        :param response: the scrapy response for one comment page.
        """
        # NOTE(review): ``json``, ``logger`` and ``setappspideritem`` are not
        # imported here directly -- presumably provided by the star import
        # from appspider.commonapis; confirm.  Also, the message id below is
        # 'AppSpider-0009-001' while the module header documents
        # 'AppSpider-0010-001' -- verify which is intended.
        try:
            js = json.loads(response.body.decode())
            js['appid'] = self.appid
            item = setappspideritem('AppSpider-0009-001', 'json', js, **CONST_INFO)
            yield item
        except Exception as e:
            logger.error(str(e))
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Simple tcp fuzz against a target
import socket
from sys import exit,argv
if len(argv) < 2:
print "Performs a simple fuzz against a target"
print "Usage: %s <Target IP Address/hostname> <Target Port>" % str(argv[0])
exit(1)
#Create an arry of buffers, from 10 to 2000, with increments of 20.
buffer=["A"]
counter=100
while len(buffer) <= 30:
buffer.append("A"*counter)
counter=counter+200
for string in buffer:
print "Fuzzing %s:%s with %s bytes" % (str(argv[1]),int(argv[2]),len(string))
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect=s.connect((str(argv[1]),int(argv[2])))
# This next part depends on whatever the RFC is for what you're trying to
# exploit. Up to you to put the 'string' in the right place. Be sure to
# receive bytes after sending anything.
s.recv(1024) # Grab the banner, do not remove
s.send(string + "\r\n") # Sends your evil buffer as 'string'
s.send('QUIT\r\n') # Replace 'QUIT' with whatever ends your session
s.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import sys
import re
# Confidence threshold below which predictions are treated as abstentions.
threshold = float(sys.argv[1])
# Global counts: true positives, false positives, false negatives.
tp = 0
fp = 0
fn = 0
# Per-gold-label counts: label -> [tp, fp, fn].
typePr = {}
for line in sys.stdin:
    # Only lines starting with a digit carry instance records.
    if re.search(r'^\d', line):
        fields = line.rstrip('\n').split(' ')
        gold = fields[1]
        (predicted, conf) = fields[2].split(':')
        print "%s\t%s\t%s" % (gold, predicted, conf)
        conf = float(conf)
        if not typePr.has_key(gold):
            typePr[gold] = [0.0, 0.0, 0.0] #tp, fp, fn
        if conf > threshold and predicted != 'error':
            if predicted == gold:
                tp += 1.0
                typePr[gold][0] += 1.0
            else:
                # A confident wrong prediction counts as both a FP and a FN,
                # attributed to the GOLD label's row.
                fp += 1.0
                typePr[gold][1] += 1.0
                fn += 1.0
                typePr[gold][2] += 1.0
        elif gold != 'error':
            # Low-confidence (or 'error') predictions are misses.
            fn += 1
# NOTE(review): tp + fp can be zero when nothing clears the threshold, which
# would raise ZeroDivisionError below -- confirm the inputs guarantee at
# least one confident prediction.
print "tp=%s\tfp=%s\tfn=%s" % (tp, fp, fn)
p = float(tp) / float(tp + fp)
r = float(tp) / float(tp + fn)
f = 2 * p * r / (p + r)
print "p=%s\tr=%s\tf=%s" % (p,r,f)
# Per-label precision/recall/F1, with guarded divisions.
for t in typePr.keys():
    tp = typePr[t][0]
    fp = typePr[t][1]
    fn = typePr[t][2]
    if tp + fp > 0:
        p = float(tp) / float(tp + fp)
    else:
        p = 0.0
    if tp + fn > 0:
        r = float(tp) / float(tp + fn)
    else:
        r = 0.0
    if p + r > 0:
        f = 2 * p * r / (p + r)
    else:
        f = 0.0
    print "%s\tp=%s\tr=%s\tf=%s" % (t,p,r,f)
|
nilq/baby-python
|
python
|
import datetime as dt
import re
from collections import namedtuple
from pathlib import Path
import pytest
import ravenpy
from ravenpy.config.commands import (
BasinStateVariablesCommand,
EvaluationPeriod,
GriddedForcingCommand,
HRUStateVariableTableCommand,
)
from ravenpy.config.rvs import OST, RVC, RVH, RVI, RVP, RVT, Config
from ravenpy.extractors import (
RoutingProductGridWeightExtractor,
RoutingProductShapefileExtractor,
)
from ravenpy.utilities.testdata import get_local_testdata
class TestRV:
    """Checks for RVI date handling and evaluation-period rendering."""

    def test_end_date(self):
        # duration is derived from start/end dates, and assigning duration
        # back-computes the end date.
        rvi = RVI(None)
        rvi.run_name = "test"
        rvi.start_date = dt.datetime(2000, 1, 1)
        rvi.end_date = dt.datetime(2000, 1, 11)
        assert 10 == rvi.duration
        rvi.duration = 11
        assert dt.datetime(2000, 1, 12) == rvi.end_date

    def test_evaluation_metrics(self):
        # Only known metric names are accepted; unknown ones raise.
        rvi = RVI(None)
        rvi.evaluation_metrics = "LOG_NASH"
        with pytest.raises(ValueError):
            rvi.evaluation_metrics = "JIM"

    def test_evaluation_periods(self):
        # Each period renders as one ":EvaluationPeriod" line; both string
        # and datetime.date inputs are accepted.
        rvi = RVI(None)
        assert rvi.evaluation_periods == ""
        rvi.evaluation_periods = [
            EvaluationPeriod("dry", "1980-01-01", "1989-12-31"),
            EvaluationPeriod("wet", "1990-01-01", "2000-12-31"),
        ]
        out = rvi.evaluation_periods
        assert len(out.split("\n")) == 2
        assert out.startswith(":EvaluationPeriod")
        # Check date input
        d = EvaluationPeriod("dry", dt.date(1980, 1, 1), dt.date(1989, 12, 31))
        assert str(d) == str(rvi.evaluation_periods.splitlines()[0])
class TestOst:
    """Checks for Ostrich calibration configuration rendering."""

    def test_random(self):
        # An unset seed renders as an empty string; a set seed renders
        # as a "RandomSeed N" directive.
        o = OST(None)
        assert o.random_seed == ""
        o.random_seed = 0
        assert o.random_seed == "RandomSeed 0"

    def test_evaluation_metric_multiplier(self):
        config = Config(model=None)
        config.rvi.evaluation_metrics = ["RMSE", "NASH_SUTCLIFFE"]
        assert config.ost.evaluation_metric_multiplier == 1
        with pytest.raises(ValueError):
            # PCT_BIAS cannot be used for calibration, so accessing the
            # multiplier afterwards raises.
            config.rvi.evaluation_metrics = ["PCT_BIAS"]
            config.ost.evaluation_metric_multiplier
class TestRVI:
    """Checks for RVI output-suppression rendering."""

    def test_supress_output(self):
        # NOTE(review): the method name misspells "suppress"; renaming would
        # change the collected test id, so it is left as-is.
        rvi = RVI(None)
        rvi.suppress_output = True
        assert rvi.suppress_output == ":SuppressOutput\n:DontWriteWatershedStorage"
        rvi = RVI(None)
        rvi.suppress_output = False
        assert rvi.suppress_output == ""
class TestRVC:
    """Parses a solution.rvc file and checks state extraction/rendering."""

    @classmethod
    def setup_class(self):
        # NOTE(review): pytest passes the class object here, so "self" is
        # really the class; attributes set below are class-level.
        sol = open(get_local_testdata("gr4j_cemaneige/solution.rvc")).read()
        self.rvc = RVC.create_solution(sol)

    def test_parse(self):
        # Values are fixed by the contents of the sample solution file.
        assert len(self.rvc.hru_states) == 1
        assert self.rvc.hru_states[1].atmosphere == 821.98274
        assert self.rvc.hru_states[1].atmos_precip == -1233.16
        assert len(self.rvc.basin_states) == 1
        assert self.rvc.basin_states[1].channel_storage == 0
        assert self.rvc.basin_states[1].qout == (1, 13.21660, 13.29232)

    def test_format(self):
        rv = self.rvc.to_rv()
        assert ":BasinIndex 1 watershed" in rv
class TestRVH:
    """Extracts HRU/subbasin structure from the routing-product shapefile."""

    @classmethod
    def setup_class(self):
        # NOTE(review): pytest passes the class object here, so "self" is
        # really the class; attributes set below are class-level.
        shp = get_local_testdata("raven-routing-sample/finalcat_hru_info.zip")
        extractor = RoutingProductShapefileExtractor(shp)
        config = extractor.extract()
        self.rvh = RVH(None)
        # channel_profiles belong to RVP, not RVH (see TestRVP below).
        for k, v in config.items():
            if k != "channel_profiles":
                self.rvh.update(k, v)

    def test_import_process(self):
        # Counts are fixed by the sample shapefile contents.
        assert len(self.rvh.subbasins) == 46
        assert len(self.rvh.land_subbasin_ids) == 41
        assert len(self.rvh.lake_subbasin_ids) == 5
        assert len(self.rvh.reservoirs) == 5
        assert len(self.rvh.hrus) == 51

    def test_format(self):
        res = self.rvh.to_rv()
        sbs = (
            re.search(":SubBasins(.+):EndSubBasins", res, re.MULTILINE | re.DOTALL)
            .group(1)
            .split("\n")
        )
        sbs = list(filter(None, sbs))  # remove whitespaces
        # +2: presumably the attribute/unit header lines inside the section
        # -- confirm against the RVH template.
        assert len(sbs) == len(self.rvh.subbasins) + 2
        assert res.count("ZERO-") == len(self.rvh.reservoirs)
        hrus = (
            re.search(":HRUs(.+):EndHRUs", res, re.MULTILINE | re.DOTALL)
            .group(1)
            .split("\n")
        )
        hrus = list(filter(None, hrus))  # remove whitespaces
        assert len(hrus) == len(self.rvh.hrus) + 2
        assert res.count(":Reservoir") == len(self.rvh.reservoirs)
class TestRVP:
    """Tests for channel profiles imported into the RVP configuration."""

    @classmethod
    def setup_class(self):
        # Build an RVP whose template renders only the channel profiles,
        # extracted from the routing-product sample shapefile.
        shp = get_local_testdata("raven-routing-sample/finalcat_hru_info.zip")
        config = RoutingProductShapefileExtractor(shp).extract()
        self.rvp = RVP(None)
        self.rvp.tmpl = "{channel_profiles}"
        self.rvp.channel_profiles = config["channel_profiles"]

    def test_import_process(self):
        # One profile per sub-basin in the sample data.
        assert len(self.rvp.channel_profiles) == 46

    def test_format(self):
        rendered = self.rvp.to_rv()
        # Every profile opens and closes its own block.
        assert rendered.count(":ChannelProfile") == 46
        assert rendered.count(":EndChannelProfile") == 46
class TestRVT:
    """Tests for gridded-forcing grid weights generated from the routing
    product sample."""

    @classmethod
    def setup_class(self):
        input_file = get_local_testdata("raven-routing-sample/VIC_streaminputs.nc")
        routing_file = get_local_testdata("raven-routing-sample/finalcat_hru_info.zip")
        extractor = RoutingProductGridWeightExtractor(input_file, routing_file)
        gws = extractor.extract()
        self.gfc = GriddedForcingCommand(grid_weights=gws)

    def test_import_process(self):
        res = self.gfc.to_rv()
        assert ":NumberHRUs 51" in res
        assert ":NumberGridCells 100" in res
        # FIXME: This test is not superb.
        # The line count is sensitive to any formatting change in to_rv().
        assert len(res.split("\n")) == 226
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate, get_last_day, add_days
from erpnext.assets.doctype.asset.test_asset import create_asset_data
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import make_purchase_receipt
from erpnext.assets.doctype.asset_value_adjustment.asset_value_adjustment import get_current_asset_value
class TestAssetValueAdjustment(unittest.TestCase):
    """Tests for the Asset Value Adjustment doctype."""

    def setUp(self):
        create_asset_data()

    def _create_depreciable_asset(self):
        """Create and submit a depreciable 'Macbook Pro' asset via a
        purchase receipt; return the submitted Asset document.

        Extracted from the two tests below, which previously duplicated this
        setup verbatim (the old second test even set ``calculate_depreciation``
        twice).
        """
        pr = make_purchase_receipt(item_code="Macbook Pro",
            qty=1, rate=100000.0, location="Test Location")
        asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
        asset_doc = frappe.get_doc('Asset', asset_name)
        month_end_date = get_last_day(nowdate())
        # Use today as the purchase date unless today *is* the month end,
        # in which case back-date so purchase precedes depreciation start.
        purchase_date = nowdate() if nowdate() != month_end_date else add_days(nowdate(), -15)
        asset_doc.available_for_use_date = purchase_date
        asset_doc.purchase_date = purchase_date
        asset_doc.calculate_depreciation = 1
        asset_doc.append("finance_books", {
            "expected_value_after_useful_life": 200,
            "depreciation_method": "Straight Line",
            "total_number_of_depreciations": 3,
            "frequency_of_depreciation": 10,
            "depreciation_start_date": month_end_date
        })
        asset_doc.submit()
        return asset_doc

    def test_current_asset_value(self):
        # Before any depreciation entry or adjustment, the current value
        # equals the gross purchase amount.
        asset_doc = self._create_depreciable_asset()
        current_value = get_current_asset_value(asset_doc.name)
        self.assertEqual(current_value, 100000.0)

    def test_asset_depreciation_value_adjustment(self):
        asset_doc = self._create_depreciable_asset()
        current_value = get_current_asset_value(asset_doc.name)
        adj_doc = make_asset_value_adjustment(asset=asset_doc.name,
            current_asset_value=current_value, new_asset_value=50000.0)
        adj_doc.submit()
        # Writing the value down by 50,000 books that amount as extra
        # depreciation via a Journal Entry.
        expected_gle = (
            ("_Test Accumulated Depreciations - _TC", 0.0, 50000.0),
            ("_Test Depreciations - _TC", 50000.0, 0.0)
        )
        gle = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
            where voucher_type='Journal Entry' and voucher_no = %s
            order by account""", adj_doc.journal_entry)
        self.assertEqual(gle, expected_gle)
def make_asset_value_adjustment(**args):
    """Create and insert an Asset Value Adjustment document.

    Keyword arguments mirror the doctype fields; ``company``, ``date`` and
    ``cost_center`` fall back to test-suite defaults when omitted.
    """
    args = frappe._dict(args)
    fields = {
        "doctype": "Asset Value Adjustment",
        "company": args.company or "_Test Company",
        "asset": args.asset,
        "date": args.date or nowdate(),
        "new_asset_value": args.new_asset_value,
        "current_asset_value": args.current_asset_value,
        "cost_center": args.cost_center or "Main - _TC"
    }
    return frappe.get_doc(fields).insert()
|
nilq/baby-python
|
python
|
import logging
import os
from ...utils import import_export_content
from ...utils import paths
from ...utils import transfer
from kolibri.core.tasks.management.commands.base import AsyncCommand
logger = logging.getLogger(__name__)
class Command(AsyncCommand):
    """Management command that copies a channel's content files to a
    destination folder, reporting progress and supporting cancellation."""

    def add_arguments(self, parser):
        # NOTE(review): the help text says "import", but this command copies
        # files *out* of local storage — presumably copied from the
        # importcontent command; confirm wording.
        node_ids_help_text = """
        Specify one or more node IDs to import. Only the files associated to those node IDs will be imported.
        Make sure to call this near the end of the argument list.
        e.g.
        kolibri manage importcontent network <channel id> --node_ids <id1>,<id2>, [<ids>,...]
        """
        parser.add_argument(
            "--node_ids", "-n",
            # Split the comma separated string we get, into a list of strings
            type=lambda x: x.split(","),
            default=[],
            required=False,
            dest="node_ids",
            help=node_ids_help_text,
        )
        exclude_node_ids_help_text = """
        Specify one or more node IDs to exclude. Files associated to those node IDs will be not be imported.
        Make sure to call this near the end of the argument list.
        e.g.
        kolibri manage importcontent network <channel id> --exclude_node_ids <id1>,<id2>, [<ids>,...]
        """
        parser.add_argument(
            "--exclude_node_ids",
            type=lambda x: x.split(","),
            default=[],
            required=False,
            dest="exclude_node_ids",
            help=exclude_node_ids_help_text
        )
        parser.add_argument("channel_id", type=str)
        parser.add_argument("destination", type=str)

    def handle_async(self, *args, **options):
        # Entry point run by AsyncCommand. Copies each selected file while
        # driving two nested progress trackers (overall and per-file).
        channel_id = options["channel_id"]
        data_dir = os.path.realpath(options["destination"])
        node_ids = options["node_ids"]
        exclude_node_ids = options["exclude_node_ids"]
        logger.info("Exporting content for channel id {} to {}".format(channel_id, data_dir))
        files, total_bytes_to_transfer = import_export_content.get_files_to_transfer(
            channel_id, node_ids, exclude_node_ids, True)
        exported_files = []
        with self.start_progress(total=total_bytes_to_transfer) as overall_progress_update:
            for f in files:
                # Poll for cancellation between files.
                if self.is_cancelled():
                    break
                filename = f.get_filename()
                srcpath = paths.get_content_storage_file_path(filename)
                dest = paths.get_content_storage_file_path(filename, datafolder=data_dir)
                # if the file already exists, add its size to our overall progress, and skip
                if os.path.isfile(dest) and os.path.getsize(dest) == f.file_size:
                    overall_progress_update(f.file_size)
                    continue
                copy = transfer.FileCopy(srcpath, dest)
                with copy:
                    with self.start_progress(total=copy.total_size) as file_cp_progress_update:
                        for chunk in copy:
                            # Poll for cancellation between chunks too.
                            if self.is_cancelled():
                                copy.cancel()
                                break
                            length = len(chunk)
                            overall_progress_update(length)
                            file_cp_progress_update(length)
                        else:
                            # for/else: record the file as exported only when
                            # the chunk loop completed without a cancel-break.
                            exported_files.append(dest)
        if self.is_cancelled():
            # Cancelled, clean up any already downloading files.
            for dest in exported_files:
                os.remove(dest)
            self.cancel()
|
nilq/baby-python
|
python
|
import os
import mimetypes
import json
from plantcv.plantcv import fatal_error
# Process results. Parse individual image output files.
###########################################
def process_results(job_dir, json_file):
    """Get results from individual files. Parse the results and recompile for SQLite.

    Walks ``job_dir``, merges every plain-text/JSON observation file into a
    combined data dictionary, and (re)writes ``json_file`` with the result.

    Args:
        job_dir:   Intermediate file output directory.
        json_file: Path to the aggregated JSON data table.
    :param job_dir: str
    :param json_file: str
    """
    if os.path.exists(json_file):
        # Resume from an existing aggregate file; it must contain both
        # top-level sections written by a previous run.
        with open(json_file, 'r') as datafile:
            try:
                data = json.load(datafile)
                if "variables" not in data or "entities" not in data:
                    fatal_error("Invalid JSON file")
            except ValueError:
                # json.JSONDecodeError is a ValueError subclass.  A bare
                # except here previously masked unrelated errors (e.g. I/O)
                # as "invalid JSON"; those now propagate.
                fatal_error("Invalid JSON file")
    else:
        # Fresh aggregate: variable catalog plus one entry per image.
        data = {"variables": {}, "entities": []}
    # Walk through the image processing job directory and process data from each file
    for dirpath, _dirnames, filenames in os.walk(job_dir):
        for filename in filenames:
            # Only consume text or JSON result files (guess_type returns a
            # (type, encoding) tuple; membership test checks the type slot).
            if 'text/plain' in mimetypes.guess_type(filename) or 'application/json' in mimetypes.guess_type(filename):
                # Open results file
                with open(os.path.join(dirpath, filename)) as results:
                    obs = json.load(results)
                data["entities"].append(obs)
                # Keep track of all metadata variables stored
                # (renamed loop variables: `vars` shadowed the builtin).
                for var in obs["metadata"]:
                    data["variables"][var] = {"category": "metadata", "datatype": "<class 'str'>"}
                # Keep track of all observations variables stored
                for var in obs["observations"]:
                    data["variables"][var] = {"category": "observations",
                                              "datatype": obs["observations"][var]["datatype"]}
    # Write out json file with info from all images
    with open(json_file, 'w') as datafile:
        json.dump(data, datafile)
###########################################
|
nilq/baby-python
|
python
|
import logging
import traceback
import urllib
import datetime
import mimetypes
import os
import sys
import zlib
import gzip
import StringIO
import json
from pylons import request, response, session, tmpl_context as c
from pylons import app_globals
from pypesvds.lib.base import BaseController, render
from pypesvds.lib.packet import Packet
from pypesvds.lib.utils import abort
log = logging.getLogger(__name__)

# Register the bundled mime.types file so uploaded filenames resolve to the
# right content type even on hosts with a sparse system mime database.
mimes = os.path.join(os.path.dirname(__file__), 'mime.types')
mimetypes.init([mimes])
class DataController(BaseController):
    """REST-style Pylons controller that feeds documents into the pypes
    dataflow graph (``app_globals.dfg``).

    NOTE(review): this is Python 2 code (``StringIO``, ``unicode``); keep it
    on a Python 2 interpreter unless ported.
    """

    def _decompress(self, encoding, data):
        """ decompress data if it is gzipped """
        filedata = data
        if encoding == 'gzip':
            log.debug('Found gzipped data, decompressing')
            # gzip files have a header preceding the zlib stream.
            # try with zlib (streams compressed on the fly) and if
            # that fails, try the gzip module
            try:
                filedata = zlib.decompress(data)
            except:
                gz_data = StringIO.StringIO(data)
                filedata = gzip.GzipFile(fileobj=gz_data).read()
        return filedata

    def create(self, route=None, id=None):
        """Ingest a document from the request body (raw or multipart upload)
        and forward it to the dataflow graph.

        Responds 202 on success (processing is asynchronous), otherwise
        returns a JSON status object.
        """
        status = {}
        try:
            content_encoding = request.headers.get('Content-Encoding', None)
            content_type = request.headers.get('Content-Type', None)
            content_length = request.headers.get('Content-Length', None)
            log.debug('content_encoding: %s' % content_encoding)
            log.debug('content_type: %s' % content_type)
            log.debug('content_length: %s' % content_length)
        except Exception as e:
            log.error('Controller Exception: %s' % self.__class__.__name__)
            log.error('Reason: %s' % str(e))
            log.debug(traceback.print_exc())
            abort(500, str(e))
        else:
            # bad content-type
            if content_type == 'application/x-www-form-urlencoded':
                abort(415, "Invalid or Unspecified Content-Type")
            try:
                packet = Packet()
                # indicates a file upload
                if content_type.startswith('multipart/form-data;'):
                    log.debug('found multipart form data, attempting to find source filename')
                    part = request.POST['document']
                    if part.filename:
                        fname = unicode(part.filename.lstrip(os.sep))
                        packet.set_meta('filename', fname)
                        # update content type based on filename
                        content_type = unicode(mimetypes.guess_type(fname)[0])
                    data = part.value
                else:
                    data = request.body
                # decompress if compressed
                filedata = self._decompress(content_encoding, data)
                # update content length since we might be decompressed now
                content_length = len(filedata)
                if content_length > 0:
                    packet.add('data', filedata)
                else:
                    abort(400, 'Empty Request')
                # set optional user provided id
                if id is not None:
                    log.debug('id: %s' % id)
                    packet.set_meta('id', id)
                # set optional user provided routing info
                if route is not None:
                    log.debug('route: %s' % route)
                    packet.set_meta('route', route)
                # set some common meta attributes on the packet
                packet.set_meta('requestmethod', request.method)
                packet.set_meta('contentlength', content_length)
                packet.set_meta('mimetype', content_type)
                packet.set_meta('processingtime', unicode(
                    datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')))
                status = app_globals.dfg.send(packet)
                # calls into pypes core are asynchronous so we respond as such
                if status['status'] == 'success':
                    response.status = 202
            except Exception as e:
                log.error('Controller Exception: %s' % self.__class__.__name__)
                log.error('Reason: %s' % str(e))
                log.debug(traceback.print_exc())
                abort(500, str(e))
        # return empty body on success otherwise return status object
        return None if status['status'] == 'success' else json.dumps(status)

    def delete(self, route, id):
        """Send a delete request for document *id* into the dataflow graph."""
        status = {}
        try:
            packet = Packet()
            # set packet meta attributes
            packet.set_meta('id', id)
            packet.set_meta('requestmethod', request.method)
            packet.set_meta('processingtime', unicode(
                datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')))
            status = app_globals.dfg.send(packet)
            # calls into pypes core are asynchronous so we respond as such
            if status['status'] == 'success':
                response.status = 202
        except Exception as e:
            # NOTE(review): this rebinding of `status` to a string is dead —
            # abort() below raises before `status` is read again.
            status = 'An Undefined Error Has Occurred'
            log.error('Controller Exception: %s' % self.__class__.__name__)
            log.error('Reason: %s' % str(e))
            log.debug(traceback.print_exc())
            abort(500, str(e))
        # return empty body on success otherwise return status object
        return None if status['status'] == 'success' else json.dumps(status)

    def get(self, route=None, id=None):
        """Send a fetch request (optionally scoped by *route*/*id*) into the
        dataflow graph."""
        status = {}
        try:
            packet = Packet()
            # set packet meta attributes
            if id is not None:
                packet.set_meta('id', id)
            # set optional user provided routing info
            if route is not None:
                packet.set_meta('route', route)
            packet.set_meta('requestmethod', request.method)
            packet.set_meta('processingtime', unicode(
                datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')))
            status = app_globals.dfg.send(packet)
            # calls into pypes core are asynchronous so we respond as such
            if status['status'] == 'success':
                response.status = 202
        except Exception as e:
            # NOTE(review): `status` is overwritten twice here and abort()
            # raises, so the trailing return is unreachable on error.
            status = 'An Undefined Error Has Occurred'
            log.error('Controller Exception: %s' % self.__class__.__name__)
            log.error('Reason: %s' % str(e))
            log.debug(traceback.print_exc())
            response.content_type = 'application/json'
            response.status = 500
            status['error'] = str(e)
            abort(500, str(e))
        # return empty body on success otherwise return status object
        return None if status['status'] == 'success' else json.dumps(status)
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.2 on 2018-05-16 11:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 2.0.2 — do not hand-edit operations.

    Appears to only adjust Project field options (verbose names, help text,
    choices display) rather than the underlying schema — confirm with
    ``sqlmigrate`` if in doubt.
    """

    dependencies = [
        ('project', '0019_auto_20180516_1034'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='allocation_systems',
            field=models.ManyToManyField(through='project.ProjectSystemAllocation', to='system.System', verbose_name='Allocation systems'),
        ),
        migrations.AlterField(
            model_name='project',
            name='category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='project.ProjectCategory', verbose_name='Category'),
        ),
        migrations.AlterField(
            model_name='project',
            name='created_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Created time'),
        ),
        migrations.AlterField(
            model_name='project',
            name='department',
            field=models.CharField(blank=True, max_length=128, verbose_name='Department'),
        ),
        migrations.AlterField(
            model_name='project',
            name='economic_user',
            field=models.BooleanField(default=False, verbose_name='Economic user'),
        ),
        migrations.AlterField(
            model_name='project',
            name='end_date',
            field=models.DateField(verbose_name='End date'),
        ),
        migrations.AlterField(
            model_name='project',
            name='funding_source',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.ProjectFundingSource', verbose_name='Funding source'),
        ),
        migrations.AlterField(
            model_name='project',
            name='institution',
            field=models.ForeignKey(help_text='Institution project is based', on_delete=django.db.models.deletion.CASCADE, to='institution.Institution', verbose_name='Institution'),
        ),
        migrations.AlterField(
            model_name='project',
            name='members',
            field=models.ManyToManyField(through='project.ProjectUserMembership', to=settings.AUTH_USER_MODEL, verbose_name='Members'),
        ),
        migrations.AlterField(
            model_name='project',
            name='modified_time',
            field=models.DateTimeField(auto_now=True, verbose_name='Modified time'),
        ),
        migrations.AlterField(
            model_name='project',
            name='notes',
            field=models.TextField(blank=True, help_text='Internal project notes', max_length=512, verbose_name='Notes'),
        ),
        migrations.AlterField(
            model_name='project',
            name='requirements_gateways',
            field=models.TextField(help_text='Web gateway or portal name and versions', max_length=512, verbose_name='Requirements gateways'),
        ),
        migrations.AlterField(
            model_name='project',
            name='requirements_onboarding',
            field=models.TextField(max_length=512, verbose_name='Requirements onboarding'),
        ),
        migrations.AlterField(
            model_name='project',
            name='requirements_software',
            field=models.TextField(help_text='Software name and versions', max_length=512, verbose_name='Requirements software'),
        ),
        migrations.AlterField(
            model_name='project',
            name='requirements_training',
            field=models.TextField(max_length=512, verbose_name='Requirements training'),
        ),
        migrations.AlterField(
            model_name='project',
            name='start_date',
            field=models.DateField(verbose_name='Start date'),
        ),
        migrations.AlterField(
            model_name='project',
            name='status',
            field=models.PositiveSmallIntegerField(choices=[(1, 'Awaiting Approval'), (2, 'Approved'), (3, 'Declined'), (4, 'Revoked'), (5, 'Suspended'), (6, 'Closed')], default=1, verbose_name='Status'),
        ),
    ]
|
nilq/baby-python
|
python
|
from typing import Tuple, Iterable
from rlp.utils import str_to_bytes
from state.util import utils
from storage.kv_store import KeyValueStorage
# log = get_logger('db')
databases = {}
class KeyValueStorageInMemory(KeyValueStorage):
    """Dict-backed KeyValueStorage, used for tests.

    Keys and values are stored as ``bytes``; ``str`` arguments are encoded
    on the way in.
    """

    def __init__(self):
        self._dict = {}

    def get(self, key):
        """Return the value for *key*; raises KeyError if absent."""
        if isinstance(key, str):
            key = key.encode()
        return self._dict[key]

    def put(self, key, value):
        if isinstance(key, str):
            key = key.encode()
        if isinstance(value, str):
            value = value.encode()
        self._dict[key] = value

    def remove(self, key):
        if isinstance(key, str):
            key = key.encode()
        del self._dict[key]

    def setBatch(self, batch: Iterable[Tuple]):
        for key, value in batch:
            self.put(key, value)

    def do_ops_in_batch(self, batch: Iterable[Tuple]):
        for op, key, value in batch:
            if op == self.WRITE_OP:
                self.put(key, value)
            elif op == self.REMOVE_OP:
                self.remove(key)
            else:
                raise ValueError('Unknown operation')

    def open(self):
        pass

    def close(self):
        pass

    def drop(self):
        self._dict = {}

    def reset(self):
        self._dict = {}

    def iterator(self, start=None, end=None, include_key=True,
                 include_value=True, prefix=None):
        """Iterate over stored items, optionally bounded to [start, end].

        Bug fix: the previous bounds check used ``key in range(start, end)``,
        which is always False for the ``bytes`` keys this store holds (and
        was inconsistent with the inclusive single-sided checks).  Bounds
        are now inclusive comparisons on both sides.
        """
        if not (include_key or include_value):
            raise ValueError("At least one of includeKey or includeValue "
                             "should be true")

        def _in_bounds(key):
            # Inclusive on both ends, matching the single-sided behaviour
            # of the original ``key >= start`` / ``key <= end`` branches.
            if start is not None and key < start:
                return False
            if end is not None and key > end:
                return False
            return True

        if include_key and include_value:
            if start or end:
                return {k: v for k, v in self._dict.items() if _in_bounds(k)}
            return self._dict.items()
        if include_key:
            if start or end:
                return (k for k in self._dict.keys() if _in_bounds(k))
            return self._dict.keys()
        if include_value:
            if start or end:
                return (v for k, v in self._dict.items() if _in_bounds(k))
            return self._dict.values()

    def closed(self):
        return False

    def is_byte(self):
        return False

    def db_path(self) -> str:
        return ""

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self._dict == other._dict

    def __hash__(self):
        return utils.big_endian_to_int(str_to_bytes(self.__repr__()))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
## define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
    """Small two-convolution CNN for 224x224 grayscale images.

    Produces 3 output logits per sample.
    """

    def __init__(self):
        super(Net, self).__init__()
        # conv1: (1, 224, 224) -> (32, 220, 220); pooled -> (32, 110, 110)
        self.conv1 = nn.Conv2d(1, 32, 5)
        # conv2: (32, 110, 110) -> (64, 108, 108); pooled -> (64, 54, 54)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.drop_layer1 = nn.Dropout(p=0.4)
        self.drop_layer2 = nn.Dropout(p=0.2)
        self.lin1 = nn.Linear(64 * 54 * 54, 3)

    def forward(self, x):
        """Map a (N, 1, 224, 224) batch to (N, 3) logits."""
        x = self.drop_layer1(self.pool(F.relu(self.conv1(x))))
        x = self.drop_layer2(self.pool(F.relu(self.conv2(x))))
        # Flatten all feature maps before the linear classifier head.
        flat = x.view(x.size(0), -1)
        return self.lin1(flat)
|
nilq/baby-python
|
python
|
from .CTCModel import *
|
nilq/baby-python
|
python
|
'''
Module containing all the requisite classes to perform test steps.
Adding new actions
-------------------
Creating new simple actions in the code is designed to be fairly straightforward, and only
requires three steps:
1. Add an entry for the action on the ``enums`` module
2. Create a function to perform the actual step under the ``TestStep`` class
3. Add an entry to the selector with the enum as a key and the function as a value
Keep in mind that the step function should also validate any required data, and that
updating the schema for proper json validation is essential.
If the parameters for the new action are expected to be enums, you must also add the logic
for converting the parameter from string to enum in the ``UIValidation`` class.
'''
from typing import (
Optional,
Dict,
Any,
)
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from quilla.ctx import Context
from quilla.common.enums import (
UITestActions,
)
from quilla.steps.base_steps import (
BaseStepFactory,
BaseStep
)
# Steps classes
class TestStep(BaseStep, BaseStepFactory):
    '''
    Class that contains the definition of a single test step.

    Used for setting up validations

    Args:
        ctx: The runtime context of the application
        action: The action enum for this step
        target: What the target for this step is, if applicable
        parameters: Extra options for certain actions
        aggregator: The parent object holding this step
        driver: The browser driver

    Attributes:
        selector: A dictionary that maps action enums to the action function
    '''
    # Keys that must be present in the action dictionary.
    required_params = [
        'action',
    ]
    # Keys that are copied through when present.
    optional_params = [
        'target',
        'parameters',
    ]

    @classmethod
    def from_dict(
        cls,
        ctx: Context,
        action_dict,
        driver: Optional[WebDriver] = None
    ) -> 'TestStep':
        '''
        Factory method to extract needed parameters from a dictionary

        Raises:
            AttributeError: if any of ``required_params`` is missing.
        '''
        for item in cls.required_params:
            if item not in action_dict:
                raise AttributeError('Missing one or more required parameters')
        params: Dict[str, Any] = {}
        for param in cls.required_params:
            params[param] = action_dict[param]
        for param in cls.optional_params:
            if param in action_dict:
                params[param] = action_dict[param]
        return TestStep(ctx, **params, driver=driver)

    def __init__(
        self,
        ctx: Context,
        action: UITestActions,
        target: Optional[str] = None,
        parameters: Optional[dict] = None,
        driver: Optional[WebDriver] = None,
    ):
        super().__init__(ctx, action, target=target, parameters=parameters, driver=driver)
        # Dispatch table: action enum -> bound implementation method.
        # NOTE(review): _set_zoom_level exists below but is not registered
        # here — presumably intentional or pending an enum entry; confirm.
        self.selector = {
            UITestActions.CLICK: self._click,
            UITestActions.CLEAR: self._clear,
            UITestActions.SEND_KEYS: self._send_keys,
            UITestActions.NAVIGATE_TO: self._navigate_to,
            UITestActions.WAIT_FOR_VISIBILITY: self._wait_for_visibility,
            UITestActions.WAIT_FOR_EXISTENCE: self._wait_for_existence,
            UITestActions.NAVIGATE_BACK: self._navigate_back,
            UITestActions.NAVIGATE_FORWARD: self._navigate_forward,
            UITestActions.HOVER: self._hover,
            UITestActions.REFRESH: self._refresh,
            UITestActions.SET_BROWSER_SIZE: self._set_browser_size,
            UITestActions.ADD_COOKIES: self._add_cookies,
            UITestActions.SET_COOKIES: self._set_cookies,
            UITestActions.CLEAR_COOKIES: self._clear_cookies,
            UITestActions.REMOVE_COOKIE: self._remove_cookie,
        }

    def copy(self) -> 'TestStep':
        '''
        Creates a shallow copy of the TestStep object

        This is used so that each browser can have an independent copy of
        the steps, in case any script would want to edit individual browser
        steps
        '''
        return TestStep(
            self.ctx,
            self.action,
            self._target,  # Make sure it's passed in raw
            self._parameters,  # Make sure it's passed in raw
            self._driver
        )

    def perform(self):
        '''
        Runs the specified action. Wrapper for selecting proper inner method
        '''
        perform_action = self.selector[self.action]
        return perform_action()

    def _click(self):
        # Requires a target element.
        self._verify_target()
        self.element.click()

    def _clear(self):
        # Clear the text of an input element.
        self._verify_target()
        self.element.clear()

    def _send_keys(self):
        # Types parameters['data'] into the current element.
        self._verify_parameters('data')
        self.element.send_keys(self.parameters['data'])

    def _navigate_to(self):
        # Here the "target" is the URL to load.
        self._verify_target()
        self.driver.get(self.target)

    def _wait_for(self, condition):
        # Shared explicit-wait helper; timeout comes from the parameters.
        self._verify_parameters('timeoutInSeconds')
        WebDriverWait(self.driver, self.parameters['timeoutInSeconds']).until(condition)

    def _wait_for_visibility(self):
        self._verify_target()
        self._wait_for(EC.visibility_of_element_located(self.locator))

    def _wait_for_existence(self):
        self._verify_target()
        self._wait_for(EC.presence_of_element_located(self.locator))

    def _navigate_back(self):
        self.driver.back()

    def _navigate_forward(self):
        self.driver.forward()

    def _refresh(self):
        self.driver.refresh()

    def _set_browser_size(self):
        # Resize the browser window to parameters['width'] x ['height'].
        self._verify_parameters('width', 'height')
        width = self._parameters['width']
        height = self._parameters['height']
        self.driver.set_window_size(width, height)

    def _set_cookies(self):
        # "Set" semantics: replace all existing cookies with the given jar.
        self._clear_cookies()
        self._add_cookies()

    def _add_cookies(self):
        self._verify_parameters('cookieJar')
        self.driver.add_cookie(self.parameters['cookieJar'])

    def _remove_cookie(self):
        self._verify_parameters('cookieName')
        self.driver.delete_cookie(self.parameters['cookieName'])

    def _clear_cookies(self):
        self.driver.delete_all_cookies()

    def _hover(self):
        # Move the (virtual) mouse over the target element.
        self._verify_target()
        ActionChains(self.driver).move_to_element(self.element).perform()

    def _set_zoom_level(self):
        # Applies a CSS zoom to the page body; see NOTE in __init__ about
        # this method not being wired into the selector table.
        self._verify_parameters('zoomLevel')
        zoom_level = self._parameters['zoomLevel']
        self.driver.execute_script(f'document.body.style.zoom="{zoom_level}%"')
|
nilq/baby-python
|
python
|
# WLST/Jython (Python 2) domain-extension script, templated with ERB
# (<%= ... %> placeholders are substituted before execution).
# Adds the EM and ESS templates to an existing WebLogic domain, rewires
# the service-table datasource, assigns server groups, and targets the
# Coherence cluster.
execfile('<%= @tmp_dir %>/common.py')

# weblogic node params
WLHOME = '<%= @weblogic_home_dir %>'
JAVA_HOME = '<%= @java_home_dir %>'
WEBLOGIC_VERSION = '<%= @version %>'

# domain params
DOMAIN_PATH = '<%= @domain_dir %>'
DOMAIN = '<%= @domain_name %>'
APP_PATH = '<%= @app_dir %>'

# adminserver params
ADMIN_SERVER_NAME = '<%= @adminserver_name %>'
ADMIN_SERVER_LISTEN_ADDRESS = '<%= @adminserver_listen_address %>'
MACHINE_NAME = 'LocalMachine'
ESS_SERVER_STARTUP_ARGUMENTS = '<%= @ess_server_startup_arguments %>'
ESS_SERVER_LISTEN_PORT = 8201
ESS_CLUSTER = '<%= @ess_cluster %>'
SOA_CLUSTER = '<%= @soa_cluster %>'
OSB_CLUSTER = '<%= @osb_cluster %>'
BAM_CLUSTER = '<%= @bam_cluster %>'

# templates
WLS_EM_TEMPLATE = '<%= @wls_em_template %>'
WLS_ESS_EM_TEMPLATE = '<%= @wls_ess_em_template %>'
WLS_ESS_TEMPLATE = '<%= @wls_ess_template %>'

# repository
REPOS_DBURL = '<%= @repository_database_url %>'
REPOS_DBUSER_PREFIX = '<%= @repository_prefix %>'
# Password is passed on the command line so it never lands in the template.
REPOS_DBPASSWORD = sys.argv[2]

readDomain(DOMAIN_PATH)
cd('/')
setOption( "AppDir", APP_PATH )

print 'Adding EM Template'
try:
    addTemplate(WLS_EM_TEMPLATE)
except:
    # Re-running against an already-extended domain raises here; ignore.
    print "Probably already added error:", sys.exc_info()[0]

print 'Adding ESS Template'
addTemplate(WLS_ESS_TEMPLATE)
addTemplate(WLS_ESS_EM_TEMPLATE)

if ESS_CLUSTER:
    pass
else:
    # Standalone ESS server: re-home the template-created ess_server1.
    print 'change ess_server1'
    cd('/')
    changeManagedServer('ess_server1', MACHINE_NAME, ADMIN_SERVER_LISTEN_ADDRESS, ESS_SERVER_LISTEN_PORT, ESS_SERVER_STARTUP_ARGUMENTS, JAVA_HOME)

print 'Change datasources'
print 'Change datasource LocalScvTblDataSource'
changeDatasource('LocalSvcTblDataSource', REPOS_DBUSER_PREFIX+'_STB', REPOS_DBPASSWORD, REPOS_DBURL)
print 'Call getDatabaseDefaults which reads the service table'
getDatabaseDefaults()
# changeDatasourceToXA('EssDS')
print 'end datasources'

print 'Add server groups WSM-CACHE-SVR WSMPM-MAN-SVR JRF-MAN-SVR to AdminServer'
serverGroup = ["WSM-CACHE-SVR" , "WSMPM-MAN-SVR" , "JRF-MAN-SVR"]
setServerGroups(ADMIN_SERVER_NAME, serverGroup)
serverGroup = ["ESS-MGD-SVRS"]
if ESS_CLUSTER:
    # Clustered ESS: move the server group from ess_server1 onto every
    # member of the cluster and target Coherence at all known clusters.
    print 'Add server group ESS-MGD-SVRS to cluster'
    cd('/')
    setServerGroups('ess_server1', [])
    essServers = getClusterServers(ESS_CLUSTER, ADMIN_SERVER_NAME)
    cd('/')
    for i in range(len(essServers)):
        print "Add server group ESS-MGD-SVRS to " + essServers[i]
        setServerGroups(essServers[i] , serverGroup)
    print 'Assign cluster to defaultCoherenceCluster'
    cd('/')
    assign('Cluster',ESS_CLUSTER,'CoherenceClusterSystemResource','defaultCoherenceCluster')
    cd('/CoherenceClusterSystemResource/defaultCoherenceCluster')
    AllArray = []
    if SOA_CLUSTER:
        AllArray.append(SOA_CLUSTER)
    if BAM_CLUSTER:
        AllArray.append(BAM_CLUSTER)
    if OSB_CLUSTER:
        AllArray.append(OSB_CLUSTER)
    if ESS_CLUSTER:
        AllArray.append(ESS_CLUSTER)
    All = ','.join(AllArray)
    set('Target', All)
    if 'ess_server1' in essServers:
        pass
    else:
        # The template-created server is not part of the cluster; drop it.
        print "delete ess_server1"
        cd('/')
        delete('ess_server1', 'Server')
    if WEBLOGIC_VERSION == '12.2.1':
        # 12.2.1 requires reopening the domain to rebuild the UMS JMS
        # resources for the clustered topology.
        updateDomain()
        dumpStack()
        closeDomain()
        readDomain(DOMAIN_PATH)
        cleanJMS('UMSJMSSystemResource', 'UMSJMSServer_auto', 'UMSJMSFileStore_auto')
        recreateUMSJms12c(ADMIN_SERVER_NAME, SOA_CLUSTER, OSB_CLUSTER, BAM_CLUSTER, ESS_CLUSTER, All)
else:
    print 'Add server group ESS-MGD-SVRS to ess_server1'
    setServerGroups('ess_server1', serverGroup)
print 'end server groups'

updateDomain()
dumpStack()
closeDomain()

print('Exiting...')
exit()
|
nilq/baby-python
|
python
|
import random
import sys

# Number-guessing game: pick a random number in [low, high] and prompt the
# player until they find it, counting the guesses.
low = 1
high = 1000
# An optional command-line argument overrides the upper bound.
if len(sys.argv) > 1:
    high = int(sys.argv[1])

number = random.randint(low, high)
print('I have selected a number between %d and %d' % (low, high))
print('Please try to guess my number.')

guess_count = 0
while True:
    guess = input('Your guess: ')
    try:
        guess = int(guess)
    except ValueError:
        # Non-numeric input: prompt again without counting the attempt.
        print("That doesn't look like a number. Try again.")
        continue
    guess_count += 1
    if guess == number:
        print('You guessed my number in %d guesses!' % guess_count)
        break
    elif guess > number:
        print('Your guess was too high. Try again.')
    else:
        print('Your guess was too low. Try again.')
|
nilq/baby-python
|
python
|
import os
import unittest
from monty.json import MontyDecoder
from monty.serialization import loadfn
from robocrys.util import load_condensed_structure_json
class RobocrysTest(unittest.TestCase):
    """Base test class providing access to common test data. """

    # Paths resolved relative to this module so tests run from any cwd.
    _module_dir = os.path.dirname(os.path.abspath(__file__))
    _structures_dir = os.path.join(_module_dir, "structures")
    _condensed_structures_dir = os.path.join(
        _module_dir, "condensed_structures")

    # Map of fixture name -> deserialized structure object, loaded once at
    # class-definition time from the bundled *.json.gz files.
    _test_structures = {}
    for _fn in os.listdir(_structures_dir):
        if ".json.gz" in _fn:
            _test_structures[_fn.split(".")[0]] = loadfn(os.path.join(
                _structures_dir, _fn), cls=MontyDecoder)

    # Same, for the pre-condensed structure fixtures.
    _test_condensed_structures = {}
    for _fn in os.listdir(_condensed_structures_dir):
        if ".json.gz" in _fn:
            _test_condensed_structures[_fn.split(".")[0]] = \
                load_condensed_structure_json(os.path.join(
                    _condensed_structures_dir, _fn))

    @classmethod
    def get_structure(cls, name):
        # Return a copy so individual tests cannot mutate shared fixtures.
        return cls._test_structures[name].copy()

    @classmethod
    def get_condensed_structure(cls, name):
        return cls._test_condensed_structures[name].copy()
|
nilq/baby-python
|
python
|
'''
Created on Jan. 24, 2018
@author Andrew Habib
'''
from statistics import mean
from collections import Counter
import os
from Util import load_parsed_ep, load_parsed_inf, load_parsed_sb, load_json_list, get_list_of_uniq_jsons
def display_min_max_avg_warnings_per_bug_total():
    """Print, per tool, the min/max/avg number of warnings per bug and the
    total warning count, for both the buggy and the fixed program versions.

    Reads the parsed warning files from ./b/ and ./f/ (the script must be
    run from the results/ directory).
    """
    print("\nMin, Max, Avg (warnings per bug) and Total number of warnings")
    for title, rel_path in (("Buggy versions", './b/'),
                            ("Fixed versions", './f/')):
        print("\n%s:\n" % title)
        ep_all = load_parsed_ep(rel_path + 'ep_parsed.json')
        inf_all = load_parsed_inf(rel_path + 'inf_parsed.json')
        sb_all = load_parsed_sb(rel_path + 'sb_parsed.json')
        # Compute each tool's stats once; the original recomputed them
        # three extra times just for the grand total below.
        ep_stats = get_min_max_avg_warnings_per_bug_total(ep_all)
        inf_stats = get_min_max_avg_warnings_per_bug_total(inf_all)
        sb_stats = get_min_max_avg_warnings_per_bug_total(sb_all)
        # Column order follows the returned tuple (min, max, avg, total).
        # The original "Fixed versions" header listed Total first, which
        # did not match the values actually printed — fixed here.
        print("Tool Min. Max. Avg. Total")
        print("Errorprone", ep_stats)
        print("Infer", inf_stats)
        print("Spotbugs", sb_stats)
        print("\nTotal number of warnings by all tools:",
              ep_stats[3] + inf_stats[3] + sb_stats[3])
def get_min_max_avg_warnings_per_bug_total(warnings):
    """Return (min, max, mean, total) of the warnings-per-project counts.

    The first three values describe how many warnings each project (bug)
    received; the last is the overall number of warnings.
    """
    per_project = Counter(warning.proj for warning in warnings)
    counts = per_project.values()
    return min(counts), max(counts), mean(counts), sum(counts)
def get_warnings_bugs_from_each_approach():
    """Print, per tool, warning (W) and bug (B) counts for the diff-based
    approach, the fixed-warnings-based approach, and their combination.
    """
    print("\nWarnings and bugs from each automatic matching approach")
    print("** warnings for combined approach are not unique (duplicates exist) **\n")
    diff_path = './diffs_warnings/'
    fixed_path = './removed_warnings/'
    all_bugs = set()
    print("Tool Diff-based Fixed-based Combined")
    print("     W    B     W    B      W    B")
    for label, loader, prefix in (("Error Prone ", load_parsed_ep, "ep"),
                                  ("Infer       ", load_parsed_inf, "inf"),
                                  ("SpotBugs    ", load_parsed_sb, "sb")):
        res_diff = loader(diff_path + prefix + "_warnings.json")
        res_fixed = loader(fixed_path + prefix + "_warnings.json")
        b_diff = get_bugs_from_warnings(res_diff)
        b_fixed = get_bugs_from_warnings(res_fixed)
        all_bugs |= b_diff | b_fixed
        print(label, len(res_diff), len(b_diff), len(res_fixed), len(b_fixed),
              len(res_diff) + len(res_fixed), len(b_diff | b_fixed))
    # Fixed garbled heading (was "Unique warnings from each
    # approachcombined approach").
    print("\nUnique warnings from each approach and the combined approach:\n")
    for label, prefix in (("Ep ", "ep"), ("Inf", "inf"), ("Sb ", "sb")):
        raw_diff = load_json_list(diff_path + prefix + "_warnings.json")
        raw_fixed = load_json_list(fixed_path + prefix + "_warnings.json")
        print(label, len(raw_diff), len(raw_fixed),
              len(get_list_of_uniq_jsons(raw_diff + raw_fixed)))
    print("\nUnique bugs from combined approach: ", len(all_bugs))
def get_bugs_from_warnings(warnings):
    """Return the set of project (bug) ids that received at least one warning."""
    return {warning.proj for warning in warnings}
def count_bugs_from_warnings(warnings):
    """Return how many distinct projects (bugs) appear in *warnings*."""
    distinct_projects = {warning.proj for warning in warnings}
    return len(distinct_projects)
def get_manually_inspected_warnings_bugs():
    """Print manual-inspection results (full match / partial match /
    mismatch) aggregated on the warning level and on the bug level, for
    each matching approach and then for the combined approach.
    """
    print("\nManual inspection of warnings aggregated on warnings and bugs levels")
    # The two approaches use byte-identical reporting; only the data
    # directory differs, so the duplicated halves are factored out.
    _print_inspection_for_approach("Diffs-based approach", './diffs_warnings/')
    _print_inspection_for_approach("Fixed warnings approach", './removed_warnings/')
    get_manually_inspected_warnings_bugs_combined_approach()


def _print_inspection_for_approach(title, rel_path):
    """Print per-tool warning and bug match counts for one approach.

    `rel_path` is the directory holding the <tool>_{warnings,succ,part,fail}
    JSON files for that approach.
    """
    print("\n%s:\n" % title)
    # (label, all warnings, full matches, partial matches, mismatches)
    data = []
    for label, loader, prefix in (('"Error Prone"', load_parsed_ep, "ep"),
                                  ("Infer", load_parsed_inf, "inf"),
                                  ("Spotbugs", load_parsed_sb, "sb")):
        data.append((label,
                     loader(rel_path + prefix + "_warnings.json"),
                     loader(rel_path + prefix + "_succ.json"),
                     loader(rel_path + prefix + "_part.json"),
                     loader(rel_path + prefix + "_fail.json")))
    print("Warnings:\n")
    print('Tool "Full match" "Partial match" Mismatch Total')
    for label, res, succ, part, fail in data:
        print(label, len(succ), len(part), len(fail), len(res))
    print("\nBugs:\n")
    print('Tool "Full match" "Partial match" Mismatch Total')
    for label, res, succ, part, fail in data:
        print(label, count_bugs_from_warnings(succ), count_bugs_from_warnings(part),
              count_bugs_from_warnings(fail), count_bugs_from_warnings(res))
def get_manually_inspected_warnings_bugs_combined_approach():
    """Print manual-inspection results for the combined approach: per tool
    and per category (succ/part/fail), the de-duplicated union of the
    warnings from both matching approaches, aggregated on warnings and on
    bugs.
    """
    print("\nCombined approach\n")
    diff_path = './diffs_warnings/'
    fixed_path = './removed_warnings/'
    categories = ("succ", "part", "fail")
    # combined[tool][category] -> unique warnings from both approaches.
    combined = {}
    for tool in ("ep", "inf", "sb"):
        combined[tool] = {
            cat: get_list_of_uniq_jsons(
                load_json_list(diff_path + "%s_%s.json" % (tool, cat)) +
                load_json_list(fixed_path + "%s_%s.json" % (tool, cat)))
            for cat in categories}
    labels = (('"Error Prone"', "ep"), ("Infer", "inf"), ("SpotBugs", "sb"))
    print("Warnings:\n")
    print('Tool "Full match" "Partial match" Mismatch Total')
    for label, tool in labels:
        succ, part, fail = (combined[tool][cat] for cat in categories)
        print(label, len(succ), len(part), len(fail),
              len(succ) + len(part) + len(fail))
    print("\nBugs:\n")
    print('Tool "Full match" "Partial match" Mismatch Total')
    for label, tool in labels:
        # NOTE(review): the raw warning dicts key the project under
        # ' Proj' (with a leading space) — presumably an artifact of the
        # source data; confirm against the JSON files before changing.
        b_succ, b_part, b_fail = (
            len({w[' Proj'] for w in combined[tool][cat]})
            for cat in categories)
        print(label, b_succ, b_part, b_fail, b_succ + b_part + b_fail)
def get_cand_detected_bugs_tools_sets():
    """Print candidate and truly-detected bug sets per tool and approach,
    plus the pairwise and three-way overlaps between the tools.

    A bug counts as "true" (detected) when at least one of its warnings
    was fully or partially matched during manual inspection.
    """
    print("\nCandidate and detected bugs by each tool and each approach")
    approach_paths = ('./diffs_warnings/', './removed_warnings/')
    tools = (('"Error Prone"', load_parsed_ep, "ep"),
             ("Infer", load_parsed_inf, "inf"),
             ("Spotbugs", load_parsed_sb, "sb"))
    # Per tool prefix: (diff, fixed) candidate bug sets and (diff, fixed)
    # detected (succ | part) bug sets.
    cand = {}
    detected = {}
    for label, loader, prefix in tools:
        cand[prefix] = tuple(
            get_bugs_from_warnings(loader(path + prefix + "_warnings.json"))
            for path in approach_paths)
        detected[prefix] = tuple(
            get_bugs_from_warnings(loader(path + prefix + "_succ.json")) |
            get_bugs_from_warnings(loader(path + prefix + "_part.json"))
            for path in approach_paths)
    print("\nCandidate bugs:\n")
    print("Tool Diff-based Fixed-based Both")
    for label, _, prefix in tools:
        by_diff, by_fixed = cand[prefix]
        print(label, len(by_diff), len(by_fixed), len(by_diff & by_fixed))
    print("\nTrue bugs (fully or partially flagged)\n")
    print("Tool Diff-based Fixed-based Both")
    for label, _, prefix in tools:
        by_diff, by_fixed = detected[prefix]
        print(label, len(by_diff), len(by_fixed), len(by_diff & by_fixed))
    print("\nTrue bugs found by all tools\n")
    ep_all = detected["ep"][0] | detected["ep"][1]
    inf_all = detected["inf"][0] | detected["inf"][1]
    sb_all = detected["sb"][0] | detected["sb"][1]
    print("Ep:", len(ep_all))
    print("Inf:", len(inf_all))
    print("Sb:", len(sb_all))
    print("Ep & Inf:", len(ep_all & inf_all))
    print("Ep & Sb:", len(ep_all & sb_all))
    print("Inf & Sb:", len(inf_all & sb_all))
    print("Ep & Inf & Sb:", len(ep_all & inf_all & sb_all))
def get_cand_detected_bugs_tools_table():
    """Print a LaTeX-style table with one row per candidate bug and one
    cell per tool/approach marking whether the bug was Fully matched (F),
    Partially matched (P), Mismatched (M), or not flagged at all (-).
    """
    print("\nAll candidate and detected bugs by each tool and each approach\n")
    approach_paths = ('./diffs_warnings/', './removed_warnings/')
    tools = ((load_parsed_ep, "ep"), (load_parsed_inf, "inf"),
             (load_parsed_sb, "sb"))
    all_warnings = []  # every warning record, used to collect the bug ids
    # verdicts[(tool, approach_index)] = (succ_bugs, part_bugs, fail_bugs).
    # Hoisted out of the per-bug loop: the original recomputed each of
    # these sets for every single table row.
    verdicts = {}
    for loader, prefix in tools:
        for idx, path in enumerate(approach_paths):
            all_warnings.extend(loader(path + prefix + "_warnings.json"))
            verdicts[(prefix, idx)] = tuple(
                get_bugs_from_warnings(
                    loader(path + prefix + "_%s.json" % cat))
                for cat in ("succ", "part", "fail"))
    bugs = sorted(set(w.proj for w in all_warnings))
    # NOTE(review): the header lists "Removed Warnings" first but the
    # cells are emitted diffs-based first (as in the original) — confirm
    # which order is intended.
    print("     Removed Warnings Diffs-based Combined")
    print("Tool Ep Inf SB Ep Inf SB Ep Inf SB")
    for bug in bugs:
        cells = []
        # Per-approach columns: diffs-based (idx 0) then fixed-based.
        for idx in (0, 1):
            for _, prefix in tools:
                succ, part, fail = verdicts[(prefix, idx)]
                cells.append(_match_flag(bug, succ, part, fail))
        # Combined columns: a bug's best verdict across both approaches.
        for _, prefix in tools:
            s0, p0, f0 = verdicts[(prefix, 0)]
            s1, p1, f1 = verdicts[(prefix, 1)]
            cells.append(_match_flag(bug, s0 | s1, p0 | p1, f0 | f1))
        print(bug + " " + "".join(cells) + "\\\\")
    print()


def _match_flag(bug, succ, part, fail):
    """Return the table cell for *bug*: F > P > M by priority, else '-'."""
    if bug in succ:
        return "& F "
    if bug in part:
        return "& P "
    if bug in fail:
        return "& M "
    return "& - "
def get_true_detected_bugs_by_each_tool():
    """Print the number of true bugs detected by each tool (fully or
    partially matched under either approach) and dump each tool's bug ids
    to a <tool>_detected file in the current working directory.
    """
    approach_paths = ('./diffs_warnings/', './removed_warnings/')
    print("\nTrue bugs found by each tool\n")
    for label, loader, prefix in (("Ep", load_parsed_ep, "ep"),
                                  ("Inf", load_parsed_inf, "inf"),
                                  ("Sb", load_parsed_sb, "sb")):
        # A bug counts as detected if any of its warnings was a full
        # ("succ") or partial ("part") match under either approach.
        # (The original also loaded the *_warnings.json files here but
        # never used them; those dead loads are dropped.)
        detected = set()
        for path in approach_paths:
            for cat in ("succ", "part"):
                detected |= get_bugs_from_warnings(
                    loader(path + "%s_%s.json" % (prefix, cat)))
        print("%s:" % label, len(detected))
        with open(os.path.join(os.getcwd(), prefix + "_detected"), 'w') as f:
            f.write("\n".join(detected))
    print()
''' this script has to be run from the results/ directory '''
if __name__ == '__main__':
    # Each report function below is independent of the others; uncomment
    # the one(s) to run.
    # display_min_max_avg_warnings_per_bug_total()
    # get_warnings_bugs_from_each_approach()
    # get_manually_inspected_warnings_bugs()
    # get_cand_detected_bugs_tools_sets()
    # get_cand_detected_bugs_tools_table()
    get_true_detected_bugs_by_each_tool()
|
nilq/baby-python
|
python
|
from graphene_sqlalchemy import SQLAlchemyObjectType
import graphene
from ..database import db_session
from ..models import ModelFridge
from ..lib.utils import input_to_dictionary
from importlib import import_module
from flask_jwt_extended import jwt_required
# Attributes shared by the Fridge GraphQL type and its mutation inputs.
class FridgeAttributes:
    # Ids of the ingredients associated with the fridge.
    ingredient_id = graphene.List(graphene.String)
# GraphQL object type mapped onto the ModelFridge SQLAlchemy model.
class Fridge(SQLAlchemyObjectType, FridgeAttributes):
    # The Ingredient type is imported lazily to break the circular
    # dependency between the fridge and ingredient schema modules.
    ingredients = graphene.List(lambda: import_module('.ingredient', "babylon.schemas").Ingredient)
    @graphene.resolve_only_args
    def resolve_ingredients(self):
        # Resolver for `ingredients`: expose the model's related rows.
        return [ingredient for ingredient in self.ingredients]
    class Meta:
        model = ModelFridge
        interfaces = (graphene.relay.Node,)
# Input type for CreateFridge: the shared fridge attributes, no id.
class CreateFridgeInput(graphene.InputObjectType, FridgeAttributes):
    pass
# Mutation for creating a fridge; the body is not implemented yet.
class CreateFridge(graphene.Mutation):
    # NOTE(review): the result field is named "recipe" and described as a
    # recipe although it holds a Fridge — looks copy-pasted from the
    # recipe schema; confirm before relying on the name.
    recipe = graphene.Field(lambda: Fridge, description="Recipe created by this mutation")
    class Arguments:
        input = CreateFridgeInput(required=True)
    @jwt_required
    def mutate(self, info, input):
        # TODO: Add this
        pass
# Input type for UpdateFridge: shared attributes plus the required id.
class UpdateFridgeInput(graphene.InputObjectType, FridgeAttributes):
    id = graphene.ID(required=True, description="Global ID of the recipe")
# Mutation that updates an existing fridge row by its global ID.
class UpdateFridge(graphene.Mutation):
    # NOTE(review): the result field is (mis)named "recipe" although it
    # holds a Fridge; kept for backward compatibility with existing
    # queries.
    recipe = graphene.Field(lambda: Fridge, description="Recipe created by this mutation")
    class Arguments:
        # Fixed: was CreateFridgeInput, which has no `id` field even
        # though mutate() below requires data["id"].
        input = UpdateFridgeInput(required=True)
    @jwt_required
    def mutate(self, info, input):
        # Apply the given attribute values to the fridge with input.id,
        # then return the refreshed row.
        data = input_to_dictionary(input)
        fridge = db_session.query(ModelFridge).filter_by(id=data["id"])
        fridge.update(data)
        db_session.commit()
        recipe = db_session.query(ModelFridge).filter_by(id=data["id"]).first()
        return UpdateFridge(recipe=recipe)
|
nilq/baby-python
|
python
|
def is_balanced(text):
    """Return True if every ')' in *text* closes a matching '(' and no
    '(' is left unclosed.  Characters other than parentheses are ignored.
    """
    depth = 0
    for ch in text:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth < 0:  # a ')' with nothing open can never be balanced
                return False
    # Fixed: the original checked `par % 2 != 0`, which wrongly accepted
    # inputs with an even number of unclosed '(' such as "((".
    return depth == 0


while True:
    try:
        line = input()
    except (EOFError, OSError):  # end of input
        break
    print('correct' if is_balanced(line) else 'incorrect')
|
nilq/baby-python
|
python
|
import os
import logging
import argparse
from tqdm import tqdm
import torch
# Special vocabulary token indices shared across the TRADE code base.
PAD_token = 1
SOS_token = 3
EOS_token = 2
UNK_token = 0
# NOTE(review): 'en' presumably selects the English dataset variant, and
# data_version the preprocessing stage — confirm against the data loader.
MODE = 'en'
data_version = 'init' # processed
if torch.cuda.is_available():
    USE_CUDA = True
else:
    USE_CUDA = False
MAX_LENGTH = 10
parser = argparse.ArgumentParser(description='TRADE Multi-Domain DST')
# NOTE(review): EmptyParser is installed as an attribute on the parser
# instance below, apparently so downstream code reaching for
# parser.ArgumentParser().parse_args() gets a no-op (e.g. inside
# notebooks) — confirm the intent before removing.
class EmptyParser():
    def parse_args(self):
        return
parser.ArgumentParser = EmptyParser
# Training Setting
parser.add_argument('-ds','--dataset', help='dataset', required=False, default="multiwoz")
parser.add_argument('-t','--task', help='Task Number', required=False, default="dst")
parser.add_argument('-path','--path', help='path of the file to load', required=False)
parser.add_argument('-sample','--sample', help='Number of Samples', required=False,default=None)
parser.add_argument('-patience','--patience', help='', required=False, default=6, type=int)
parser.add_argument('-es','--earlyStop', help='Early Stop Criteria, BLEU or ENTF1', required=False, default='BLEU')
parser.add_argument('-all_vocab','--all_vocab', help='', required=False, default=1, type=int)
parser.add_argument('-imbsamp','--imbalance_sampler', help='', required=False, default=0, type=int)
parser.add_argument('-data_ratio','--data_ratio', help='', required=False, default=100, type=int)
parser.add_argument('-um','--unk_mask', help='mask out input token to UNK', type=int, required=False, default=1)
parser.add_argument('-bsz','--batch', help='Batch_size', required=False, type=int)
# Testing Setting
parser.add_argument('-rundev','--run_dev_testing', help='', required=False, default=0, type=int)
parser.add_argument('-viz','--vizualization', help='vizualization', type=int, required=False, default=0)
## model predictions
parser.add_argument('-gs','--genSample', help='Generate Sample', type=int, required=False, default=0) #### change this when testing
parser.add_argument('-evalp','--evalp', help='evaluation period', required=False, default=1)
parser.add_argument('-an','--addName', help='An add name for the model folder', required=False, default='')
parser.add_argument('-eb','--eval_batch', help='Evaluation Batch_size', required=False, type=int, default=0)
# Model architecture
parser.add_argument('-gate','--use_gate', help='', required=False, default=1, type=int)
parser.add_argument('-le','--load_embedding', help='', required=False, default=0, type=int)
parser.add_argument('-femb','--fix_embedding', help='', required=False, default=0, type=int)
parser.add_argument('-paral','--parallel_decode', help='', required=False, default=0, type=int)
# Model Hyper-Parameters
parser.add_argument('-dec','--decoder', help='decoder model', required=False)
parser.add_argument('-hdd','--hidden', help='Hidden size', required=False, type=int, default=400)
parser.add_argument('-lr','--learn', help='Learning Rate', required=False, type=float)
parser.add_argument('-dr','--drop', help='Drop Out', required=False, type=float)
parser.add_argument('-lm','--limit', help='Word Limit', required=False,default=-10000)
parser.add_argument('-clip','--clip', help='gradient clipping', required=False, default=10, type=int)
parser.add_argument('-tfr','--teacher_forcing_ratio', help='teacher_forcing_ratio', type=float, required=False, default=0.5)
# parser.add_argument('-l','--layer', help='Layer Number', required=False)
# Unseen Domain Setting
parser.add_argument('-l_ewc','--lambda_ewc', help='regularization term for EWC loss', type=float, required=False, default=0.01)
parser.add_argument('-fisher_sample','--fisher_sample', help='number of sample used to approximate fisher mat', type=int, required=False, default=0)
parser.add_argument("--all_model", action="store_true")
parser.add_argument("--domain_as_task", action="store_true")
parser.add_argument('--run_except_4d', help='', required=False, default=1, type=int)
parser.add_argument("--strict_domain", action="store_true")
parser.add_argument('-exceptd','--except_domain', help='', required=False, default="", type=str)
parser.add_argument('-onlyd','--only_domain', help='', required=False, default="", type=str)
# Parse with an empty argv so every option takes its declared default;
# this makes the module safe to import without command-line arguments.
args = vars(parser.parse_known_args(args=[])[0])
if args["load_embedding"]:
    # Pretrained embeddings are 300 (word) + 100 (char) dimensional.
    args["hidden"] = 400
    print("[Warning] Using hidden size = 400 for pretrained word embedding (300 + 100)...")
if args["fix_embedding"]:
    args["addName"] += "FixEmb"
if args["except_domain"] != "":
    args["addName"] += "Except"+args["except_domain"]
if args["only_domain"] != "":
    args["addName"] += "Only"+args["only_domain"]
|
nilq/baby-python
|
python
|
# Per-step factor by which a pulse's colour is faded towards black.
DIMINISHING_BRIGHTNESS = 0.8
def run(led_wire, string_length, running_time, sleep_time, num_pulses, time_between_pulse, colour, staggered):
    """Not implemented yet: animate random colour pulses on the LED strip.

    The commented-out draft below picks a random start pixel, lights it
    with a (possibly random) colour, and spreads progressively dimmed
    copies to its neighbours until *running_time* elapses.  All
    parameters are currently unused.
    """
    pass
    ## TODO
    # start_time = time.time()
    # if colour == "random":
    #     colour_list = [red, dim_orange, dim_yellow, dim_light_green,
    #                    green, dim_turquoise, blue, dim_pink]
    #     current_colour = list(random.choice(colour_list))
    # while (time.time() - start_time) < running_time:
    #     pulse_start = random.randint(0, string_length)
    #     led_wire.setPixelColor(pulse_start, Color(current_colour[1],
    #                            current_colour[0], current_colour[2]))
    #     for i in (1, 2, 3):
    #         for c in current_colour:
    #             c * DIMINISHING_BRIGHTNESS
    #         for j in (-i, i):
    #             if pulse_start + j > 0 or pulse_start + j < string_length:
    #                 led_wire.setPixelColor(pulse_start - j, Color(current_colour[1],
    #                                        current_colour[0], current_colour[2]))
    #     for i in range(100):
    #         led_wire.setPixelColor(i, Color(current_colour[1],
    #                                current_colour[0], current_colour[2]))
    #     led_wire.show()
    #     time.sleep(sleep_time)
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""basic data structure wrapper for tensor in paddle.
like stack, array.
"""
import sys
import os
import traceback
import logging
from collections import namedtuple
import numpy as np
from paddle import fluid
from paddle.fluid import layers
from text2sql.utils import fluider
from text2sql.utils import nn_utils
# (data, pos): `data` is the backing tensor, `pos` the per-batch write index.
ArrayData = namedtuple("ArrayData", "data pos")
# Same layout; `pos` is the per-batch stack-top index (0 means empty).
StackData = namedtuple("StackData", "data pos")
class Array(object):
    """Array function simulator (batched append-only array on tensors)."""
    def __init__(self):
        """init of class """
        super(Array, self).__init__()
    @classmethod
    def push(cls, array_data, updates, in_place=True):
        """append updates to array_data.data at array_data.pos
        Args:
            array_data (ArrayData): (data, pos) pair to append to
            updates (Variable): values written at the current positions
            in_place (bool): mutate array_data's tensors when True.
                Default: True.
        Returns: ArrayData
            the input array_data itself when in_place, otherwise a new
            ArrayData holding the updated data and advanced position
        Raises: NULL
        """
        new_data = nn_utils.batch_scatter(array_data.data, array_data.pos, updates, overwrite=True, in_place=in_place)
        new_pos = fluider.increment(array_data.pos, value=1, in_place=in_place)
        if in_place:
            return array_data
        else:
            return ArrayData(new_data, new_pos)
class Stack(object):
    """Stack function simulator (batched stacks stored as tensors)."""
    def __init__(self):
        """init of class """
        super(Stack, self).__init__()
    @classmethod
    def pop(cls, stack_data, mask=True, in_place=True):
        """pop data from stack_data
        Args:
            stack_data (StackData): (data, pos) with shape ([batch_size, stack_len], [batch_size, 1])
            mask (bool): whether to zero out values popped from stacks
                that were already empty. Default: True.
            in_place (bool): mutate stack_data's tensors when True.
                Default: True.
        Returns: (Variable1, Variable2, StackData)
            Variable1: the popped values
                dtype=stack_data.data.dtype
                shape=[-1]
            Variable2: whether each batch entry's pop was valid; False
                for stacks that were already empty on entry.
                dtype=bool
                shape=[-1]
            StackData: stack_data itself when in_place, otherwise a new
                StackData with the popped slot zeroed and pos decreased
        Raises: NULL
        """
        data = stack_data.data
        pos = stack_data.pos
        # Only non-empty stacks may pop (are "valid").
        valid_pos = layers.logical_not(cls.empty(stack_data))
        new_pos_delta = layers.cast(valid_pos, dtype=pos.dtype)
        new_pos = layers.elementwise_sub(pos, new_pos_delta)
        # shape = [batch_size]
        output = nn_utils.batch_gather(data, new_pos)
        # Mask out the return values of stacks that were already empty.
        if mask:
            # shape = [batch_size, 1]
            mask_tag = layers.cast(new_pos_delta, dtype=data.dtype) if data.dtype != pos.dtype else new_pos_delta
            mask_tag = layers.squeeze(mask_tag, [1])
            output = layers.elementwise_mul(output, mask_tag)
        # Zero the popped slot in the underlying storage.
        updates = layers.zeros_like(output)
        new_data = nn_utils.batch_scatter(data, new_pos, updates, overwrite=True, in_place=in_place)
        if in_place:
            layers.assign(new_pos, pos)
            return output, valid_pos, stack_data
        else:
            return output, valid_pos, StackData(new_data, new_pos)
    @classmethod
    def push(cls, stack_data, updates, in_place=True):
        """push updates onto stack_data
        Args:
            stack_data (StackData): (data, pos) pair to push onto
            updates (Variable): values written at the current stack tops
            in_place (bool): mutate stack_data's tensors when True.
                Default: True.
        Returns: StackData
            the input stack_data itself when in_place, otherwise a new
            StackData holding the updated data and advanced position
        Raises: NULL
        """
        new_data = nn_utils.batch_scatter(stack_data.data, stack_data.pos, updates, overwrite=True, in_place=in_place)
        new_pos = fluider.increment(stack_data.pos, value=1, in_place=in_place)
        if in_place:
            return stack_data
        else:
            return StackData(new_data, new_pos)
    @classmethod
    def empty(cls, stack_data, dtype='bool'):
        """Return True if stack is empty(pos == 0)
        Args:
            stack_data (StackData): NULL
            dtype (str): result dtype. Default is bool.
        Returns: Variable
            shape=[-1], dtype=params<dtype>
        Raises: NULL
        """
        zeros = layers.zeros_like(stack_data.pos)
        output = layers.equal(stack_data.pos, zeros)
        if dtype != 'bool':
            output = layers.cast(output, dtype=dtype)
        return output
if __name__ == "__main__":
    """run some simple test cases"""
    # No self-test implemented yet.
    pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import copy
import os
import unittest
from mlcomp.utils import TemporaryDirectory
from mlcomp.report import (ReportSaver, ReportObject, Resource,
default_report_types, Report)
from .helper import to_config
class MyReportObject(ReportObject):
    """Report object used by the tests: carries a value and optional children."""

    def __init__(self, value=None, children=None, name=None, name_scope=None):
        super(MyReportObject, self).__init__(name=name, name_scope=name_scope)
        self.value = value
        # Materialize a truthy iterable into a list; keep falsy inputs
        # (None, empty list) exactly as given.
        self.children = list(children) if children else children

    def gather_children(self):
        """Return a copy of own children plus whatever the base class gathers."""
        gathered = copy.copy(self.children) if self.children else []
        gathered.extend(super(MyReportObject, self).gather_children())
        return gathered
class PersistTestCase(unittest.TestCase):
    """Tests saving and re-loading reports via ReportSaver and Report.save/load."""

    def test_ReportSaver(self):
        # Fixture: a report with a top-level resource plus a nested report
        # object that holds both a resource value and a resource child.
        report = Report(
            children=[
                Resource(data=b'123'),
                MyReportObject(
                    Resource(data=b'456'),
                    children=[
                        Resource(data=b'789')
                    ]
                )
            ]
        )
        # Register the custom type so it can be deserialized, and keep all
        # artifacts inside a throwaway directory.
        with default_report_types({'MyReport': MyReportObject}), \
                TemporaryDirectory() as tempdir:
            # test writing
            saver = ReportSaver(tempdir + '/1')
            saver.save(report)
            report2 = saver.load()
            # Round-trip must preserve structure (config) ...
            self.assertEqual(
                to_config(report),
                to_config(report2)
            )
            # ... and the raw resource payloads at every nesting level.
            self.assertEqual(report.children[0].data,
                             report2.children[0].data)
            self.assertEqual(report.children[1].value.data,
                             report2.children[1].value.data)
            self.assertEqual(report.children[1].children[0].data,
                             report2.children[1].children[0].data)

            # writing to exist dir will be refused
            with self.assertRaises(IOError):
                saver.save(report)

            # test writing to exist but empty dir
            os.makedirs(tempdir + '/2')
            saver = ReportSaver(tempdir + '/2')
            saver.save(report)

            # test force writing
            saver = ReportSaver(tempdir + '/2', overwrite=True)
            saver.save(report)

            # test the `save` and `load` method of Report
            report.save(tempdir + '/3')
            report2 = Report.load(tempdir + '/3')
            self.assertEqual(
                to_config(report),
                to_config(report2)
            )
            self.assertEqual(report.children[0].data,
                             report2.children[0].data)
            self.assertEqual(report.children[1].value.data,
                             report2.children[1].value.data)
            self.assertEqual(report.children[1].children[0].data,
                             report2.children[1].children[0].data)
            # Saving again without overwrite must fail like ReportSaver does.
            with self.assertRaises(IOError):
                report.save(tempdir + '/3')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
from operator import attrgetter
from ubuntui.utils import Padding
from ubuntui.widgets.hr import HR
from urwid import Columns, Text
from conjureup.app_config import app
from conjureup.ui.views.base import NEXT_SCREEN, BaseView
from conjureup.ui.widgets.selectors import CheckList
class AddonsView(BaseView):
    """Screen that lets the user pick optional add-on components for a spell."""

    title = 'Add-on Selection'
    subtitle = 'Choose one or more additional components to add to your spell'
    footer = ('Select zero or more add-ons using SPACE, then press ENTER '
              'or select CONTINUE to continue')

    def __init__(self, next, back):
        # `next` / `back` are navigation callbacks supplied by the controller.
        self.next = next
        self.choices = CheckList()
        # Pressing ENTER anywhere on the list advances, same as CONTINUE.
        self.extend_command_map({
            'enter': NEXT_SCREEN,
        })
        super().__init__(back)

    def build_widget(self):
        # One checkbox per add-on (sorted by name), each followed by a blank
        # line, an indented description column, and a horizontal-rule divider.
        self.choices.append(HR())
        for addon in sorted(app.addons.values(), key=attrgetter('name')):
            self.choices.append_option(label=addon.friendly_name,
                                       value=addon.name)
            self.choices.append(Padding.line_break(""))
            self.choices.append(
                Columns([
                    ('fixed', 3, Text('')),
                    Text(addon.description)
                ], dividechars=5)
            )
            self.choices.append(HR())
        if app.addons:
            # Focus the first selectable row (index 0 is the leading rule).
            self.choices.focus_position = 1
        return self.choices

    def build_buttons(self):
        return [
            self.button('CONTINUE', lambda btn: self.next())
        ]

    @property
    def selected(self):
        # Names of the currently checked add-ons.
        return self.choices.selected
|
nilq/baby-python
|
python
|
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Utils for manipulation with directories and files."""
import csv
import os
import time
from collections import defaultdict
from lib import constants
def wait_file_downloaded(
    path_to_csv,
    timeout=constants.ux.MAX_USER_WAIT_SECONDS,
    poll_frequency=constants.ux.POLL_FREQUENCY
):
  """Wait until the file at `path_to_csv` exists and its size has stabilized.

  Args:
    path_to_csv: path to the file being downloaded.
    timeout: overall deadline in seconds for both phases.
    poll_frequency: seconds between polls.

  Raises:
    IOError: if the file never appears, or never stops growing, within
      `timeout` seconds.
  """
  end_time = time.time() + timeout
  # Phase 1: wait for the file to appear at all.
  while not os.path.exists(path_to_csv):
    time.sleep(poll_frequency)
    if time.time() > end_time:
      raise IOError(
          "No such file {} or directory after waiting for {} sec.".format(
              path_to_csv, timeout))
  # Phase 2: wait until the size stops changing (download finished).
  file_size = os.path.getsize(path_to_csv)
  while True:
    # Bug fix: sleep BEFORE re-sampling. Previously both size samples were
    # taken back-to-back in the same instant, so a partially downloaded file
    # always looked "stable" and the function returned too early.
    time.sleep(poll_frequency)
    current_file_size = os.path.getsize(path_to_csv)
    if current_file_size == file_size and file_size != 0:
      break
    file_size = current_file_size
    if time.time() > end_time:
      raise IOError(
          "File {} not changed size from {} bytes during {} sec of "
          "waiting.".format(path_to_csv, current_file_size, timeout))
def get_list_objs_scopes_from_csv(path_to_csv):
  """Parse an export CSV into per-object-type lists of object scopes.

  Opens the CSV at `path_to_csv` (waiting first for the download to finish),
  which is expected to contain exported objects, and returns a mapping of
  object type -> list of object scopes (dicts with exportable field names as
  keys and the instance's values as values).
  """
  wait_file_downloaded(path_to_csv)
  with open(path_to_csv) as csv_file:
    rows = csv.reader(csv_file)
    object_type = None
    keys = []
    results = defaultdict(list)
    for columns in rows:
      # Skip fully empty separator rows between export blocks.
      if not any(columns):
        continue
      if columns[0] == "Object type":
        # new block started
        object_type = None
        keys = []
        continue
      if object_type is None:
        # Header row of the block: first cell is the type name, the rest
        # are the exportable field names used as dict keys below.
        keys = columns[1:]
        object_type = columns[0]
        continue
      # Data row. NOTE: `unicode` makes this Python 2 only.
      columns = [unicode(val) for val in columns]
      results[object_type].append(dict(zip(keys, columns[1:])))
    return results
|
nilq/baby-python
|
python
|
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Utilities to load and save onnx models """
from typing import Union, List, Tuple, Dict
import os
import copy
from collections import defaultdict
import torch
import torch.nn as nn
import torch.onnx.symbolic_caffe2
import onnx
from aimet_common.utils import AimetLogger
import aimet_torch.utils
import aimet_torch.elementwise_ops as elementwise_ops
from aimet_torch.defs import OpToIOTensors
_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
recurrent_onnx_optypes = ['LSTM', 'GRU', 'RNN']
# This is a dict that maps a PyTorch module type to the corresponding ONNX op type (as a string)
map_torch_types_to_onnx = {
nn.Conv2d: ['Conv'],
nn.Dropout: ['Dropout'],
nn.Dropout2d: ['Dropout'],
nn.BatchNorm1d: ['BatchNormalization'],
nn.BatchNorm2d: ['BatchNormalization'],
nn.ReLU: ['Relu'],
nn.ReLU6: ['Clip'],
nn.MaxPool2d: ['MaxPool'],
nn.Linear: ['Gemm', 'MatMul'],
nn.AdaptiveAvgPool2d: ['GlobalAveragePool', 'AveragePool'],
nn.AvgPool2d: ['AveragePool'],
nn.LogSoftmax: ['LogSoftmax'],
nn.RNN: ['RNN'],
nn.LSTM: ['LSTM'],
nn.GRU: ['GRU'],
nn.ConvTranspose2d: ['ConvTranspose'],
nn.Sigmoid: ['Sigmoid'],
nn.Upsample: ['Upsample'],
nn.PReLU: ['PRelu'],
nn.LeakyReLU: ['LeakyRelu'],
nn.Flatten: ['Flatten'],
elementwise_ops.Add: ['Add'],
elementwise_ops.Subtract: ['Sub'],
elementwise_ops.Multiply: ['Mul'],
elementwise_ops.Divide: ['Div'],
elementwise_ops.Concat: ['Concat']
}
# Maps pytorch functional op string names to corresponding onnx types.
pytorch_functional_name_to_onnx_dict = {
'add': 'Add',
'cat': 'Concat',
'mul': 'Mul',
'div': 'Div'
}
onnx_subgraph_op_to_pytorch_module_param_name = {
torch.nn.GroupNorm:
{
# '#depth', 'op_type': {input_index: torch module parameter name}
('#2', 'Mul'): {1: 'weight'},
('#3', 'Add'): {1: 'bias'}
}
}
class OnnxExportApiArgs:
    """
    Configuration holder for a torch.onnx.export() invocation.
    """

    def __init__(self, opset_version: int = None, input_names: List[str] = None, output_names: List[str] = None):
        """
        Refer torch documentation https://pytorch.org/docs/1.7.1/onnx.html?highlight=onnx%20export#torch.onnx.export

        :param opset_version: onnx opset version to use to export the model
        :param input_names: names to assign to the input nodes of the onnx graph, in order
        :param output_names: names to assign to the output nodes of the graph, in order
        """
        self.opset_version = opset_version
        self.input_names = input_names
        self.output_names = output_names

    @property
    def kwargs(self):
        """
        All override options as a dict, ready for **kwargs expansion in the
        torch.onnx.export call.
        """
        overrides = {
            'opset_version': self.opset_version,
            'input_names': self.input_names,
            'output_names': self.output_names,
        }
        return overrides
class OnnxSaver:
"""
Utilities to save/load onnx models
"""
    @classmethod
    def set_node_names(cls, onnx_model_path: str, pytorch_model: torch.nn.Module,
                       dummy_input: Union[torch.Tensor, Tuple],
                       onnx_export_args: OnnxExportApiArgs = OnnxExportApiArgs()):
        """
        This utility loads a given onnx model file and set the names of all the nodes (ops) to equivalent
        pytorch module names given the corresponding pytorch model.
        :param onnx_model_path: Path to the ONNX model file
        :param pytorch_model: Equivalent PyTorch model instance
        :param dummy_input: Dummy input to the model. Used to parse model graph.
        :param onnx_export_args: override options for torch.onnx.export call
        :return:
        """
        # NOTE(review): the default OnnxExportApiArgs() instance is created once
        # at class-definition time and shared across calls (mutable-default
        # pattern) — harmless while callers don't mutate it, but fragile.
        onnx_model = cls._map_onnx_nodes_to_pytorch_modules(pytorch_model, dummy_input,
                                                            onnx_model_path, onnx_export_args)
        # The renamed model overwrites the original file in place.
        onnx.save(onnx_model, onnx_model_path)
@staticmethod
def _create_map_of_tensor_to_node(onnx_model: onnx.ModelProto) -> Tuple[Dict[str, List[onnx.NodeProto]],
Dict[str, onnx.NodeProto]]:
"""
Create and return two dicts
1. Tensor -> list of nodes that consume this tensor
2. Tensor -> node that produces this tensor
:param onnx_model: ONNX model object
:return: The two dicts described above
Note: The list in #1 is ordered exactly in the order that pytorch trace reaches these nodes. This is important
because later on we will use pytorch layer hooks to match these nodes with the equivalent PyTorch modules.
The expectation is that PyTorch trace and PyTorch hooks follow the same execution sequence
"""
map_input_tensor_to_node = {}
map_output_tensor_to_node = {}
for node in onnx_model.graph.node:
for in_tensor in node.input:
if in_tensor in map_input_tensor_to_node:
map_input_tensor_to_node[in_tensor].append(node)
else:
map_input_tensor_to_node[in_tensor] = [node]
for output in node.output:
assert output not in map_output_tensor_to_node, 'More than one node produces the same tensor'
map_output_tensor_to_node[output] = node
return map_output_tensor_to_node, map_input_tensor_to_node
    @classmethod
    def _add_markers(cls, starting_module, module_name_map):
        """Recursively wrap every leaf module in a CustomMarker layer so that
        the module's boundaries can be located in the exported ONNX graph.

        :param starting_module: module whose children are (recursively) wrapped
        :param module_name_map: map of leaf module instance -> dotted module name
        """
        class CustomMarkerFunc(torch.autograd.Function):
            """
            This function helps add a custom layer when exporting to ONNX
            Note the input tensor has a trivial operation performed on it (clamp). This is needed to force
            pytorch trace to not ignore the function.
            """

            @staticmethod
            def symbolic(g, inp, identifier, start):
                """
                Magic method that helps with exporting a custom ONNX node
                """
                # Emits a 'CustomMarker' ONNX op carrying the module name and a
                # string flag marking whether this is the start or end marker.
                return g.op('CustomMarker', inp, id_s=identifier, start_s=start)

            @staticmethod
            def forward(ctx, inp, _identifier, _start):     # pylint: disable=arguments-differ
                return inp.clamp(0)

            @staticmethod
            def backward(ctx, _grad):                       # pylint: disable=arguments-differ
                # Markers exist only for export; backward is never expected.
                raise NotImplementedError()

        class CustomMarker(torch.nn.Module):
            """
            This is a temporary layer that in inserted next to a real layer to distinguish the real layer in the
            exported ONNX format
            """

            def __init__(self, module, identifier):
                super(CustomMarker, self).__init__()
                self.marked_module = module
                self.identifier = identifier

            def forward(self, *inputs):
                """
                Forward method for this CustomMarker layer
                """
                # Tag every tensor input with a start marker ...
                output = []
                for t in inputs:
                    if isinstance(t, torch.Tensor):
                        t = CustomMarkerFunc.apply(t, self.identifier, 'True')
                    output.append(t)

                x = self.marked_module(*output)
                if isinstance(x, torch.Tensor):
                    x = [x]

                # ... and every tensor output with an end marker.
                output = []
                for t in x:
                    if isinstance(t, torch.Tensor):
                        t = CustomMarkerFunc.apply(t, self.identifier, 'False')
                    output.append(t)

                # Preserve the wrapped module's return arity: single tensor
                # stays a single tensor, multiple become a tuple.
                if len(output) == 1:
                    output = output[0]
                else:
                    output = tuple(output)

                return output

        for module_name, module_ref in starting_module.named_children():
            if aimet_torch.utils.is_leaf_module(module_ref):
                # Leaf module: replace it on the parent with a wrapped version.
                marker_layer = CustomMarker(module_ref, module_name_map[module_ref])
                setattr(starting_module, module_name, marker_layer)
            # recursively call children modules
            else:
                cls._add_markers(module_ref, module_name_map)
    @classmethod
    def _map_onnx_nodes_to_pytorch_modules(cls, pt_model, dummy_input, onnx_model_path, onnx_export_args):
        """
        Exports an onnx model, maps the nodes in the onnx model to corresponding pytorch modules and names
        them accordingly
        :param pt_model: PyTorch model
        :param dummy_input: Dummy input to run a fwd pass on @pt_model
        :param onnx_model_path: Path to the saved ONNX model
        :param onnx_export_args: override options for torch.onnx.export call
        :return: ONNX model with named nodes and fixed-up parameter names
        """
        working_dir = os.path.dirname(onnx_model_path)

        # Step 1: export with CustomMarker nodes bracketing every leaf module.
        onnx_model = cls._create_onnx_model_with_markers(dummy_input, pt_model, working_dir, onnx_export_args)
        model_output_names = [output.name for output in onnx_model.graph.output]    # pylint: disable=no-member

        # Step 2: index the graph by tensor producers/consumers.
        # Parse the ONNX model and create mapping from input and output tensors to corresponding nodes
        map_output_tensor_to_node, map_input_tensor_to_node = cls._create_map_of_tensor_to_node(onnx_model)

        # Step 3: locate marker nodes and name everything between each pair.
        # Find all marker nodes
        end_marker_map, start_marker_map = cls._create_map_of_marker_nodes(onnx_model)

        # Set names
        cls._set_onnx_node_names(map_input_tensor_to_node, start_marker_map)

        # Step 4: splice the markers back out of the graph.
        # Remove markers
        for markers in start_marker_map.values():
            for marker in markers:
                cls._detach_start_marker_node(map_input_tensor_to_node, map_output_tensor_to_node, marker)

        for markers in end_marker_map.values():
            for marker in markers:
                cls._detach_end_marker_node(onnx_model, map_input_tensor_to_node, map_output_tensor_to_node, marker)

        # Make sure we rename the model outputs to original names
        cls._set_output_names(onnx_model, model_output_names, map_output_tensor_to_node, map_input_tensor_to_node)

        # Clean up the detached nodes
        onnx_model = cls._remove_detached_nodes_from_onnx_graph(onnx_model)

        cls._fix_param_names(onnx_model)
        cls._fix_initializer_names(onnx_model, pt_model)

        return onnx_model
    @classmethod
    def _fix_initializer_names(cls, onnx_model: onnx.NodeProto, pt_model: torch.nn.Module):
        """
        Parameter names in some cases do not reflect the torch param names. This method updates the onnx model
        with param names using a custom mapping (onnx_subgraph_op_to_pytorch_module_param_name).
        :param onnx_model: Onnx Model
        :param pt_model: PyTorch Model
        """
        # Parallel list of initializer names; edited in place by
        # _replace_param_name and written back onto the graph at the end.
        initializer_names = [initializer.name for initializer in onnx_model.graph.initializer]
        onnx_node_map = {(node.name, node.op_type): node for node in onnx_model.graph.node}

        for module_name, module_ref in pt_model.named_modules():
            # Only module types with a registered sub-graph mapping are handled.
            if not isinstance(module_ref, tuple(onnx_subgraph_op_to_pytorch_module_param_name.keys())):
                continue

            for (node_suffix, op_type), replace_pairs in \
                    onnx_subgraph_op_to_pytorch_module_param_name[type(module_ref)].items():
                node = onnx_node_map[module_name + node_suffix, op_type]
                cls._replace_param_name(initializer_names, module_name, node, replace_pairs)

        # Apply any renames collected above back onto the graph initializers.
        for index, initializer in enumerate(onnx_model.graph.initializer):
            if initializer_names[index] != initializer.name:
                initializer.name = initializer_names[index]
@classmethod
def _replace_param_name(cls, initializer_names: List[str], module_name: str,
node: onnx.NodeProto, replace_pairs: Dict[int, str]):
"""
helper method to replace parameter names at the corresponding input tensor index
:param initializer_names: List of model initializer names
:param module_name: PyTorch module name
:param node: Onnx node part of sub-graph that maps to the torch module
:param replace_pairs: dictionary of input tensor indices and param names
"""
for input_index, param_name in replace_pairs.items():
new_param_name = module_name + '.' + param_name
inp_tensor = node.input[input_index]
node.input.remove(inp_tensor)
node.input.insert(input_index, new_param_name)
initializer_index = initializer_names.index(inp_tensor)
initializer_names.remove(inp_tensor)
initializer_names.insert(initializer_index, new_param_name)
@classmethod
def _fix_param_names(cls, onnx_model):
"""
Parameter names have an additional level due to the name of the Marker module itself. This method removes that.
:param onnx_model: Onnx Model
"""
# Rename initializers
for ini in onnx_model.graph.initializer:
if 'marked_module' in ini.name:
name = ini.name
name = name.replace('marked_module.', '')
ini.name = name
# Change the references to initializers in each node
for node in onnx_model.graph.node:
indices_to_replace = []
for index, inp_tensor in enumerate(node.input):
if 'marked_module' in inp_tensor:
indices_to_replace.append(index)
for index in indices_to_replace:
param_name = node.input[index]
node.input.remove(param_name)
node.input.insert(index, param_name.replace('marked_module.', ''))
@classmethod
def _remove_detached_nodes_from_onnx_graph(cls, onnx_model):
"""
Given a ONNX model removes any detached nodes from the graph
:return: Updated onnx model
"""
marker_nodes = [node for node in onnx_model.graph.node if node.op_type == 'CustomMarker']
for node in marker_nodes:
onnx_model.graph.node.remove(node)
return onnx_model
    @classmethod
    def _set_onnx_node_names(cls, map_input_tensor_to_node, start_marker_map):
        """
        Set names of the ONNX nodes using the identifier fields in the marker layers
        :param map_input_tensor_to_node: Map of tensor to node consuming that tensor
        :param start_marker_map: Map of start marker nodes in the ONNX graph
        :return:
        """
        def set_name_for_downstream_nodes(starting_nodes, name, depth):
            # Walk forward from a start marker, naming every node until the
            # next marker (the module's end marker) is hit. Nodes past the
            # first get a '#<depth>' suffix so names stay unique per module.
            for node in starting_nodes:
                if node.op_type == 'CustomMarker':          # Recursion end condition
                    return

                if depth == 0:
                    node.name = name
                else:
                    node.name = name + "#" + str(depth)

                for tensor in node.output:
                    downstream_nodes = map_input_tensor_to_node.get(tensor, [])
                    set_name_for_downstream_nodes(downstream_nodes, name, depth + 1)

        for node_name, markers in start_marker_map.items():
            for marker in markers:
                out_tensor = marker.output[0]
                downstream_nodes = map_input_tensor_to_node.get(out_tensor, [])
                set_name_for_downstream_nodes(downstream_nodes, node_name, 0)
@classmethod
def _create_map_of_marker_nodes(cls, onnx_model):
"""
Creates and returns maps of start and end marker nodes
:param onnx_model: Onnx model
:return: Map of end marker node, Map of start marker nodes
"""
start_marker_map = defaultdict(list)
end_marker_map = defaultdict(list)
for node in onnx_model.graph.node:
if node.op_type == 'CustomMarker':
identifier = node.attribute[0].s.decode()
is_start_marker = node.attribute[1].s.decode()
if is_start_marker == 'True':
start_marker_map[identifier].append(node)
else:
end_marker_map[identifier].append(node)
print(start_marker_map.keys())
print(end_marker_map.keys())
return end_marker_map, start_marker_map
    @classmethod
    def _create_onnx_model_with_markers(cls, dummy_input, pt_model, working_dir, onnx_export_args) -> onnx.ModelProto:
        """
        Exports an onnx model with marker nodes inserted
        :param dummy_input: Dummy input
        :param pt_model: PyTorch model
        :param working_dir: Working directory for storing the exported onnx model
        :param onnx_export_args: override options for torch.onnx.export call
        :return: Onnx model with marker layers
        """
        # Work on a CPU deep-copy so the caller's model is left untouched.
        model = copy.deepcopy(pt_model).cpu()
        module_name_map = {}
        for module_name, module_ref in model.named_modules():
            if aimet_torch.utils.is_leaf_module(module_ref):
                module_name_map[module_ref] = module_name
        cls._add_markers(model, module_name_map)
        temp_file = os.path.join(working_dir, 'temp_onnx_model_with_markers.onnx')
        # Checker is disabled because 'CustomMarker' is not a valid ONNX op.
        torch.onnx.export(model, dummy_input, temp_file, enable_onnx_checker=False, **onnx_export_args.kwargs)
        onnx_model = onnx.load(temp_file)
        return onnx_model
    @classmethod
    def _detach_start_marker_node(cls, map_input_tensor_to_node, map_output_tensor_to_node, start_marker):
        """
        Given a ONNX start_marker node, detach it from the graph by rewiring its
        consumers to read the marker's input tensor directly.
        :param map_input_tensor_to_node: Map of tensor to node consuming the tensor (kept in sync)
        :param map_output_tensor_to_node: Map of tensor to node producing the tensor (kept in sync)
        :param start_marker: Reference to the ONNX node to detach
        """
        assert len(start_marker.input) == 1
        assert len(start_marker.output) == 1

        input_tensor = start_marker.input[0]
        output_tensor = start_marker.output[0]

        for next_node in map_input_tensor_to_node[output_tensor]:
            # Replace the marker's output with its input at the same position.
            index = list(next_node.input).index(output_tensor)
            next_node.input.remove(output_tensor)
            next_node.input.insert(index, input_tensor)
            map_input_tensor_to_node[input_tensor].append(next_node)

        map_input_tensor_to_node[input_tensor].remove(start_marker)
        del map_output_tensor_to_node[output_tensor]        # No node should produce output tensor anymore
        del map_input_tensor_to_node[output_tensor]         # No node should consume output tensor anymore

        # Leave the marker with no inputs/outputs; the empty marker is removed
        # later by _remove_detached_nodes_from_onnx_graph().
        start_marker.input.pop()
        start_marker.output.pop()
    @classmethod
    def _detach_end_marker_node(cls, onnx_model, map_input_tensor_to_node, map_output_tensor_to_node, end_marker):
        """
        Given a ONNX end_marker node, detach it from the graph
        :param onnx_model: ONNX model instance
        :param map_input_tensor_to_node: Map of tensor to node consuming the tensor (kept in sync)
        :param map_output_tensor_to_node: Map of tensor to node producing the tensor (kept in sync)
        :param end_marker: Reference to the ONNX node to detach
        """
        assert len(end_marker.input) == 1
        assert len(end_marker.output) == 1

        input_tensor = end_marker.input[0]
        output_tensor = end_marker.output[0]

        model_outputs = [output.name for output in onnx_model.graph.output]

        if output_tensor in model_outputs:
            # Degenerate case: somebody did a "return y, y" at the end of the model or something similar
            # The marker feeds a model output directly, so retarget the graph
            # output instead of rewiring consumer nodes.
            for index, model_output in enumerate(model_outputs):
                if model_output == output_tensor:
                    onnx_model.graph.output[index].name = input_tensor
        else:
            for next_node in map_input_tensor_to_node[output_tensor]:
                # Replace the marker's output with its input at the same slot.
                index = list(next_node.input).index(output_tensor)
                next_node.input.remove(output_tensor)
                next_node.input.insert(index, input_tensor)
                map_input_tensor_to_node[input_tensor].append(next_node)

        map_input_tensor_to_node[input_tensor].remove(end_marker)
        if not map_input_tensor_to_node[input_tensor]:
            del map_input_tensor_to_node[input_tensor]

        del map_output_tensor_to_node[output_tensor]        # No node should produce output tensor anymore
        if output_tensor in map_input_tensor_to_node:
            del map_input_tensor_to_node[output_tensor]     # No node should consume output tensor anymore

        # Empty marker is removed later by _remove_detached_nodes_from_onnx_graph().
        end_marker.input.pop()
        end_marker.output.pop()
    @staticmethod
    def _set_output_names(onnx_model: onnx.ModelProto, desired_model_output_names,
                          map_output_tensor_to_node, map_input_tensor_to_node):
        """
        Rename the graph outputs back to the names the un-marked model
        originally produced, rewiring producers/consumers accordingly.

        :param onnx_model: ONNX model, edited in place
        :param desired_model_output_names: original output names, in output order
        :param map_output_tensor_to_node: tensor -> producing node (kept in sync)
        :param map_input_tensor_to_node: tensor -> consuming nodes (kept in sync)
        """
        # Iterate over the model outputs
        for index, output in enumerate(onnx_model.graph.output):
            new_tensor = desired_model_output_names[index]
            old_tensor = output.name

            if old_tensor == new_tensor:        # Nothing to do
                continue

            if old_tensor in map_input_tensor_to_node:
                # Degenerate case: model output tensor also is an intermediate tensor that inputs into other nodes
                for consumer in map_input_tensor_to_node[old_tensor]:
                    index = list(consumer.input).index(old_tensor)
                    consumer.input.remove(old_tensor)
                    consumer.input.insert(index, new_tensor)
                    if new_tensor not in map_input_tensor_to_node:
                        map_input_tensor_to_node[new_tensor] = []
                    map_input_tensor_to_node[new_tensor].append(consumer)

                del map_input_tensor_to_node[old_tensor]    # No node should consume old tensor anymore

            # NOTE(review): `index` is re-bound below, shadowing the enumerate
            # index above — works today because it is reassigned before use,
            # but fragile if the loop body is reordered.
            producer = map_output_tensor_to_node[old_tensor]
            output.name = new_tensor
            index = list(producer.output).index(old_tensor)
            producer.output.remove(old_tensor)
            producer.output.insert(index, new_tensor)

            del map_output_tensor_to_node[old_tensor]
            map_output_tensor_to_node[new_tensor] = producer

            # If there were duplicate outputs with the same name, they need to be updated
            for output_node in onnx_model.graph.output:
                # Ugly double loop - cannot avoid
                if output_node.name == old_tensor:
                    output_node.name = new_tensor
    @staticmethod
    def _collate_io_tensors_for_multi_layer_recurrent_nodes(onnx_model: onnx.NodeProto,
                                                            node_to_io_tensor_name_map: Dict):
        """
        Given an ONNX model and corresponding node-tensor map, consolidate multi-layer recurrent nodes
        into single map entries: the root node's entry becomes a list of the
        per-layer io-tensor entries, in layer order.
        """
        recurrent_nodes = []
        for node in onnx_model.graph.node:
            if node.op_type in recurrent_onnx_optypes:
                recurrent_nodes.append(node.name)

        # Collection of recurrent nodes that includes only the first layer nodes
        # (deeper layers carry a '#<depth>' suffix from _set_onnx_node_names).
        recurrent_root_nodes = [node for node in recurrent_nodes if '#' not in node]

        for root_node in recurrent_root_nodes:
            # Find nodes corresponding to all other layers of the recurrent node
            other_layers = [node for node in recurrent_nodes if node.startswith(root_node + '#')]

            # sort the other layers using the depth value following the '#'
            other_layers = sorted(other_layers, key=lambda layer: int(layer.split('#')[1]))

            # Append the io_tensors for all layers for the current root recurrent node, in order
            io_tensor_list = [node_to_io_tensor_name_map[root_node]]
            for layer in other_layers:
                io_tensor_list.append(node_to_io_tensor_name_map[layer])
                # Per-layer entries are folded into the root's single entry.
                del node_to_io_tensor_name_map[layer]
            node_to_io_tensor_name_map[root_node] = io_tensor_list
    @classmethod
    def get_onnx_node_to_io_tensor_names_map(cls, onnx_model: onnx.NodeProto) -> \
            (Dict[str, Union[OpToIOTensors, List[OpToIOTensors]]], set):
        """
        Given an ONNX model, gets the inputs and output tensor names for each node in the model.
        if multiple onnx nodes have the same name then the nodes are provided as a list of inputs and output tensor
        names, one for each onnx node.
        :param onnx_model: The ONNX model instance
        :return: Dictionary of ONNX node name and corresponding input and output tensor names and a set with all valid
        param names in model
        """
        node_to_io_tensor_name_map = {}
        valid_param_set = set()
        initializer_names = {initializer.name for initializer in onnx_model.graph.initializer}

        for node in onnx_model.graph.node:
            if node.name:
                onnx_node_io_tensors = OpToIOTensors(list(node.input), list(node.output))
                # First occurrence wins, except recurrent ops whose layers are
                # collated into a list below.
                if (node.name not in node_to_io_tensor_name_map) or node.op_type in recurrent_onnx_optypes:
                    node_to_io_tensor_name_map[node.name] = onnx_node_io_tensors

            # update valid params list
            for input_tensor in list(node.input):
                if input_tensor in initializer_names:
                    valid_param_set.add(input_tensor)

        cls._collate_io_tensors_for_multi_layer_recurrent_nodes(onnx_model, node_to_io_tensor_name_map)

        return node_to_io_tensor_name_map, valid_param_set
|
nilq/baby-python
|
python
|
import os
import boto3
from boto3.dynamodb.conditions import Key
dynamodb = boto3.resource('dynamodb')
def put_atcoder_info(line_message_info):
    """Persist per-user AtCoder stats into the DynamoDB info table.

    Only entries with a non-zero new-AC count are written, because a zero
    delta would not change the stored record.
    """
    table = dynamodb.Table(os.environ["ATCODER_INFO_TABLE"])
    for atcoder_id, info in line_message_info.items():
        #dictのnew_acが0以外のものをputする(0だとDBの内容が書き変わらないため)
        if info["new_ac"] == 0:
            continue
        table.put_item(
            Item={
                "atcoder_id": atcoder_id,
                "accepted_count": info["accepted_count"],
                "new_ac": info["new_ac"],
                "rated_point_sum": info["rated_point_sum"],
            }
        )
        print("succeed put DB " + str(atcoder_id))
|
nilq/baby-python
|
python
|
import struct
from logger import Logger
class ClRequestBase:
    """Base class for client request messages.

    Subclasses must override parse(), execute() and response(); broadcast()
    is optional and defaults to "nothing to broadcast".
    """

    def __init__(self, payload):
        # Wire format: 1-byte message id followed by a little-endian 16-bit
        # unique id; the full payload is kept for subclass parsing.
        self.message_id = struct.unpack("<B", payload[0:1])[0]
        self.message_unique_id = struct.unpack("<H", payload[1:3])[0]
        self.payload = payload
        Logger.log("processing: " + type(self).__name__)
        self.parse()

    def parse(self):
        """Parse self.payload into message fields. OVERRIDE THIS TO IMPLEMENT."""
        # Fix: a bare `raise` outside an except block raised a confusing
        # "RuntimeError: No active exception to re-raise". NotImplementedError
        # states the contract explicitly and is itself a RuntimeError subclass,
        # so existing `except RuntimeError` handlers keep working.
        raise NotImplementedError

    def execute(self, client_id):
        """Apply the request for the given client. OVERRIDE THIS TO IMPLEMENT."""
        raise NotImplementedError

    def response(self):
        """Build the response message. OVERRIDE THIS TO IMPLEMENT."""
        raise NotImplementedError

    def broadcast(self):
        """Optional broadcast message; None means nothing to broadcast."""
        return None
|
nilq/baby-python
|
python
|
from torch import nn
from torch.nn import functional as F
class QNet(nn.Module):
    """DQN convolutional Q-network (as described in the DQN paper).

    Maps a stack of `input_channel` 84x84 frames to one Q-value per action.
    """

    def __init__(self, input_channel=4, num_actions=18):
        super(QNet, self).__init__()
        # Three conv stages shrink an 84x84 input down to 7x7x64 features.
        self.conv1 = nn.Conv2d(input_channel, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(7 * 7 * 64, 512)
        self.fc2 = nn.Linear(512, num_actions)

    def forward(self, x):
        """Return Q-values of shape (batch, num_actions)."""
        features = x
        for conv in (self.conv1, self.conv2, self.conv3):
            features = F.relu(conv(features))
        hidden = F.relu(self.fc1(features.flatten(start_dim=1)))
        return self.fc2(hidden)
class DuelQNet(nn.Module):
    """Dueling Q-network for Atari inputs.

    Shares a conv torso, then splits into a scalar state-value stream V and a
    per-action advantage stream A, combined as Q = V + (A - mean(A)).
    """

    def __init__(self, input_channel=4, num_actions=18):
        super(DuelQNet, self).__init__()
        self.conv1 = nn.Conv2d(input_channel, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(7 * 7 * 64, 512)
        self.fc_a = nn.Linear(512, num_actions)
        self.fc_v = nn.Linear(512, 1)

    def forward(self, x):
        """Return Q-values of shape (batch, num_actions)."""
        features = x
        for conv in (self.conv1, self.conv2, self.conv3):
            features = F.relu(conv(features))
        hidden = F.relu(self.fc1(features.flatten(start_dim=1)))
        state_value = self.fc_v(hidden)
        advantages = self.fc_a(hidden)
        # Subtracting the per-sample mean advantage keeps V and A identifiable.
        return state_value + (advantages - advantages.mean(dim=1, keepdim=True))
|
nilq/baby-python
|
python
|
from typing import List, Callable
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
from nlpretext.social.preprocess import (
remove_html_tags, remove_mentions, remove_emoji, remove_hashtag)
from nlpretext.basic.preprocess import normalize_whitespace, remove_eol_characters, fix_bad_unicode
class Preprocessor():
    """Composable text-preprocessing pipeline built on sklearn transformers."""

    def __init__(
            self):
        """
        Initialize an empty preprocessor; operations are queued via pipe().
        """
        self.__operations = []
        self.pipeline = None

    def pipe(self, operation: Callable, args: dict = None):
        """
        Queue an operation (with its keyword arguments) for the pipeline.
        Parameters
        ----------
        operation : callable
            text preprocessing function
        args : dict of arguments
        """
        self.__operations.append({'operation': operation, 'args': args})

    @staticmethod
    def build_pipeline(operation_list: List[dict]) -> Pipeline:
        """
        Build an sklearn pipeline from a list of operation descriptors.
        Parameters
        ----------
        operation_list : iterable
            list of operations of preprocessing
        Returns
        -------
        sklearn.pipeline.Pipeline
        """
        steps = []
        for descriptor in operation_list:
            func = descriptor['operation']
            steps.append((func.__name__, FunctionTransformer(func, kw_args=descriptor['args'])))
        return Pipeline(steps=steps)

    def run(self, text: str) -> str:
        """
        Apply the queued operations (or a default social-text chain when none
        were queued) to the given text.
        Parameters
        ----------
        text : string
            text to preprocess
        Returns
        -------
        string
        """
        operations = self.__operations
        if not operations:
            # Default chain: strip social-media artifacts, then normalize.
            default_chain = (
                remove_html_tags, remove_mentions, remove_emoji, remove_hashtag,
                remove_eol_characters, fix_bad_unicode, normalize_whitespace
            )
            operations = [{'operation': func, 'args': None} for func in default_chain]
        self.pipeline = self.build_pipeline(operations)
        return self.pipeline.fit_transform(text)
|
nilq/baby-python
|
python
|
# produce list of genes in GRCm38
# NOTE(review): this is a Python 2 script (print statements below); it joins
# refGene Ensembl ids against a Biomart export to attach symbols/Entrez ids.
import pandas as pd
import json
# open refgene (UCSC refGene table for mouse; no header in the file)
refGeneFilename = '../gtex/gtex_mouse/refGene_mouse.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
# open biomart export (maps Ensembl Gene ID -> gene symbol and Entrez id)
biomartFilename = 'mart_export_mus_2.txt'
biomart = pd.read_csv(biomartFilename, sep="\t")
seen = {}        # Ensembl ids already emitted (refGene has one row per transcript)
results = []
total_len = len(refGene)   # NOTE(review): computed but never used
for index, row in refGene.iterrows():
    ensembl_id = row['name']
    if ensembl_id not in seen:
        # linear scan of the biomart table per gene; O(genes * biomart rows)
        the_loc = biomart.loc[biomart['Gene ID'] == ensembl_id]
        gene_name = list(the_loc['Associated Gene Name'])[0]
        entrez = list(the_loc['EntrezGene ID'])[0]
        if pd.isnull(entrez):
            entrez = ''
            print ensembl_id, gene_name, 'has no entrez'
        else:
            entrez = str(int(entrez))
        if pd.isnull(gene_name):
            gene_name = ''
            print ensembl_id, 'has no gene_name'
        results.append({
            'name': gene_name,
            'ensembl_id': ensembl_id,
            'entrez_id': entrez,
            'description': ""
        })
        seen[ensembl_id] = True
# NOTE(review): the identical list is written to both files -- presumably one
# was meant to be a processed variant; confirm before removing either write.
with open('genes_list_GRCm38_processed.txt', 'w') as output:
    json.dump(results, output)
with open('genes_list_GRCm38.txt', 'w') as output:
    json.dump(results, output)
|
nilq/baby-python
|
python
|
import numpy as np
def run_env(
    env,
    episode_count=100,
    n_samples_per_omega=100,
    policy=None,
    grid=False,
    omega_min=0,
    omega_max=10,
    bins=100,
    total_n_samples=500,
):
    """
    Simple runner, takes an environment, run a random policy and records everything.

    Two collection modes:
    - grid=False: roll the environment out for ``bins`` values of the omega
      parameter (``n_samples_per_omega`` steps each), recording
      (state, force) -> delta-state pairs, then subsample ``total_n_samples``
      rows (with replacement) at the bottom of the function.
    - grid=True: draw ``total_n_samples`` uniform (position, velocity, omega)
      triples, take a single fixed action from each, and return immediately.

    Returns
    -------
    inputs, targets
        inputs rows are (state..., force); targets rows are next_state - state.

    NOTE(review): ``episode_count`` is accepted but never used in this body.
    """
    if not grid:
        inputs = np.zeros((bins * n_samples_per_omega, env.observation_space_size + 1))
        targets = np.zeros((bins * n_samples_per_omega, env.observation_space_size))
        i = 0
        for omega in np.linspace(omega_min, omega_max, bins):
            env.set_params(omega)
            state = np.array(env.reset())
            for t in range(n_samples_per_omega):
                # sample one action from policy network or at random
                if policy is None:
                    action = env.action_space.sample()
                else:
                    action = policy.pi(state[np.newaxis, :], log=False)
                # map the discrete action index onto {-1, 1} (2 actions)
                # or {-1, 0, 1} (3 actions) before scaling by omega
                if env.n_actions == 2:
                    action = action * 2 - 1
                else:
                    action = action - 1
                force = action * omega
                # save the current state action in the training set
                inputs[i, :] = np.hstack((state, force))
                # observe the next state, reward etc
                newState, reward, done, info = env.step(action)
                newState = np.array(newState)
                # compute the delta to be added in the target
                delta = np.matrix((newState - state))
                targets[i, :] = delta
                state = newState
                i += 1
                if done:
                    state = np.array(env.reset())
        env.close()
    else:
        low_pos, low_vel = env.low
        high_pos, high_vel = env.high
        # actions = np.random.randint(low=0, high=env.n_actions, size=timestep)# [1.0/env.n_actions]*env.n_actions)
        # here the 'action' column of the inputs is a continuous force/omega
        actions = np.random.uniform(
            low=-omega_max, high=omega_max, size=total_n_samples
        )
        positions = np.random.uniform(low=low_pos, high=high_pos, size=total_n_samples)
        velocities = np.random.uniform(low=low_vel, high=high_vel, size=total_n_samples)
        start_states = list(zip(positions, velocities, actions))
        inputs = np.matrix(start_states)
        next_states = list()
        # a single fixed action index; the sampled omega (a) carries the
        # variation -- presumably intentional, TODO confirm
        action = 1
        for state in start_states:
            x, x_dot, a = state
            env.set_params(a)
            newState, reward, done, info = env._step(action, (x, x_dot))
            # append delta state
            next_states.append(newState - np.array([x, x_dot]))
        targets = np.matrix(next_states)
        # the grid branch returns here, so the subsampling code below only
        # runs for the rollout (grid=False) branch
        return inputs, targets
    # # modify actions:
    # if env.n_actions == 3:
    #     inputs[:,2] = inputs[:,2] - 1
    # else:
    #     inputs[:,2] = 2*inputs[:,2] - 1
    # subsampling (with replacement) down to total_n_samples rows
    ind = np.arange(0, np.shape(inputs)[0])
    selected_ind = np.random.choice(ind, size=total_n_samples, replace=True)
    inputs = inputs[selected_ind, :]
    targets = targets[selected_ind, :]
    print("Collected data points: ", inputs.shape)
    return inputs, targets
|
nilq/baby-python
|
python
|
# OKEx v3 REST API constants: HTTP header names, verbs, and endpoint paths.
# Paths are joined onto API_URL by the request layer.
# http header
API_URL = 'https://www.okex.com'
CONTENT_TYPE = 'Content-Type'
OK_ACCESS_KEY = 'OK-ACCESS-KEY'
OK_ACCESS_SIGN = 'OK-ACCESS-SIGN'
OK_ACCESS_TIMESTAMP = 'OK-ACCESS-TIMESTAMP'
OK_ACCESS_PASSPHRASE = 'OK-ACCESS-PASSPHRASE'
ACEEPT = 'Accept'  # NOTE(review): name is a typo of ACCEPT; kept for callers
COOKIE = 'Cookie'
LOCALE = 'Locale='
APPLICATION_JSON = 'application/json'
GET = "GET"
POST = "POST"
DELETE = "DELETE"
SERVER_TIMESTAMP_URL = '/api/general/v3/time'
# account
CURRENCIES_INFO = '/api/account/v3/currencies'
WALLET_INFO = '/api/account/v3/wallet'
CURRENCY_INFO = '/api/account/v3/wallet/'
COIN_TRANSFER = '/api/account/v3/transfer'
COIN_WITHDRAW = '/api/account/v3/withdrawal'
COIN_FEE = '/api/account/v3/withdrawal/fee'
COINS_WITHDRAW_RECORD = '/api/account/v3/withdrawal/history'
COIN_WITHDRAW_RECORD = '/api/account/v3/withdrawal/history/'
LEDGER_RECORD = '/api/account/v3/ledger'
TOP_UP_ADDRESS = '/api/account/v3/deposit/address'
COIN_TOP_UP_RECORDS = '/api/account/v3/deposit/history'
COIN_TOP_UP_RECORD = '/api/account/v3/deposit/history/'
# spot
SPOT_ACCOUNT_INFO = '/api/spot/v3/accounts'
SPOT_COIN_ACCOUNT_INFO = '/api/spot/v3/accounts/'
SPOT_LEDGER_RECORD = '/api/spot/v3/accounts/'
SPOT_ORDER = '/api/spot/v3/orders'
SPOT_ORDERS = '/api/spot/v3/batch_orders'
SPOT_REVOKE_ORDER = '/api/spot/v3/cancel_orders/'
SPOT_REVOKE_ORDERS = '/api/spot/v3/cancel_batch_orders/'
SPOT_ORDERS_LIST = '/api/spot/v3/orders'
SPOT_ORDERS_PENDING = '/api/spot/v3/orders_pending'
SPOT_ORDER_INFO = '/api/spot/v3/orders/'
SPOT_FILLS = '/api/spot/v3/fills'
SPOT_COIN_INFO = '/api/spot/v3/instruments'
SPOT_DEPTH = '/api/spot/v3/instruments/'
SPOT_TICKER = '/api/spot/v3/instruments/ticker'
SPOT_SPECIFIC_TICKER = '/api/spot/v3/instruments/'
SPOT_DEAL = '/api/spot/v3/instruments/'
SPOT_KLINE = '/api/spot/v3/instruments/'
# lever (margin trading)
LEVER_ACCOUNT = '/api/margin/v3/accounts'
LEVER_COIN_ACCOUNT = '/api/margin/v3/accounts/'
LEVER_LEDGER_RECORD = '/api/margin/v3/accounts/'
LEVER_CONFIG = '/api/margin/v3/accounts/availability'
LEVER_SPECIFIC_CONFIG = '/api/margin/v3/accounts/'
LEVER_BORROW_RECORD = '/api/margin/v3/accounts/'
LEVER_SPECIFIC_BORROW_RECORD = '/api/margin/v3/accounts/'
LEVER_BORROW_COIN = '/api/margin/v3/accounts/borrow'
LEVER_REPAYMENT_COIN = '/api/margin/v3/accounts/repayment'
LEVER_ORDER = '/api/margin/v3/orders'
LEVER_ORDERS = '/api/margin/v3/batch_orders'
LEVER_REVOKE_ORDER = '/api/margin/v3/cancel_orders/'
LEVER_REVOKE_ORDERS = '/api/margin/v3/cancel_batch_orders'
LEVER_ORDER_LIST = '/api/margin/v3/orders'
LEVEL_ORDERS_PENDING = '/api/margin/v3/orders_pending'
LEVER_ORDER_INFO = '/api/margin/v3/orders/'
LEVER_FILLS = '/api/margin/v3/fills'
# NOTE(review): FF duplicates the futures orders path under an opaque name;
# confirm it is unused before removing.
FF = '/api/futures/v3/orders'
# future
FUTURE_POSITION = '/api/futures/v3/position'
FUTURE_SPECIFIC_POSITION = '/api/futures/v3/'
FUTURE_ACCOUNTS = '/api/futures/v3/accounts'
FUTURE_COIN_ACCOUNT = '/api/futures/v3/accounts/'
FUTURE_GET_LEVERAGE = '/api/futures/v3/accounts/'
FUTURE_SET_LEVERAGE = '/api/futures/v3/accounts/'
FUTURE_LEDGER = '/api/futures/v3/accounts/'
FUTURE_DELETE_POSITION = '/api/futures/v3/close_all_orders'
FUTURE_ORDER = '/api/futures/v3/order'
FUTURE_ORDERS = '/api/futures/v3/orders'
FUTURE_REVOKE_ORDER = '/api/futures/v3/cancel_order/'
FUTURE_REVOKE_ORDERS = '/api/futures/v3/cancel_batch_orders/'
FUTURE_ORDERS_LIST = '/api/futures/v3/orders'
FUTURE_ORDER_INFO = '/api/futures/v3/orders/'
FUTURE_FILLS = '/api/futures/v3/fills'
FUTURE_PRODUCTS_INFO = '/api/futures/v3/instruments'
FUTURE_DEPTH = '/api/futures/v3/instruments/'
FUTURE_TICKER = '/api/futures/v3/instruments/ticker'
FUTURE_SPECIFIC_TICKER = '/api/futures/v3/instruments/'
FUTURE_TRADES = '/api/futures/v3/instruments/'
FUTURE_KLINE = '/api/futures/v3/instruments/'
FUTURE_INDEX = '/api/futures/v3/instruments/'
FUTURE_RATE = '/api/futures/v3/rate'
FUTURE_ESTIMAT_PRICE = '/api/futures/v3/instruments/'
FUTURE_HOLDS = '/api/futures/v3/instruments/'
FUTURE_LIMIT = '/api/futures/v3/instruments/'
FUTURE_LIQUIDATION = '/api/futures/v3/instruments/'
FUTURE_MARK = '/api/futures/v3/instruments/'
HOLD_AMOUNT = '/api/futures/v3/accounts/'
#CURRENCY_LIST = '/api/futures/v3/instruments/currencies/'
# ETT
ETT_ACCOUNTS = '/api/ett/v3/accounts'
ETT_ACCOUNT = '/api/ett/v3/accounts/'
ETT_LEDGER = '/api/ett/v3/accounts/'
ETT_ORDER = '/api/ett/v3/orders'
ETT_REVOKE = '/api/ett/v3/orders/'
ETT_ORDER_LIST = '/api/ett/v3/orders'
ETT_SPECIFIC_ORDER = '/api/ett/v3/orders/'
ETT_CONSTITUENTS = '/api/ett/v3/constituents/'
ETT_DEFINE = '/api/ett/v3/define-price/'
# SWAP (perpetual swap)
SWAP_POSITIONS = '/api/swap/v3/position'
SWAP_POSITION = '/api/swap/v3/'
SWAP_ACCOUNTS = '/api/swap/v3/accounts'
SWAP_ACCOUNT = '/api/swap/v3/'
SWAP_ORDER = '/api/swap/v3/order'
SWAP_ORDERS = '/api/swap/v3/orders'
SWAP_CANCEL_ORDER = '/api/swap/v3/cancel_order/'
SWAP_CANCEL_ORDERS = '/api/swap/v3/cancel_batch_orders/'
SWAP_FILLS = '/api/swap/v3/fills'
SWAP_INSTRUMENTS = '/api/swap/v3/instruments'
SWAP_TICKETS = '/api/swap/v3/instruments/ticker'
SWAP_RATE = '/api/swap/v3/rate'
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from .views import classify
from .views import delete_conversation
app_name = "classification"
# Routes for the classification app: run a classification, delete a stored
# conversation. Both resolve under the "classification" namespace.
urlpatterns = [
    url(r"^classify/$", classify, name="classify"),
    url(r"^delete/$", delete_conversation, name="delete"),
]
|
nilq/baby-python
|
python
|
from django.urls import path
from user.views import CreateUserView
from user.views import CreateTokenView
from user.views import ManageUserView
app_name = 'user'

# User-management endpoints: registration, token auth, own-profile access.
urlpatterns = [
    path('create/', CreateUserView.as_view(), name='create'),
    path('token/', CreateTokenView.as_view(), name='token'),
    path('me/', ManageUserView.as_view(), name='me'),
]
|
nilq/baby-python
|
python
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.template import loader
from django.core.mail import send_mail
class ContactForm(forms.Form):
    """Site contact form that e-mails its content to a list of recipients."""
    subject = forms.CharField(label=_('Subject'), max_length=100)
    message = forms.CharField(label=_('Message'), widget=forms.Textarea)
    sender = forms.EmailField(label=_('Email address'), help_text=_('A valid e-mail address, please.'))
    cc_myself = forms.BooleanField(label=_('Send a copy to yourself?'), required=False)

    def save(self, recipients=None):
        """Render the contact e-mail template and send it to *recipients*.

        Raises ValueError if the form data did not validate.
        """
        if self.errors:
            raise ValueError("The ContactForm could not be saved because "
                             "the data didn't validate.")
        # BUG FIX: the original used a mutable default (recipients=[]) and
        # appended to it, so cc'd senders accumulated across calls and
        # callers' lists were mutated in place. Copy instead.
        recipients = list(recipients) if recipients is not None else []
        if self.cleaned_data['cc_myself']:
            recipients.append(self.cleaned_data['sender'])
        body = loader.render_to_string('contact/contact_email.txt',
            dict(self.cleaned_data, recipients=recipients))
        send_mail(self.cleaned_data['subject'], body,
            self.cleaned_data['sender'], recipients)
|
nilq/baby-python
|
python
|
from flask import Flask
from flask_cors import CORS
from .config import config
# Flask application instance shared across the package.
app = Flask(__name__)
app.secret_key = config['app']['secret_key']
# Build the SQLAlchemy connection string from the [db] section of the config.
dburi = 'postgresql://{username}:{password}@{host}:{port}/{database}'.format(**config['db'])
app.config.update(
    {
        'SQLALCHEMY_DATABASE_URI': dburi,
        'SQLALCHEMY_TRACK_MODIFICATIONS': False,
    }
)
# Allow cross-origin requests with cookies/credentials (frontend served from
# a different origin -- presumably; confirm against deployment).
CORS(app, supports_credentials=True)
|
nilq/baby-python
|
python
|
import tkinter as tk
import math
# Global expression buffer shown in the calculator display.
showString=''
def output(string):
    """Append *string* to the running expression and refresh the display."""
    global showString
    showString=str(showString)+str(string)
    displayLabel['text']=showString
def calculate():
    """Evaluate the accumulated expression and show the result.

    NOTE(review): eval() on the accumulated string is acceptable only because
    input comes exclusively from the calculator buttons, not free-form text.
    """
    global showString
    showString=str(eval(showString))
    displayLabel['text']=showString
def pi():
    """Reset the entry and insert the constant pi."""
    global showString
    showString=''
    output(math.pi)
def finde():
    """Reset the entry and insert Euler's number e."""
    global showString
    showString=''
    output(math.e)
def findFact():
    """Replace the current entry with its factorial (entry must parse as int)."""
    global showString
    showString=math.factorial(int(showString))
    displayLabel['text']=showString
def delete():
    """Remove the last character of the entry (backspace)."""
    global showString
    showString=showString[:-1]
    displayLabel['text']=showString
def clearAll():
    """Clear the whole entry (CE button)."""
    global showString
    showString=''
    displayLabel['text']=showString
def findLog():
    """Replace the current entry with its natural logarithm."""
    global showString
    showString=math.log(float(showString))
    displayLabel['text']=showString
def sq(power):
    """Raise the current entry to *power* (2 for square, 0.5 for square root)."""
    global showString
    showString=math.pow(float(showString),power)
    displayLabel['text']=showString
def _show_rounded(value):
    # Shared tail for every trig callback: round to 4 decimal places,
    # store the result as the new entry string, and refresh the display.
    global showString
    showString = str(round(value, 4))
    displayLabel['text'] = showString

def sin():
    """Replace the current entry with sin(entry), rounded to 4 places."""
    _show_rounded(math.sin(float(showString)))

def cos():
    """Replace the current entry with cos(entry), rounded to 4 places."""
    _show_rounded(math.cos(float(showString)))

def tan():
    """Replace the current entry with tan(entry), rounded to 4 places."""
    _show_rounded(math.tan(float(showString)))

def cosec():
    """Replace the current entry with 1/sin(entry); the sine and its
    reciprocal are both rounded to 4 places, as in the original."""
    _show_rounded(1 / round(math.sin(float(showString)), 4))

def sec():
    """Replace the current entry with 1/cos(entry) (rounded as in cosec)."""
    _show_rounded(1 / round(math.cos(float(showString)), 4))

def cot():
    """Replace the current entry with 1/tan(entry) (rounded as in cosec)."""
    _show_rounded(1 / round(math.tan(float(showString)), 4))
# ---- window construction and widget layout ----
root=tk.Tk()
root.title('Calculator')
root.minsize(width=250,height=250)
display=tk.Frame(root)
displayLabel=tk.Label(display,text=0,font='Verdana 15')
# BUG FIX: display was previously managed with grid(row=0) here AND pack()
# at the bottom of the file; mixing grid and pack inside the same master
# raises a TclError in Tk. pack() (used below) is kept as the sole manager.
displayLabel.pack()
buttons=tk.Frame(root)
nineButton=tk.Button(buttons,text=9,width=4,background='DarkOrange1',command=lambda:output('9'))
nineButton.grid(row=3,column=3)
eightButton=tk.Button(buttons,text=8,width=4,background='DarkOrange1',command=lambda:output('8'))
eightButton.grid(row=3,column=2)
sevenButton=tk.Button(buttons,text=7,width=4,background='DarkOrange1',command=lambda:output('7'))
sevenButton.grid(row=3,column=1)
sixButton=tk.Button(buttons,text=6,width=4,background='DarkOrange1',command=lambda:output('6'))
sixButton.grid(row=4,column=3)
fiveButton=tk.Button(buttons,text=5,width=4,background='DarkOrange1',command=lambda:output('5'))
fiveButton.grid(row=4,column=2)
fourButton=tk.Button(buttons,text=4,width=4,background='DarkOrange1',command=lambda:output('4'))
fourButton.grid(row=4,column=1)
threeButton=tk.Button(buttons,text=3,width=4,background='DarkOrange1',command=lambda:output('3'))
threeButton.grid(row=5,column=3)
twoButton=tk.Button(buttons,text=2,width=4,background='DarkOrange1',command=lambda:output('2'))
twoButton.grid(row=5,column=2)
oneButton=tk.Button(buttons,text=1,width=4,background='DarkOrange1',command=lambda:output('1'))
oneButton.grid(row=5,column=1)
zeroButton=tk.Button(buttons,text=0,width=4,background='DarkOrange1',command=lambda:output('0'))
zeroButton.grid(row=6,column=2)
delButton=tk.Button(buttons,text='del',width=4,background='DarkOrange1',command=lambda:delete())
delButton.grid(row=6,column=3)
clearAllButton=tk.Button(buttons,text='CE',width=4,background='DarkOrange1',command=lambda:clearAll())
clearAllButton.grid(row=6,column=1)
addButton=tk.Button(buttons,text='+',width=4,background='DarkOrange1',command=lambda:output('+'))
addButton.grid(row=2,column=4)
subButton=tk.Button(buttons,text='-',width=4,background='DarkOrange1',command=lambda:output('-'))
subButton.grid(row=3,column=4)
divButton=tk.Button(buttons,text='/',width=4,background='DarkOrange1',command=lambda:output('/'))
divButton.grid(row=4,column=4)
multiplyButton=tk.Button(buttons,text='*',width=4,background='DarkOrange1',command=lambda:output('*'))
multiplyButton.grid(row=5,column=4)
remButton=tk.Button(buttons,text='%',width=4,background='DarkOrange1',command=lambda:output('%'))
remButton.grid(row=2,column=1)
calculateButton=tk.Button(buttons,text='=',width=4,background='DarkOrange1',command=lambda:calculate())
calculateButton.grid(row=6,column=4)
squareButton=tk.Button(buttons,text='x^2',width=4,background='DarkOrange1',command=lambda:sq(2))
squareButton.grid(row=2,column=3)
squareRootButton=tk.Button(buttons,text='x^1/2',width=4,background='DarkOrange1',command=lambda:sq(0.5))
squareRootButton.grid(row=2,column=2)
piButton=tk.Button(buttons,text='pi',width=4,background='DarkOrange1',command=lambda:pi())
piButton.grid(row=1,column=3)
eButton=tk.Button(buttons,text='e',width=4,background='DarkOrange1',command=lambda:finde())
eButton.grid(row=1,column=2)
factButton=tk.Button(buttons,text='n!',width=4,background='DarkOrange1',command=lambda:findFact())
factButton.grid(row=1,column=1)
logButton=tk.Button(buttons,text='ln',width=4,background='DarkOrange1',command=lambda:findLog())
logButton.grid(row=1,column=4)
sinButton=tk.Button(buttons,text='sin',width=4,background='DarkOrange1',command=sin)
sinButton.grid(row=0,column=1)
cosButton=tk.Button(buttons,text='cos',width=4,background='DarkOrange1',command=cos)
cosButton.grid(row=0,column=2)
tanButton=tk.Button(buttons,text='tan',width=4,background='DarkOrange1',command=tan)
tanButton.grid(row=0,column=3)
# BUG FIX: cosec/sec/cot were gridded at row 1, columns 1-3, directly on top
# of the n!, e and pi buttons, hiding them. They now occupy their own row 7.
cosecButton=tk.Button(buttons,text='cosec',width=4,background='DarkOrange1',command=cosec)
cosecButton.grid(row=7,column=1)
secButton=tk.Button(buttons,text='sec',width=4,background='DarkOrange1',command=sec)
secButton.grid(row=7,column=2)
cotButton=tk.Button(buttons,text='cot',width=4,background='DarkOrange1',command=cot)
cotButton.grid(row=7,column=3)
powerButton=tk.Button(buttons,text='^(**)',width=4,background='DarkOrange1',command=lambda:output('**'))
powerButton.grid(row=0,column=4)
display.pack(anchor='center',pady=5)
buttons.pack()
root.mainloop()
|
nilq/baby-python
|
python
|
class Solution:
    def findSmallestSetOfVertices(self, n, edges):
        """Return every vertex with no incoming edge, in ascending order.

        In a DAG these are exactly the minimal set of starting vertices
        from which all nodes are reachable.
        """
        has_incoming = [False] * n
        for _src, dst in edges:
            has_incoming[dst] = True
        return [v for v, hit in enumerate(has_incoming) if not hit]
|
nilq/baby-python
|
python
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the ``switch_ipdb`` table (IPDB model)."""

    def forwards(self, orm):
        """Apply the migration: create the IPDB table."""
        # Adding model 'IPDB'
        db.create_table('switch_ipdb', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
            ('mac', self.gf('django.db.models.fields.CharField')(max_length=18)),
            ('first_seen', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('last_seen', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('archived', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('updated', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('switch', ['IPDB'])

    def backwards(self, orm):
        """Reverse the migration: drop the IPDB table."""
        # Deleting model 'IPDB'
        db.delete_table('switch_ipdb')

    # Frozen ORM snapshot of the 'switch' app at the time of this migration
    # (auto-generated by South; do not edit by hand).
    models = {
        'switch.ipdb': {
            'Meta': {'object_name': 'IPDB'},
            'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'mac': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
            'updated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'switch.macdb': {
            'Meta': {'object_name': 'MacDB'},
            'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mac': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
            'port': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Port']"}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'switch.office': {
            'Meta': {'object_name': 'Office'},
            'office_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'})
        },
        'switch.physicalport': {
            'Meta': {'ordering': "['name']", 'object_name': 'PhysicalPort'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'port': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Port']"}),
            'remarks': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'room': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Room']", 'null': 'True', 'blank': 'True'})
        },
        'switch.port': {
            'Meta': {'ordering': "['switch__switch_name', 'number']", 'unique_together': "(('switch', 'number'),)", 'object_name': 'Port'},
            'default_vlan': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'mode': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'switch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Switch']"}),
            'vlan': ('django.db.models.fields.IntegerField', [], {'default': '3', 'null': 'True', 'blank': 'True'})
        },
        'switch.portlog': {
            'Meta': {'ordering': "['-created']", 'object_name': 'PortLog'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'port': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Port']"}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'switch.room': {
            'Meta': {'object_name': 'Room'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'office': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Office']"})
        },
        'switch.roomlog': {
            'Meta': {'ordering': "['-created']", 'object_name': 'RoomLog'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'room': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Room']"}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'switch.switch': {
            'Meta': {'object_name': 'Switch'},
            'office': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['switch.Office']"}),
            'only_snmp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'snmp_community': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'switch_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'primary_key': 'True'}),
            'switch_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'switch_password': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'switch_username': ('django.db.models.fields.CharField', [], {'default': "'admin'", 'max_length': '50'})
        }
    }

    complete_apps = ['switch']
|
nilq/baby-python
|
python
|
import numpy as np, pandas as pd, os
from .. import *
from ..utils.utils_traj import unwrap_traj_and_center
from ..measure.compute_msd_simple import msd_fft
#simple routine for computation of individual mean squared displacements
# Programmer: Tim Tyree
# 7.20.2021
def compute_individual_mean_squared_displacement(df,dft1,dft2,DT,pid,pid_col,t_col='t',max_lagtime=None,**kwargs):
    '''
    Compute the mean squared displacement curve for one particle.

    Crops the particle's trajectory to the time window [dft1[pid], dft2[pid]]
    and evaluates the MSD with the FFT-based estimator msd_fft on the 'x','y'
    columns (assumes unwrapped coordinates -- TODO confirm).

    NOTE(review): max_lagtime and **kwargs are accepted but unused; they are
    leftovers from the deprecated trackpy implementation kept (commented)
    below the return.

    Example Usage:
    lagt_values,msd_values=compute_individual_mean_squared_displacement(df,dft1,dft2,DT,pid,pid_col='pid_explicit')
    '''
    #look up the cropped time window [t1, t2] for this particle
    t1=float(dft1[dft1.index==pid].values[0])
    t2=float(dft2[dft2.index==pid].values[0])
    # print(f"computing msd for particle {pid} from times {t1} to {t2} ms...")
    #extract the cropped trajectory as a DataFrame instance
    boo = df[pid_col]==pid
    boo&= df[t_col]>=t1
    boo&= df[t_col]<=t2
    dff=df[boo]
    #extract r from dff
    my_r=dff[['x','y']].values
    msd_values=msd_fft(my_r)
    #lag times in the same units as DT (one sample per frame)
    lagt_values=DT*(np.arange(msd_values.shape[0]))
    return lagt_values,msd_values
    #trackpy is scaling is unavoidably deprecated (unreachable reference code)
    # fps = 1./DT #output time units is in same time units as inputs
    # if max_lagtime is None:
    #     max_lagtime=dff.index.values.shape[0]
    # # Input units are pixels and frames. Output units are microns and seconds.
    # df_out=trackpy.motion.msd(
    #     traj=dff,
    #     mpp=1.,#does nothing
    #     fps=fps,
    #     max_lagtime=max_lagtime,
    #     detail=False
    # )
    # lagt_values,msd_values=df_out[['lagt','msd']].values.T
    # return lagt_values,msd_values
def comp_each_mean_squared_displacement_particle(df,input_fn,DT,ds,width,
                            minimum_lifetime,crop_start_by,crop_end_by,
                            pid_col,t_col,max_lagtime=None,use_unwrap=False,
                            **kwargs):
    """Per-particle squared displacement versus time since birth.

    For every particle whose cropped lifetime is at least minimum_lifetime,
    emits one row per observation with columns 'pid', 'lagt' (time since the
    particle's first observation) and 'msd' (x**2 + y**2 scaled by
    (ds/width)**2; assumes trajectories are unwrapped and centered at the
    origin -- TODO confirm against upstream preprocessing).

    NOTE(review): input_fn, DT and max_lagtime are accepted but unused here
    (kept for signature parity with the sibling functions).

    Fixes vs the original:
    - uses t_col consistently (the loop previously hard-coded column 't')
    - no longer mutates the caller's DataFrame
    - removed the unused event_id_lst local
    - returns an empty frame instead of raising when no particle qualifies
    """
    DS = ds / width
    height = width  # square domain; only used by unwrap_traj_and_center
    df = df.copy()  # new columns are added below; don't touch the caller's frame
    if use_unwrap:
        # unwrap trajectories across the periodic boundaries
        pid_lst = sorted(set(df[pid_col].values))
        df = pd.concat([
            unwrap_traj_and_center(df[df[pid_col] == pid],
                                   width=width,
                                   height=height,
                                   **kwargs) for pid in pid_lst
        ])
    # first/last observation time per particle, cropped at both ends
    dft = df.groupby(pid_col)[t_col].describe()
    dft1 = dft['min'] + crop_start_by
    dft2 = dft['max'] - crop_end_by
    # keep only particles whose cropped lifetime is long enough
    dflifetime_considered = dft2 - dft1
    pid_values_to_consider = dflifetime_considered[
        dflifetime_considered >= minimum_lifetime].index.values
    # lag time = time since each considered particle's first observation;
    # rows of excluded particles keep NaN and are dropped below
    df['lagt'] = np.nan
    for pid in pid_values_to_consider:
        boo = df[pid_col] == pid
        tbirth = df.loc[boo, t_col].min()
        df.loc[boo, 'lagt'] = df.loc[boo, t_col] - tbirth
    # squared displacement from the origin, in physical units
    df['msd'] = (df['x']**2 + df['y']**2) * DS**2
    df['pid'] = df[pid_col]
    df_msd = df[['pid', 'lagt', 'msd']].copy()
    df_msd.dropna(inplace=True)
    return df_msd
def comp_each_mean_squared_displacement(df,input_fn,DT,ds,width,
                            minimum_lifetime,crop_start_by,crop_end_by,
                            pid_col,t_col,max_lagtime=None,use_unwrap=False,
                            **kwargs):
    '''
    output is in length units of ds/width and duration units of DT.
    computes the mean squared displacements for each trajectory in df
    (one FFT-based MSD curve per particle whose cropped lifetime is at
    least minimum_lifetime). input_fn is unused here (kept for signature
    parity with compute_each_mean_squared_displacement).

    BUG FIX: the 'pid' column of the output was previously filled with
    zeros (np.zeros_like), losing particle identity; it now carries the
    actual particle id for each row.

    Example Usage:
    input_fn=''
    df_msd=comp_each_mean_squared_displacement(df,input_fn,DT,ds,width,
                                minimum_lifetime,crop_start_by,crop_end_by,
                                pid_col,t_col,max_lagtime=None,
                                **kwargs)
    '''
    height=width
    DS=ds/width
    if use_unwrap:
        #unwrap trajectories across the periodic boundaries
        pid_lst = sorted(set(df[pid_col].values))
        df = pd.concat([unwrap_traj_and_center(df[df[pid_col]==pid], width=width, height=height, **kwargs) for pid in pid_lst])
    #compute t0 and tf for each particle
    dft=df.groupby(pid_col)[t_col].describe()
    dft0=dft['min']
    dftf=dft['max']
    #crop the start/end of every trajectory
    dft1=dft0+crop_start_by
    dft2=dftf-crop_end_by
    #keep particles with cropped lifetime >= minimum_lifetime
    dflifetime_considered=dft2-dft1
    pid_values_to_consider=dflifetime_considered[dflifetime_considered>=minimum_lifetime].index.values
    lagt_out_lst=[];msd_out_lst=[];pid_out_lst=[]
    for pid in pid_values_to_consider:
        #compute the MSD curve for this particle
        lagt_values,msd_values=compute_individual_mean_squared_displacement(df,dft1,dft2,DT,pid,pid_col=pid_col)
        # one pid entry per lag time (was np.zeros_like -- all-zero ids)
        pid_values=np.full(msd_values.shape[0], pid)
        #record output
        pid_out_lst.extend(pid_values) #identify the particle of each row
        lagt_out_lst.extend(lagt_values) #duration units of DT
        msd_out_lst.extend(DS**2*msd_values) #units of ds
    df_out=pd.DataFrame({'pid':pid_out_lst,'lagt':lagt_out_lst,'msd':msd_out_lst})
    return df_out
def compute_each_mean_squared_displacement(input_fn,DT,ds,width,
                            minimum_lifetime,crop_start_by,crop_end_by,
                            pid_col,t_col,max_lagtime=None,use_unwrap=False,use_particle_avg=True,
                            **kwargs):
    '''
    Load the trajectory csv at input_fn (columns x, y, frames and pid_col;
    may have periodic boundary conditions on a square domain) and dispatch
    to the per-particle or per-trajectory MSD computation.
    '''
    df = pd.read_csv(input_fn)
    if use_particle_avg:
        return comp_each_mean_squared_displacement_particle(
            df, input_fn, DT, ds, width,
            minimum_lifetime, crop_start_by, crop_end_by,
            pid_col=pid_col, t_col=t_col, use_unwrap=use_unwrap,  # max_lagtime unused there
            **kwargs)
    return comp_each_mean_squared_displacement(
        df, input_fn, DT, ds, width,
        minimum_lifetime, crop_start_by, crop_end_by,
        pid_col=pid_col, t_col=t_col, max_lagtime=max_lagtime, use_unwrap=use_unwrap,
        **kwargs)
def routine_compute_imsd(input_fn,save_folder=None,use_unwrap=False,**kwargs):
    """Compute per-trajectory MSD curves for input_fn and save them as csv.

    When save_folder is None the output goes into a sibling 'msd' folder of
    the input's directory. Returns the absolute path of the saved file.
    """
    #compute results
    df_msd=compute_each_mean_squared_displacement(input_fn,use_unwrap=use_unwrap,**kwargs)
    #resolve the output folder
    folder_name=os.path.dirname(input_fn)
    dirname = folder_name.split('/')[-1]
    if save_folder is None:
        save_folder = folder_name.replace(dirname,'msd')
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    # BUG FIX: write via an explicit path instead of os.chdir -- the original
    # changed the process-wide working directory as a side effect. The
    # returned absolute path is the same as before.
    output_fn=os.path.basename(input_fn).replace('.csv','_emsd.csv')
    output_fpath=os.path.join(save_folder, output_fn)
    df_msd.to_csv(output_fpath, index=False)
    return os.path.abspath(output_fpath)
|
nilq/baby-python
|
python
|
import pickle
import requests
import streamlit as st
from requests.auth import HTTPBasicAuth
import os
import json
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
API_URL = "https://ml-api-phn4j6lmdq-uc.a.run.app"
BASIC_AUTH_USERNAME = os.getenv("BASIC_AUTH_USERNAME")
BASIC_AUTH_PASSWORD = os.getenv("BASIC_AUTH_PASSWORD")
# defining the function which will make the prediction using the data which the user inputs
def prediction(Gender, Married, ApplicantIncome, LoanAmount, Credit_History):
    """Encode the form fields, call the scoring API and map the score to a label.

    Returns 'Approved' or 'Rejected'.
    """
    # Pre-processing user input: encode categoricals as 0/1 as the model expects
    if Gender == "Masculino":
        Gender = 0
    else:
        Gender = 1
    if Married == "Solteiro":
        Married = 0
    else:
        Married = 1
    if Credit_History == "Com dividas":
        Credit_History = 0
    else:
        Credit_History = 1
    # model presumably trained on loan amount in thousands -- TODO confirm
    LoanAmount = LoanAmount / 1000
    data = {
        "Gender": Gender,
        "Married": Married,
        "ApplicantIncome": ApplicantIncome,
        "LoanAmount": LoanAmount,
        "Credit_History": Credit_History
    }
    response = requests.post(API_URL + '/score', json=data, auth=HTTPBasicAuth(BASIC_AUTH_USERNAME, BASIC_AUTH_PASSWORD))
    # BUG FIX: the original compared the Response object itself to 0, which is
    # never true, so every request was reported 'Approved'. Parse the body.
    score = response.json()  # assumes the API returns a bare 0/1 score -- TODO confirm schema
    if score == 0:
        pred = 'Rejected'
    else:
        pred = 'Approved'
    return pred
# this is the main function in which we define our webpage
def main():
    """Draw the loan-consultation form and show the decision on submit."""
    # Front-end banner markup rendered at the top of the page.
    html_temp = """
    <div style ="background-color:yellow;padding:13px">
    <h1 style ="color:black;text-align:center;">Consulta de emprestimo</h1>
    </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)

    # One input widget per model feature.
    gender = st.selectbox('Gender', ("Masculino", "Feminino"))
    marital_status = st.selectbox('Marital Status', ("Solteiro", "Casado"))
    monthly_income = st.number_input("Renda Mensal")
    loan_value = st.number_input("Valor Emprestimo")
    credit_history = st.selectbox('Credit_History', ("Com dividas", "Sem dividas"))

    result = ""
    # Only call the remote scoring service when the user clicks the button.
    if st.button("Predict"):
        result = prediction(gender, marital_status, monthly_income, loan_value, credit_history)
        st.success('Your loan is {}'.format(result))


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
test functions for datacubes with raster labels
...
"""
import os
import shutil
import numpy as np
import rasterio
import json
from pathlib import Path
from icecube.bin.config import CubeConfig
from icecube.bin.labels_cube.labels_cube_generator import LabelsDatacubeGenerator
from icecube.bin.generate_cube import IceyeProcessGenerateCube
from icecube.bin.datacube import Datacube
from icecube.bin.datacube_variables import NAME_LABELS_BAND
# Absolute paths to the test resources bundled next to this test module.
res_abspath = os.path.join(Path(__file__).parent, "resources")
grd_raster_dir = os.path.join(res_abspath, "grd_stack")       # input GRD rasters
cube_save_dir = os.path.join(res_abspath, "temp")             # scratch dir for cubes
masks_raster_dir = os.path.join(res_abspath, "masks")         # PNG segmentation masks
masks_labels_fpath = os.path.join(res_abspath, "labels/dummy_mask_labels.json")
cube_save_fpath = os.path.join(cube_save_dir, "temp.nc")      # cube written by tests
def create_run_time_masks_labels():
    """
    Generated masks contain absoluate file paths according to the local system.
    For github actions, dynamic generation must take place
    """
    from icecube.bin.labels_cube.create_json_labels import CreateLabels

    # Same three fake products as before; the .tif name is the product key,
    # the matching .png is its segmentation mask on disk.
    stem = "ICEYE_GRD_54549_20210427T215124_hollow_10x10pixels_fake_{}"
    raster_mask_dict = {
        stem.format(i) + ".tif": os.path.join(masks_raster_dir, stem.format(i) + ".png")
        for i in range(3)
    }

    labels_builder = CreateLabels("raster")
    for product_name, mask_fpath in raster_mask_dict.items():
        segmentation = labels_builder.create_instance_segmentation(mask_fpath)
        labels_builder.populate_labels(product_name, segmentation)
    labels_builder.write_labels_to_json(masks_labels_fpath)
def delete_temporary_cube_dir(cube_dir):
    """Recursively remove the scratch cube directory and its contents."""
    shutil.rmtree(cube_dir)
def create_temporary_cube_dir(cube_dir):
    """Create an empty cube_dir, wiping any previous contents first."""
    if os.path.exists(cube_dir):
        shutil.rmtree(cube_dir)
    os.mkdir(cube_dir)
def read_json(json_fpath):
    """Load and return the parsed JSON content of `json_fpath`."""
    with open(json_fpath) as handle:
        return json.load(handle)
def confirm_masks_values_in_cube(cube_save_fpath):
    """Assert that mask rasters stored in the cube match the PNG masks on disk.

    Products named "None" are dummy fillers and must contain only zeros
    (integer bands) or only NaNs (float bands).

    :param cube_save_fpath: path of a saved datacube (.nc) to verify.
    """
    dc = Datacube().read_cube(cube_save_fpath)
    assert dc.xrdataset[NAME_LABELS_BAND].attrs  # make sure attributes exist
    all_products = dc.get_all_products(dc.get_xrarray(NAME_LABELS_BAND))
    valid_products = [
        product_name for product_name in all_products if product_name != "None"
    ]
    for product_file in valid_products:
        mask_local_fpath = os.path.join(
            masks_raster_dir, product_file.replace(".tif", ".png")
        )
        mask_values = rasterio.open(mask_local_fpath).read(1)
        cube_mask_values = dc.get_product_values(
            product_file, dc.get_xrarray(NAME_LABELS_BAND)
        )
        # Bug fix: the previous check compared `a.all() == b.all()`, which
        # only compares two scalars and passes for unequal arrays. Compare
        # element-wise instead.
        assert np.array_equal(
            mask_values, cube_mask_values
        ), "mask values should be same in cube as well"
    # Dummy ("None") products must hold only fill values.
    invalid_indices = [
        i for i, product_name in enumerate(all_products) if product_name == "None"
    ]
    for i in invalid_indices:
        dummy_values = dc.get_index_values(i, dc.get_xrarray(NAME_LABELS_BAND))
        if str(dummy_values.dtype) == "float32" or str(dummy_values.dtype) == "float64":
            # Float bands are filled with NaN everywhere.
            assert np.isnan(dummy_values).all()
        else:
            # Integer bands are filled with zeros everywhere.
            assert not np.asarray(dummy_values).any()
def get_product_labels_from_json(product_file, json_labels):
    """Return the "labels" entry matching `product_file`.

    :param product_file: raster file name to look up.
    :param json_labels: iterable of dicts carrying "product_file" and "labels".
    :raises ValueError: when no record matches `product_file`.
    """
    # Idiom fix: the index from enumerate() was never used.
    for raster_label in json_labels:
        if raster_label["product_file"] == product_file:
            return raster_label["labels"]
    raise ValueError(f"Could not find the labels for product_file: {product_file}")
def test_grd_masks_labels_default_config():
    """Build a raster-labels cube with the default configuration and verify
    the mask values survive a save/load round trip."""
    create_run_time_masks_labels()
    cube_config = CubeConfig()
    cube_config.load_config(None)
    cube = LabelsDatacubeGenerator.build(
        cube_config, "GRD", masks_labels_fpath, grd_raster_dir
    )
    # Persist the cube, check its contents, then clean up the scratch dir.
    create_temporary_cube_dir(cube_save_dir)
    cube.to_file(cube_save_fpath)
    confirm_masks_values_in_cube(cube_save_fpath)
    delete_temporary_cube_dir(cube_save_dir)
def test_grd_masks_labels_custom_config():
    """Build a labels cube with config_use_case4 and check that the custom
    configuration restricts the cube to a single product."""
    cube_config_fpath = os.path.join(res_abspath, "json_config/config_use_case4.json")
    create_run_time_masks_labels()
    cube_config = CubeConfig()
    cube_config.load_config(cube_config_fpath)
    cube = LabelsDatacubeGenerator.build(
        cube_config, "GRD", masks_labels_fpath, grd_raster_dir
    )
    create_temporary_cube_dir(cube_save_dir)
    cube.to_file(cube_save_fpath)
    dc = Datacube().read_cube(cube_save_fpath)
    products = dc.get_all_products(dc.get_xrarray(NAME_LABELS_BAND))
    assert len(products) == 1, "Cannot have more than one images with given configuration"
    confirm_masks_values_in_cube(cube_save_fpath)
    delete_temporary_cube_dir(cube_save_dir)
def test_grd_masks_labels_custom_config2():
    """
    Given custom configuration of user, create segmentation masks.
    Uses config_use_case5, which must expand the cube to 6 products.
    """
    cube_config_fpath = os.path.join(res_abspath, "json_config/config_use_case5.json")
    create_run_time_masks_labels()
    product_type = "GRD"
    cc = CubeConfig()
    cc.load_config(cube_config_fpath)
    labels_datacube = LabelsDatacubeGenerator.build(
        cc, product_type, masks_labels_fpath, grd_raster_dir
    )
    create_temporary_cube_dir(cube_save_dir)
    labels_datacube.to_file(cube_save_fpath)
    dc = Datacube().read_cube(cube_save_fpath)
    # Bug fix: the assertion message claimed "3 products" while the check
    # requires 6; the message now matches the asserted count.
    assert (
        len(dc.get_all_products(dc.get_xrarray(NAME_LABELS_BAND))) == 6
    ), "Must contain 6 products with given configuration"
    confirm_masks_values_in_cube(cube_save_fpath)
    delete_temporary_cube_dir(cube_save_dir)
def test_cube_generator_with_raster_labels():
    """End-to-end cube generation from the sample raster labels."""
    config_fpath = os.path.join(res_abspath, "json_config/config_use_case4.json")
    IceyeProcessGenerateCube.create_cube(
        grd_raster_dir, config_fpath, masks_labels_fpath
    )
def test_mask_dtype():
    """The labels band of the generated cube must be stored as uint8."""
    config_fpath = os.path.join(res_abspath, "json_config/config_use_case4.json")
    cube_config = CubeConfig()
    cube_config.load_config(config_fpath)
    cube = LabelsDatacubeGenerator.build(
        cube_config, "GRD", masks_labels_fpath, grd_raster_dir
    )
    assert str(cube.xrdataset[NAME_LABELS_BAND].dtype) == "uint8"
|
nilq/baby-python
|
python
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
class LSCnew(object):
    """Block preconditioner (PETSc 'python' PC protocol: create/setUp/apply)
    for a saddle-point system on the mixed space W = W0 (u) x W1 (p).

    A is the full operator, L a pressure-space operator solved directly,
    and Bd/dBt factors used to assemble P = Bd * F * dBt in setUp().
    NOTE(review): named after the least-squares-commutator approach --
    confirm against the accompanying paper/derivation.
    """
    def __init__(self, W,A,L,Bd,dBt):
        self.W = W
        self.A = A
        self.L = L
        self.Bd = Bd
        self.dBt = dBt
        # Index sets selecting the u-block and the p-block unknowns of W.
        self.u_is = PETSc.IS().createGeneral(range(W.sub(0).dim()))
        self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim()))
    def create(self, pc):
        """Create the two inner KSP solvers (direct LU via the options DB)."""
        self.diag = None
        # Direct solver for L ('preonly' + LU factorization).
        kspLAMG = PETSc.KSP()
        kspLAMG.create(comm=PETSc.COMM_WORLD)
        pc = kspLAMG.getPC()  # NOTE: rebinds the 'pc' argument to the inner PC
        kspLAMG.setType('preonly')
        pc.setType('lu')
        # pc.setFactorSolverPackage("pastix")
        # Factorization options picked up by setFromOptions() below.
        OptDB = PETSc.Options()
        OptDB['pc_factor_shift_amount'] = .1
        OptDB['pc_factor_mat_ordering_type'] = 'rcm'
        OptDB['pc_factor_mat_solver_package'] = 'mumps'
        # kspLAMG.setFromOptions()
        # kspLAMG.max_it = 1
        kspLAMG.setFromOptions()
        self.kspLAMG = kspLAMG
        # print kspLAMG.view()
        # L is solved with the constant vector removed from its range
        # (singular pure-Neumann-type operator).
        nsp = PETSc.NullSpace().create(constant=True)
        kspLAMG.setNullSpace(nsp)
        # Direct solver for the u-block F (operators attached in setUp()).
        kspNLAMG = PETSc.KSP()
        kspNLAMG.create(comm=PETSc.COMM_WORLD)
        pc = kspNLAMG.getPC()
        kspNLAMG.setType('preonly')
        pc.setType('lu')
        # pc.setFactorSolverPackage("pastix")
        # kspNLAMG.max_it = 1
        kspNLAMG.setFromOptions()
        kspLAMG.setFromOptions()
        self.kspNLAMG = kspNLAMG
        # print kspNLAMG.view()
    def setUp(self, pc):
        """Extract sub-blocks of A and assemble the Schur approximation P."""
        # self.P = P
        F = self.A.getSubMatrix(self.u_is,self.u_is)
        self.Bt = self.A.getSubMatrix(self.u_is,self.p_is)
        self.kspNLAMG.setOperators(F)
        # Schur-complement approximation applied between the two L solves.
        self.P = self.Bd*F*self.dBt
        self.kspLAMG.setOperators(self.L)
    def apply(self, pc, x, y):
        """Apply y = M^{-1} x: p-part via L^{-1} P L^{-1} (-x_p), then
        u-part via F^{-1} (x_u - Bt * y_p); results are concatenated."""
        # print 1000
        # self.kspLAMG.setOperators(self.B)
        x1 = x.getSubVector(self.u_is)
        y1 = x1.duplicate()
        x2 = x.getSubVector(self.p_is)
        y2 = x2.duplicate()
        yOut = y2.duplicate()
        # self.kspNLAMG.solve(x1, y1)
        self.kspLAMG.solve(-x2, y2)
        yy2 = self.P*y2
        self.kspLAMG.solve(yy2, yOut)
        # Couple the pressure result back into the velocity right-hand side.
        x1 = x1 - self.Bt*yOut
        self.kspNLAMG.solve(x1, y1)
        # Pack [u; p] into the full-space output vector.
        y.array = (np.concatenate([y1.array, yOut.array]))
class LSC(object):
    """Scaled least-squares-commutator-style preconditioner (PETSc 'python'
    PC protocol). P supplies the mass-matrix-like scaling whose diagonal is
    used to scale F and Bt in setUp().

    NOTE(review): this module contains a Python 2 print statement (see
    setUp) -- it is Py2-only as written.
    """
    def __init__(self, W,A,P,L):
        self.W = W
        self.A = A
        self.P = P
        self.L = L
        # Index sets for the u-block and p-block unknowns of W.
        self.u_is = PETSc.IS().createGeneral(range(W.sub(0).dim()))
        self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim()))
    def create(self, pc):
        """Create the two inner direct (LU) solvers."""
        self.diag = None
        # Solver later attached to self.PP (see setUp).
        kspLAMG = PETSc.KSP()
        kspLAMG.create(comm=PETSc.COMM_WORLD)
        pc = kspLAMG.getPC()  # NOTE: rebinds the 'pc' argument
        kspLAMG.setType('preonly')
        pc.setType('lu')
        # pc.setFactorSolverPackage("pastix")
        kspLAMG.max_it = 1
        kspLAMG.setFromOptions()
        self.kspLAMG = kspLAMG
        # print kspLAMG.view()
        # Solver for the u-block F.
        kspNLAMG = PETSc.KSP()
        kspNLAMG.create(comm=PETSc.COMM_WORLD)
        pc = kspNLAMG.getPC()
        kspNLAMG.setType('preonly')
        pc.setType('lu')
        # pc.setFactorSolverPackage("pastix")
        # kspNLAMG.max_it = 1
        kspNLAMG.setFromOptions()
        self.kspNLAMG = kspNLAMG
        # print kspNLAMG.view()
    def setUp(self, pc):
        """Extract sub-blocks, diagonally scale F and Bt by the diagonal of
        Q = P_uu, and assemble the operators used in apply()."""
        # self.P = P
        F = self.A.getSubMatrix(self.u_is,self.u_is)
        self.Bt = self.A.getSubMatrix(self.u_is,self.p_is)
        B = self.A.getSubMatrix(self.p_is,self.u_is)
        Q = self.P.getSubMatrix(self.u_is,self.u_is)
        self.kspNLAMG.setOperators(F)
        Pdiag = Q.getVecLeft()
        Q.getDiagonal(Pdiag)
        ones,invDiag = Q.getVecs()
        ones.set(1)
        invDiag.pointwiseDivide(ones,Pdiag)
        # NOTE(review): this rebinding discards the reciprocal just computed
        # and scales by the diagonal itself -- looks unintended, verify.
        invDiag = Pdiag
        print F.view()
        F.diagonalScale(invDiag)
        self.Bt.diagonalScale(invDiag)
        # self.PP =PETSc.Mat().create()
        # self.PP.setSizes([self.W.sub(0).dim(),self.W.sub(0).dim()])
        # FBt =PETSc.Mat().create()
        # FBt.setSizes([self.W.sub(1).dim(),self.W.sub(0).dim()])
        # self.P1 =PETSc.Mat().create()
        # self.P.setSizes([self.W.sub(0).dim(),self.W.sub(0).dim()])
        # NOTE(review): the matMult results below are immediately overwritten
        # by the operator products two lines later -- dead computations.
        FBt = F.matMult(self.Bt)
        self.P1 = B.matMult(self.Bt)
        self.PP = B.matMult(self.Bt)
        self.P1 = B*F*self.Bt
        self.PP = B*self.Bt
        self.kspLAMG.setOperators(self.PP)
    def apply(self, pc, x, y):
        """Apply y = M^{-1} x: u-part via F^{-1}, p-part via
        PP^{-1} P1 PP^{-1} x_p; results are concatenated."""
        # self.kspLAMG.setOperators(self.B)
        x1 = x.getSubVector(self.u_is)
        y1 = x1.duplicate()
        x2 = x.getSubVector(self.p_is)
        y2 = x2.duplicate()
        yOut = y2.duplicate()
        self.kspNLAMG.solve(x1, y1)
        self.kspLAMG.solve(x2, y2)
        yy2 = self.P1*y2
        self.kspLAMG.solve(yy2, yOut)
        # y1 = y1 - self.Bt*yOut
        # Pack [u; p] into the full-space output vector.
        y.array = (np.concatenate([y1.array, yOut.array]))
class PCD(object):
    """Pressure-convection-diffusion-style block preconditioner (PETSc
    'python' PC protocol) using iterative inner solves: one AMG V-cycle
    (richardson + hypre, max_it=1) for L and the u-block, and CG+Jacobi
    for the pressure mass-like operator Q. F here is the pressure-space
    convection-diffusion operator applied between the two inner solves.
    """
    def __init__(self, W, Q,F,L):
        self.W = W
        self.Q = Q
        self.F = F
        self.L = L
        # Index sets for the u-block and p-block unknowns of W.
        self.u_is = PETSc.IS().createGeneral(range(W.sub(0).dim()))
        self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim()))
    def create(self, pc):
        """Create the three inner KSP solvers."""
        self.diag = None
        # Single hypre AMG sweep for L.
        kspLAMG = PETSc.KSP()
        kspLAMG.create(comm=PETSc.COMM_WORLD)
        pc = kspLAMG.getPC()  # NOTE: rebinds the 'pc' argument
        kspLAMG.setType('richardson')
        pc.setType('hypre')
        # pc.setFactorSolverPackage("pastix")
        # OptDB = PETSc.Options()
        # OptDB['pc_factor_shift_amount'] = .1
        # OptDB['pc_factor_mat_ordering_type'] = 'rcm'
        # OptDB['pc_factor_mat_solver_package'] = 'umfpack'
        kspLAMG.max_it = 1
        kspLAMG.setFromOptions()
        self.kspLAMG = kspLAMG
        # print kspLAMG.view()
        # Single hypre AMG sweep for the u-block.
        kspNLAMG = PETSc.KSP()
        kspNLAMG.create(comm=PETSc.COMM_WORLD)
        pc = kspNLAMG.getPC()
        kspNLAMG.setType('richardson')
        pc.setType('hypre')
        # pc.setFactorSolverPackage("pastix")
        kspNLAMG.max_it = 1
        kspNLAMG.setFromOptions()
        self.kspNLAMG = kspNLAMG
        # print kspNLAMG.view()
        # CG with Jacobi preconditioning for Q.
        kspQCG = PETSc.KSP()
        kspQCG.create(comm=PETSc.COMM_WORLD)
        pc = kspQCG.getPC()
        kspQCG.setType('cg')
        pc.setType('jacobi')
        # pc.setType('icc')
        # pc.setFactorSolverPackage("pastix")
        # kspQCG.max_it = 4
        kspQCG.setFromOptions()
        self.kspQCG = kspQCG
    def setUp(self, pc):
        """Pull sub-blocks from the preconditioning matrix and attach
        operators to the inner solvers."""
        A, P, flag = pc.getOperators()
        # self.P = P
        self.Bt = P.getSubMatrix(self.u_is,self.p_is)
        F = P.getSubMatrix(self.u_is,self.u_is)
        del A, P
        self.kspNLAMG.setOperators(F)
        self.kspLAMG.setOperators(self.L)
        self.kspQCG.setOperators(self.Q)
    def apply(self, pc, x, y):
        """Apply y = M^{-1} x: p-part via Q^{-1} F L^{-1} x_p, then u-part
        via F_uu^{-1} (x_u - Bt * y_p); results are concatenated."""
        # self.kspLAMG.setOperators(self.B)
        x1 = x.getSubVector(self.u_is)
        y1 = x1.duplicate()
        x2 = x.getSubVector(self.p_is)
        y2 = x2.duplicate()
        yOut = y2.duplicate()
        self.kspLAMG.solve(x2, y2)
        yy2 = self.F*y2
        self.kspQCG.solve(yy2, yOut)
        # Couple the pressure result back into the velocity right-hand side.
        x1 = x1 - self.Bt*yOut
        self.kspNLAMG.solve(x1, y1)
        # Pack [u; p] into the full-space output vector.
        y.array = (np.concatenate([y1.array, yOut.array]))
class PCDdirect(object):
    """Same block preconditioner structure as PCD, but with direct inner
    solvers: Cholesky (via MUMPS options) for L, and LU for the u-block
    and for Q ('preonly' KSPs, PETSc 'python' PC protocol).
    """
    def __init__(self, W, Q,F,L):
        self.W = W
        self.Q = Q
        self.F = F
        self.L = L
        # Index sets for the u-block and p-block unknowns of W.
        self.u_is = PETSc.IS().createGeneral(range(W.sub(0).dim()))
        self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim()))
    def create(self, pc):
        """Create the three inner direct solvers."""
        self.diag = None
        # Cholesky factorization for L, options taken from the options DB.
        kspLAMG = PETSc.KSP()
        kspLAMG.create(comm=PETSc.COMM_WORLD)
        pc = kspLAMG.getPC()  # NOTE: rebinds the 'pc' argument
        kspLAMG.setType('preonly')
        pc.setType('cholesky')
        # pc.setFactorSolverPackage("pastix")
        OptDB = PETSc.Options()
        OptDB['pc_factor_shift_amount'] = .1
        OptDB['pc_factor_mat_ordering_type'] = 'rcm'
        OptDB['pc_factor_mat_solver_package'] = 'mumps'
        # kspLAMG.max_it = 1
        kspLAMG.setFromOptions()
        self.kspLAMG = kspLAMG
        # print kspLAMG.view()
        # LU factorization for the u-block.
        kspNLAMG = PETSc.KSP()
        kspNLAMG.create(comm=PETSc.COMM_WORLD)
        pc = kspNLAMG.getPC()
        kspNLAMG.setType('preonly')
        pc.setType('lu')
        # pc.setFactorSolverPackage("pastix")
        # kspNLAMG.max_it = 1
        kspNLAMG.setFromOptions()
        self.kspNLAMG = kspNLAMG
        # print kspNLAMG.view()
        # LU factorization for Q.
        kspQCG = PETSc.KSP()
        kspQCG.create(comm=PETSc.COMM_WORLD)
        pc = kspQCG.getPC()
        kspQCG.setType('preonly')
        pc.setType('lu')
        # pc.setType('icc')
        # pc.setFactorSolverPackage("pastix")
        # kspQCG.max_it = 4
        kspQCG.setFromOptions()
        self.kspQCG = kspQCG
    def setUp(self, pc):
        """Pull sub-blocks from the system matrix and attach operators."""
        A, P, flag = pc.getOperators()
        # self.P = P
        # NOTE: unlike PCD, blocks are taken from A, not from P.
        self.Bt = A.getSubMatrix(self.u_is,self.p_is)
        F = A.getSubMatrix(self.u_is,self.u_is)
        self.kspNLAMG.setOperators(F)
        self.kspLAMG.setOperators(self.L)
        self.kspQCG.setOperators(self.Q)
    def apply(self, pc, x, y):
        """Apply y = M^{-1} x: p-part via Q^{-1} F L^{-1} x_p, then u-part
        via F_uu^{-1} (x_u - Bt * y_p); results are concatenated."""
        # self.kspLAMG.setOperators(self.B)
        x1 = x.getSubVector(self.u_is)
        y1 = x1.duplicate()
        x2 = x.getSubVector(self.p_is)
        y2 = x2.duplicate()
        yOut = y2.duplicate()
        self.kspLAMG.solve(x2, y2)
        yy2 = self.F*y2
        self.kspQCG.solve(yy2, yOut)
        # Couple the pressure result back into the velocity right-hand side.
        x1 = x1 - self.Bt*yOut
        self.kspNLAMG.solve(x1, y1)
        # Pack [u; p] into the full-space output vector.
        y.array = (np.concatenate([y1.array, yOut.array]))
        # print y.array
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from flask import Blueprint, jsonify, redirect, render_template, request, url_for
from gator import app, db
from gator.models import Media, News
import time
# Blueprint grouping the site's core views (last news, timeline, status).
core = Blueprint("core", __name__, template_folder="templates")
@core.route("/")
@core.route("/lastnews/")
@core.route("/lastnews/<string:delta>/")
def index(delta=None):
    """Render the most-shared news for the requested time window.

    `delta` selects today, yesterday, the last week, or (default) the
    last 24 hours.
    """
    now = datetime.now()
    midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
    if delta == "today":
        start_time, end_time = midnight, now
    elif delta == "yesterday":
        start_time, end_time = midnight - timedelta(days=1), midnight
    elif delta == "week":
        start_time, end_time = midnight - timedelta(days=7), now
    else:
        start_time, end_time = now - timedelta(days=1), now
    # Twice the page size is fetched so the template has spare entries.
    news = News.objects(created_at__gt=start_time, created_at__lte=end_time)\
        .order_by("-shares__count")[:(app.config["LINKS_PER_PAGE"] * 2)]
    if request.is_xhr:
        return jsonify(news=news, delta=delta)
    return render_template("lastnews.html", news=news, delta=delta)
@core.route("/timeline/")
@core.route("/timeline/<int:stamp>/")
def timeline(stamp=None):
    """Render a paginated, optionally filtered timeline of news items.

    The `stamp` pins the result set so pagination stays stable while new
    items arrive: page 1 shows items newer than the stamp, later pages
    show items at or before it.
    """
    page = int(request.args.get("page", 1))
    search = request.args.get("search", "")
    if stamp:
        pivot = datetime.utcfromtimestamp(stamp)
        news = News.objects(created_at__gt=pivot) if page == 1 \
            else News.objects(created_at__lte=pivot)
    else:
        news = News.objects()
    if search:
        news = news.filter(text__icontains=search)
    news = news.paginate(page=page, per_page=app.config["LINKS_PER_PAGE"])
    # Fresh stamp handed back to the client for the next request.
    stamp = time.time()
    if request.is_xhr:
        return jsonify(news=news.items, search=search, stamp=stamp)
    return render_template("timeline.html", news=news.items, search=search, stamp=stamp)
@core.route("/status/")
def status():
    """Render the media-sources status overview page."""
    return render_template("status.html", medialist=Media.objects.all())
|
nilq/baby-python
|
python
|
"""
Python 3.9.10 (tags/v3.9.10:f2f3f53, Jan 17 2022, 15:14:21) [MSC v.1929 64 bit (AMD64)] on win32
Данный модуль отвечает за детекцию движения
"""
import cv2 # Импортируем модуль OpenCV
import time
import os
def corrector(name_file: str, chk_video_det, xy_coord: list, frame_zoom: int, size_detect: int,
              lab_o_proc, window, frame_shift, play_speed, but_start, but_pause):
    """Rebuild a file with a damaged timeline via ffmpeg, then run the detector.

    name_file - file handed over for processing
    chk_video_det - whether to show the playback window during the search
    xy_coord - list of coordinates of the search zone
    frame_zoom - video downscale factor for display
    size_detect - size of the object to detect
    lab_o_proc - reference to the label used for progress display
    window - reference to the main window
    frame_shift - frame offset applied when motion is detected
    play_speed - frames skipped to speed things up
    but_start - Start button
    but_pause - Pause button

    Returns the detector's result, or 'Ffmpeg' when ffmpeg.exe is missing.
    """
    if os.path.exists("ffmpeg.exe"):
        # Extract the raw H.264 stream, then remux it with regenerated
        # timestamps at 25 fps to repair the timeline.
        os.system(f'ffmpeg -i "{name_file}" -map 0:v -vcodec copy -bsf:v h264_mp4toannexb -y "{name_file[:-4]}_source-video.h264"')
        os.system(f'ffmpeg -fflags +genpts -r 25 -i "{name_file[:-4]}_source-video.h264" -vcodec copy -y "{name_file[:-4]}_recovered.avi"')
        os.remove(f'{name_file[:-4]}_source-video.h264')
        return detector(f'{name_file[:-4]}_recovered.avi', chk_video_det, xy_coord, frame_zoom, size_detect,
                        lab_o_proc, window, frame_shift, play_speed, but_start, but_pause)
    else:
        return 'Ffmpeg'
def detector(name_file: str, chk_video_det, xy_coord: list, frame_zoom: int, size_detect: int,
             lab_o_proc, window, frame_shift, play_speed, but_start, but_pause) -> str:
    """Search for motion inside the given zone of the current video file.

    name_file - file handed over for processing
    chk_video_det - whether to show the playback window during the search
    xy_coord - list of coordinates of the search zone
    frame_zoom - video downscale factor for display
    size_detect - size of the object to detect
    lab_o_proc - reference to the label used for progress display
    window - reference to the main window
    frame_shift - frame offset applied when motion is detected
    play_speed - frames skipped to speed things up
    but_start - Start button
    but_pause - Pause button

    Returns 'OK' on completion, 'Pause' when paused, or 'Correct' when too
    many empty frames indicate the file needs recovery first.
    """
    if but_start['text'] == 'Старт':
        return "OK"
    none_frame: int = 0  # counter of consecutive empty (unreadable) frames
    start_detect = time.time()  # time at which processing of the file began
    cap = cv2.VideoCapture(name_file)  # open the video file for capture
    # cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('H', '2', '6', '4'))
    off_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # total frame count
    frame_width_det = (cap.get(cv2.CAP_PROP_FRAME_WIDTH))  # source video dimensions
    frame_height_det = (cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    output = cv2.VideoWriter(name_file[:-4] + "_detect" + name_file[len(name_file) - 4:],
                             cv2.VideoWriter_fourcc('H', '2', '6', '4'), 20,
                             (int(frame_width_det), int(frame_height_det)))  # output codec options: MJPG PIM1 XVID
    if chk_video_det:
        cv2.namedWindow(name_file, 0)  # create the preview window
        _, x_win, y_win = window.geometry().split('+')
        cv2.moveWindow(name_file, int(x_win)+350, int(y_win))
    while True:  # frames are processed in a loop
        if but_pause['text'] == 'Продолжить':
            cap.release()
            output.release()
            cv2.destroyAllWindows()
            return 'Pause'
        if but_start['text'] == 'Старт':
            cap.release()
            output.release()
            cv2.destroyAllWindows()
            break
        ret1, frame1 = cap.read()
        # This offset lets the detector group the outline of a moving object.
        for _ in range(frame_shift):
            cap.read()
        ret2, frame2 = cap.read()
        # This skip is purely for speed-up.
        for _ in range(play_speed):
            cap.read()
        if cap.get(cv2.CAP_PROP_POS_FRAMES) == off_frames:
            break
        if not ret1 * ret2:
            none_frame += 1
            if none_frame > 10:
                print('Превышено допустимое количество пустых фреймов. Начато восстановление файла.')
                output.release()  # close the output file
                cv2.destroyAllWindows()
                os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}')  # delete it
                return 'Correct'  # signal that recovery must be run
            continue
        # frame1=frame1[y1_search:y2_search,x1_search:x2_search]  # crop the frame to the needed size; may come in handy
        # frame2=frame2[y1_search:y2_search,x1_search:x2_search]
        # Progress output, in percent.
        lab_o_proc["text"] = str(cap.get(cv2.CAP_PROP_POS_FRAMES) * 100 // off_frames + 1) + " %"
        window.update()  # refresh the window so the progress is redrawn
        if ret2:
            if chk_video_det:
                # Run detection and visualize the annotated frame.
                # NOTE(review): detection/writing only happens when the
                # preview checkbox is enabled -- looks unintended, verify.
                frame1 = algorithm_detector_1(frame1, frame2, xy_coord, frame_zoom, size_detect, output)
                cv2.imshow(name_file, frame1)
                cv2.resizeWindow(name_file, int(frame_width_det) // 2,
                                 int(frame_height_det) // 2)  # set the preview window size
        else:
            break
        if chk_video_det and cv2.getWindowProperty(name_file, 1) == 1:  # exit when the preview window is closed
            break
        if cv2.waitKey(2) == 27:  # exit on ESC
            break
    cap.release()
    output.release()
    # Check how many frames were actually saved.
    output = cv2.VideoCapture(name_file[:-4] + "_detect" + name_file[len(name_file) - 4:])
    frames_output = int(output.get(cv2.CAP_PROP_FRAME_COUNT))
    output.release()
    cv2.destroyAllWindows()
    if frames_output == 0:  # if no frames were saved, remove the file
        os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}')  # delete it
    end_detect = time.time()  # time at which processing finished
    # Print the time spent processing the file.
    print(name_file, '->', str(time.strftime("%M:%S", time.localtime(end_detect - start_detect))))
    return 'OK'
def algorithm_detector_1(frame1, frame2, xy_coord: list, frame_zoom: int, size_detect: int, output):
    """Frame-differencing motion detector: annotates frame1 with the search
    zone and detected objects, and writes frame2 to `output` on a hit.

    frame1/frame2 - two frames separated by `frame_shift` reads
    xy_coord - two corner points of the search zone (pre-zoom coordinates)
    frame_zoom - factor converting zone coordinates to pixel coordinates
    size_detect - minimum object size, as a percentage of the zone area
    output - cv2.VideoWriter that receives unmodified frames on detection
    """
    x1_search = xy_coord[0][0] * frame_zoom
    y1_search = xy_coord[0][1] * frame_zoom
    x2_search = xy_coord[1][0] * frame_zoom
    y2_search = xy_coord[1][1] * frame_zoom
    # Frame processing to detect motion.
    diff_frame = cv2.absdiff(frame1, frame2)  # subtract one frame from the other
    gray_frame = cv2.cvtColor(diff_frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    blur_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)  # filter out spurious contours
    _, thresh_frame = cv2.threshold(blur_frame, 20, 255,
                                    cv2.THRESH_BINARY)  # highlight object edges in white:
    # any value above 20 becomes white (255)
    dilated_frame = cv2.dilate(thresh_frame, None, iterations=3)  # grow the white area
    '''
    This method is the opposite of erosion() (object erosion):
    it expands the region highlighted in the previous step.
    '''
    contours, _ = cv2.findContours(dilated_frame, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)  # cv2.RETR_TREE; find arrays of contour points
    cv2.rectangle(frame1, (x1_search, y1_search), (x2_search, y2_search), (255, 0, 0), 2)  # search zone
    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(
            contour)
        '''
        Converts the array from the previous step into a 4-tuple of
        coordinates. (contourArea() could instead compute the area of the
        detected object at each moment from the contour points.)
        '''
        # Skip objects smaller than size_detect percent of the zone area.
        if (w * h) < ((x2_search - x1_search) * (y2_search - y1_search) * int(size_detect) // 100):
            continue
        # Skip objects that do not intersect the search zone.
        if not (x + w > x1_search and x < x2_search and y + h > y1_search and y < y2_search):
            continue
        output.write(frame2)  # write the unmodified frame
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)  # rectangle built from the tuple points
        # Draw a red dot:
        # cv2.circle(frame1, (int(frame_width_det) - 50, int(frame_height_det) - 40), 10, (0, 0, 255),-1)
        # One could also simply draw the object's contour:
        # cv2.drawContours(frame1, contours, -1, (0, 255, 0), 2)
    return frame1
|
nilq/baby-python
|
python
|
class Const(object):
    """VRCSR protocol constants for VideoRay thrusters.

    Bibliography:
    [1] VideoRay Example Code [Online]
        Available: https://github.com/videoray/Thruster/blob/master/thruster.py
    """
    # VRCSR protocol framing words and fixed field sizes.
    sync_request = 0x5ff5
    sync_response = 0x0ff0
    protocol_vrcsr_header_size = 6
    protocol_vrcsr_xsum_size = 4

    # CSR address for sending an application-specific custom command.
    addr_custom_command = 0xf0
    propulsion_command = 0xaa

    # Flag for the standard thruster response which contain
    response_thruster_standard = 0x2
    response_ask_nothing = 0x00

    # Standard response is the device type followed by 4 32-bit floats and 1 byte.
    response_thruster_standard_length = 1 + 4 * 4 + 1

    # Full packet lengths: header + checksum + payload + checksum.
    thrust_response_length = (
        protocol_vrcsr_header_size
        + protocol_vrcsr_xsum_size
        + response_thruster_standard_length
        + protocol_vrcsr_xsum_size
    )
    # Add your stupid size to this!
    response_normal_length = (
        protocol_vrcsr_header_size
        + protocol_vrcsr_xsum_size
        + protocol_vrcsr_xsum_size
    )

    # TODO: Get R/W flags
    # Register name -> (CSR address, size in bytes).
    csr_address = {
        'undervoltage_trigger': (0xa5, 1),
        'overvoltage_trigger': (0xa6, 1),
        'overcurrent_trigger': (0xa7, 1),
        'temp_trigger': (0xa8, 1),
        'stall_count_max': (0xa9, 1),
        'fault_control': (0xa4, 1),
        'fault': (0x14, 4),
        'save_settings': (0xee, 2),
        'undervoltage_err_cnt': (0xac, 4),
        'overvoltage_err_cnt': (0xb0, 4),
        'overcurrent_err_cnt': (0xb4, 4),
        'temp_err_cnt': (0xb8, 4),
        'stall_err_cnt': (0xbc, 4),
    }

    # Size in bytes -> struct format character.
    format_char_map = {
        1: 'B',  # unsigned char      integer  1
        2: 'H',  # unsigned short     integer  2
        4: 'I',  # unsigned int       integer  4
        8: 'Q',  # unsigned long long integer  8
    }
# Example usage: unpack the CSR address and byte size of one register.
if __name__ == '__main__':
    addr, size = Const.csr_address['stall_err_cnt']
|
nilq/baby-python
|
python
|
# Source list and the expected values that occur exactly once, in order.
src_l = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]
res_for_check = [23, 1, 3, 10, 4, 11]
# Bug fix: the original iterated over range(len(src_l)) and counted the
# *index* values in the list, producing the wrong answer. Iterate over the
# elements themselves instead.
res = [el for el in src_l if src_l.count(el) == 1]
print(res == res_for_check)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
# In[27]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import curve_fit
from Funcoes_Bib import splitPlusMinus
# Load the propagated EM-gain measurements. Bug fix: the path must be a raw
# string -- in Python 3, '\U' inside a normal string literal starts a
# \Uxxxxxxxx unicode escape and is a SyntaxError.
df = pd.read_excel(r'C:\Users\observer\Desktop\Ensaios_e_Caracterizacoes\Planilhas\Ganho_EM\HSS10MHz\Propagado.xlsm')
columns = pd.DataFrame(df)

# Regime 1: rows 1-10 (higher EM gain).
gainR1 = columns['EM Gain'][1:11]
noiseR1 = columns['Noise (ADU)'][1:11]
medianR1 = np.median(noiseR1)
errorR1 = columns['Error (ADU)'][1:11]  # np.median(abs(GAIN['noise'] - median))/0.67449

# Straight-line model fitted independently to each regime.
x1 = np.linspace(10, 300, 100)
f1 = lambda x, a, b: a*x + b
R1popt, R1pcov = curve_fit(f1, gainR1, noiseR1)
residuos1 = noiseR1 - f1(gainR1, R1popt[0], R1popt[1])

# Regime 2: rows 0-1 (low EM gain, below 10).
gainR2 = columns['EM Gain'][0:2]
noiseR2 = columns['Noise (ADU)'][0:2]
medianR2 = np.median(noiseR2)
errorR2 = columns['Error (ADU)'][0:2]
x2 = np.linspace(2, 10, 50)
R2popt, R2pcov = curve_fit(f1, gainR2, noiseR2)
residuos2 = noiseR2 - f1(gainR2, R2popt[0], R2popt[1])

# Left panel: noise vs EM gain with both fitted lines.
fontsize = 14
fig = plt.figure(figsize=(14, 4))
ax = fig.add_subplot(121)
ax.errorbar(gainR1, noiseR1, errorR1, marker='o', c='blue', linewidth=1.0)
ax.errorbar(gainR2, noiseR2, errorR2, marker='o', c='blue', linewidth=1.0)
ax.plot(x1, f1(x1, R1popt[0], R1popt[1]), '--', c='red')
ax.plot(x2, f1(x2, R2popt[0], R2popt[1]), '--', c='red')
plt.xlabel(r'$\mathtt{Ganho \quad EM}$', size=fontsize)
plt.ylabel(r'$\mathtt{Ru \acute{\i} do (ADU)}$', size=fontsize)
plt.title(r'$\mathtt{Gr \acute{a}fico \quad do \quad ru \acute{\i} do \quad em \quad fun \c c \tilde{a} o \quad do \quad Ganho \quad EM}$', size=fontsize)
# plt.ylim(15, 23)
# plt.xlim(-5, 305)
string1 = r'$\mathtt{f(x) = %.3f x + %.2f, \quad x < 10}$' % (R2popt[0], R2popt[1])
string2 = r'$\mathtt{%.4f x + %.2f, \quad x \geq 10}$' % (R1popt[0], R1popt[1])
# string2 = r'$\mathtt{\sigma^2 = %.2e,}$'%(R1pcov[0][0]) + r'$\mathtt{%.2e,}$'%(R1pcov[1][1])
ax.text(0.35, 0.3, string1, ha='left', va='center', transform=ax.transAxes, size=fontsize)
ax.text(0.475, 0.1, string2, ha='left', va='center', transform=ax.transAxes, size=fontsize)

# Right panel: fit residuals for both regimes.
ax = fig.add_subplot(122)
ax.errorbar(gainR1, residuos1, errorR1, marker='o', c='blue', linewidth=1.0)
ax.errorbar(gainR2[0], residuos2[0], errorR2[0], marker='o', c='blue', linewidth=1.0)
plt.xlabel(r'$\mathtt{Ganho \quad EM}$', size=fontsize)
plt.ylabel(r'$\mathtt{Ru \acute{\i} do (ADU)}$', size=fontsize)
plt.title(r'$\mathtt{Gr \acute{a}fico \quad dos \quad res \acute{\i} duos }$', size=fontsize)
plt.show()
|
nilq/baby-python
|
python
|
import sklearn.svm
from autotabular.pipeline.components.regression.liblinear_svr import LibLinear_SVR
from .test_base import BaseRegressionComponentTest
class SupportVectorComponentTest(BaseRegressionComponentTest):
    """Regression-component test for the liblinear SVR wrapper.

    `res` holds the expected score per dataset/setting; `*_places` entries
    give the decimal precision for the comparison, and `None` marks
    iterative-fit variants this component does not support.
    """
    __test__ = True

    # Expected evaluation scores per benchmark dataset/setting.
    res = dict()
    res['default_boston'] = 0.6768297818275556
    res['default_boston_places'] = 2
    res['default_boston_iterative'] = None
    res['default_boston_sparse'] = 0.12626519114138912
    res['default_boston_sparse_places'] = 2
    res['default_boston_iterative_sparse'] = None
    res['default_diabetes'] = 0.39152218711865661
    res['default_diabetes_iterative'] = None
    res['default_diabetes_sparse'] = 0.18704323088631891
    res['default_diabetes_iterative_sparse'] = None

    # Reference sklearn estimator and the wrapped component under test.
    sk_mod = sklearn.svm.LinearSVR
    module = LibLinear_SVR
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import requests
__author__ = "Likhit Jain and Yashita P Jain"
__copyright__ = "Copyright 2019, Kaleyra"
__license__ = "MIT"
__version__ = "1.0"
__email__ = "support@kaleyra.com"
__status__ = "Production"
class Klient:
    """Minimal HTTP client that fetches a URL and returns its JSON body."""
    def __init__(self, url):
        """
        Initialises the attributes of Klient class.

        :param url: fully-built request URL to query.
        """
        self.url = url

    def response(self):
        """
        Makes a HTTP GET request to the server.

        :return: response in JSON format, or None when the request or the
            JSON decoding fails (the exception is printed, not raised).
        """
        try:
            response = requests.get(self.url)
            response_json = response.json()
            return response_json
        except Exception as e:
            # NOTE(review): errors are swallowed and None is returned;
            # callers must handle a None result.
            print(e)
|
nilq/baby-python
|
python
|
import argparse
from pathlib import Path
import lpips
import torch as th
import wandb
from PIL import Image
import torchvision.transforms as tvt
from tqdm.auto import tqdm
from cgd import losses
from cgd import clip_util
from cgd import script_util
# Define necessary functions
def clip_guided_diffusion(
    image_size: int = 128,
    num_cutouts: int = 16,
    prompts: "list[str]" = [],
    image_prompts: "list[str]" = [],
    clip_guidance_scale: int = 1000,
    tv_scale: float = 150,
    range_scale: float = 50,
    sat_scale: float = 0,
    init_scale: float = 0,
    batch_size: int = 1,
    init_image: Path = None,
    class_cond: bool = True,
    cutout_power: float = 1.0,
    timestep_respacing: str = "1000",
    seed: int = 0,
    diffusion_steps: int = 1000,
    skip_timesteps: int = 0,
    checkpoints_dir: str = script_util.MODEL_PATH,
    clip_model_name: str = "ViT-B/32",
    randomize_class: bool = True,
    prefix_path: Path = Path('./results'),
    save_frequency: int = 25,
    noise_schedule: str = "linear",
    dropout: float = 0.0,
    device: str = '',
    wandb_project: str = None,
    wandb_entity: str = None,
    use_augs: bool = False, # enables augmentation, mostly better for timesteps <= 100
    use_magnitude: bool = False, # enables magnitude of the gradient
    height_offset: int = 0,
    width_offset: int = 0,
    progress: bool = True,
):
    """Generate images from text/image prompts with CLIP-guided diffusion.

    Generator: yields ``(batch_idx, saved_image_path)`` for each sample in
    the batch every ``save_frequency`` steps and on the final step.

    NOTE(review): ``prompts``/``image_prompts`` use mutable defaults shared
    across calls; they are only iterated here, so this is harmless, but
    callers should still pass fresh lists.
    """
    # Pick a device automatically unless one was requested explicitly.
    if len(device) == 0:
        device = 'cuda' if th.cuda.is_available() else 'cpu'
        print(f"Using device {device}. You can specify a device manually with `--device/-dev`")
    else:
        print(f"Using device {device}")
    fp32_diffusion = (device == 'cpu')
    wandb_run = None
    if wandb_project is not None:
        # just use local vars for config
        wandb_run = wandb.init(project=wandb_project, entity=wandb_entity, config=locals())
    else:
        print(f"--wandb_project not specified. Skipping W&B integration.")
    th.manual_seed(seed)
    if use_magnitude == False and image_size == 64:
        use_magnitude = True
        tqdm.write("Enabling magnitude for 64x64 checkpoints.")
    use_saturation = sat_scale != 0
    Path(prefix_path).mkdir(parents=True, exist_ok=True)
    Path(checkpoints_dir).mkdir(parents=True, exist_ok=True)
    diffusion_path = script_util.download_guided_diffusion(image_size=image_size, checkpoints_dir=checkpoints_dir, class_cond=class_cond)
    # Load CLIP model/Encode text/Create `MakeCutouts`
    embeds_list = []
    weights_list = []
    clip_model, clip_size = clip_util.load_clip(clip_model_name, device)
    for prompt in prompts:
        text, weight = script_util.parse_prompt(prompt)
        text, weight = clip_util.encode_text_prompt(text, weight, clip_model_name, device)
        embeds_list.append(text)
        weights_list.append(weight)
    for image_prompt in image_prompts:
        img, weight = script_util.parse_prompt(image_prompt)
        image_prompt, batched_weight = clip_util.encode_image_prompt(
            img, weight, image_size, num_cutouts=num_cutouts, clip_model_name=clip_model_name, device=device)
        embeds_list.append(image_prompt)
        weights_list.extend(batched_weight)
    target_embeds = th.cat(embeds_list)
    weights = th.tensor(weights_list, device=device)
    if weights.sum().abs() < 1e-3: # smart :)
        raise RuntimeError('The weights must not sum to 0.')
    weights /= weights.sum().abs()
    if use_augs: tqdm.write( f"Augmentations enabled." )
    make_cutouts = clip_util.MakeCutouts(cut_size=clip_size, num_cutouts=num_cutouts,
                                         cutout_size_power=cutout_power, use_augs=use_augs)
    # Load initial image (if provided)
    init_tensor = None
    if init_image:
        pil_image = Image.open(script_util.fetch(init_image)).convert('RGB').resize((image_size, image_size))
        init_tensor = tvt.ToTensor()(pil_image)
        # Scale from [0, 1] to the [-1, 1] range diffusion models expect.
        init_tensor = init_tensor.to(device).unsqueeze(0).mul(2).sub(1)
    # Class randomization requires a starting class index `y`
    model_kwargs = {}
    if class_cond:
        model_kwargs["y"] = th.zeros(
            [batch_size], device=device, dtype=th.long)
    # Load guided diffusion
    gd_model, diffusion = script_util.load_guided_diffusion(
        checkpoint_path=diffusion_path,
        image_size=image_size, class_cond=class_cond,
        diffusion_steps=diffusion_steps,
        timestep_respacing=timestep_respacing,
        use_fp16=(not fp32_diffusion),
        device=device,
        noise_schedule=noise_schedule,
        dropout=dropout,
    )
    # This is initialized lazily as it can use a bit of VRAM
    if init_tensor is not None and init_scale != 0:
        lpips_vgg = lpips.LPIPS(net='vgg').to(device)
    current_timestep = None
    def cond_fn(x, t, out, y=None):
        # Guidance callback: returns the (negated) gradient of the combined
        # CLIP/TV/range/saturation/init losses with respect to the sample.
        # Reads `current_timestep` from the enclosing scope (updated in the
        # sampling loop below).
        log = {}
        n = x.shape[0]
        fac = diffusion.sqrt_one_minus_alphas_cumprod[current_timestep]
        sigmas = 1 - fac
        x_in = out["pred_xstart"] * fac + x * sigmas
        if wandb_project is not None:
            log[f'Generations - {timestep_respacing}'] = [
                wandb.Image(x, caption=f"Noisy Sample"),
                wandb.Image(out['pred_xstart'],
                            caption=f"Denoised Prediction"),
                wandb.Image(x_in, caption=f"Blended (what CLIP sees)"),
            ]
        clip_in = clip_util.CLIP_NORMALIZE(make_cutouts(x_in.add(1).div(2)))
        cutout_embeds = clip_model.encode_image(
            clip_in).float().view([num_cutouts, n, -1])
        dists = losses.spherical_dist_loss(
            cutout_embeds.unsqueeze(0), target_embeds.unsqueeze(0))
        dists = dists.view([num_cutouts, n, -1])
        clip_losses = dists.mul(weights).sum(2).mean(0)
        range_losses = losses.range_loss(out["pred_xstart"])
        tv_losses = losses.tv_loss(x_in)
        clip_losses = clip_losses.sum() * clip_guidance_scale
        range_losses = range_losses.sum() * range_scale
        tv_losses = tv_losses.sum() * tv_scale
        log['CLIP Loss'] = clip_losses.item()
        log['Range Loss'] = range_losses.item()
        log['TV Loss'] = tv_losses.item()
        loss = clip_losses + tv_losses + range_losses
        if use_saturation:
            sat_losses = th.abs(x_in - x_in.clamp(min=-1, max=1)).mean()
            sat_losses = sat_losses.sum() * sat_scale
            log['Saturation Loss'] = sat_losses.item()
            loss = loss + sat_losses
        if init_tensor is not None and init_scale != 0:
            init_losses = lpips_vgg(x_in, init_tensor)
            init_losses = init_losses.sum() * init_scale
            log['Init VGG Loss'] = init_losses.item()
            loss = loss + init_losses
        log['Total Loss'] = loss.item()
        final_loss = -th.autograd.grad(loss, x)[0] # negative gradient
        if use_magnitude:
            magnitude = final_loss.square().mean().sqrt() # TODO experimental clamping?
            log["Magnitude"] = magnitude.item()
            final_loss = final_loss * magnitude.clamp(max=0.05) / magnitude
        log['Grad'] = final_loss.mean().item()
        if progress:
            tqdm.write(
                "\t".join([f"{k}: {v:.3f}" for k, v in log.items() if "loss" in k.lower()]))
        if wandb_project is not None:
            wandb_run.log(log)
        return final_loss
    # Choose between normal or DDIM
    if timestep_respacing.startswith("ddim"):
        diffusion_sample_loop = diffusion.ddim_sample_loop_progressive
    else:
        diffusion_sample_loop = diffusion.p_sample_loop_progressive
    # def denoised_fn(image): return image
    try:
        cgd_samples = diffusion_sample_loop(
            gd_model,
            (batch_size, 3, image_size + height_offset, image_size + width_offset),
            clip_denoised=False,
            model_kwargs=model_kwargs,
            cond_fn=cond_fn,
            progress=progress,
            skip_timesteps=skip_timesteps,
            init_image=init_tensor,
            randomize_class=randomize_class,
            cond_fn_with_grad=True,
            # denoised_fn=denoised_fn,
        )
        # Gather generator for diffusion
        current_timestep = diffusion.num_timesteps - 1
        for step, sample in enumerate(cgd_samples):
            current_timestep -= 1
            if step % save_frequency == 0 or current_timestep == -1:
                for batch_idx, image_tensor in enumerate(sample["pred_xstart"]):
                    yield batch_idx, script_util.log_image(image_tensor, prefix_path, prompts, step, batch_idx)
        # if wandb_project is not None: wandb.log({"image": wandb.Image(image_tensor, caption="|".join(prompts))})
        # for batch_idx in range(batch_size):
        #     script_util.create_gif(prefix_path, prompts, batch_idx)
    except (RuntimeError, KeyboardInterrupt) as runtime_ex:
        # Give actionable advice on CUDA OOM; re-raise anything else.
        if "CUDA out of memory" in str(runtime_ex):
            print(f"CUDA OOM error occurred.")
            print(
                f"Try lowering --image_size/-size, --batch_size/-bs, --num_cutouts/-cutn")
            print(
                f"--clip_model/-clip (currently {clip_model_name}) can have a large impact on VRAM usage.")
            print(f"'RN50' will use the least VRAM. 'ViT-B/32' the second least and is good for its memory/runtime constraints.")
        else:
            raise runtime_ex
def main():
    """CLI entry point: parse arguments and drive ``clip_guided_diffusion``."""
    p = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    p.add_argument("--prompts", "-txts", type=str, default='',
                   help="the prompt/s to reward paired with weights. e.g. 'My text:0.5|Other text:-0.5' ")
    # BUG FIX: the example previously showed a comma separator, but the
    # value is split on '|' below.
    p.add_argument("--image_prompts", "-imgs", type=str, default='',
                   help="the image prompt/s to reward paired with weights. e.g. 'img1.png:0.5|img2.png:-0.5'")
    p.add_argument("--image_size", "-size", type=int, default=128,
                   help="Diffusion image size. Must be one of [64, 128, 256, 512].")
    p.add_argument("--init_image", "-init", type=str, default='',
                   help="Blend an image with diffusion for n steps")
    p.add_argument("--init_scale", "-is", type=int, default=0,
                   help="(optional) Perceptual loss scale for init image. ")
    p.add_argument("--skip_timesteps", "-skip", type=int, default=0,
                   help="Number of timesteps to blend image for. CLIP guidance occurs after this.")
    p.add_argument("--prefix", "-dir", default="results",
                   type=Path, help="output directory")
    p.add_argument("--checkpoints_dir", "-ckpts", default=script_util.MODEL_PATH,
                   type=Path, help="Path subdirectory containing checkpoints.")
    p.add_argument("--batch_size", "-bs", type=int,
                   default=1, help="the batch size")
    p.add_argument("--clip_guidance_scale", "-cgs", type=float, default=1000,
                   help="Scale for CLIP spherical distance loss. Values will need tinkering for different settings.",)
    p.add_argument("--tv_scale", "-tvs", type=float,
                   default=150., help="Controls the smoothness of the final output.",)
    p.add_argument("--range_scale", "-rs", type=float,
                   default=50., help="Controls how far out of RGB range values may get.",)
    p.add_argument("--sat_scale", "-sats", type=float, default=0.,
                   help="Controls how much saturation is allowed. Used for ddim. From @nshepperd.",)
    p.add_argument("--seed", "-seed", type=int,
                   default=0, help="Random number seed")
    p.add_argument("--save_frequency", "-freq", type=int,
                   default=1, help="Save frequency")
    p.add_argument("--diffusion_steps", "-steps", type=int,
                   default=1000, help="Diffusion steps")
    p.add_argument("--timestep_respacing", "-respace", type=str,
                   default="1000", help="Timestep respacing")
    p.add_argument("--num_cutouts", "-cutn", type=int, default=16,
                   help="Number of randomly cut patches to distort from diffusion.")
    p.add_argument("--cutout_power", "-cutpow", type=float,
                   default=1.0, help="Cutout size power")
    p.add_argument("--clip_model", "-clip", type=str, default="ViT-B/32",
                   help=f"clip model name. Should be one of: {clip_util.CLIP_MODEL_NAMES} or a checkpoint filename ending in `.pt`")
    p.add_argument("--uncond", "-uncond", action="store_true",
                   help='Use finetuned unconditional checkpoints from OpenAI (256px) and Katherine Crowson (512px)')
    p.add_argument("--noise_schedule", "-sched", default='linear', type=str,
                   help="Specify noise schedule. Either 'linear' or 'cosine'.")
    p.add_argument("--dropout", "-drop", default=0.0, type=float,
                   help="Amount of dropout to apply. ")
    p.add_argument("--device", "-dev", default='', type=str,
                   help="Device to use. Either cpu or cuda.")
    p.add_argument('--wandb_project', '-proj', default=None,
                   help='Name W&B will use when saving results.\ne.g. `--wandb_project "my_project"`')
    p.add_argument('--wandb_entity', '-ent', default=None,
                   help='(optional) Name of W&B team/entity to log to.')
    p.add_argument('--height_offset', '-ht', default=0, type=int, help='Height offset for image')
    p.add_argument('--width_offset', '-wd', default=0, type=int, help='Width offset for image')
    p.add_argument('--use_augs', '-augs', action='store_true', help="Uses augmentations from the `quick` clip guided diffusion notebook")
    p.add_argument('--use_magnitude', '-mag', action='store_true', help="Uses magnitude of the gradient")
    p.add_argument('--quiet', '-q', action='store_true',
                   help='Suppress output.')
    args = p.parse_args()
    _class_cond = not args.uncond
    prefix_path = args.prefix
    Path(prefix_path).mkdir(exist_ok=True)
    # Prompts are weighted and '|'-separated, e.g. 'a cat:0.5|a dog:-0.5'.
    if len(args.prompts) > 0:
        prompts = args.prompts.split('|')
    else:
        prompts = []
    if len(args.image_prompts) > 0:
        image_prompts = args.image_prompts.split('|')
    else:
        image_prompts = []
    cgd_generator = clip_guided_diffusion(
        prompts=prompts,
        image_prompts=image_prompts,
        batch_size=args.batch_size,
        tv_scale=args.tv_scale,
        init_scale=args.init_scale,
        range_scale=args.range_scale,
        sat_scale=args.sat_scale,
        image_size=args.image_size,
        class_cond=_class_cond,
        randomize_class=(_class_cond),
        save_frequency=args.save_frequency,
        clip_guidance_scale=args.clip_guidance_scale,
        cutout_power=args.cutout_power,
        num_cutouts=args.num_cutouts,
        timestep_respacing=args.timestep_respacing,
        seed=args.seed,
        diffusion_steps=args.diffusion_steps,
        skip_timesteps=args.skip_timesteps,
        init_image=args.init_image,
        checkpoints_dir=args.checkpoints_dir,
        clip_model_name=args.clip_model,
        noise_schedule=args.noise_schedule,
        dropout=args.dropout,
        device=args.device,
        prefix_path=prefix_path,
        wandb_project=args.wandb_project,
        wandb_entity=args.wandb_entity,
        # BUG FIX: these were hard-coded to False, silently ignoring the
        # --use_augs/-augs and --use_magnitude/-mag flags parsed above.
        use_augs=args.use_augs,
        use_magnitude=args.use_magnitude,
        height_offset=args.height_offset,
        width_offset=args.width_offset,
        progress=not args.quiet,
    )
    # Exhaust the generator; images are written to disk as a side effect.
    list(enumerate(cgd_generator))


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from scipy.interpolate import interp1d
from math import cos, pi
import _rrtm_radiation_fortran
from numpy import ndarray
# Ordered names of the positional arguments accepted by `driver` below; the
# trailing comments document dimensionality, units and defaults. Commented
# entries are inputs of related schemes that this RRTM wrapper does not use.
INPUTS = [
    # 'do_sw', # 0 Shortwave switch (integer) 1 1 / 0 => do / do not compute SW
    # 'do_lw', # 0 Longwave switch (integer) 1 1 / 0 => do / do not compute LW
    'p', # 1-3 Atmospheric pressure mb Default is equispaced 0-ps. p[0] is top level
    'lev',
    'T', # 1-3 Temperature K 283.15 Isothermal
    'Tbound',
    'ps', # 0-2 Surface pressure mb 1000.
    'Ts', # 0-2 Surface temperature K 283.15
    'q', # 1-3 Specific humidity g/kg 1.e-5
    'h2o',
    'o3', # 1-3 Ozone mass mix. rat. kg/kg Default obtained by interpolating a tropical data profile
    'co2', # 0 CO2 ppmv 330.
    'ch4', # 0 CH4 ppmv 0.
    'n2o', # 0 N2O ppmv 0.
    'o2', # 0 O2 volume mixing ratio
    'cfc11', # 0 CFC11 ppmv 0.
    'cfc12', # 0 CFC12 ppmv 0.
    'cfc22', # 0 CFC22 ppmv 0.
    'ccl4', # CCl4 volume mixing ratio
    'aldif', # 0-2 Diffuse near-IR (SW) albedo (frac) 0.07
    'aldir', # 0-2 Direct near-IR (SW) albedo (frac) 0.07
    'asdif', # 0-2 Diffuse UV+vis alb (frac) 0.07
    'asdir', # 0-2 Direct UV+vis alb (frac) 0.07
    'lw_surface_emissivity', # should have len(LW_BANDS) members...see above
    'zen', # 0-2 Solar zenith angle dgr 72.2 Daily-mean on equator at equinox
    # 'calday', # 0 Calendar day (float) 80.5 Insolation computed at specified
    # 'orb_yr', # 0 Orbital year (integer) 1995 Year used to compute orbital params
    # 'avg', # 0 Insolation average (string) 'daily' Choices are: 'inst', 'daily', 'annual'
    # 'lat', # 0-1 Latitude dgr 0. day and lat/lon if solin
    # 'lon', # 0-1 Longitude dgr 0. and zen are NOT specified
    # 'solin', # 0-2 Insolation W/m2 417.4 Daily-mean on equator at equinox
    'scon', # 0 Solar constant W m-2 1367.
    # 'tauvis', # 0 Aerosol opt. depth (float) 0. CCM3 only
    # 'tau_inf', # 0 Total opt. depth - 1. Greygas scheme only
    # 'alpha_greygas', # 0 Tau shape parameter - 1. Greygas scheme only
    'cldf', # 1-3 Cloud fraction frac 0.:
    # 'in_cld', # 0 Cloud water path flag - 0 0 / 1 => grid avg / in-cloud water paths (CAM3 only)
    'cloud_single_scattering_albedo',
    'cloud_asymmetry_parameter',
    'cloud_forward_scattering_fraction',
    'r_liq', # 1-3 Drop radius, liquid micron 10.
    'r_ice', # 1-3 Drop radius, ice micron 30.
    'clwp', # 1-3 Cloud liquid water path g/m2 0.
    'ciwp', # 1-3 Cloud ice water path g/m2 -99. If not passed explicitly, ice frac computed internally (CAM3 only)
    # 'flus' # 1-3 Upwelling LW at surface W/m2 -99. If not passed explicitly, computed from Ts using emiss=1 (CAM3 only)
    'tauaer_sw', # Aerosol optical depth (iaer=10 only), Dimensions: (ncol,nlay,nbndsw), (non-delta scaled)
    'ssaaer_sw', # Aerosol single scattering albedo (iaer=10 only), Dimensions: (ncol,nlay,nbndsw), (non-delta scaled)
    'asmaer_sw', # Aerosol asymmetry parameter (iaer=10 only), Dimensions: (ncol,nlay,nbndsw), (non-delta scaled)
    'tauaer_lw', # Aerosol optical depth (iaer=10 only), Dimensions: (ncol,nlay,nbndlw), (non-delta scaled)
    'Cpd',
    'tauc_lw'
]
# Names (and order) of the fields returned by `driver`.
OUTPUTS = ['swuflx', 'swdflx', 'lwuflx', 'lwdflx', 'SwToa', 'LwToa', 'lwflx', 'swflx', 'hr']
def driver(*args):
    """Translate CliMT-style inputs into RRTM arguments, call the Fortran
    RRTM radiation driver, and convert its outputs back to CliMT
    conventions (top-to-bottom level indexing, positive-down fluxes).

    *args* must follow the order of the module-level ``INPUTS`` list; the
    returned tuple follows ``OUTPUTS``.
    """
    # wavenumber bands used by RRTM:
    SW_BANDS = range(14)
    LW_BANDS = range(16)
    # gotta translate between the APIs:
    climt_inputs = dict(zip(INPUTS, args))
    number_of_layers = len(climt_inputs['T'])
    if not climt_inputs['Tbound']: climt_inputs['Tbound'] = climt_inputs['T']
    climt_inputs['pbound'] = climt_inputs['lev'].tolist() + climt_inputs['ps'][0].tolist()
    climt_inputs['pbound'][0] = 1.e-9 # enforce TOA is at p=0
    climt_inputs['Tbound'] = [T[0][0] for T in climt_inputs['Tbound']] + [climt_inputs['Ts'][0][0]]
    interpolated_p = interp1d(range(number_of_layers + 1), climt_inputs['pbound'])
    interpolated_T = interp1d(range(number_of_layers + 1), climt_inputs['Tbound'])
    # import sys; sys.stderr.write(str(climt_inputs['cldf']))
    # Scalar values for well-mixed gases are broadcast to one value per layer.
    for key in ['co2', 'ch4', 'n2o', 'o2', 'cfc11', 'cfc12', 'cfc22', 'ccl4']:
        if not hasattr(climt_inputs[key], '__iter__'):
            climt_inputs[key] = [climt_inputs[key]] * number_of_layers
    # A scalar emissivity is broadcast to one value per LW band.
    for key in ['lw_surface_emissivity']:
        if not hasattr(climt_inputs[key], '__iter__'):
            climt_inputs[key] = [climt_inputs[key]] * len(LW_BANDS)
    # import pdb; pdb.set_trace()
    if 'h2o' in climt_inputs and climt_inputs['h2o']:
        h2o_concentration = [[h2o[0][0] for h2o in climt_inputs['h2o']]]
    else:
        # Derive H2O volume mixing ratio from specific humidity q (g/kg).
        h2o_concentration = [[(((q/1000.)/(1. - (q/1000.)))*1.607793)[0][0] for q in climt_inputs['q']]]
    # Aerosol inputs default to zero and scalars are broadcast per band.
    for key in ['tauaer_sw', 'ssaaer_sw', 'asmaer_sw', 'tauaer_lw']:
        if not climt_inputs[key]:
            climt_inputs[key] = [[0] * len(locals()[key[-2:].upper() + '_BANDS']) for value in climt_inputs['lev']]
        else:
            if not hasattr(climt_inputs[key][0], '__iter__'):
                climt_inputs[key] = [[value] * len(locals()[key[-2:].upper() + '_BANDS']) for value in climt_inputs[key]]
    rrtm_inputs = [
        # GENERAL, used in both SW and LW
        ['icld', 1 if 'cldf' in climt_inputs else 0], # Cloud overlap method, 0: Clear only, 1: Random, 2, Maximum/random] 3: Maximum
        ['permuteseed_sw', 150], # used for monte carlo clouds; must differ from permuteseed_lw by number of subcolumns
        ['permuteseed_lw', 300], # learn about these later...
        ['irng', 1], # more monte carlo stuff
        ['idrv', 0], # whether to also calculate the derivative of flux with respect to surface temp
        ['cpdair', climt_inputs['Cpd']],
        ['play', [[interpolated_p(i + .5) for i in range(number_of_layers)]]], # pressure in each layer
        ['plev', [climt_inputs['pbound']]], # pressure at boundaries of each layer
        ['tlay', [[interpolated_T(i + .5) for i in range(number_of_layers)]]], # temperature in each layer
        ['tlev', [climt_inputs['Tbound']]], # temperature at boundaries of each layer
        ['tsfc', [climt_inputs['Ts']]], # surface temperature
        # GASES, used in both SW and LW
        ['h2ovmr', h2o_concentration],
        ['o3vmr', [[o3[0][0] * 0.603428 for o3 in climt_inputs['o3']]]], # convert from kg/kg to volume mixing ratio using molecular weight of dry air / ozone
        ['co2vmr', [[co2 * 1.e-6 for co2 in climt_inputs['co2']]]],
        ['ch4vmr', [[ch4 * 1.e-6 for ch4 in climt_inputs['ch4']]]],
        ['n2ovmr', [[n2o * 1.e-6 for n2o in climt_inputs['n2o']]]],
        ['o2vmr', [climt_inputs['o2']]],
        ['cfc11vmr', [[cfc11 * 1.e-6 for cfc11 in climt_inputs['cfc11']]]],
        ['cfc12vmr', [[cfc12 * 1.e-6 for cfc12 in climt_inputs['cfc12']]]],
        ['cfc22vmr', [[cfc22 * 1.e-6 for cfc22 in climt_inputs['cfc22']]]],
        ['ccl4vmr', [climt_inputs['ccl4']]],
        # SURFACE OPTICAL PROPERTIES
        # SW
        ['aldif', [climt_inputs['aldif'][0][0]]],
        ['aldir', [climt_inputs['aldir'][0][0]]],
        ['asdif', [climt_inputs['asdif'][0][0]]],
        ['asdir', [climt_inputs['asdir'][0][0]]],
        # LW
        # NOTE(review): `1. or 1 - emis` always evaluates to 1., so the
        # supplied lw_surface_emissivity is effectively ignored (black-body
        # surface) — confirm whether this is intentional.
        ['emis', [[1. or 1 - emis for emis in climt_inputs['lw_surface_emissivity']]]],
        # THE SUN - SW
        ['coszen', [cos(climt_inputs['zen'][0][0] * 2 * pi / 360.)]], # cosine of the solar zenith angle
        ['adjes', 1.], # flux adjustment for earth/sun distance (if not dyofyr)
        ['dyofyr', 0], # day of the year used to get Earth/Sun distance (if not adjes)
        ['scon', climt_inputs['scon']], # solar constant
        # CLOUDS, SW see http://www.arm.gov/publications/proceedings/conf16/extended_abs/iacono_mj.pdf
        ['inflgsw', 2], # Flag for cloud optical properties
        # INFLAG = 0 direct specification of optical depths of clouds;
        #            cloud fraction and cloud optical depth (gray) are
        #            input for each cloudy layer
        #        = 1 calculation of combined ice and liquid cloud optical depths (gray)
        #            as in CCM2; cloud fraction and cloud water path are input for
        #            each cloudy layer.
        #        = 2 calculation of separate ice and liquid cloud optical depths, with
        #            parameterizations determined by values of ICEFLAG and LIQFLAG.
        #            Cloud fraction, cloud water path, cloud ice fraction, and
        #            effective ice radius are input for each cloudy layer for all
        #            parameterizations. If LIQFLAG = 1, effective liquid droplet radius
        #            is also needed.
        ['inflglw', 2], #
        ['iceflgsw', 1], # # Flag for ice particle specification
        # ICEFLAG = 0 the optical depths (gray) due to ice clouds are computed as in CCM3.
        #         = 1 the optical depths (non-gray) due to ice clouds are computed as closely as
        #             possible to the method in E.E. Ebert and J.A. Curry, JGR, 97, 3831-3836 (1992).
        #         = 2 the optical depths (non-gray) due to ice clouds are computed by a method
        #             based on the parameterization used in the radiative transfer model Streamer
        #             (reference, J. Key, Streamer User's Guide, Technical Report 96-01] Boston
        #             University, 85 pp. (1996)), which is closely related to the parameterization
        #             of water clouds due to Hu and Stamnes (see below).
        #         = 3 the optical depths (non-gray) due to ice clouds are computed by a method
        #             based on the parameterization given in Fu et al., J. Clim.,11,2223-2237 (1998).
        ['iceflgslw', 1], #
        ['liqflgsw', 1], # # Flag for liquid droplet specification
        # LIQFLAG = 0 the optical depths (gray) due to water clouds are computed as in CCM3.
        #         = 1 the optical depths (non-gray) due to water clouds are computed by a method
        #             based on the parameterization of water clouds due to Y.X. Hu and K. Stamnes,
        #             J. Clim., 6, 728-742 (1993).
        ['liqflglw', 1], #
        ['tauc_sw', [[[0.]* number_of_layers]] * len(SW_BANDS)], # In-cloud optical depth [IS THIS ONE NEEDED GIVEN THE OTHERS?]
        ['tauc_lw', [[climt_inputs['tauc_lw'] or [0.] * number_of_layers]] * len(LW_BANDS)], # in-cloud optical depth
        ['cldfrac', [[c[0][0] for c in climt_inputs['cldf']]]], # # layer cloud fraction
        ['ssac_sw', [[climt_inputs['cloud_single_scattering_albedo'] or [0.] * number_of_layers]] * len(SW_BANDS)], # # In-cloud single scattering albedo
        ['asmc_sw', [[climt_inputs['cloud_asymmetry_parameter'] or [0.] * number_of_layers]] * len(SW_BANDS)], # # In-cloud asymmetry parameter
        ['fsfc_sw', [[climt_inputs['cloud_forward_scattering_fraction'] or [0.] * number_of_layers]] * len(SW_BANDS)], ## In-cloud forward scattering fraction (delta function pointing forward "forward peaked scattering")
        ['ciwp', [[c[0][0] for c in climt_inputs['ciwp']]]], # # in-cloud ice water path (g/m2)
        ['clwp', [[c[0][0] for c in climt_inputs['clwp']]]], # # in-cloud liquid water path (g/m2)
        ['reic', [[c[0][0] for c in climt_inputs['r_ice']]]], # # Cloud ice particle effective size (microns)
        # specific definition of reicmcl depends on setting of iceflglw:
        # iceflglw = 0, ice effective radius, r_ec, (Ebert and Curry, 1992)]
        #               r_ec must be >= 10.0 microns
        # iceflglw = 1, ice effective radius, r_ec, (Ebert and Curry, 1992)]
        #               r_ec range is limited to 13.0 to 130.0 microns
        # iceflglw = 2, ice effective radius, r_k, (Key, Streamer Ref. Manual] 1996)
        #               r_k range is limited to 5.0 to 131.0 microns
        # iceflglw = 3, generalized effective size, dge, (Fu, 1996)]
        #               dge range is limited to 5.0 to 140.0 microns
        #               [dge = 1.0315 * r_ec]
        ['relq', [[c[0][0] for c in climt_inputs['r_liq']]]], # # Cloud water drop effective radius (microns)
        # AEROSOLS
        # SW
        ['tauaer_sw', [climt_inputs['tauaer_sw'] or [[0.] * len(SW_BANDS)] * number_of_layers]], # Aerosol optical depth (iaer=10 only), Dimensions, (ncol,nlay,nbndsw)] # (non-delta scaled)
        ['ssaaer_sw', [climt_inputs['ssaaer_sw'] or [[0.] * len(SW_BANDS)] * number_of_layers]], # Aerosol single scattering albedo (iaer=10 only), Dimensions, (ncol,nlay,nbndsw)] # (non-delta scaled)
        ['asmaer_sw', [climt_inputs['asmaer_sw'] or [[0.] * len(SW_BANDS)] * number_of_layers]], # Aerosol asymmetry parameter (iaer=10 only), Dimensions, (ncol,nlay,nbndsw)] # (non-delta scaled)
        ['ecaer_sw', [[[0.] * 6] * number_of_layers]], # Aerosol optical depth at 0.55 micron (iaer=6 only), Dimensions, (ncol,nlay,naerec)] # (non-delta scaled)
        ['tauaer_lw', [climt_inputs['tauaer_lw'] or [[0.] * len(LW_BANDS)] * number_of_layers]] #
    ]
    # CliMT indexes levels top-to-bottom while RRTM expects the opposite:
    # reverse (in place) any nested sequence whose length matches the number
    # of layers or layer boundaries.
    for pair in rrtm_inputs:
        r_0 = pair[1]
        if hasattr(r_0, '__iter__'):
            if len(r_0) in [number_of_layers, number_of_layers + 1]:
                r_0.reverse()
            else:
                for r_1 in r_0:
                    if hasattr(r_1, '__iter__'):
                        if len(r_1) in [number_of_layers, number_of_layers + 1]:
                            r_1.reverse()
                        else:
                            for r_2 in r_1:
                                if hasattr(r_2, '__iter__'):
                                    if len(r_2) in [number_of_layers, number_of_layers + 1]:
                                        r_2.reverse()
    # import pdb; pdb.set_trace()
    out = dict(zip(['swuflx','swdflx','swhr','swuflxc','swdflxc','swhrc','lwuflx','lwdflx','lwhr','lwuflxc','lwdflxc','lwhrc','duflx_dt','duflxc_dt'], \
        list(_rrtm_radiation_fortran.driver(*[pair[1] for pair in rrtm_inputs])) ))
    #
    ## new_output = (
    ##     output[0][0], # swuflx
    ##     output[1][0], # swdflx
    ##     output[6][0], # lwuflx
    ##     output[7][0], # lwdflx
    ##     output[1][0][-1] - output[0][0][-1], # swToA
    ##     output[7][0][-1] - output[6][0][-1] # lwToA
    ## )
    ## return new_output
    # get outputs into CliMT-compatible format
    for key in out.keys():
        out[key] = out[key].transpose() # make level first index
        out[key] = out[key][::-1] # indexing goes top to bottom
    # fluxes defined positive downward
    for key in ['swuflx','swuflxc','lwuflx','lwuflxc']: out[key] = -out[key]
    # TOA fluxes
    out['LwToa'] = out['lwuflx'][0]+out['lwdflx'][0]
    out['SwToa'] = out['swuflx'][0]+out['swdflx'][0]
    # output fluxes at layer midpoints:
    for key in ['swuflx','swdflx','swuflxc','swdflxc','lwuflx','lwdflx','lwuflxc','lwdflxc']: out[key] = (out[key][1:]+out[key][:-1])/2.
    # total fluxes
    out['lwflx'] = out['lwuflx']+out['lwdflx']
    out['swflx'] = out['swuflx']+out['swdflx']
    out['hr'] = out['swhr']+out['lwhr']
    return tuple([out[key] for key in OUTPUTS])
|
nilq/baby-python
|
python
|
from typing import List
class Solution:
    def findUnsortedSubarray(self, nums: List[int]) -> int:
        """Return the length of the shortest contiguous subarray that, once
        sorted, makes the whole list non-decreasing (0 if already sorted)."""
        n = len(nums)
        # Smallest value appearing at/after the first descent, if any.
        disorder_min = float('inf')
        for i in range(1, n):
            if nums[i] < nums[i - 1]:
                disorder_min = min(nums[i:])
                break
        if disorder_min == float('inf'):
            return 0  # no descent anywhere: already sorted
        # Largest value appearing at/before the last ascent violation.
        disorder_max = float('inf')
        for i in range(n - 2, -1, -1):
            if nums[i] > nums[i + 1]:
                disorder_max = max(nums[:i + 1])
                break
        # First element that must move right, and last that must move left.
        lo = next(i for i in range(n) if nums[i] > disorder_min)
        hi = next(i for i in range(n - 1, -1, -1) if nums[i] < disorder_max)
        return hi - lo + 1
|
nilq/baby-python
|
python
|
from unittest import main
from tests.base import BaseTestCase
class AppTestCase(BaseTestCase):
    """Smoke test verifying the application boots and serves requests."""

    def test_app_is_running(self):
        """The root endpoint should answer 200 with the welcome banner."""
        response = self.client().get('/')
        body = response.get_data(as_text=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Welcome to Limbook Api" in body)
# Make the tests conveniently executable
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
def count_notes(amount):
    """Return the minimum number of notes/coins needed to pay *amount*.

    Greedy change-making over the canonical denominations 100, 20, 10, 5
    and 1 (the remainder is paid in 1s), matching the original script's
    repeated if-blocks without the copy-paste.
    """
    count = 0
    for denomination in (100, 20, 10, 5):
        count += amount // denomination
        amount %= denomination
    return count + amount  # whatever is left is paid in 1-unit notes


if __name__ == "__main__":
    # Preserve the original script behavior: read the amount from stdin
    # and print the note count.
    print(count_notes(int(input())))
|
nilq/baby-python
|
python
|
from db import db
class BookModel(db.Model):
    """SQLAlchemy model for a book that belongs to a store."""
    __tablename__ = 'books'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(80))
    author = db.Column(db.String(80))
    isbn = db.Column(db.String(40))
    # Stored as text; format is whatever the caller supplies (max 10 chars).
    release_date = db.Column(db.String(10))
    price = db.Column(db.Float(precision=2))
    store_id = db.Column(db.Integer, db.ForeignKey('stores.id'))
    store = db.relationship('StoreModel')
    def __init__(self, title, price, store_id, author, isbn, release_date):
        self.title = title
        self.price = price
        self.store_id = store_id
        self.author = author
        self.isbn = isbn
        self.release_date = release_date
    def json(self):
        """Serializable dict representation (store_id/id intentionally omitted)."""
        return {'title': self.title, 'price': self.price, 'author': self.author, 'isbn': self.isbn, 'release_date': self.release_date}
    @classmethod
    def find_by_title(cls, title):
        """Return the first book with the given title, or None."""
        return cls.query.filter_by(title=title).first()
    def save_to_db(self):
        """Insert or update this row and commit."""
        db.session.add(self)
        db.session.commit()
    def delete_from_db(self):
        """Delete this row and commit."""
        db.session.delete(self)
        db.session.commit()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Archerx
# @time: 2019/4/15 上午 11:10
from xadmin import views
import xadmin
from . import models
from django.contrib.auth.forms import (UserCreationForm, UserChangeForm)
# from xadmin import PermissionModelMultipleChoiceField
# from xadmin import Fieldset, Main, Side, Row
from xadmin.plugins.auth import UserAdmin
from django.utils.translation import ugettext as _
class GlobalSetting(object):
    """Site-wide xadmin settings (titles shown across the admin UI)."""
    # menu_style = 'accordion'  # grouped, collapsible menu display
    site_title = 'SDUTCtf'
    site_footer = 'ctf.sdutsec.cn'
xadmin.site.register(views.CommAdminView, GlobalSetting)  # register globally
class BaseSetting(object):
    """Enable xadmin theme switching (including Bootswatch themes)."""
    enable_themes = True
    use_bootswatch = True
xadmin.site.register(views.BaseAdminView, BaseSetting)
class UserDisplay(UserAdmin):
    """Admin options for the custom user model (UserProfile)."""
    change_user_password_template = None
    list_display = ('id','user_phone','username', 'user_major', 'user_number', 'is_staff')
    list_filter = ('is_staff', 'is_superuser', 'is_active')
    search_fields = ('username', 'first_name', 'last_name', 'email')
    ordering = ('username',)
    style_fields = {'user_permissions': 'm2m_transfer'}
    model_icon = 'fa fa-user'
    relfield_style = 'fk-ajax'
    def get_model_form(self, **kwargs):
        # Use Django's creation form (with password confirmation) when adding
        # a user, and the change form when editing an existing one.
        if self.org_obj is None:
            self.form = UserCreationForm
        else:
            self.form = UserChangeForm
        return super(UserDisplay, self).get_model_form(**kwargs)
    # def get_form_layout(self):
    #     if self.org_obj:
    #         self.form_layout = (
    #             Main(
    #                 Fieldset('',
    #                          'username', 'password',
    #                          css_class='unsort no_title'
    #                          ),
    #                 Fieldset(_('Personal info'),
    #                          Row('first_name', 'last_name'),
    #                          'email'
    #                          ),
    #                 Fieldset(_('Permissions'),
    #                          'groups', 'user_permissions'
    #                          ),
    #                 Fieldset(_('Important dates'),
    #                          'last_login', 'date_joined'
    #                          ),
    #             ),
    #             Side(
    #                 Fieldset(_('Status'),
    #                          'is_active', 'is_staff', 'is_superuser',
    #                          ),
    #             )
    #         )
    #     return super(UserDisplay, self).get_form_layout()
# xadmin.site.unregister(models.UserProfile)
xadmin.site.register(models.UserProfile, UserDisplay)
class VerifyCodeDisplay(object):
    """Admin options for SMS verification codes."""
    list_display = ('id','code','mobile','add_time')
    ordering = ['-add_time']  # newest codes first
    list_per_page = 10
xadmin.site.register(models.VerifyCode, VerifyCodeDisplay)
class UserLogDisply(object):
    """Admin options for the user login log.

    NOTE(review): the class name keeps its historical misspelling
    ("Disply") in case it is referenced elsewhere.
    """
    # BUG FIX: these were set literals ({...}); set iteration order is
    # arbitrary, which made the admin column/filter/search order
    # non-deterministic. Admin option lists must be ordered sequences.
    list_display = ('user', 'user_login_time', 'user_login_ip', 'user_login_agent', 'user_login_os')
    list_filter = ('user_login_time', 'user_login_agent', 'user_login_os')
    search_fields = ('user__username', 'user_login_ip', 'user_login_agent', 'user_login_os')
    ordering = ['-user_login_time']  # newest logins first
    list_per_page = 10
xadmin.site.register(models.UserLoginLog, UserLogDisply)
|
nilq/baby-python
|
python
|
from flask import ( g, redirect, url_for )
from tmc.db import get_db
# Insert relation tool_x_technique
def insert_tool_x_techn(table, tool_id, technique_id):
    """Insert a tool<->technique relation row into *table*.

    :param table: name of the join table (trusted identifier; it is
        interpolated into the SQL because identifiers cannot be bound
        as parameters — never pass user-controlled input here).
    :param tool_id: id of the tool side of the relation.
    :param technique_id: id of the technique side of the relation.
    :return: primary key of the newly inserted row.
    """
    try:
        author_id = g.user['id']
    # BUG FIX: flask's `g` raises AttributeError when `user` was never set
    # on this request, which the original (NameError, TypeError) clause did
    # not catch, defeating the fallback. TypeError still covers g.user being
    # None; NameError is kept for backward compatibility.
    except (AttributeError, NameError, TypeError):
        author_id = 1  # fall back to the default/system author
    g.db = get_db()
    # Identifiers are formatted in; the values themselves are parameterized.
    query = 'INSERT INTO {} ({}, {}, {}) VALUES (?, ?, ?)'.format(
        table, 'author_id', 'tool_id', 'technique_id')
    result = g.db.execute(query, (author_id, tool_id, technique_id))
    g.db.commit()
    element_id = result.lastrowid
    return element_id
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
# Shared suffix used to build the route names for this gallery type.
TYPE = "stream"

urlpatterns = [
    # Gallery list view for streams.
    path('', views.view_gallery_stream, name=f'gallery-{TYPE}'),
    # Edit an existing stream entry.
    path('edit/<int:pk>/', views.video_stream_edit, name=f"{TYPE}-update"),
    # Remove a stream entry.
    path('remove/<int:pk>/', views.removeStream, name=f"{TYPE}-delete"),
]
|
nilq/baby-python
|
python
|
import pandas as pd
import re
from django.core.management import BaseCommand
from django.conf import settings
from Styling.models import Garments, ImageURLs, Images, ProductCategories
class SanitizeData:
    """Loads the scraped garment items file and normalizes column types."""

    def __init__(self):
        # BUG FIX: raw string avoids the invalid '\g' escape sequence
        # (SyntaxWarning on modern Python) while keeping the path bytes
        # identical. NOTE(review): the hard-coded backslash makes this
        # Windows-only — consider os.path.join/pathlib.
        self.csv_path = settings.GARMENTS_DATA_URL + r'\garment_items.jl'
        self.garments = pd.read_json(self.csv_path, lines=True)

    def sanitize_garment_data(self):
        """Normalize dtypes and return the garments DataFrame.

        There are no empty cells in this data so no missing-value handling
        is performed.
        """
        self.garments['brand'] = self.garments.brand.astype('category')
        self.garments['gender'] = self.garments.gender.astype('category')
        # Prices arrive as strings with thousands separators, e.g. "1,299.00".
        self.garments['price'] = self.garments.price.str.replace(',', '').astype(float)
        return self.garments
class Command(BaseCommand):
    """Import the first 1000 sanitized garments, with their image URLs,
    product categories and images, into the database."""

    def __init__(self):
        # BUG FIX: the original skipped BaseCommand.__init__, which sets up
        # the command's stdout/stderr wrappers and output style.
        super().__init__()
        self.sanitize = SanitizeData()

    def execute(self, *args, **options):
        # NOTE(review): overriding execute() bypasses BaseCommand's option
        # plumbing; Django convention is to override handle() instead -
        # kept as-is to avoid changing the external entry point.
        garments = self.sanitize.sanitize_garment_data()[:1000]
        pattern = re.compile('[^a-zA-Z]')  # strips everything except letters
        for garment in garments.iterrows():
            # iterrows() yields (index, row); garment[1] is the row Series.
            gar = Garments(
                product_id=garment[1].product_id,
                brand=pattern.sub('', garment[1].brand).upper(),
                gender=garment[1].gender,
                price=garment[1].price,
                product_description=garment[1].product_description,
                product_title=garment[1].product_title,
                source=garment[1].source,
                url=garment[1].url,
            )
            gar.save()
            for url in garment[1].image_urls:
                img_urls = ImageURLs(
                    image_url=url,
                    garment=gar
                )
                img_urls.save()
            for product_category in garment[1].product_categories:
                # the loop variable is rebound from the raw string to the model row
                product_category = ProductCategories(
                    product_category=pattern.sub('', product_category).upper(),
                    garment=gar
                )
                product_category.save()
            for image in garment[1].images:
                # likewise rebound from the scraped dict to the model row
                image = Images(
                    url=image['url'],
                    path=image['path'],
                    checksum=image['checksum'],
                    garment=gar
                )
                image.save()
|
nilq/baby-python
|
python
|
# coding: utf-8
# Import libraries
import pandas as pd
from pandas import ExcelWriter
import pickle
import xlsxwriter
def extract_regulatory_genes():
    """
    The EXTRACT_REGULATORY_GENES operation extracts from the set of Transcription Factors associated to a gene, the list of its candidate regulatory genes, i.e., the genes that encode for those TFs. Intermediate results files are exported locally during the execution of the function, while the final set of transcription factors is returned as a Python dictionary (dict_RegulGenes.p), where each target gene (set as key) is associated to the list of its candidate regulatory genes (set as value).
    :return: a Python dictionary
    Example::
        import genereg as gr
        reg_genes_dict = gr.RegulatoryGenes.extract_regulatory_genes()
    """
    # NOTE(review): this function uses pandas APIs removed in modern releases
    # (DataFrame.set_value, read_excel's 'sheetname' keyword); it is pinned to
    # an old pandas version - confirm before upgrading the dependency.
    # Starting from the dictionary containing for each gene of interest the TFs that bind to its promoters,
    # extract the names of the genes encoding the TFs in order to identify the candidate regulatory genes of each gene of interest
    dict_GeneTF = pickle.load(open('./1_Transcription_Factors/dict_GeneTF.p', 'rb'))
    TFs_interest = []
    for key, value in dict_GeneTF.items():
        TFs = value[:-2]  # the TFs are all the elements of the value list, except for the last two
        for tf in TFs:
            if tf not in TFs_interest:
                TFs_interest.append(tf)
    # Import the gene-TFs mapping dataframe
    Mapping_df = pd.read_excel('./0_Genes_Mapping/Genes_Mapping.xlsx',sheetname='Sheet1',header=0,converters={'ENTREZ_GENE_ID':str,'HGNC_ID':str})
    for index, row in Mapping_df.iterrows():
        tfs_str = row['TF']
        if isinstance(tfs_str,str):
            tfs_list = tfs_str.split(', ')
        else:
            tfs_list = []
        Mapping_df.set_value(index,'TF',tfs_list)
    # Extract in a list all the names of the TFs contained in the mapping dataframe
    mapping_df_TFs = []
    for index, row in Mapping_df.iterrows():
        tfs = row['TF']
        if len(tfs) != 0:
            for t in tfs:
                if t not in mapping_df_TFs:
                    mapping_df_TFs.append(t)
    # Create a reduced dataframe with all the distinct TFs and their encoding genes, filtering only the TFs of interest previously extracted
    distinct_TFs = []
    for index, row in Mapping_df.iterrows():
        tfs = row['TF']
        if len(tfs) != 0:
            for t in tfs:
                if t in TFs_interest:
                    if t not in distinct_TFs:
                        distinct_TFs.append(t)
    from collections import defaultdict
    dict_tf_gene = defaultdict(list)
    for t in distinct_TFs:
        dict_tf_gene[t] = []
    for index, row in Mapping_df.iterrows():
        tf = row['TF']
        gene = row['GENE_SYMBOL']
        for t in tf:
            if t in distinct_TFs:
                dict_tf_gene[t].append(gene)
    TF_Gene_df = pd.DataFrame(list(dict_tf_gene.items()), columns=['TF_NAME', 'GENE_SYMBOL'])
    for index, row in TF_Gene_df.iterrows():
        genes = row['GENE_SYMBOL']
        if len(genes) == 1:
            new_gene = ''.join(genes)
            TF_Gene_df.set_value(index,'GENE_SYMBOL',new_gene)
    # Create a new empty dictionary with lists as values for each key (gene)
    from collections import defaultdict
    dict_RegulGenes = defaultdict(list)
    # Set the keys and initialize their values as empty lists
    for v in dict_GeneTF.keys():
        dict_RegulGenes[v] = []
    # Get the TFs of each target gene and extract the names of the genes encoding them from the mapping dataframe
    for key, value in dict_GeneTF.items():
        TFs = value[:-2]  # the TFs are all the elements of the value list, except for the last two
        for tf in TFs:
            # for each TF, search in the mapping dataframe for the name of the encoding gene
            if tf in mapping_df_TFs:
                # get the name (GENE_SYMBOL) of the gene encoding the transcription factor "tf"
                gene_name = TF_Gene_df.loc[TF_Gene_df['TF_NAME'] == tf, 'GENE_SYMBOL'].iloc[0]
                # add the regulatory gene in correspondence of the proper gene in the dictionary
                dict_RegulGenes[key].append(gene_name)
            # in case the transcription factor considered is not mapped in the dataframe,
            # then the name of its encoding gene is unknown ('n/a')
            else: dict_RegulGenes[key].append('n/a')
    # SUMMARY TABLE summarizing for each gene of interest the TFs binding to its promoters and their corresponding encoding genes:
    # Each row of the table is indexed by the Gene Symbols of the genes of interest and progressive integers representing the number of TFs for each gene
    genes_of_interest = []
    for k in dict_GeneTF.keys():
        genes_of_interest.append(k)
    # Extract the highest number of regulatory genes for a single gene of interest
    highest_n = 0
    for k, v in dict_RegulGenes.items():
        # BUG FIX: this was 'n = len(value)', which read the stale 'value'
        # variable left over from the previous loop over dict_GeneTF, so the
        # maximum was never computed from the actual regulatory-gene lists.
        n = len(v)
        if n > highest_n:
            highest_n = n
    top_range = highest_n + 100
    # Define the number of rows in the table for each gene of interest
    num_lst = []
    for i in list(range(1,top_range)):
        num_lst.append(i)
    # Cartesian product to generate tuples for multi-indexing
    import itertools
    tuples = []
    for i in itertools.product(genes_of_interest,num_lst):
        tuples.append(i)
    # Set the multiple indexes to be used in the dataframe
    index = pd.MultiIndex.from_tuples(tuples, names=['GENE_SYMBOL', '#'])
    # Create the dataframe and initialize the empty cells as empty strings
    info_genes_of_interest = pd.DataFrame('', index = index, columns = ['Transcription Factors','Regulatory Genes','Entrez_Gene_IDs','ENTREZ_GENE_ID','GENE_SET','#TFs','#RegulatoryGenes (distinct)'])
    # Set the correct Entrez Gene ID for each gene of interest
    for index, row in info_genes_of_interest.iterrows():
        sym = index[0]
        n = index[1]
        if n == 1:
            eid = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == sym, 'ENTREZ_GENE_ID'].iloc[0]
            info_genes_of_interest.loc[(sym, n),'ENTREZ_GENE_ID'] = eid
    # Set the gene sets
    for key, value in dict_GeneTF.items():
        # get the list of gene sets associated to gene 'key'
        # (i.e. the last element of the list related to gene 'key')
        sets = value[-1]
        # set the list of gene sets to the correct cell in the dataframe (in correspondence of index 'key')
        n_path = len(sets)
        if n_path == 1:
            info_genes_of_interest.loc[(key, 1),'GENE_SET'] = sets[0]
        if n_path == 2:
            info_genes_of_interest.loc[(key, 1),'GENE_SET'] = sets[0]
            info_genes_of_interest.loc[(key, 2),'GENE_SET'] = sets[1]
        if n_path == 3:
            info_genes_of_interest.loc[(key, 1),'GENE_SET'] = sets[0]
            info_genes_of_interest.loc[(key, 2),'GENE_SET'] = sets[1]
            info_genes_of_interest.loc[(key, 3),'GENE_SET'] = sets[2]
    # Set the TFs
    for key, value in dict_GeneTF.items():
        # get the TFs (i.e. the list of values except for the last two elements)
        tfs = value[:-2]
        # set the list of TFs to the correct cell in the dataframe (in correspondence of index 'key')
        for i in num_lst:
            if i <= len(tfs):
                info_genes_of_interest.loc[(key, i),'Transcription Factors'] = tfs[i-1]
    # Set the regulatory genes (both with their Gene Symbols and Entrez Gene IDs)
    for key, value in dict_RegulGenes.items():
        # the set of regulatory genes is the list 'value' corresponding to each key (gene).
        # Set the list of regulatory genes to the correct cell in the dataframe (in correspondence of index 'key')
        for i in num_lst:
            if i <= len(value):
                info_genes_of_interest.loc[(key, i),'Regulatory Genes'] = value[i-1]
                if value[i-1] == 'n/a':
                    eid = 'n/a'
                else:
                    # get the Entrez Gene ID of the regulatory gene
                    eid = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == value[i-1], 'ENTREZ_GENE_ID'].iloc[0]
                info_genes_of_interest.loc[(key, i),'Entrez_Gene_IDs'] = eid
    # Remove the empty rows in the dataframe
    for index, row in info_genes_of_interest.iterrows():
        tfs = row['Transcription Factors']
        path = row['GENE_SET']
        if (tfs == '') & (path == ''):
            info_genes_of_interest.drop(index, inplace=True)
    # Extract the distinct candidate regulatory genes for each gene of interest:
    # Remove from the dictionary containing regulatory genes the duplicated genes, if present, in order to have a dictionary with all the distinct candidate regulatory genes for each gene of interest
    for v in dict_GeneTF.keys():
        dict_RegulGenes[v] = []
    for key, value in dict_GeneTF.items():
        TFs = value[:-2]
        for tf in TFs:
            if tf in mapping_df_TFs:
                gene_name = TF_Gene_df.loc[TF_Gene_df['TF_NAME'] == tf, 'GENE_SYMBOL'].iloc[0]
                if gene_name not in dict_RegulGenes[key]:
                    dict_RegulGenes[key].append(gene_name)
    # So, the general form of this second dictionary containing the information about regulatory genes is the following:
    # dict_RegulGenes = {key: value, ...} = {GENE_SYMBOL: [REG_GENE1, REG_GENE2, REG_GENE3, ...]}, where each regulatory gene is identified by its GENE_SYMBOL
    # Export the dictionary of genes of interest and their regulatory genes:
    # Save the dictionary into a pickle file
    pickle.dump(dict_RegulGenes, open('./2_Regulatory_Genes/dict_RegulGenes.p', 'wb'))
    # Only for the sake of clearness, order alphabetically the list of candidate regulatory genes for each gene of interest
    dict_RegulGenes_ord = dict_RegulGenes.copy()
    for k in dict_RegulGenes_ord.keys():
        old = dict_RegulGenes_ord[k]
        sorted_genes = sorted(old)
        dict_RegulGenes_ord[k] = sorted_genes
    # Save the dictionary as a .xlsx file
    workbook = xlsxwriter.Workbook('./2_Regulatory_Genes/dict_RegulGenes.xlsx')
    worksheet = workbook.add_worksheet()
    # Set the headers of the columns
    worksheet.write(0,0,'GENE_SYMBOL')
    worksheet.write(0,1,'ENTREZ_GENE_ID')
    worksheet.write(0,2,'Distinct Regulatory Genes - GENE_SYMBOL')
    worksheet.write(0,3,'Distinct Regulatory Genes - ENTREZ_GENE_ID')
    row = 1
    col = 0
    for key in dict_RegulGenes_ord.keys():
        row += 1
        worksheet.write(row, col, key)
        # get the Entrez Gene ID of the gene of interest
        eid = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == key, 'ENTREZ_GENE_ID'].iloc[0]
        worksheet.write(row, col + 1, ''.join(eid))
        # NOTE(review): 'row' is not incremented inside this loop, so every
        # regulatory gene of 'key' is written to the same two cells and only
        # the last one survives in the workbook - confirm whether this is the
        # intended layout before changing it.
        for item in dict_RegulGenes_ord[key]:
            worksheet.write(row, col + 2, ''.join(item))
            # get the Entrez Gene ID of the regulatory gene
            if item == 'PTRF':
                entrez_id = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == 'CAVIN1', 'ENTREZ_GENE_ID'].iloc[0]
            else:
                entrez_id = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == item, 'ENTREZ_GENE_ID'].iloc[0]
            worksheet.write(row, col + 3, ''.join(entrez_id))
        row += 1
    workbook.close()
    # Save the dictionary as a .txt file
    with open ('./2_Regulatory_Genes/dict_RegulGenes.txt', 'w') as fp:
        for p in dict_RegulGenes_ord.items():
            fp.write('%s : %s\n\n' % p)
    # Count the number of TFs and distinct regulatory genes for each gene of interest:
    # Store the number of TFs and distinct regulatory genes for each gene of interest in two dictionaries
    from collections import defaultdict
    dict_TFs_genes = defaultdict(int)
    dict_regul_genes = defaultdict(int)
    for k in dict_GeneTF.keys():
        dict_TFs_genes[k] = 0
    for k in dict_RegulGenes.keys():
        dict_regul_genes[k] = 0
    for k in dict_GeneTF.keys():
        transcription_factors = dict_GeneTF[k][:-2]
        number_TFs = len(transcription_factors)
        dict_TFs_genes[k] = number_TFs
    for k in dict_RegulGenes.keys():
        genes = dict_RegulGenes[k]
        number_genes = len(genes)
        dict_regul_genes[k] = number_genes
    # Create a table summarizing for each gene of interest the number of TFs binding to its promoters and the number of distinct genes encoding them
    TFs_genes_df = pd.DataFrame(list(dict_TFs_genes.items()), columns=['GENE_SYMBOL', '#TFs'])
    TFs_genes_df.set_index('GENE_SYMBOL', inplace=True)
    regul_genes_df = pd.DataFrame(list(dict_regul_genes.items()), columns=['GENE_SYMBOL', '#RegulatoryGenes (distinct)'])
    regul_genes_df.set_index('GENE_SYMBOL', inplace=True)
    # Join the two dataframes into a single one to have both information together
    TFs_regul_genes_df = TFs_genes_df.join(regul_genes_df)
    TFs_regul_genes_df['GENE_SYMBOL'] = TFs_regul_genes_df.index
    TFs_regul_genes_df.index = range(len(TFs_regul_genes_df))  # set a new progressive index for this table
    # Add to the dataframe a column for storing also the Entrez Gene ID of each gene, besides the already present Gene Symbol
    TFs_regul_genes_df['ENTREZ_GENE_ID'] = ''
    # Add the correct Entrez Gene ID for each gene
    for index, row in TFs_regul_genes_df.iterrows():
        sym = row['GENE_SYMBOL']
        eid = Mapping_df.loc[Mapping_df['GENE_SYMBOL'] == sym, 'ENTREZ_GENE_ID'].iloc[0]
        TFs_regul_genes_df.set_value(index,'ENTREZ_GENE_ID',eid)
    TFs_regul_genes_df_final = TFs_regul_genes_df[['GENE_SYMBOL','ENTREZ_GENE_ID','#TFs','#RegulatoryGenes (distinct)']].copy()
    for index, row in TFs_regul_genes_df_final.iterrows():
        gene = row['GENE_SYMBOL']
        n_tfs = row['#TFs']
        n_genes_reg = row['#RegulatoryGenes (distinct)']
        info_genes_of_interest.loc[(gene, 1),'#TFs'] = n_tfs
        info_genes_of_interest.loc[(gene, 1),'#RegulatoryGenes (distinct)'] = n_genes_reg
    # Export the dataframe as a .xlsx file
    writer = ExcelWriter('./2_Regulatory_Genes/Full_TFs-RegulatoryGenes_SUMMARY_Table.xlsx')
    info_genes_of_interest.to_excel(writer,'Sheet1')
    writer.save()
    return dict_RegulGenes
|
nilq/baby-python
|
python
|
import math
import random
def print_n_whitespaces(n: int):
    """Emit *n* space characters without a trailing newline."""
    print(n * " ", end="")
def print_n_newlines(n: int):
    """Print *n* blank lines (i.e. emit *n* newline characters)."""
    print("\n" * n, end="")
def subroutine_1610():
    """Rate the helpers' (picadores'/toreadores') job on the current bull.

    Reads module globals: A (bull quality, 1 best .. 5 worst), AS/BS (group
    name split in two parts, e.g. "PICADO"+"RES") and LS (quality adjectives).
    Prints the outcome (and possible casualties) and returns the quality
    factor C, where a SMALLER C means a BETTER job (0.1 superb .. 0.5 awful).
    Name preserved from the BASIC original's line number (GOSUB 1610).
    """
    B = 3 / A * random.random()  # worse bulls (large A) make B smaller => better job
    if B < 0.37:
        C = 0.5
    elif B < 0.5:
        C = 0.4
    elif B < 0.63:
        C = 0.3
    elif B < 0.87:
        C = 0.2
    else:
        C = 0.1
    T = math.floor(10 * C + 0.2)  # index 1..5 into the LS adjective table
    print(f"THE {AS}{BS} DID A {LS[T]} JOB.")
    if T >= 4:
        # A poor or awful job may cost men and horses.
        if T == 5:
            # 1800 & 1810 are unreachable, so it's not presented here
            K = random.randint(1, 2)
            if K == 1:
                print(f"ONE OF THE {AS}{BS} WAS KILLED.")
            elif K == 2:
                print(f"NO {AS}{BS} WERE KILLED.")
        else:
            # T == 4: only the mounted picadores can lose horses.
            if AS != "TOREAD":
                K = random.randint(1, 2)
                print(f"{K} OF THE HORSES OF THE {AS}{BS} KILLED.")
            K = random.randint(1, 2)
            print(f"{K} OF THE {AS}{BS} KILLED.")
    print()
    return C
def FNC():
    """Crowd-award score with a random factor.

    Reads module globals: L (accumulated bravery from cape moves), D (game
    state: D[1]/D[2] helper quality, D[3] pass count, D[4] survival outcome,
    D[5] kill flag) and A (bull quality).  Higher scores earn better awards.
    Name preserved from the BASIC original's DEF FNC.
    """
    Q = (
        4.5 + L / 6 - (D[1] + D[2]) * 2.5 + 4 * D[4] + 2 * D[5] - (D[3] ** 2) / 120 - A
    ) * random.random()
    return Q
# --- Title banner and optional instructions (port of the BASIC "BULL" game) ---
print_n_whitespaces(34)
print("BULL")
print_n_whitespaces(15)
print("CREATIVE COMPUTING MORRISTOWN, NEW JERSEY")
print_n_newlines(2)
L = 1  # bravery accumulator, grown by risky cape moves
Z = input("DO YOU WANT INSTRUCTIONS? ")
if Z != "NO":
    print("HELLO, ALL YOU BLOODLOVERS AND AFICIONADOS.")
    print("HERE IS YOUR BIG CHANCE TO KILL A BULL.")
    print()
    print("ON EACH PASS OF THE BULL, YOU MAY TRY")
    print("0 - VERONICA (DANGEROUS INSIDE MOVE OF THE CAPE)")
    print("1 - LESS DANGEROUS OUTSIDE MOVE OF THE CAPE")
    print("2 - ORDINARY SWIRL OF THE CAPE.")
    print()
    print("INSTEAD OF THE ABOVE, YOU MAY TRY TO KILL THE BULL")
    print("ON ANY TURN: 4 (OVER THE HORNS), 5 (IN THE CHEST).")
    print("BUT IF I WERE YOU,")
    print("I WOULDN'T TRY IT BEFORE THE SEVENTH PASS.")
    print()
    print("THE CROWD WILL DETERMINE WHAT AWARD YOU DESERVE")
    print("(POSTHUMOUSLY IF NECESSARY).")
    print("THE BRAVER YOU ARE, THE BETTER THE AWARD YOU RECEIVE.")
    print()
    print("THE BETTER THE JOB THE PICADORES AND TOREADORES DO,")
    print("THE BETTER YOUR CHANCES ARE.")
print_n_newlines(2)
# Game state: D[1]/D[2] helper quality, D[3] pass count, D[4] survival
# outcome, D[5] kill flag (2 once the bull is killed).
D = {}
D[5] = 1
D[4] = 1
LS = ["", "SUPERB", "GOOD", "FAIR", "POOR", "AWFUL"]  # quality adjectives, 1-indexed
A = random.randint(1, 5)  # bull quality: 1 superb (hard) .. 5 awful (easy)
print(f"YOU HAVE DRAWN A {LS[A]} BULL.")
if A > 4:
    print("YOU'RE LUCKY.")
elif A < 2:
    print("GOOD LUCK. YOU'LL NEED IT.")
print()
print()
# --- Helper phases: rate the picadores and toreadores, record both in D ---
AS = "PICADO"
BS = "RES"
D[1] = subroutine_1610()
AS = "TOREAD"
BS = "ORES"
# BUG FIX: the original called subroutine_1610() here, discarded its return
# value, and then did 'D[2] = C' with the picadores' stale C - so the
# toreadores' quality was never actually recorded.
D[2] = subroutine_1610()
# --- Main game loop: one iteration per pass of the bull ---
print_n_newlines(2)
D[3] = 0  # pass counter
while True:
    D[3] = D[3] + 1  # 660
    print(f"PASS NUMBER {D[3]}")
    if D[3] >= 3:
        # From the third pass on, offer a kill attempt up front.
        while True:  # 1930 (yes/no input validation)
            AS = input("HERE COMES THE BULL. TRY FOR A KILL? ")
            if AS not in ["YES", "NO"]:
                print("INCORRECT ANSWER - - PLEASE TYPE 'YES' OR 'NO'.")
            else:
                break
        Z1 = 1 if AS == "YES" else 2  # 1 = attempt a kill, 2 = cape move
        if Z1 != 1:
            print("CAPE MOVE? ", end="")
        else:
            pass
            # goto 1130
    else:
        print("THE BULL IS CHARGING AT YOU! YOU ARE THE MATADOR--")
        while True:  # 1930 (yes/no input validation)
            AS = input("DO YOU WANT TO KILL THE BULL? ")
            if AS not in ["YES", "NO"]:
                print("INCORRECT ANSWER - - PLEASE TYPE 'YES' OR 'NO'.")
            else:
                break
        Z1 = 1 if AS == "YES" else 2
        if Z1 != 1:
            print("WHAT MOVE DO YOU MAKE WITH THE CAPE? ", end="")
        else:
            # goto 1130
            pass
    # gore: 0 = unhurt, 1 = gored by the bull, 2 = gored after panicking
    gore = 0
    if Z1 != 1:  # NO: a cape move instead of a kill attempt
        while True:
            E = float(input())
            if E != float(int(abs(E))):
                print("DON'T PANIC, YOU IDIOT! PUT DOWN A CORRECT NUMBER")
            elif E < 3:
                break
        # Riskier moves add more bravery M (0 veronica = 3, 1 outside = 2, 2 swirl = 0.5).
        if E == 0:
            M = 3
        elif E == 1:
            M = 2
        else:
            M = 0.5
        L = L + M
        # Goring chance grows with bull quality and pass count, shrinks with helper quality.
        F = (6 - A + M / 10) * random.random() / ((D[1] + D[2] + D[3] / 10) * 5)
        if F < 0.51:
            continue
        gore = 1
    else:  # YES: kill attempt
        print()
        print("IT IS THE MOMENT OF TRUTH.")
        print()
        H = int(input("HOW DO YOU TRY TO KILL THE BULL? "))
        if H not in [4, 5]:
            # Anything but 4 (over the horns) or 5 (in the chest) is a panic.
            print("YOU PANICKED. THE BULL GORED YOU.")
            gore = 2
            # goto 970
        else:
            K = (6 - A) * 10 * random.random() / ((D[1] + D[2]) * 5 * D[3])
            if H == 4:
                if K > 0.8:
                    gore = 1
            else:
                if K > 0.2:
                    gore = 1
            if gore == 0:
                print("YOU KILLED THE BULL!")
                D[5] = 2
                break
    if gore > 0:
        if gore == 1:
            print("THE BULL HAS GORED YOU!")
        death = False
        while True:
            _ = random.randint(1, 2)  # 970: 50/50 survival roll
            if _ == 1:
                print("YOU ARE DEAD.")
                D[4] = 1.5
                # goto 1320
                death = True
                break
            else:
                print("YOU ARE STILL ALIVE.")
                print()
                print("DO YOU RUN FROM THE RING? ", end="")
                while True:  # 1930 (yes/no input validation)
                    AS = input()
                    if AS not in ["YES", "NO"]:
                        print("INCORRECT ANSWER - - PLEASE TYPE 'YES' OR 'NO'.")
                    else:
                        break
                Z1 = 1 if AS == "YES" else 2
                if Z1 == 2:
                    print("YOU ARE BRAVE. STUPID, BUT BRAVE.")
                    _ = random.randint(1, 2)
                    if _ == 1:
                        D[4] = 2
                        # goto 660, outter while loop
                        # NOTE(review): the BASIC original's GOTO 660 resumed
                        # the fight here; this port ends the game instead -
                        # confirm which behavior is intended.
                        death = True
                        break
                    else:
                        print("YOU ARE GORED AGAIN!")
                        # goto 970
                else:
                    print("COWARD")
                    D[4] = 0
                    # goto 1310, break outter while loop
                    death = True
                    break
        if death == True:
            break
# 1310: epilogue - the crowd's verdict
print_n_newlines(3)
if D[4] == 0:
    print("THE CROWD BOOS FOR TEN MINUTES. IF YOU EVER DARE TO SHOW")
    print("YOUR FACE IN A RING AGAIN, THEY SWEAR THEY WILL KILL YOU--")
    print("UNLESS THE BULL DOES FIRST.")
else:
    if D[4] == 2:
        print("THE CROWD CHEERS WILDLY!")
    elif D[5] == 2:
        print("THE CROWD CHEERS!")
    print()
    print("THE CROWD AWARDS YOU")
    # NOTE(review): FNC() is re-rolled at each comparison, so these branches
    # are not thresholds of a single score - kept as in the original port.
    if FNC() < 2.4:
        print("NOTHING AT ALL.")
    elif FNC() < 4.9:
        print("ONE EAR OF THE BULL.")
    elif FNC() < 7.4:
        print("BOTH EARS OF THE BULL!")
        print("OLE!")
    else:
        print("OLE! YOU ARE 'MUY HOMBRE'!! OLE! OLE!")
    print()
    print("ADIOS")
print_n_newlines(3)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by Chouayakh Mahdi
25/06/2010
The package contains functions to analyse all sentence of a utterance
Functions:
dispatching : to distribute the sentence
separ_sentence : to process the beginning of the sentence
exclama_sentence : to process exclamatively sentence
w_quest_where : to process many different type of where question
w_quest_class : to process what question about classification
w_quest_what : to process many different type of what question
w_quest_quant : to process many different type of how question
w_quest_how : to process many different type of how question
w_quest_which : to process which question
stc_start_subsentence : to process the subsentence at the beginning of the sentence
w_quest_whose : to process many different type of whose question
w_quest_whom : to process whom question
y_n_ques : to process the yes or no question from of a sentence
other_sentence : to process the other from of a sentence
sentences_analyzer : is the basic function of parsing
"""
from dialogs.sentence import *
from dialogs.sentence_types import *
from dialogs.resources_manager import ResourcePool
from . import analyse_nominal_group
from . import analyse_nominal_structure
from . import analyse_verb
from . import analyse_verbal_structure
from . import other_functions
from . import preprocessing
def dispatching(sentence):
    """
    distributes the sentence according to:
    Their functionality and their type
    Input=sentence (list of tokens)        Output=list of class Sentence

    Each row of ResourcePool().sentence_starts is [start_word,
    functionality_code, question_subtype]; x[1] selects the broad kind of
    sentence and x[2] the w-question subtype.
    """
    if len(sentence) > 0:
        #For ending dialogue
        if sentence[0].endswith('bye'):
            return [Sentence(END, '', [], [])]
        #When others
        for x in ResourcePool().sentence_starts:
            #If we find a knowing case
            if sentence[0] == x[0]:
                #For a greeting / dialogue start
                if x[1] == '1':
                    return [Sentence(START, '', [], [])]
                #It's a w_question or subsentence
                if x[1] == '2':
                    #If there is which or no nominal group it is a question
                    if sentence[0] != 'which' and analyse_nominal_group.find_sn_pos(sentence, 1) != []:
                        #Here we have the condition of the subsentences
                        return [stc_start_subsentence(sentence)]
                    #For 'when'
                    if x[2] == '1':
                        #If we remove the first word => it becomes like y_n_question
                        return [y_n_ques(W_QUESTION, 'date', sentence[1:])]
                    #For 'where'
                    elif x[2] == '2':
                        return [w_quest_where(W_QUESTION, 'place', sentence)]
                    #For 'what'
                    elif x[2] == '3':
                        #Here we have to use a specific processing for 'type' and 'kind'
                        if sentence[1] == 'type' or sentence[1] == 'kind':
                            #We start by processing the end of the sentence like a y_n_question
                            return [w_quest_class(sentence)]
                        #For other type of 'what' question
                        else:
                            return [w_quest_what(W_QUESTION, sentence)]
                    #For 'how'
                    elif x[2] == '4':
                        if sentence[1] == 'many' or sentence[1] == 'much':
                            return [w_quest_quant(W_QUESTION, 'quantity', sentence)]
                        elif sentence[1] == 'about':
                            #We replace 'about' by 'is' to have a y_n_question
                            sentence[1] = 'is'
                            return [y_n_ques(W_QUESTION, 'invitation', sentence[1:])]
                        #For other type of 'how' question
                        else:
                            return [w_quest_how(W_QUESTION, sentence)]
                    #For 'why'
                    elif x[2] == '5':
                        return [y_n_ques(W_QUESTION, 'reason', sentence[1:])]
                    #For 'whose'
                    elif x[2] == '6':
                        return [w_quest_whose(W_QUESTION, 'owner', sentence)]
                    #For 'who'
                    elif x[2] == '7':
                        return [y_n_ques(W_QUESTION, 'people', sentence[1:])]
                    #For 'which'
                    elif x[2] == '8':
                        return [w_quest_which(W_QUESTION, 'choice', sentence[1:])]
                    #For 'to whom'
                    elif x[2] == '9':
                        return [w_quest_whom(W_QUESTION, 'people', sentence[1:])]
                #It's a y_n_question
                elif x[1] == '3':
                    return [y_n_ques(YES_NO_QUESTION, '', sentence)]
                #It's a conditional sentence
                elif x[1] == '4':
                    return [stc_start_subsentence(sentence)]
                #Agree
                elif x[1] == '5':
                    return separ_sentence(sentence, AGREEMENT)
                #Disagree
                elif x[1] == '6':
                    return separ_sentence(sentence, DISAGREEMENT)
                #Gratulation
                elif x[1] == '7':
                    return separ_sentence(sentence, GRATULATION)
                #Interjunction
                elif x[1] == '8':
                    return [exclama_sentence(sentence)]
        #For exclamatively
        if sentence[len(sentence) - 1] == '!':
            return [exclama_sentence(sentence)]
        #It's a statement or an imperative sentence
        return [other_sentence('', '', sentence)]
    #Default case: empty input
    return []
def separ_sentence(sentence, data_type):
    """
    process the beginning of the sentence
    Input=the sentence, data_type (AGREEMENT/DISAGREEMENT/GRATULATION)
    Output=list of class Sentence
    """
    #If we have good followed by another word it can be start ("good morning" etc.)
    if data_type == AGREEMENT and len(sentence) > 1 and (
            sentence[1] == 'morning' or sentence[1] == 'evening' or sentence[1] == 'afternoon'):
        sentences = [Sentence(START, '', [], [])]
    else:
        #init
        sentences = [Sentence(data_type, '', [], [])]
        for i in sentence:
            if i == ';':
                #We put the first sentence in the aim
                sentences[0].aim = " ".join(sentence[:sentence.index(i)]).rstrip('; ') + '.'
                sentence = sentence[sentence.index(i) + 1:]
                #We process the end of the sentence as a complete sentence
                sentence = preprocessing.process_and_beginning_sentence(sentence)
                sentences = sentences + dispatching(sentence)
                break
        else:
            #In this case (no ';' found), the whole sentence is the aim
            sentences[0].aim = " ".join(sentence).rstrip('. ') + '.'
    return sentences
def exclama_sentence(sentence):
    """
    process exclamatively sentence
    Input=the sentence Output=class Sentence
    """
    for i in ResourcePool().sentence_starts:
        if i[0] == sentence[0]:
            if i[1] == '0':
                #Pure interjection start (e.g. "hey ...")
                analysis = Sentence(INTERJECTION, '', [], [])
                #We recover the subject
                sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 1)
                return analysis
            elif i[1] == '2':
                #It is an exclamation sentence
                analysis = Sentence(EXCLAMATION, '', [], [])
                #We recover the subject
                sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 0)
                return analysis
    #If we have an imperative it can be forced
    analysis = other_sentence(INTERJECTION, '', sentence)
    #A verb-less interjection stays an interjection; otherwise it's imperative.
    if analysis.data_type == INTERJECTION and not analysis.sv:
        pass
    else:
        analysis.data_type = IMPERATIVE
    return analysis
def w_quest_where(type, request, stc):
    """
    process many different type of where question
    Input=type and requesting of sentence, the sentence Output=class Sentence
    """
    # A trailing 'from' (possibly followed by '?') turns the question into
    # one about the origin rather than the place.
    asks_origin = stc[-1] == 'from' or (stc[-1] == '?' and stc[-2] == 'from')
    aim = 'origin' if asks_origin else request
    # Dropping the leading 'where' leaves a yes/no-question-shaped sentence.
    return y_n_ques(type, aim, stc[1:])
def w_quest_class(sentence):
    """
    process what question about classification
    Input=sentence Output=class Sentence
    """
    # 'what type/kind of X ...' : the classified noun sits at position 4;
    # the remainder parses like a yes/no question.
    aim = 'classification' + '+' + sentence[4]
    analysis = y_n_ques(W_QUESTION, aim, sentence[5:])
    if analysis.sn and analysis.sv[0].d_obj:
        # A classification question must carry no direct object:
        # demote it to an indirect complement.
        analysis.sv[0].i_cmpl = analysis.sv[0].i_cmpl + [IndirectComplement([], analysis.sv[0].d_obj)]
        analysis.sv[0].d_obj = []
    return analysis
def w_quest_what(type, sentence):
    """
    process many different type of what question
    Input=type of sentence, the sentence and position of subject
    Output=class Sentence
    """
    aux_list = other_functions.recover_aux_list()
    if sentence[1] in aux_list:
        #We start with a processing with the function of y_n_question's case
        analysis = y_n_ques(type, 'thing', sentence[1:])
        vg = analysis.sv[0]
        #The case when we have 'happen'
        if analysis.sv[0].vrb_main[0].endswith('happen'):
            analysis.aim = 'situation'
        #The case when we have 'think'
        elif analysis.sv[0].vrb_main[0].endswith('think+of') or analysis.sv[0].vrb_main[0].endswith('think+about'):
            analysis.aim = 'opinion'
        #'like' in a NON-conditional tense => asking for a description
        elif analysis.sv[0].vrb_main[0].endswith('like') and not (analysis.sv[0].vrb_tense.endswith('conditional')):
            analysis.aim = 'description'
        #The case when we have 'do' + ing form
        elif vg.vrb_main[0].endswith('do') and \
                vg.i_cmpl != [] and \
                vg.i_cmpl[0].gn[0].adj != [] and \
                vg.i_cmpl[0].gn[0].adj[0][0].endswith('ing'):
            analysis.aim = 'explication'
    #There is a noun before the auxiliary
    else:
        #We will use the same code as the which questions
        sentence = ['the'] + sentence[1:]
        #We need to have a nominal group at the beginning
        analysis = w_quest_which(type, 'thing', sentence)
    return analysis
def w_quest_quant(type, request, sentence):
    """
    process many different type of quantity question
    Input=type and requesting of sentence, the sentence and beginning sentence list
    Output=class Sentence
    """
    for j in ResourcePool().sentence_starts:
        if sentence[2] == j[0]:
            if j[1] == '3':
                #'how many/much <aux> ...' : same shape as a y_n_question
                return y_n_ques(type, request, sentence[2:])
    #Otherwise sentence[2] is the counted noun; parse the rest as y_n_question
    analysis = y_n_ques(type, request, sentence[3:])
    #There is not sn in the sentence
    if not analysis.sn:
        analysis.sn = [NominalGroup(['a'], [sentence[2]], [], [], [])]
    else:
        #There is not direct object in the sentence: the counted noun becomes it
        analysis.sv[0].d_obj = [NominalGroup(['a'], [sentence[2]], [], [], [])]
    return analysis
def w_quest_how(type, sentence):
    """
    process many different type of how question
    Input=type of sentence, the sentence Output=class Sentence
    """
    aux_list = other_functions.recover_aux_list()
    if sentence[1] not in aux_list:
        # 'how <adverb> ...' : the second word itself is the requested aim.
        return y_n_ques(type, sentence[1], sentence[2:])
    # 'how <aux> ...' : a manner question, unless the main verb is 'like'
    # (then the speaker is asking for an opinion).
    analysis = y_n_ques(type, 'manner', sentence[1:])
    if analysis.sv[0].vrb_main[0].endswith('like'):
        analysis.aim = 'opinion'
    return analysis
def w_quest_which(type, request, sentence):
    """
    process which question
    Input=type of sentence, the sentence Output=class Sentence
    """
    #We start by finding the nominal group
    gr = preprocessing.determination_nominal_group(sentence, 0, 'of')
    #If the nominal group contain just 2 elements
    if len(gr) == 2:
        return y_n_ques(type, sentence[1], sentence[2:])
    else:
        #After the first gr if there is no nominal group
        if not analyse_nominal_group.find_sn_pos(sentence, len(gr)):
            for i in ResourcePool().sentence_starts:
                #If just after we have an a auxiliary
                if sentence[len(gr)] == i[0] and i[1] == '3':
                    #With subject => it is a yes or no question form
                    if analyse_nominal_group.find_sn_pos(sentence, len(gr) + 1):
                        analysis = y_n_ques(type, request, sentence[len(gr):])
                        #The leading nominal group becomes the direct object
                        nominal_gr = other_sentence(type, request, gr)
                        analysis.sv[0].d_obj = nominal_gr.sn
                        return analysis
            #Else it is like a statement
            return other_sentence(type, request, sentence)
        #Else if not, the first nominal group is the subject
        else:
            analysis = other_sentence(type, request, sentence[len(gr):])
            nominal_gr = other_sentence(type, request, gr)
            analysis.sv[0].d_obj = nominal_gr.sn
            return analysis
def stc_start_subsentence(sentence):
    """
    process the subsentence at the beginning of the sentence
    Input=sentence Output=class Sentence
    """
    #We have to add punctuation if there is not, so the loop below always breaks
    if sentence[len(sentence) - 1] != '.' and sentence[len(sentence) - 1] != '?' and sentence[len(sentence) - 1] != '!':
        sentence = sentence + ['.']
    #We recover the subsentence (tokens between the leading conjunction and the
    #first separator); the remainder, if any, is parsed as the main sentence
    for i in sentence:
        if i == ';' or i == '.' or i == '?' or i == '!':
            subsentence = sentence[1:sentence.index(i)]
            #We perform the 2 processing
            if sentence.index(i) != len(sentence) - 1:
                analysis = other_sentence(STATEMENT, '', sentence[sentence.index(i) + 1:])
            else:
                #No main clause: build an empty sentence to hang the subsentence on
                vg = VerbalGroup([], [], '', [], [], [], [], VerbalGroup.affirmative, [])
                analysis = Sentence('', '', [], [vg])
            break
    #We process the subsentence and attach it to the main verbal group
    analysis.sv[0].vrb_sub_sentence = analysis.sv[0].vrb_sub_sentence + dispatching(subsentence)
    if analysis.sv[0].vrb_sub_sentence:
        #Tag the last attached subsentence and record the conjunction as its aim
        analysis.sv[0].vrb_sub_sentence[len(analysis.sv[0].vrb_sub_sentence) - 1].data_type = SUBSENTENCE + '+' + analysis.sv[0].vrb_sub_sentence[len(analysis.sv[0].vrb_sub_sentence) - 1].data_type
        if sentence[0][0] == ':':
            #A ':' prefix marks an already-normalized conjunction token
            analysis.sv[0].vrb_sub_sentence[len(analysis.sv[0].vrb_sub_sentence) - 1].aim = sentence[0][1:]
        else:
            analysis.sv[0].vrb_sub_sentence[len(analysis.sv[0].vrb_sub_sentence) - 1].aim = sentence[0]
    return analysis
def w_quest_whose(type, request, sentence):
    """
    process many different type of whose question
    Input=type and requesting of sentence and the sentence
    Output=class Sentence
    """
    #init
    vg = VerbalGroup(['be'], [], '', [], [], [], [], VerbalGroup.affirmative, [])
    analysis = Sentence(type, request, [], [])
    #We replace 'whose' by 'that' to have a nominal group
    sentence[0] = 'that'
    #We recover the subject
    sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 0)
    if sentence[1] == 'not':
        #NOTE(review): a string literal is used here instead of
        #VerbalGroup.negative as elsewhere - confirm both denote the same value
        vg.state = 'negative'
    analysis.sv = [vg]
    return analysis
def w_quest_whom(type, request, sentence):
    """
    process whom question
    Input=type and requesting of sentence and the sentence
    Output=class Sentence
    """
    # A whom-question parses exactly like a yes/no question whose main
    # verb additionally takes the particle 'to'.
    analysis = y_n_ques(type, request, sentence)
    verb_group = analysis.sv[0]
    verb_group.vrb_main[0] = verb_group.vrb_main[0] + '+to'
    return analysis
def y_n_ques(type, request, sentence):
    """
    Analyse a yes/no question (auxiliary-fronted form) into a Sentence.

    :param type: sentence type constant (e.g. W_QUESTION)
    :param request: the requested information kind (e.g. 'thing')
    :param sentence: the tokenised sentence as a list of words
    :return: a Sentence with subject (sn) and verbal group (sv) filled in
    """
    #init
    vg = VerbalGroup([], [], '', [], [], [], [], VerbalGroup.affirmative, [])
    analysis = Sentence(type, request, [], [])
    modal = []
    # Keep the untouched token list so we can fully re-analyse it below if
    # the sentence turns out not to be a question after all.
    stc = sentence
    #We start with determination of probably second verb in subsentence
    sentence = other_functions.find_scd_verb_sub(sentence)
    #We have to add punctuation if there is not
    if sentence == [] or sentence[0] == '.' or sentence[0] == '?' or sentence[0] == '!':
        #We have probably the aim as an adverb
        analyse_verbal_structure.find_adv([request], vg)
        analysis.aim = 'thing'
        analysis.sv = [vg]
        return analysis
    #We recover the auxiliary
    aux = sentence[0]
    #We have to know if there is a modal
    if aux in ResourcePool().modal:
        modal = aux
    #If we have a negative form
    if sentence[1] == 'not':
        vg.state = VerbalGroup.negative
        #We remove 'not'
        sentence = sentence[:1] + sentence[2:]
    #Wrong is a noun but not followed by the determinant
    if sentence[1] == 'wrong' and request == 'thing':
        analysis.sn = [NominalGroup([], [], ['wrong'], [], [])]
        sentence = [sentence[0]] + sentence[2:]
    #In this case we have an imperative sentence
    elif analyse_nominal_group.find_sn_pos(sentence, 1) == [] and type != W_QUESTION:
        #We have to reput the 'not'
        if vg.state == VerbalGroup.negative:
            sentence = sentence[:1] + ['not'] + sentence[1:]
        return other_sentence(type, request, sentence)
    #We delete the auxiliary
    sentence = sentence[1:]
    #We have to separate the case using these, this or there
    if sentence[0] in ResourcePool().demonstrative_det and analyse_verb.infinitive([aux], 'present simple') == ['be']:
        #If we have a verb or an adverb just after (if not, we have a noun)
        if sentence[0].endswith('ed') or sentence[0].endswith('ing') or sentence[0].endswith('ly') or sentence[
            0] in ResourcePool().adverbs:
            #We recover this information and remove it
            analysis.sn = [NominalGroup([sentence[0]], [], [], [], [])]
            if sentence[0] == 'there' and aux == 'are':
                analysis.sn[0]._quantifier = 'SOME'
            sentence = sentence[1:]
    if not analysis.sn:
        #We recover the subject
        sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 0)
    # 'do' with no imperative-capable remainder: not a question, re-analyse
    # the original token list as a plain sentence.
    if aux == 'do' and not analyse_verbal_structure.can_be_imperative(sentence):
        return other_sentence('', '', stc)
    #If there is one element => it is an auxiliary => verb 'be'
    if len(sentence) == 0:
        vg.vrb_tense = analyse_verb.find_tense_statement(aux)
        vg.vrb_main = ['be']
    else:
        sentence = analyse_verbal_structure.delete_unusable_word(sentence)
        sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
        vg.vrb_tense = analyse_verb.find_tense_question(sentence, aux)
        #We process the verb
        verb = analyse_verb.find_verb_question(sentence, aux, vg.vrb_tense)
        verb_main = analyse_verb.return_verb(sentence, verb, vg.vrb_tense)
        vg.vrb_main = [other_functions.convert_to_string(verb_main)]
        #We delete the verb if the aux is not the verb 'be'
        if vg.vrb_main != ['be']:
            sentence = sentence[sentence.index(verb[0]) + len(verb_main):]
        elif sentence[0] == 'be':
            sentence = sentence[1:]
        #Here we have special processing for different cases
        if sentence:
            #For 'what' descrition case
            if sentence[0] == 'like' and aux != 'would':
                vg.vrb_main = ['like']
                sentence = sentence[1:]
            #For 'how' questions with often
            elif sentence[0].endswith('ing') and not (sentence[0].endswith('thing')):
                vg.vrb_main[0] = vg.vrb_main[0] + '+' + sentence[0]
                sentence = sentence[1:]
    #We recover the conjunctive subsentence
    sentence = analyse_verbal_structure.process_conjunctive_sub(sentence, vg)
    #It verifies if there is a secondary verb
    sec_vrb = analyse_verbal_structure.find_scd_vrb(sentence)
    if sec_vrb:
        sentence = analyse_verbal_structure.process_scd_sentence(sentence, vg, sec_vrb)
    #We recover the subsentence
    sentence = analyse_verbal_structure.process_subsentence(sentence, vg)
    #Process relative changes
    sentence = analyse_verbal_structure.correct_i_compl(sentence, vg.vrb_main[0])
    sentence = analyse_verbal_structure.process_compare(sentence, vg)
    sentence = analyse_nominal_group.find_plural(sentence)
    #We recover the direct, indirect complement and the adverbial
    sentence = analyse_verbal_structure.recover_obj_iobj(sentence, vg)
    #We have to take off adverbs form the sentence
    sentence = analyse_verbal_structure.find_adv(sentence, vg)
    #We perform the processing with the modal
    if modal:
        vg.vrb_main = [modal + '+' + vg.vrb_main[0]]
    #If there is a forgotten
    sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
    #In case there is a state verb followed by an adjective
    sentence = analyse_verbal_structure.state_adjective(sentence, vg)
    #We have to correct the mistake of the subject
    for p in ResourcePool().demonstrative_det:
        if analysis.sn and analysis.sn[0].det == [p] and analysis.sn[0].noun == []:
            # NOTE(review): this condition can never hold — sentence[0] cannot
            # equal '.', '?' and '!' at the same time — so the noun fix-up
            # below appears to be dead code. 'and' was probably meant to be a
            # chain of '!=' checks; confirm before changing.
            if sentence != [0] and sentence[0] == '.' and sentence[0] == '?' and sentence[0] == '!':
                if sentence[0] in ResourcePool().proposals:
                    pass
                else:
                    analysis.sn[0].noun = [sentence[0]]
                    sentence = sentence[1:]
                    sentence = analyse_verbal_structure.state_adjective(sentence, vg)
    vg = analyse_verbal_structure.DOC_to_IOC(vg)
    # Repeatedly fold trailing tokens into nominal groups / complements until
    # no further progress is made.
    while len(sentence) > 1:
        stc = analyse_verbal_structure.create_nom_gr(sentence, request)
        #We recover the direct, indirect complement and the adverbial
        stc = analyse_verbal_structure.recover_obj_iobj(stc, vg)
        if stc == sentence:
            #We leave the loop
            break
        else:
            sentence = stc
    vg = analyse_verbal_structure.refine_indirect_complement(vg)
    vg = analyse_verbal_structure.refine_subsentence(vg)
    vg = analyse_verbal_structure.DOC_to_IOC(vg)
    analysis.sv = [vg]
    return analysis
def other_sentence(type, request, sentence):
    """
    Analyse the remaining sentence forms (statements, relatives, imperatives).

    :param type: sentence type constant ('' means: infer statement/imperative)
    :param request: the requested information kind
    :param sentence: the tokenised sentence as a list of words
    :return: a Sentence with subject (sn) and verbal group (sv) filled in,
             or [] for an empty input
    """
    #init
    vg = VerbalGroup([], [], '', [], [], [], [], VerbalGroup.affirmative, [])
    analysis = Sentence(type, request, [], [])
    modal = []
    if not sentence:
        return []
    #We have to add punctuation if there is not
    if sentence[len(sentence) - 1] not in ['.', '?', '!']:
        sentence = sentence + ['.']
    #We start with determination of probably second verb in subsentence
    sentence = other_functions.find_scd_verb_sub(sentence)
    #We search the subject
    sbj = analyse_nominal_group.find_sn_pos(sentence, 0)
    # A subject (or relative type) means statement processing; otherwise the
    # sentence is treated as an imperative in the else branch below.
    if sbj != [] or type == RELATIVE:
        #If we haven't a data type => it is a statement
        if type == '':
            analysis.data_type = STATEMENT
        #We have to separate the case using these, this or there
        if sentence[0] in ResourcePool().demonstrative_det and analyse_verb.infinitive([sentence[1]],
                                                                                       'present simple') == ['be']:
            #We recover this information and remove it
            analysis.sn = [NominalGroup([sentence[0]], [], [], [], [])]
            if sentence[0] == 'there' and sentence[1] == 'are':
                analysis.sn[0]._quantifier = 'SOME'
            sentence = sentence[1:]
        if not analysis.sn:
            #We recover the subject
            sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 0)
        #End of the sentence? -> nominal sentence
        if sentence == [] or sentence[0] in ['.', '!', '?']:
            analysis.sv = []
            return analysis
        #We have to know if there is a modal
        if sentence[0] in ResourcePool().modal:
            modal = sentence[0]
            if modal == 'can' or modal == 'must' or modal == 'shall' or modal == 'may':
                sentence = sentence[1:]
        #We must take into account all possible cases to recover the sentence's tense
        if len(sentence) > 1 and sentence[1] == 'not':
            vg.state = VerbalGroup.negative
            #Before the negative form we have an auxiliary for the negation
            if sentence[0] == 'do' or sentence[0] == 'does' or sentence[0] == 'did':
                vg.vrb_tense = analyse_verb.find_tense_statement([sentence[0]])
                sentence = sentence[2:]
                sentence = analyse_verbal_structure.delete_unusable_word(sentence)
                sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
            #There is a modal
            elif modal:
                sentence = [sentence[0]] + sentence[2:]
                sentence = analyse_verbal_structure.delete_unusable_word(sentence)
                sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
                vg.vrb_tense = analyse_verb.find_tense_statement(sentence)
            else:
                #We remove 'not' and find the tense
                sentence = sentence[:1] + sentence[2:]
                sentence = analyse_verbal_structure.delete_unusable_word(sentence)
                sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
                vg.vrb_tense = analyse_verb.find_tense_statement(sentence)
        #For the affirmative processing
        else:
            if sentence[0] == 'not':
                vg.state = VerbalGroup.negative
                sentence = sentence[1:]
            sentence = analyse_verbal_structure.delete_unusable_word(sentence)
            sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
            vg.vrb_tense = analyse_verb.find_tense_statement(sentence)
        verb = analyse_verb.find_verb_statement(sentence, vg.vrb_tense)
        verb_main = analyse_verb.return_verb(sentence, verb, vg.vrb_tense)
        vg.vrb_main = [other_functions.convert_to_string(verb_main)]
        #We delete the verb
        sentence = sentence[sentence.index(verb[0]) + len(verb_main):]
        #We perform the processing with the modal
        if modal:
            vg.vrb_main = [modal + '+' + vg.vrb_main[0]]
    #This is a imperative form
    else:
        #re-init
        analysis.data_type = IMPERATIVE
        vg.vrb_tense = 'present simple'
        if sentence[0] in ResourcePool().proposals:
            sentence = ['.'] + sentence
        #Negative form
        if sentence[1] == 'not':
            sentence = sentence[sentence.index('not') + 1:]
            sentence = analyse_verbal_structure.delete_unusable_word(sentence)
            sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
            vg.state = VerbalGroup.negative
        else:
            sentence = analyse_verbal_structure.delete_unusable_word(sentence)
            sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
        #We process the verb
        verb = [sentence[0]]
        verb_main = analyse_verb.return_verb(sentence, verb, vg.vrb_tense)
        vg.vrb_main = [other_functions.convert_to_string(verb_main)]
        #We delete the verb
        sentence = sentence[sentence.index(verb[0]) + len(verb_main):]
    # A trailing question mark reclassifies the sentence as a yes/no question.
    if sentence and sentence[-1] == '?':
        analysis.data_type = YES_NO_QUESTION
    #We recover the conjunctive subsentence
    sentence = analyse_verbal_structure.process_conjunctive_sub(sentence, vg)
    #It verifies if there is a secondary verb
    sec_vrb = analyse_verbal_structure.find_scd_vrb(sentence)
    if sec_vrb:
        sentence = analyse_verbal_structure.process_scd_sentence(sentence, vg, sec_vrb)
    #We recover the subsentence
    sentence = analyse_verbal_structure.process_subsentence(sentence, vg)
    if sentence != [] and vg.vrb_main != []:
        #Process relative changes
        sentence = analyse_verbal_structure.correct_i_compl(sentence, vg.vrb_main[0])
        sentence = analyse_verbal_structure.process_compare(sentence, vg)
        sentence = analyse_nominal_group.find_plural(sentence)
        #We recover the direct, indirect complement and the adverbial
        sentence = analyse_verbal_structure.recover_obj_iobj(sentence, vg)
        #We have to take off abverbs form the sentence
        sentence = analyse_verbal_structure.find_adv(sentence, vg)
        #In case there is a state verb followed by an adjective
        sentence = analyse_verbal_structure.state_adjective(sentence, vg)
        #If there is a forgotten
        sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
        vg = analyse_verbal_structure.DOC_to_IOC(vg)
        # Fold trailing tokens into complements until no further progress.
        while len(sentence) > 1:
            stc = analyse_verbal_structure.create_nom_gr(sentence, request)
            #We recover the direct, indirect complement and the adverbial
            stc = analyse_verbal_structure.recover_obj_iobj(stc, vg)
            if stc == sentence:
                #We leave the loop
                break
            else:
                sentence = stc
        vg = analyse_verbal_structure.refine_indirect_complement(vg)
        vg = analyse_verbal_structure.refine_subsentence(vg)
        vg = analyse_verbal_structure.DOC_to_IOC(vg)
    analysis.sv = [vg]
    return analysis
def sentences_analyzer(sentences):
    """
    Entry point of the parser: analyse a list of tokenised sentences.

    :param sentences: list of tokenised sentences (each a list of words)
    :return: list of Sentence objects, post-processed for interpretation
    """
    #init
    class_sentence_list = []
    nom_gr = []
    y = 0  # NOTE(review): unused; kept as-is.
    #We process all sentences of the list
    for i in sentences:
        if i:
            #We have to add punctuation if there is not
            if i[-1] not in ['.', '?', '!']:
                i = i + ['.']
            class_sentence_list = class_sentence_list + dispatching(i)
    #Add some information if there is an interjection
    for s in class_sentence_list:
        #If there is an interjection we have to take the nominal group
        if s.data_type == INTERJECTION:
            nom_gr = s.sn
        #If there is an imperative sentence, we put the nominal group of interjection in the subject
        if nom_gr != [] and s.data_type == IMPERATIVE:
            s.sn = s.sn + nom_gr
    #To simplify the interpretation, we have to perform some changes
    for k in class_sentence_list:
        #If subject is 'there', we change it by the object
        if k.sn != [] and k.sn[0].det == ['there']:
            k.sn = k.sv[0].d_obj
            k.sv[0].d_obj = []
        #If sentence is empty, we take off the verb
        if k.sv != [] and (k.sv[0].vrb_main == ['.'] or k.sv[0].vrb_main == ['?'] or k.sv[0].vrb_main == ['!']):
            k.sv[0].vrb_main = []
            if k.data_type == IMPERATIVE:
                k.data_type = STATEMENT
        #If we have imperative with verb 'see' => end ("see you" closes the dialogue)
        if k.data_type == IMPERATIVE and \
                k.sv[0].vrb_main == ['see'] and \
                len(k.sv[0].d_obj) > 0 and \
                k.sv[0].d_obj[0].noun == ['you']:
            k.data_type = END
            k.aim = ''
            k.sv = []
            k.sn = []
    return class_sentence_list
|
nilq/baby-python
|
python
|
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
#
#
# @param head ListNode类
# @param k int整型
# @return ListNode类
#
class Solution:
    def reverseKGroup(self, head, k):
        """Reverse a linked list k nodes at a time.

        A trailing group shorter than k is left in its original order.
        Returns the new head of the list.
        """
        # Probe ahead: if fewer than k nodes remain, keep this tail as-is.
        probe = head
        for _ in range(k):
            if not probe:
                return head
            probe = probe.next
        # Reverse the nodes in [head, probe) in place.
        prev, node = None, head
        while node is not probe:
            node.next, prev, node = prev, node, node.next
        # head is now the tail of the reversed group; attach the rest.
        head.next = self.reverseKGroup(probe, k)
        return prev
|
nilq/baby-python
|
python
|
from .bignet import BigHouseModel
from .goal import BigGoalHouseModel, AuxiliaryBigGoalHouseModel
|
nilq/baby-python
|
python
|
from modules import util
from modules.util import Failed
# Module-level logger shared with the rest of the project.
logger = util.logger

# Builder names this module can service, and the S3 endpoint hosting the
# StevenLu popular-movies JSON feed.
builders = ["stevenlu_popular"]

base_url = "https://s3.amazonaws.com/popular-movies/movies.json"
class StevenLu:
    """Builder for StevenLu popular-movie lists fetched from S3."""

    def __init__(self, config):
        self.config = config

    def get_stevenlu_ids(self, method):
        """Return (imdb_id, "imdb") pairs for the given builder method.

        Raises Failed for any method other than "stevenlu_popular".
        """
        if method != "stevenlu_popular":
            raise Failed(f"StevenLu Error: Method {method} not supported")
        logger.info(f"Processing StevenLu Popular Movies")
        return [(movie["imdb_id"], "imdb") for movie in self.config.get_json(base_url)]
|
nilq/baby-python
|
python
|
# used as reference version, for comparison/correctness
def calcU(W):
    """Winograd F(4x4, 3x3) filter transform (reference implementation).

    :param W: filter bank laid out as [Ci, kH, kW, Co] with kH == kW == 3
    :return: U2 of shape (6, 6, Co, Ci), layout [xi, nu, co, ci], where
             U2[:, :, co, ci] = G . W[ci, :, :, co] . G^T
    """
    Ci = W.shape[0]
    kH = W.shape[1]
    kW = W.shape[2]
    Co = W.shape[3]
    # Winograd G matrix for output tile m=4, kernel r=3 (Lavin & Gray,
    # "Fast Algorithms for Convolutional Neural Networks").
    G = np.array([[1/4,0,0],
        [-1/6,-1/6,-1/6],
        [-1/6,1/6,-1/6],
        [1/24,1/12,1/6],
        [1/24,-1/12,1/6],
        [0,0,1]], dtype=np.float32)
    timecheck('allocaed U')
    # Single vectorized transform over all (co, ci) pairs instead of a
    # Python double loop of tiny 6x3 / 6x6 matmuls:
    #   U2[x, y, co, ci] = sum_{h,w} G[x,h] * W[ci,h,w,co] * G[y,w]
    U2 = np.einsum('xh,chwo,yw->xyoc', G, W, G).astype(np.float32)
    timecheck('calced U2')
    # layout:
    # [xi, nu, co, ci]
    return U2
def calcV(I):
    """Winograd F(4x4, 3x3) input transform (reference implementation).

    :param I: input images laid out as [Ci, iH, iW, N]; one pixel of
              zero-padding is applied implicitly on each border
    :return: V2 of shape (N, 6, 6, Ci, tiles, tiles) where each 6x6 slice is
             BT . Ipadded . BT^T for the corresponding 4x4 output tile
    """
    Ifull = I
    Ci = I.shape[0]
    iH = I.shape[1]
    iW = I.shape[2]
    N = I.shape[3]
    tiles = iW // 4
    oH = iH
    oW = iW
    padH = 1
    padW = 1
    # Winograd BT matrix for m=4, r=3.
    BT = np.array([[4,0,-5,0,1,0],
        [0,-4,-4,1,1,0],
        [0,4,-4,-1,1,0],
        [0,-2,-1,2,1,0],
        [0,2,-1,-2,1,0],
        [0,4,0,-5,0,1]], dtype=np.float32)
    V2 = np.zeros((N, 6, 6, Ci, tiles, tiles), dtype=np.float32)
    timecheck('allocaed V2')
    for n in range(N):
        V = np.zeros((6, 6), dtype=np.float32) # transformed image
        Vtmp = np.zeros((6,6), dtype=np.float32)
        for th in range(tiles):
            # Each tile reads a 6x6 window starting one pixel before the 4x4
            # output tile; the *trunc/*offset values clip the window at the
            # image borders so the uncovered rim stays zero (the padding).
            hstart = -1 + 4 * th
            hend = hstart + 6 - 1
            hstarttrunc = max(0, hstart)
            hendtrunc = min(hend, iH - 1)
            hstartoffset = hstarttrunc - hstart
            hendoffset = hendtrunc - hstart
            for tw in range(tiles):
                wstart = -1 + 4 * tw
                wend = wstart + 6 - 1
                wstarttrunc = max(0, wstart)
                wendtrunc = min(wend, iW - 1)
                wstartoffset = wstarttrunc - wstart
                wendoffset = wendtrunc - wstart
                Ipadded = np.zeros((6, 6), dtype=np.float32)
                for ci in range(Ci):
                    Ipadded[hstartoffset:hendoffset + 1,wstartoffset:wendoffset + 1] = Ifull[ci,hstarttrunc:hendtrunc+1,wstarttrunc:wendtrunc+1,n]
                    I = Ipadded
                    #for i in range(6):
                        #Vtmp[0][i] = + 4 * I[0][i] - 5 * I[2][i] + I[4][i]
                        #Vtmp[1][i] = - 4 * I[1][i] - 4 * I[2][i] + I[3][i] + I[4][i]
                        #Vtmp[2][i] = + 4 * I[1][i] - 4 * I[2][i] - I[3][i] + I[4][i]
                        #Vtmp[3][i] = - 2 * I[1][i] - I[2][i] + 2 * I[3][i] + I[4][i]
                        #Vtmp[4][i] = + 2 * I[1][i] - I[2][i] - 2 * I[3][i] + I[4][i]
                        #Vtmp[5][i] = + 4 * I[1][i] - 5 * I[3][i] + I[5][i]
                    Vtmp = BT.dot(I)
                    # each i is a row of V
                    #for i in range(6):
                        #V[i][0] = + 4 * Vtmp[i][0] - 5 * Vtmp[i][2] + Vtmp[i][4]
                        #V[i][1] = - 4 * Vtmp[i][1] - 4 * Vtmp[i][2] + Vtmp[i][3] + Vtmp[i][4]
                        #V[i][2] = + 4 * Vtmp[i][1] - 4 * Vtmp[i][2] - Vtmp[i][3] + Vtmp[i][4]
                        #V[i][3] = - 2 * Vtmp[i][1] - Vtmp[i][2] + 2 * Vtmp[i][3] + Vtmp[i][4]
                        #V[i][4] = + 2 * Vtmp[i][1] - Vtmp[i][2] - 2 * Vtmp[i][3] + Vtmp[i][4]
                        #V[i][5] = + 4 * Vtmp[i][1] - 5 * Vtmp[i][3] + Vtmp[i][5]
                    V2[n, :,:,ci,th,tw] = Vtmp.dot(BT.T)
    timecheck('calced V')
    return V2
def calcM(N, Co, U, V):
    """Winograd element-wise multiply stage (reference implementation).

    Contracts the transformed filters U with the transformed inputs V over the
    Ci axis, independently for each of the 6x6 (xi, nu) positions.

    :param N: number of images actually used (V is padded to GN * 32)
    :param Co: number of output channels actually used (U is padded to GK * 32)
    :param U: transformed filters, layout [xi, nu, GK, Ci, 32]
    :param V: transformed inputs, layout [xi, nu, GN, th, tw, Ci, 32]
    :return: M of shape (N, Co, tiles, tiles, 6, 6)
    """
    GK = U.shape[2]
    Ci = U.shape[3]
    tiles = V.shape[3]
    GN = V.shape[2]
    print('calcM cpu GN', GN, 'N', N)
    # Flatten the (group, lane) axes into a single channel/image axis and
    # drop the padding beyond Co / N.
    U = U.transpose(0,1,2,4,3).reshape(6,6,GK * 32,Ci)[:,:,:Co,:]
    V = V.transpose(
        2,6,0,1,5,3,4).reshape(
        GN * 32, 6, 6, Ci, tiles, tiles)[:N]
    M = np.zeros((N, Co, tiles, tiles, 6, 6), dtype=np.float32)
    for n in range(N):
        for xi in range(6):
            for nu in range(6):
                # Contract over Ci: (Co, Ci) . (Ci, tiles, tiles)
                M[n,:, :, :, xi, nu] = np.tensordot(U[xi,nu], V[n,xi,nu], 1)
    timecheck('calced M')
    return M
def calcM_blocked_l2(U, V, axes):
    """Innermost block product: contract U with V along *axes* via tensordot."""
    return np.tensordot(U, V, axes)
def calcM_blocked_l1(N, Co, U, V):
    """Blocked variant of the Winograd multiply stage.

    Processes 32x32 (co, n) blocks at a time, calling calcM_blocked_l2 for
    each (mh, mw) position; produces the same result layout as calcM.

    :param N: number of images actually used
    :param Co: number of output channels actually used
    :param U: transformed filters, layout [xi, nu, GK, Ci, 32] (per block: [Ci, co%32])
    :param V: transformed inputs, layout [xi, nu, GN, th, tw, Ci, 32] (per block: [Ci, n%32])
    :return: M of shape (N, Co, tiles, tiles, 6, 6)
    """
    GK = U.shape[2]
    Ci = U.shape[3]
    tiles = V.shape[3]
    GN = V.shape[2]
    M = np.zeros((GN, 32, GK, 32, tiles, tiles, 6, 6), dtype=np.float32)
    # new layouts:
    # U
    # [xi, nu, co // 32, ci, co % 32]
    # V
    # [xi, nu, n // 32, th, tw, ci, n % 32]
    # each block:
    # U [ci, co % 32]
    # V [ci, ni % 32]
    N_blocksize = 32
    ci_blocksize = 32
    Co_blocksize = 32
    printed_size = False  # only log block shapes once
    for Co_block in range(GK):
        U_block = U[:,:,Co_block]
        for N_block in range(GN):
            for th in range(tiles):
                for tw in range(tiles):
                    V_block = V[:, :, N_block, th, tw]
                    M_block = M[N_block, :, Co_block, :, th, tw]
                    for mh in range(6):
                        for mw in range(6):
                            left = U_block[mh,mw]
                            right = V_block[mh,mw]
                            if not printed_size:
                                printed_size = True
                                print('left.shape', left.shape, 'right.shape', right.shape)
                            # Contract over Ci; result is (co%32, n%32), so
                            # transpose into the (n%32, co%32) destination.
                            src = calcM_blocked_l2(left, right, ([0], [0]))
                            dst = M_block[:, :, mh, mw]
                            dst[:] = src.T
    # Merge (group, lane) axes and drop the padding beyond N / Co.
    M = M.reshape(GN * 32, GK * 32, tiles, tiles, 6, 6)
    M = M[:N, :Co]
    timecheck('calced M')
    return M
def calcO(M):
    """Winograd inverse (output) transform: AT . M . AT^T per tile.

    :param M: multiply-stage output, shape (N, Co, tiles, tiles, 6, 6)
    :return: output images of shape (Co, oH, oW, N) with oH == oW == tiles * 4
    """
    N = M.shape[0]
    Co = M.shape[1]
    tiles = M.shape[2]
    oH = tiles * 4 # is this always true? anyway, it's true for now...
    oW = tiles * 4
    O = np.zeros((Co, oH, oW, N), dtype=np.float32)
    Mfull = M
    Ofull = O
    # Winograd AT matrix for m=4, r=3: maps each 6x6 tile to a 4x4 output.
    AT = np.array([[1,1,1,1,1,0],
        [0,1,-1,2,-2,0],
        [0,1,1,4,4,0],
        [0,1,-1,8,-8,1]], dtype=np.float32)
    timecheck('allocated AT')
    # inverse transform
    Otmp = np.zeros((4, 6), dtype=np.float32)
    for n in range(N):
        for co in range(Co):
            for th in range(tiles):
                for tw in range(tiles):
                    # O is a writable 4x4 view into the output image.
                    O = Ofull[co,th * 4:(th+1)*4,tw*4:(tw+1)*4,n]
                    M = Mfull[n, co, th, tw]
                    #for i in range(6):
                        #Otmp[0][i] = M[0][i] + M[1][i] + M[2][i] + M[3][i] + M[4][i]
                        #Otmp[1][i] = + M[1][i] - M[2][i] + 2 * M[3][i] - 2 * M[4][i]
                        #Otmp[2][i] = + M[1][i] + M[2][i] + 4 * M[3][i] + 4 * M[4][i]
                        #Otmp[3][i] = + M[1][i] - M[2][i] + 8 * M[3][i] - 8 * M[4][i] + M[5][i]
                    #print('AT.shape', AT.shape, 'M.shape', M.shape)
                    Otmp = AT.dot(M)
                    #for i in range(4):
                        #O[i][0] = Otmp[i][0] + Otmp[i][1] + Otmp[i][2] + Otmp[i][3] + Otmp[i][4]
                        #O[i][1] = + Otmp[i][1] - Otmp[i][2] + 2 * Otmp[i][3] - 2 * Otmp[i][4]
                        #O[i][2] = + Otmp[i][1] + Otmp[i][2] + 4 * Otmp[i][3] + 4 * Otmp[i][4]
                        #O[i][3] = + Otmp[i][1] - Otmp[i][2] + 8 * Otmp[i][3] - 8 * Otmp[i][4] + Otmp[i][5]
                    #print('O.shape', O.shape, 'Otmp.shape', Otmp.shape, 'AT.T.shape', AT.T.shape)
                    O[:] = Otmp.dot(AT.T)
    timecheck('calced O')
    return Ofull
|
nilq/baby-python
|
python
|
from nose.tools import eq_
from amo.tests import app_factory
class DynamicBoolFieldsTestMixin():
    """Shared assertions for models made of dynamic has_* boolean fields.

    Inheriting test cases must call super().setUp() and then fill in
    self.model, self.related_name, self.BOOL_DICT, self.flags and
    self.expected. (Python 2 code: note the `unicode` builtin below.)
    """

    def setUp(self):
        """
        Create an instance of the DynamicBoolFields model and call super
        on the inheriting setUp.
        (e.g. RatingDescriptors.objects.create(addon=self.app))
        """
        self.app = app_factory()
        self.model = None
        self.related_name = ''  # Related name of the bool table on the Webapp.
        self.BOOL_DICT = []
        self.flags = []  # Flag names.
        self.expected = []  # Translation names.

    def _get_related_bool_obj(self):
        # The bool table hangs off the app under self.related_name.
        return getattr(self.app, self.related_name)

    def _flag(self):
        """Flag app with a handful of flags for testing."""
        self._get_related_bool_obj().update(
            **dict(('has_%s' % f.lower(), True) for f in self.flags))

    def _check(self, obj=None):
        """Assert every has_* field matches membership in self.flags.

        *obj* may be the model instance itself or its to_dict() form.
        """
        if not obj:
            obj = self._get_related_bool_obj()
        for bool_name in self.BOOL_DICT:
            field = 'has_%s' % bool_name.lower()
            value = bool_name in self.flags
            if isinstance(obj, dict):
                eq_(obj[field], value,
                    u'Unexpected value for field: %s' % field)
            else:
                eq_(getattr(obj, field), value,
                    u'Unexpected value for field: %s' % field)

    def to_unicode(self, items):
        """
        Force unicode evaluation of lazy items in the passed list, for set
        comparison to a list of already-evaluated unicode strings.
        """
        return [unicode(i) for i in items]

    def test_bools_set(self):
        self._flag()
        self._check()

    def test_to_dict(self):
        self._flag()
        self._check(self._get_related_bool_obj().to_dict())

    def test_default_false(self):
        # A freshly constructed row must default every has_* field to False.
        obj = self.model(addon=self.app)
        eq_(getattr(obj, 'has_%s' % self.flags[0].lower()), False)
|
nilq/baby-python
|
python
|
import sys, json, os
# Trello board id of the episode-planning board all lookups run against.
BOARD_ID='Os1ByyJc'
def read_auth_info():
    """Load the Trello API key and token from trello-auth.json beside this script.

    Writes an error to stderr and exits if the file is missing or either
    credential is absent/blank. Returns a (key, token) tuple.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    auth_file = script_dir + "/trello-auth.json"
    if not os.path.exists(auth_file):
        sys.stderr.write("Cannot access Trello: Missing {}\n".format(auth_file))
        sys.exit(1)
    with open(auth_file) as f:
        auth = json.loads(f.read())
    # Validate both credentials, in order, with the same diagnostics.
    for field in ('key', 'token'):
        if not auth.get(field, '').strip():
            sys.stderr.write("Cannot access Trello: Missing '{}' from {}\n".format(field, auth_file))
            sys.exit(1)
    return auth['key'], auth['token']
def set_up():
    """Parse the episode number from argv and load the Trello credentials.

    Returns (episode_number, key, token); exits with a usage message when
    the script is not called with exactly one argument.
    """
    if len(sys.argv) != 2:
        sys.stderr.write("Usage: {} <episode-number>\n".format(sys.argv[0]))
        sys.exit(1)
    key, token = read_auth_info()
    return sys.argv[1], key, token
def get_cards(key, token, episode_number):
    """Fetch every card on the Trello list for *episode_number*.

    Exits with an error on stderr when no matching list exists.
    """
    episode_list = find_episode_list_id(key, token, episode_number)
    if not episode_list:
        sys.stderr.write("Could not find Trello list for episode {}\n".format(episode_number))
        sys.exit(1)
    cards_url = 'https://api.trello.com/1/lists/{}/cards?key={}&token={}'.format(episode_list, key, token)
    return get_json(cards_url)
def get_cards_by_label(key, token, episode_number, label_to_find):
    """Return the episode's cards that carry a label named *label_to_find*."""
    matches = []
    for card in get_cards(key, token, episode_number):
        label_names = [label['name'] for label in card['labels']]
        if label_to_find in label_names:
            matches.append(card)
    return matches
def get_question_cards(key, token, episode_number):
    """Return the episode's cards labelled 'question'."""
    return get_cards_by_label(key, token, episode_number, label_to_find='question')
def get_show_notes(key, token, episode_number):
    """Return the description of the episode's 'notes' card, or '' if absent.

    Assumes there is at most a single notes card per episode.
    """
    notes_cards = get_cards_by_label(key, token, episode_number, label_to_find='notes')
    if not notes_cards:
        return ''
    return notes_cards[0]['desc']
def find_episode_list_id(key, token, episode_number):
    """Return the id of the board list named 'Episode <episode_number> ...'.

    Returns None when no list matches.
    """
    lists = get_json('https://api.trello.com/1/boards/{}/lists?key={}&token={}'.format(BOARD_ID, key, token))
    prefix = "episode {}".format(episode_number)
    for lst in lists:
        name = lst['name'].lower()
        # Fixes the old FIXME: a bare startswith would let episode 100 match
        # "Episode 1000 ...". Require the character right after the number
        # (if any) to be a non-digit so the number matches exactly.
        if name.startswith(prefix) and (len(name) == len(prefix) or not name[len(prefix)].isdigit()):
            return lst['id']
def get_json(url):
    """GET *url* and return the decoded JSON body.

    Exits the process with a diagnostic on stderr for HTTP errors or for a
    non-JSON response body.
    """
    import requests
    response = requests.get(url)
    if response.status_code != 200:
        sys.stderr.write("Got error from Trello API. HTTP status code: {}, response content: {}\n".format(response.status_code, response.content))
        sys.exit(1)
    try:
        return json.loads(response.content)
    except json.decoder.JSONDecodeError as e:
        # BUG FIX: this used to call sys.stderr(...) — invoking the stream
        # object itself — which raised TypeError and masked the real error.
        sys.stderr.write("Invalid JSON returned from Trello API: {}, JSON: {}\n".format(e, response.content))
        sys.exit(1)
|
nilq/baby-python
|
python
|
import os

# Environment-variable variant of the connection settings, kept for reference
# (this is a string literal, effectively disabled).
'''
user = os.environ['POSTGRES_USER']
password = os.environ['POSTGRES_PASSWORD']
host = os.environ['POSTGRES_HOST']
database = os.environ['POSTGRES_DB']
port = os.environ['POSTGRES_PORT']
'''
# Hard-coded local development credentials.
# NOTE(review): committed credentials are a security risk outside local dev;
# prefer the environment-variable block above before deploying.
user = 'test'
password = 'password'
host = 'localhost'
database = 'example'
port = '5432'

# SQLAlchemy-style PostgreSQL connection URI using the psycopg2 driver.
DATABASE_CONNECTION_URI = f'postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}'
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from collections import OrderedDict
from datetime import timedelta
from itertools import chain
import datetime
from django.urls import reverse
from django.template.loader import render_to_string
from django.utils.timesince import timesince
from math import ceil
from casexml.apps.stock.models import StockTransaction
from corehq.apps.es import UserES
from corehq.apps.domain.models import Domain
from corehq.apps.commtrack.models import StockState
from corehq.apps.reports.commtrack.const import STOCK_SECTION_TYPE
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.graph_models import Axis
from corehq.apps.users.models import WebUser
from custom.common import ALL_OPTION
from custom.ewsghana.filters import ProductByProgramFilter, EWSDateFilter, EWSRestrictionLocationFilter
from custom.ewsghana.models import FacilityInCharge, EWSExtension
from custom.ewsghana.reports import EWSData, MultiReport, EWSLineChart, ProductSelectionPane
from custom.ewsghana.utils import has_input_stock_permissions, ews_date_format
from dimagi.utils.couch.database import iter_docs
from memoized import memoized
from django.utils.translation import ugettext as _
from corehq.apps.locations.dbaccessors import get_users_by_location_id
from corehq.apps.locations.models import get_location, SQLLocation
from six.moves import range
import six
class StockLevelsLegend(EWSData):
    """Static legend table mapping stock-status icons to their meaning."""
    title = 'Legend'
    slug = 'legend'
    show_table = True

    @property
    def headers(self):
        columns = [
            DataTablesColumn(_('Icon')),
            DataTablesColumn(_('Stock status')),
        ]
        return DataTablesHeader(*columns)

    @property
    def rows(self):
        # (icon classes, colour, status label) triples rendered as HTML spans.
        legend = [
            ('fa fa-arrow-up', 'purple', 'Overstock'),
            ('fa fa-check', 'green', 'Adequate'),
            ('fa fa-exclamation-triangle', 'orange', 'Low'),
            ('fa fa-remove', 'red', 'Stockout'),
        ]
        return [['<span class="{}" style="color:{}"/>'.format(icon, color), status]
                for icon, color, status in legend]
class FacilityReportData(EWSData):
    """Per-facility commodity table: stock levels, consumption and thresholds."""
    slug = 'facility_report'
    show_table = True
    use_datatables = True

    @property
    def title(self):
        return 'Facility Report - %s' % SQLLocation.objects.get(location_id=self.config['location_id']).name

    @property
    def headers(self):
        return DataTablesHeader(*[
            DataTablesColumn(_('Commodity')),
            DataTablesColumn(_('Months of Stock')),
            DataTablesColumn(_('Stockout Duration')),
            DataTablesColumn(_('Current Stock')),
            DataTablesColumn(_('Monthly Consumption')),
            DataTablesColumn(_('Reorder Level')),
            DataTablesColumn(_('Maximum Level')),
            DataTablesColumn(_('Date of Last Report'))
        ])

    def get_prod_data(self):
        """Yield one display dict per product stocked at the facility."""
        def get_months_until_stockout_icon(value, loc):
            # Icon reflects where *value* falls relative to the location
            # type's under/overstock thresholds.
            if float(value) == 0.0:
                return '%s <span class="fa fa-remove" style="color:red"/>' % value
            elif float(value) <= loc.location_type.understock_threshold:
                return '%s <span class="fa fa-exclamation-triangle" style="color:orange"/>' % value
            elif loc.location_type.understock_threshold < float(value) < loc.location_type.overstock_threshold:
                return '%s <span class="fa fa-check" style="color:green"/>' % value
            elif float(value) >= loc.location_type.overstock_threshold:
                return '%s <span class="fa fa-arrow-up" style="color:purple"/>' % value

        state_grouping = {}
        loc = SQLLocation.objects.get(location_id=self.config['location_id'])
        stock_states = StockState.objects.filter(
            case_id=loc.supply_point_id,
            section_id=STOCK_SECTION_TYPE,
            sql_product__in=self.unique_products(SQLLocation.objects.filter(pk=loc.pk))
        ).order_by('-last_modified_date')
        for state in stock_states:
            monthly_consumption = state.get_monthly_consumption()
            max_level = 0
            if monthly_consumption:
                monthly_consumption = round(monthly_consumption)
                # Maximum level = monthly consumption scaled by the
                # overstock threshold for this location type.
                max_level = round(monthly_consumption * float(loc.location_type.overstock_threshold))
            state_grouping[state.product_id] = {
                'commodity': state.sql_product.name,
                'months_until_stockout': "%.1f" % (float(state.stock_on_hand) / monthly_consumption)
                if state.stock_on_hand and monthly_consumption else 0,
                'stockout_duration': '',
                'stockout_duration_helper': True,
                'current_stock': state.stock_on_hand,
                'monthly_consumption': monthly_consumption,
                'reorder_level': round(max_level / 2.0),
                'maximum_level': max_level,
                'last_report': ews_date_format(state.last_modified_date)
            }
            if state.stock_on_hand == 0:
                # Stocked out: compute how long since stock was last positive.
                try:
                    st = StockTransaction.objects.filter(
                        case_id=loc.supply_point_id,
                        product_id=state.product_id,
                        stock_on_hand__gt=0
                    ).latest('report__date')
                    state_grouping[state.product_id]['stockout_duration'] = timesince(
                        st.report.date, now=datetime.datetime.now()
                    )
                except StockTransaction.DoesNotExist:
                    # Never had stock on hand.
                    state_grouping[state.product_id]['stockout_duration'] = 'Always'
            else:
                state_grouping[state.product_id]['stockout_duration_helper'] = False
        for values in state_grouping.values():
            if values['monthly_consumption'] is not None or values['current_stock'] == 0:
                months_until_stockout = get_months_until_stockout_icon(
                    values['months_until_stockout'] if values['months_until_stockout'] else 0.0, loc
                )
            else:
                months_until_stockout = '-'
            if values['monthly_consumption'] and values['monthly_consumption'] != 0.00:
                monthly_consumption = int(values['monthly_consumption'])
            else:
                monthly_consumption = 'not enough data'
            if values['maximum_level'] and values['maximum_level'] != 0.00:
                maximum_level = int(values['maximum_level'])
            else:
                maximum_level = 'unknown'
            if values['reorder_level'] and values['reorder_level'] != 0.00:
                reorder_level = int(values['reorder_level'])
            else:
                reorder_level = 'unknown'
            yield {
                'commodity': values['commodity'],
                'current_stock': int(values['current_stock']) if values['current_stock'] is not None else '--',
                'monthly_consumption': monthly_consumption,
                'months_until_stockout': months_until_stockout,
                'stockout_duration': values['stockout_duration'],
                'last_report': values['last_report'],
                'reorder_level': reorder_level,
                'maximum_level': maximum_level}

    @property
    def rows(self):
        # Column order must match `headers` above.
        for row in self.get_prod_data():
            yield [row['commodity'],
                   row['months_until_stockout'],
                   row['stockout_duration'],
                   row['current_stock'],
                   row['monthly_consumption'],
                   row['reorder_level'],
                   row['maximum_level'],
                   row['last_report']]
class InventoryManagementData(EWSData):
    """Weekly months-of-stock trend chart for every product at a facility."""
    title = ''
    slug = 'inventory_management'
    show_table = False
    show_chart = True
    chart_x_label = 'Weeks'
    chart_y_label = 'MOS'

    @property
    def rows(self):
        # Chart-only report; no tabular rows.
        return []

    @property
    def chart_data(self):
        """Build {series name: [{'x': week, 'y': months-of-stock}, ...]}."""
        def calculate_weeks_remaining(state, daily_consumption, date):
            if not daily_consumption:
                return 0
            consumption = round(float(daily_consumption) * 30.0)
            # NOTE(review): `consumption` is a monthly figure but is scaled by
            # the number of whole *weeks* since the report — looks like a
            # units mismatch; confirm the intended formula before changing.
            quantity = float(state.stock_on_hand) - ((date - state.report.date).days // 7) * consumption
            if consumption and consumption > 0 and quantity > 0:
                return quantity / consumption
            return 0

        enddate = self.config['enddate']
        # Default window is the 30 days leading up to enddate.
        startdate = self.config['startdate'] if 'custom_date' in self.config else enddate - timedelta(days=30)
        loc = SQLLocation.objects.get(location_id=self.config['location_id'])
        stoke_states = StockState.objects.filter(
            case_id=loc.supply_point_id,
            section_id=STOCK_SECTION_TYPE,
            sql_product__in=loc.products,
        )
        consumptions = {ss.product_id: ss.get_daily_consumption() for ss in stoke_states}
        st = StockTransaction.objects.filter(
            case_id=loc.supply_point_id,
            sql_product__in=loc.products,
            type='stockonhand',
            report__date__lte=enddate
        ).select_related('report', 'sql_product').order_by('report__date')
        rows = OrderedDict()
        weeks = ceil((enddate - startdate).days / 7)
        for state in st:
            product_name = '{0} ({1})'.format(state.sql_product.name, state.sql_product.code)
            if product_name not in rows:
                rows[product_name] = {}
            # Later transactions overwrite earlier ones for the same week,
            # so each point reflects the latest report before that week.
            for i in range(1, int(weeks + 1)):
                date = startdate + timedelta(weeks=i)
                if state.report.date < date:
                    rows[product_name][i] = calculate_weeks_remaining(
                        state, consumptions.get(state.product_id, None), date)
        for k, v in six.iteritems(rows):
            rows[k] = [{'x': key, 'y': value} for key, value in six.iteritems(v)]
        # Horizontal threshold lines drawn as extra series.
        rows['Understock'] = []
        rows['Overstock'] = []
        for i in range(1, int(weeks + 1)):
            rows['Understock'].append({'x': i, 'y': float(loc.location_type.understock_threshold)})
            rows['Overstock'].append({'x': i, 'y': float(loc.location_type.overstock_threshold)})
        return rows

    @property
    def charts(self):
        if self.show_chart:
            loc = SQLLocation.objects.get(location_id=self.config['location_id'])
            chart = EWSLineChart("Inventory Management Trends", x_axis=Axis(self.chart_x_label, 'd'),
                                 y_axis=Axis(self.chart_y_label, '.1f'))
            chart.height = 600
            values = []
            for product, value in six.iteritems(self.chart_data):
                values.extend([a['y'] for a in value])
                # Threshold series are drawn in black to stand out.
                chart.add_dataset(product, value,
                                  color='black' if product in ['Understock', 'Overstock'] else None)
            chart.forceY = [0, loc.location_type.understock_threshold + loc.location_type.overstock_threshold]
            chart.is_rendered_as_email = self.config.get('is_rendered_as_email', False)
            return [chart]
        return []
class InputStock(EWSData):
    """Small table offering a link to the stock-input page (when the user has
    permission) plus the timestamp of the most recent stock report."""
    slug = 'input_stock'
    show_table = True

    @property
    def rows(self):
        result = []
        url = reverse('input_stock', args=[self.domain, self.location.site_code])
        location = SQLLocation.objects.get(location_id=self.config['location_id'])
        if has_input_stock_permissions(self.config['user'], location, self.domain):
            result.append(["<a href='{}'>INPUT STOCK for {}</a>".format(url, self.location.name)])
        try:
            latest = (
                StockState.objects
                .filter(case_id=self.location.supply_point_id)
                .values('last_modified_date')
                .latest('last_modified_date')
            )
            result.append([
                'The last report received was at <b>{}.</b>'.format(
                    latest['last_modified_date'].strftime("%X on %b %d, %Y")
                )
            ])
        except StockState.DoesNotExist:
            # No stock reports exist yet for this supply point.
            pass
        return result
class UsersData(EWSData):
    """Custom-rendered panel listing the location's SMS users, web users and
    the pool of 'In Charge' candidates."""
    custom_table = True

    @property
    def rendered_content(self):
        from corehq.apps.users.views.mobile.users import EditCommCareUserView
        # SMS users assigned directly to the selected location.
        users = get_users_by_location_id(self.config['domain'],
                                         self.config['location_id'])
        # Ids of users currently flagged as in charge of this facility.
        in_charges = FacilityInCharge.objects.filter(
            location=self.location
        ).values_list('user_id', flat=True)
        if self.location.parent.location_type.name == 'district':
            # District view: in-charge candidates come from every descendant
            # location, filtered to users whose role includes 'In Charge'.
            children = self.location.parent.get_descendants()
            availaible_in_charges = list(chain.from_iterable([
                [u for u in get_users_by_location_id(self.config['domain'], child.location_id) if 'In Charge' in u.user_data.get('role', [])]
                for child in children
            ]))
        else:
            availaible_in_charges = [u for u in get_users_by_location_id(self.domain, self.location_id) if 'In Charge' in u.user_data.get('role', [])]
        user_to_dict = lambda sms_user: {
            'id': sms_user.get_id,
            'full_name': sms_user.full_name,
            'phone_numbers': sms_user.phone_numbers,
            'in_charge': sms_user.get_id in in_charges,
            'location_name': sms_user.location.sql_location.name,
            'url': reverse(EditCommCareUserView.urlname, args=[self.config['domain'], sms_user.get_id])
        }
        # Web users linked to this location via an EWS extension record...
        web_users_from_extension = list(iter_docs(
            WebUser.get_db(),
            EWSExtension.objects.filter(domain=self.domain,
                                        location_id=self.location_id).values_list('user_id', flat=True)
        ))
        WebUserInfo = collections.namedtuple('WebUserInfo', 'id first_name last_name email')
        # ...merged and de-duplicated (via the namedtuple set) with web users
        # whose domain membership points at this location.
        web_users = {
            WebUserInfo(
                id=web_user['_id'],
                first_name=web_user['first_name'],
                last_name=web_user['last_name'],
                email=web_user['email']
            )
            for web_user in (UserES().web_users().domain(self.config['domain']).term(
                "domain_memberships.location_id", self.location_id
            ).run().hits + web_users_from_extension)
        }
        return render_to_string('ewsghana/partials/users_tables.html', {
            'users': [user_to_dict(user) for user in users],
            'domain': self.domain,
            'location_id': self.location_id,
            'web_users': web_users,
            'district_in_charges': [user_to_dict(user) for user in availaible_in_charges]
        })
class StockLevelsReport(MultiReport):
    """Aggregate stock report composed of several facility-level data providers."""
    title = "Aggregate Stock Report"
    fields = [EWSRestrictionLocationFilter, ProductByProgramFilter, EWSDateFilter]
    name = "Stock Levels Report"
    slug = 'ews_stock_levels_report'
    exportable = True
    is_exportable = True

    @property
    def report_config(self):
        # Extend the base config with the date range and program/product filters.
        report_config = super(StockLevelsReport, self).report_config
        program = self.request.GET.get('filter_by_program')
        products = self.request.GET.getlist('filter_by_product')
        report_config.update(dict(
            startdate=self.datespan.startdate_utc,
            enddate=self.datespan.enddate_utc,
            program=program if program != ALL_OPTION else None,
            products=products if products and products[0] != ALL_OPTION else []
        ))
        return report_config

    @property
    @memoized
    def data_providers(self):
        config = self.report_config
        # Only non-administrative (facility-level) location types have stock data.
        location_types = [
            loc_type.name
            for loc_type in Domain.get_by_name(self.domain).location_types
            if not loc_type.administrative
        ]
        if not self.needs_filters and get_location(config['location_id']).location_type_name in location_types:
            if self.is_rendered_as_email:
                return [FacilityReportData(config)]
            else:
                return [FacilityReportData(config),
                        StockLevelsLegend(config),
                        InputStock(config),
                        UsersData(config),
                        InventoryManagementData(config),
                        ProductSelectionPane(config, hide_columns=False)]
        # Fixed: previously fell through and implicitly returned None, which
        # breaks callers that iterate over the providers.
        return []

    @classmethod
    def show_in_navigation(cls, domain=None, project=None, user=None):
        # Accessible by direct URL only; hidden from the report menus.
        return False
|
nilq/baby-python
|
python
|
from collections import namedtuple
import contextlib
import meshcat
import meshcat.geometry as meshcat_geom
import meshcat.transformations as meshcat_tf
import matplotlib.pyplot as plt
import logging
import numpy as np
import networkx as nx
import os
import yaml
import torch
import pydrake
from pydrake.common.cpp_param import List as DrakeBindingList
from pydrake.all import (
AddMultibodyPlantSceneGraph,
AngleAxis,
BasicVector,
BodyIndex,
ConnectMeshcatVisualizer,
CoulombFriction,
DiagramBuilder,
ExternallyAppliedSpatialForce,
LeafSystem,
InverseKinematics,
MeshcatVisualizer,
MinimumDistanceConstraint,
ModelInstanceIndex,
MultibodyPlant,
SpatialInertia,
Parser,
RigidTransform,
RotationMatrix,
SpatialForce,
Simulator,
SnoptSolver,
Solve,
SolverOptions,
UnitInertia,
Value
)
import pydrake.geometry as pydrake_geom
def torch_tf_to_drake_tf(tf):
    """Convert a 4x4 torch pose matrix into a Drake RigidTransform."""
    matrix = tf.cpu().detach().numpy()
    return RigidTransform(matrix)
def drake_tf_to_torch_tf(tf):
    """Convert a Drake RigidTransform into a 4x4 torch tensor."""
    homogeneous_matrix = tf.GetAsMatrix4()
    return torch.tensor(homogeneous_matrix)
# Fallback inertia for bodies that do not specify one: 1 kg mass with a
# small diagonal unit rotational inertia.
default_spatial_inertia = SpatialInertia(
    mass=1.0,
    p_PScm_E=np.zeros(3), G_SP_E=UnitInertia(0.01, 0.01, 0.01)
)
# Fallback contact friction (static=0.9, dynamic=0.8) for registered geometry.
default_friction = CoulombFriction(0.9, 0.8)
class PhysicsGeometryInfo():
    '''
    Container for a scene node's physics and geometry info, providing
    simulator and visualization interoperation.

    Args:
        - fixed: Whether this geometry is welded to the world (otherwise
          it will be mobilized by a 6DOF floating base).
        - spatial_inertia: Spatial inertia of the body; defaults to 1.0kg
          mass with a 0.01x0.01x0.01 diagonal rotational inertia.
        - is_container: Whether this object isolates the objects below it
          from collision/clearance checks against objects above it (e.g.
          a cabinet full of stuff that doesn't interact with the rest of
          the scene).

    After construction, populate the geometry via the register_* calls:
        - register_model_file: an sdf/urdf path plus a transform from the
          object origin, the root body name (required if the model has
          more than one body), and optionally an initial joint
          configuration dict. The root link is welded (or floated, if not
          fixed) at the node transform.
        - register_visual_geometry / register_collision_geometry /
          register_geometry: Drake Shape primitives with local transforms
          and color / friction info.
        - register_clearance_geometry: Drake Shapes marking the region
          around the object that must not intersect other nodes'
          clearance regions (e.g. space for cabinet doors to open).
    '''
    def __init__(self, fixed=True, spatial_inertia=None, is_container=False):
        self.fixed = fixed
        self.is_container = is_container
        self.spatial_inertia = spatial_inertia or default_spatial_inertia
        # Registered geometry, each entry a tuple led by its local transform.
        self.model_paths = []
        self.visual_geometry = []
        self.collision_geometry = []
        self.clearance_geometry = []

    @staticmethod
    def _check_tf_and_shape(tf, geometry):
        # All registrations require a 4x4 torch pose and a Drake Shape.
        assert isinstance(tf, torch.Tensor) and tf.shape == (4, 4)
        assert isinstance(geometry, pydrake.geometry.Shape)

    def register_model_file(self, tf, model_path, root_body_name=None,
                            q0_dict={}):
        self.model_paths.append((tf, model_path, root_body_name, q0_dict))

    def register_geometry(self, tf, geometry, color=np.ones(4), friction=default_friction):
        # Shorthand for registering the same geometry as collision + visual.
        self.register_visual_geometry(tf, geometry, color)
        self.register_collision_geometry(tf, geometry, friction)

    def register_visual_geometry(self, tf, geometry, color=np.ones(4)):
        self._check_tf_and_shape(tf, geometry)
        self.visual_geometry.append((tf, geometry, color))

    def register_collision_geometry(self, tf, geometry, friction=default_friction):
        self._check_tf_and_shape(tf, geometry)
        assert isinstance(friction, CoulombFriction)
        self.collision_geometry.append((tf, geometry, friction))

    def register_clearance_geometry(self, tf, geometry):
        self._check_tf_and_shape(tf, geometry)
        self.clearance_geometry.append((tf, geometry))
def sanity_check_node_tf_and_physics_geom_info(node):
    """Assert that node.tf is a 4x4 torch tensor and that the node carries a
    PhysicsGeometryInfo instance."""
    tf = node.tf
    assert isinstance(tf, torch.Tensor), type(tf)
    assert tf.shape == (4, 4), tf.shape
    info = node.physics_geometry_info
    assert isinstance(info, PhysicsGeometryInfo), type(info)
class DecayingForceToDesiredConfigSystem(LeafSystem):
    ''' Connect to a MBP to apply ghost forces (that decay over time)
    to encourage the scene to settle near the desired configuration. '''
    def __init__(self, mbp, q_des):
        LeafSystem.__init__(self)
        self.set_name('DecayingForceToDesiredConfigSystem')
        # Input: full plant state (positions + velocities).
        self.robot_state_input_port = self.DeclareVectorInputPort(
            "robot_state", BasicVector(mbp.num_positions() + mbp.num_velocities()))
        forces_cls = Value[DrakeBindingList[ExternallyAppliedSpatialForce]]
        # Output: list of externally-applied spatial forces, one per free body.
        self.spatial_forces_output_port = self.DeclareAbstractOutputPort(
            "spatial_forces_vector",
            lambda: forces_cls(),
            self.DoCalcAbstractOutput)
        self.mbp = mbp
        self.q_des = q_des
        # Scratch contexts: one mirrors the simulated state, one holds q_des.
        self.mbp_current_context = mbp.CreateDefaultContext()
        self.mbp_des_context = mbp.CreateDefaultContext()
        self.mbp.SetPositions(self.mbp_des_context, self.q_des)

    def DoCalcAbstractOutput(self, context, y_data):
        t = context.get_time()
        # Annealing schedule
        force_multiplier = 10.0*np.exp(-0.5*t)*np.abs(np.cos(t*np.pi/2.))
        x_in = self.EvalVectorInput(context, 0).get_value()
        self.mbp.SetPositionsAndVelocities(self.mbp_current_context, x_in)
        forces = []
        for k in self.mbp.GetFloatingBaseBodies():
            body = self.mbp.get_body(BodyIndex(k))
            # Get pose of body in world frame
            body_tf = self.mbp.GetFreeBodyPose(self.mbp_current_context, body)
            body_r = body_tf.rotation().matrix()
            body_tfd = self.mbp.EvalBodySpatialVelocityInWorld(self.mbp_current_context, body)
            # Error between the desired pose and the current pose.
            des_tf = self.mbp.GetFreeBodyPose(self.mbp_des_context, body)
            delta_xyz = des_tf.translation() - body_tf.translation()
            delta_r = des_tf.rotation().matrix().dot(body_tf.rotation().matrix().T)
            # Get mass info so we can calc correct forces
            si = body.CalcSpatialInertiaInBodyFrame(self.mbp_current_context)
            m = si.get_mass()
            I = si.CalcRotationalInertia().CopyToFullMatrix3()
            I_w = body_tf.rotation().matrix().dot(I)
            # Multiply out
            # PD-style wrench: axis-angle rotation error and translation error
            # with velocity damping, plus a gravity-offsetting term.
            aa = AngleAxis(delta_r)
            tau = aa.axis()*aa.angle() - 0.1*body_tfd.rotational()
            f = (delta_xyz*10. - 0.1*body_tfd.translational() + np.array([0., 0., 9.81])/max(1., force_multiplier))*m
            # Clip the annealed wrench to keep the sim well-behaved.
            max_force = 100.
            max_torque = 100.
            force = SpatialForce(
                tau=np.clip(tau*force_multiplier, -max_torque, max_torque),
                f=np.clip(f*force_multiplier, -max_force, max_force)
            )
            out = ExternallyAppliedSpatialForce()
            out.F_Bq_W = force
            out.body_index = body.index()
            forces.append(out)
        y_data.set_value(forces)
class StochasticLangevinForceSource(LeafSystem):
    ''' Connect to a MBP to apply ghost forces. The forces are:
    1) Random noise whose magnitude decays with sim time.
    2) A force proportional to the gradient of the log-prob of the
    object poses w.r.t the log-prob of the scene tree.

    This probably doesn't work for systems with tough inter-node
    constraints like planar-ity. Need to do some reparameterization
    of the system under sim for that to work?

    MBP + scene_tree should be corresponded to each other through
    the mbp construction method in this file.
    '''
    def __init__(self, mbp, scene_tree, node_to_free_body_ids_map, body_id_to_node_map):
        LeafSystem.__init__(self)
        self.set_name('StochasticLangevinForceSystem')
        # Input: full plant state (positions + velocities).
        self.robot_state_input_port = self.DeclareVectorInputPort(
            "robot_state", BasicVector(mbp.num_positions() + mbp.num_velocities()))
        forces_cls = Value[DrakeBindingList[ExternallyAppliedSpatialForce]]
        self.spatial_forces_output_port = self.DeclareAbstractOutputPort(
            "spatial_forces_vector",
            lambda: forces_cls(),
            self.DoCalcAbstractOutput)
        self.scene_tree = scene_tree
        self.node_to_free_body_ids_map = node_to_free_body_ids_map
        self.body_id_to_node_map = body_id_to_node_map
        self.mbp = mbp
        # Scratch context mirroring the simulated plant state; seed it from
        # the scene tree's current node poses.
        self.mbp_current_context = mbp.CreateDefaultContext()
        for node, body_ids in self.node_to_free_body_ids_map.items():
            for body_id in body_ids:
                self.mbp.SetFreeBodyPose(self.mbp_current_context, self.mbp.get_body(body_id), torch_tf_to_drake_tf(node.tf))

    def DoCalcAbstractOutput(self, context, y_data):
        t = context.get_time()
        # TODO: Hook up random input on the right kind of random port.
        # Both the noise magnitude and the log-prob force anneal
        # geometrically with sim time.
        noise_scale = 0.25 * 0.25**t
        ll_scale = 2.0 * 0.25**t
        x_in = self.EvalVectorInput(context, 0).get_value()
        self.mbp.SetPositionsAndVelocities(self.mbp_current_context, x_in)
        # Copy state over to scene tree.
        free_bodies = self.mbp.GetFloatingBaseBodies()
        body_tf_vars = {}
        for body_id, node in self.body_id_to_node_map.items():
            if body_id not in free_bodies:
                continue
            # Make each free-body pose a leaf variable so autograd can give
            # us d(log prob)/d(pose) after the score backward pass.
            tf_dec_var = drake_tf_to_torch_tf(self.mbp.GetFreeBodyPose(self.mbp_current_context, self.mbp.get_body(body_id)))
            tf_dec_var.requires_grad = True
            body_tf_vars[body_id] = tf_dec_var
            node.tf = tf_dec_var
        # Compute log prob and backprop.
        score = self.scene_tree.score(include_discrete=False, include_continuous=True)
        score.backward()
        forces = []
        for body_id in free_bodies:
            body = self.mbp.get_body(body_id)
            # Get pose of body in world frame
            body_tf = self.mbp.GetFreeBodyPose(self.mbp_current_context, body)
            body_tfd = self.mbp.EvalBodySpatialVelocityInWorld(self.mbp_current_context, body)
            # Get mass info so we can calc correct force scaling
            si = body.CalcSpatialInertiaInBodyFrame(self.mbp_current_context)
            m = si.get_mass()
            I = si.CalcRotationalInertia().CopyToFullMatrix3()
            I_w = body_tf.rotation().matrix().dot(I)
            # Calculate total wrench
            # Noise term
            f_noise = np.random.normal(0., noise_scale, size=3)
            tau_noise = np.random.normal(0., noise_scale, size=3)
            # Force maximizing log prob: translation block of the pose gradient.
            t_grad = body_tf_vars[body_id].grad[:3, 3].numpy()
            f_ll = t_grad*m
            # Total wrench = noise + log-prob ascent force, both with
            # velocity damping terms.
            force = SpatialForce(
                tau=tau_noise - 0.01*body_tfd.rotational(),
                f=f_noise + f_ll*ll_scale - body_tfd.translational()*0.5
            )
            out = ExternallyAppliedSpatialForce()
            out.F_Bq_W = force
            out.body_index = body.index()
            forces.append(out)
        y_data.set_value(forces)
def resolve_catkin_package_path(package_map, input_str):
    """Resolve a '<package>://<path>' style path against a Drake PackageMap;
    paths without a '://' separator are returned unchanged."""
    if "://" not in input_str:
        return input_str
    elements = input_str.split("://")
    assert len(elements) == 2, "Malformed path " + input_str
    package_name, path_in_package = elements
    assert package_map.Contains(package_name), "%s not in package map" % package_name
    package_root = package_map.GetPath(package_name)
    return os.path.join(package_root, path_in_package)
def compile_scene_tree_clearance_geometry_to_mbp_and_sg(scene_tree, timestep=0.001, alpha=0.25):
    '''Build a MultibodyPlant + SceneGraph containing only each node's
    *clearance* geometry, with one free body per node posed at the node
    transform. Each node class gets its own semi-transparent color (alpha
    blended) for visualization.

    Returns (builder, mbp, scene_graph); the plant is NOT finalized.
    '''
    builder = DiagramBuilder()
    mbp, scene_graph = AddMultibodyPlantSceneGraph(
        builder, MultibodyPlant(time_step=timestep))
    parser = Parser(mbp)
    parser.package_map().PopulateFromEnvironment("ROS_PACKAGE_PATH")
    world_body = mbp.world_body()
    free_body_poses = []
    # For generating colors.
    node_class_to_color_dict = {}
    cmap = plt.cm.get_cmap('jet')
    cmap_counter = 0.
    for node in scene_tree.nodes:
        if node.tf is not None and node.physics_geometry_info is not None:
            # Don't have to do anything if this does not introduce geometry.
            sanity_check_node_tf_and_physics_geom_info(node)
            phys_geom_info = node.physics_geometry_info
            has_clearance_geometry = len(phys_geom_info.clearance_geometry) > 0
            if not has_clearance_geometry:
                continue
            # Add a body for this node and register the clearance geometry.
            # TODO(gizatt) This tree body index is built in to disambiguate names.
            # But I forsee a name-to-stuff resolution crisis when inference time comes...
            # this might get resolved by the solution to that.
            body = mbp.AddRigidBody(name=node.name,
                                    M_BBo_B=phys_geom_info.spatial_inertia)
            tf = torch_tf_to_drake_tf(node.tf)
            mbp.SetDefaultFreeBodyPose(body, tf)
            # Pick out a color for this class.
            node_type_string = node.__class__.__name__
            if node_type_string in node_class_to_color_dict.keys():
                color = node_class_to_color_dict[node_type_string]
            else:
                color = list(cmap(cmap_counter))
                color[3] = alpha
                node_class_to_color_dict[node_type_string] = color
                # Advance the colormap position; fmod wraps it back into [0, 1).
                cmap_counter = np.fmod(cmap_counter + np.pi*2., 1.)
            # Handle adding primitive geometry by adding it all to one
            # mbp.
            if len(phys_geom_info.clearance_geometry) > 0:
                # Register each clearance shape as both collision (so overlaps
                # are detectable) and visual (so they can be inspected).
                for k, (tf, geometry) in enumerate(phys_geom_info.clearance_geometry):
                    mbp.RegisterCollisionGeometry(
                        body=body,
                        X_BG=torch_tf_to_drake_tf(tf),
                        shape=geometry,
                        name=node.name + "_col_%03d" % k,
                        coulomb_friction=default_friction)
                    mbp.RegisterVisualGeometry(
                        body=body,
                        X_BG=torch_tf_to_drake_tf(tf),
                        shape=geometry,
                        name=node.name + "_vis_%03d" % k,
                        diffuse_color=color)
    return builder, mbp, scene_graph
def build_nonpenetration_constraint(mbp, mbp_context_in_diagram, signed_distance_threshold):
    '''Wrap Drake's MinimumDistanceConstraint: returns a constraint that
    evaluates a configuration and requires every body pair to be farther
    apart than signed_distance_threshold.'''
    constraint = MinimumDistanceConstraint(
        mbp, signed_distance_threshold, mbp_context_in_diagram)
    return constraint
def get_collisions(mbp, mbp_context_in_diagram):
    """Return all current point-pair penetrations from the plant's geometry
    query port (mirrors the checking done inside MinimumDistanceConstraint)."""
    query_port = mbp.get_geometry_query_input_port()
    assert query_port.HasValue(mbp_context_in_diagram), \
        "Either the plant geometry_query_input_port() is not properly " \
        "connected to the SceneGraph's output port, or the plant_context_ is " \
        "incorrect. Please refer to AddMultibodyPlantSceneGraph on connecting " \
        "MultibodyPlant to SceneGraph."
    return query_port.Eval(mbp_context_in_diagram).ComputePointPairPenetration()
def resolve_sg_proximity_id_to_mbp_id(sg, mbp, geometry_id):
    """Find which (model instance, body) of `mbp` owns the SceneGraph
    collision geometry `geometry_id`; raises ValueError if none does."""
    for raw_index in range(mbp.num_model_instances()):
        model_id = ModelInstanceIndex(raw_index)
        for body_id in mbp.GetBodyIndices(model_id):
            body_geometries = mbp.GetCollisionGeometriesForBody(mbp.get_body(body_id))
            if geometry_id in body_geometries:
                return model_id, body_id
    raise ValueError("Geometry ID not registered by this MBP.")
def expand_container_tree(full_tree, new_tree, current_node):
    """Copy current_node's descendants from full_tree into new_tree.

    Children are always added (with their connecting edges), but recursion
    stops at container nodes: a container therefore appears both in the
    tree above it and as the root of its own subtree.
    """
    for child in full_tree.successors(current_node):
        new_tree.add_node(child)
        new_tree.add_edge(current_node, child)
        info = child.physics_geometry_info
        if info is not None and info.is_container:
            # Containers terminate recursion here; they root their own subtree.
            continue
        new_tree = expand_container_tree(full_tree, new_tree, child)
    return new_tree
def split_tree_into_containers(scene_tree):
    """Split scene_tree into subtrees rooted at the overall root and at each
    container node, recursing each subtree until it hits terminals or other
    containers."""
    def _is_subtree_root(node):
        # Roots are the tree root(s) (no predecessors) and every container.
        if len(list(scene_tree.predecessors(node))) == 0:
            return True
        info = node.physics_geometry_info
        return info is not None and info.is_container
    trees = []
    for root in [node for node in scene_tree.nodes if _is_subtree_root(node)]:
        # Seed the subtree with its root, then expand downward.
        subtree = nx.DiGraph()
        subtree.add_node(root)
        trees.append(expand_container_tree(scene_tree, subtree, root))
    return trees
def compile_scene_tree_to_mbp_and_sg(scene_tree, timestep=0.001):
    '''Build a MultibodyPlant + SceneGraph realizing the visual and collision
    geometry of every node in the scene tree.

    Primitive geometry gets one rigid body per node (in its own model
    instance); sdf/urdf models are loaded through the Parser. Bodies are
    welded to the world for fixed nodes, otherwise left free with their
    default pose set from the node transform.

    Returns (builder, mbp, scene_graph, node_to_free_body_ids_map,
    body_id_to_node_map); the plant is NOT finalized.
    '''
    builder = DiagramBuilder()
    mbp, scene_graph = AddMultibodyPlantSceneGraph(
        builder, MultibodyPlant(time_step=timestep))
    parser = Parser(mbp)
    parser.package_map().PopulateFromEnvironment("ROS_PACKAGE_PATH")
    world_body = mbp.world_body()
    node_to_free_body_ids_map = {}  # node -> list of its free BodyIndex values
    body_id_to_node_map = {}  # BodyIndex -> owning node
    free_body_poses = []
    for node in scene_tree.nodes:
        node_to_free_body_ids_map[node] = []
        if node.tf is not None and node.physics_geometry_info is not None:
            # Don't have to do anything if this does not introduce geometry.
            sanity_check_node_tf_and_physics_geom_info(node)
            phys_geom_info = node.physics_geometry_info
            # Don't have to do anything if this does not introduce geometry.
            has_models = len(phys_geom_info.model_paths) > 0
            has_prim_geometry = (len(phys_geom_info.visual_geometry)
                                 + len(phys_geom_info.collision_geometry)) > 0
            if not has_models and not has_prim_geometry:
                continue
            node_model_ids = []
            # Handle adding primitive geometry by adding it all to one
            # mbp.
            if has_prim_geometry:
                # Contain this primitive geometry in a model instance.
                model_id = mbp.AddModelInstance(
                    node.name + "::model_%d" % len(node_model_ids))
                # Add a body for this node, and register any of the
                # visual and collision geometry available.
                # TODO(gizatt) This tree body index is built in to disambiguate names.
                # But I forsee a name-to-stuff resolution crisis when inference time comes...
                # this might get resolved by the solution to that.
                body = mbp.AddRigidBody(name=node.name, model_instance=model_id,
                                        M_BBo_B=phys_geom_info.spatial_inertia)
                body_id_to_node_map[body.index()] = node
                tf = torch_tf_to_drake_tf(node.tf)
                if phys_geom_info.fixed:
                    weld = mbp.WeldFrames(world_body.body_frame(),
                                          body.body_frame(),
                                          tf)
                else:
                    node_to_free_body_ids_map[node].append(body.index())
                    mbp.SetDefaultFreeBodyPose(body, tf)
                for k, (tf, geometry, color) in enumerate(phys_geom_info.visual_geometry):
                    mbp.RegisterVisualGeometry(
                        body=body,
                        X_BG=torch_tf_to_drake_tf(tf),
                        shape=geometry,
                        name=node.name + "_vis_%03d" % k,
                        diffuse_color=color)
                for k, (tf, geometry, friction) in enumerate(phys_geom_info.collision_geometry):
                    mbp.RegisterCollisionGeometry(
                        body=body,
                        X_BG=torch_tf_to_drake_tf(tf),
                        shape=geometry,
                        name=node.name + "_col_%03d" % k,
                        coulomb_friction=friction)
            # Handle adding each model from sdf/urdf.
            if has_models:
                for local_tf, model_path, root_body_name, q0_dict in phys_geom_info.model_paths:
                    model_id = parser.AddModelFromFile(
                        resolve_catkin_package_path(parser.package_map(), model_path),
                        node.name + "::" "model_%d" % len(node_model_ids))
                    if root_body_name is None:
                        # A unique body lets us infer the root automatically.
                        root_body_ind_possibilities = mbp.GetBodyIndices(model_id)
                        assert len(root_body_ind_possibilities) == 1, \
                            "Please supply root_body_name for model with path %s" % model_path
                        root_body = mbp.get_body(root_body_ind_possibilities[0])
                    else:
                        root_body = mbp.GetBodyByName(
                            name=root_body_name,
                            model_instance=model_id)
                    body_id_to_node_map[root_body.index()] = node
                    # Model pose = node pose composed with the registered local offset.
                    node_tf = torch_tf_to_drake_tf(node.tf)
                    full_model_tf = node_tf.multiply(torch_tf_to_drake_tf(local_tf))
                    if phys_geom_info.fixed:
                        mbp.WeldFrames(world_body.body_frame(),
                                       root_body.body_frame(),
                                       full_model_tf)
                    else:
                        node_to_free_body_ids_map[node].append(root_body.index())
                        mbp.SetDefaultFreeBodyPose(root_body, full_model_tf)
                    # Handle initial joint state
                    if q0_dict is not None:
                        for joint_name in list(q0_dict.keys()):
                            q0_this = q0_dict[joint_name]
                            joint = mbp.GetMutableJointByName(
                                joint_name, model_instance=model_id)
                            # Reshape to make Drake happy.
                            q0_this = q0_this.reshape(joint.num_positions(), 1)
                            joint.set_default_positions(q0_this)
    return builder, mbp, scene_graph, node_to_free_body_ids_map, body_id_to_node_map
def project_tree_to_feasibility(tree, constraints=None, jitter_q=None, do_forward_sim=False, zmq_url=None, prefix="projection", timestep=0.001, T=1.):
    '''Project the tree's free-body poses to the closest feasible
    (nonpenetrating, constraint-satisfying) configuration via a SNOPT NLP,
    optionally followed by a short forward sim to settle into stability.

    Mutates and returns `tree` with each node's tf updated from the solution.

    Args:
        tree: Scene tree whose nodes carry tf + physics_geometry_info.
        constraints: Optional extra constraint objects, each exposing
            add_to_ik_prog(tree, ik, mbp, mbp_context, node_to_free_body_ids_map).
        jitter_q: If set, stddev of Gaussian noise added to the initial guess.
        do_forward_sim: If True, forward-simulate for T seconds after solving.
        zmq_url: Optional meshcat zmq url for visualization.
        prefix: Meshcat visualization prefix.
        timestep: Plant timestep.
        T: Forward-sim duration (seconds).
    '''
    # Avoid a shared mutable default argument.
    constraints = constraints if constraints is not None else []
    builder, mbp, sg, node_to_free_body_ids_map, body_id_to_node_map = \
        compile_scene_tree_to_mbp_and_sg(tree, timestep=timestep)
    mbp.Finalize()
    # Connect visualizer if requested. Wrap carefully to keep it
    # from spamming the console.
    if zmq_url is not None:
        with open(os.devnull, 'w') as devnull:
            with contextlib.redirect_stdout(devnull):
                visualizer = ConnectMeshcatVisualizer(builder, sg, zmq_url=zmq_url, prefix=prefix)
    diagram = builder.Build()
    diagram_context = diagram.CreateDefaultContext()
    mbp_context = diagram.GetMutableSubsystemContext(mbp, diagram_context)
    q0 = mbp.GetPositions(mbp_context)
    nq = len(q0)
    if nq == 0:
        # Fixed: logging.warn is a deprecated alias of logging.warning.
        logging.warning("Generated MBP had no positions.")
        return tree
    # Set up projection NLP.
    ik = InverseKinematics(mbp, mbp_context)
    q_dec = ik.q()
    prog = ik.prog()
    # It's always a projection, so we always have this
    # Euclidean norm error between the optimized q and q0.
    prog.AddQuadraticErrorCost(np.eye(nq), q0, q_dec)
    # Nonpenetration constraint.
    ik.AddMinimumDistanceConstraint(0.01)
    # Other requested constraints.
    for constraint in constraints:
        constraint.add_to_ik_prog(tree, ik, mbp, mbp_context, node_to_free_body_ids_map)
    # Initial guess, which can be slightly randomized by request.
    q_guess = q0
    if jitter_q:
        q_guess = q_guess + np.random.normal(0., jitter_q, size=q_guess.size)
    prog.SetInitialGuess(q_dec, q_guess)
    # Solve.
    solver = SnoptSolver()
    options = SolverOptions()
    logfile = "/tmp/snopt.log"
    # Remove any stale log so failure diagnostics below come from this solve.
    # (Was os.system("rm ..."); os.remove avoids spawning a shell.)
    with contextlib.suppress(FileNotFoundError):
        os.remove(logfile)
    options.SetOption(solver.id(), "Print file", logfile)
    options.SetOption(solver.id(), "Major feasibility tolerance", 1E-3)
    options.SetOption(solver.id(), "Major optimality tolerance", 1E-3)
    options.SetOption(solver.id(), "Major iterations limit", 300)
    result = solver.Solve(prog, None, options)
    if not result.is_success():
        logging.warning("Projection failed.")
        print("Logfile: ")
        with open(logfile) as f:
            print(f.read())
    qf = result.GetSolution(q_dec)
    mbp.SetPositions(mbp_context, qf)
    # If forward sim is requested, do a quick forward sim to get to
    # a statically stable config.
    if do_forward_sim:
        sim = Simulator(diagram, diagram_context)
        sim.set_target_realtime_rate(1000.)
        sim.AdvanceTo(T)
    # Reload poses back into tree
    free_bodies = mbp.GetFloatingBaseBodies()
    for body_id, node in body_id_to_node_map.items():
        if body_id not in free_bodies:
            continue
        node.tf = drake_tf_to_torch_tf(mbp.GetFreeBodyPose(mbp_context, mbp.get_body(body_id)))
    return tree
def project_tree_to_feasibility_via_sim(tree, constraints=None, zmq_url=None, prefix="projection", timestep=0.0005, T=10.):
    '''Settle the tree into a feasible configuration by forward-simulating
    under decaying "ghost" forces that pull the bodies toward their current
    (desired) poses. Mutates and returns `tree`.

    NOTE: `constraints` is accepted for signature parity with
    project_tree_to_feasibility but is not used by this sim-based variant.
    '''
    builder, mbp, sg, node_to_free_body_ids_map, body_id_to_node_map = \
        compile_scene_tree_to_mbp_and_sg(tree, timestep=timestep)
    mbp.Finalize()
    # Connect visualizer if requested. Wrap carefully to keep it
    # from spamming the console.
    if zmq_url is not None:
        with open(os.devnull, 'w') as devnull:
            with contextlib.redirect_stdout(devnull):
                visualizer = ConnectMeshcatVisualizer(builder, sg, zmq_url=zmq_url, prefix=prefix)
    # Forward sim under annealed forces steering toward the current config.
    force_source = builder.AddSystem(
        DecayingForceToDesiredConfigSystem(mbp, mbp.GetPositions(mbp.CreateDefaultContext()))
    )
    builder.Connect(mbp.get_state_output_port(),
                    force_source.get_input_port(0))
    builder.Connect(force_source.get_output_port(0),
                    mbp.get_applied_spatial_force_input_port())
    diagram = builder.Build()
    diagram_context = diagram.CreateDefaultContext()
    mbp_context = diagram.GetMutableSubsystemContext(mbp, diagram_context)
    q0 = mbp.GetPositions(mbp_context)
    nq = len(q0)
    if nq == 0:
        # Fixed: logging.warn is a deprecated alias of logging.warning.
        logging.warning("Generated MBP had no positions.")
        return tree
    # Make 'safe' initial configuration that stacks the free bodies
    # vertically so the sim does not start deep in penetration.
    # (Removed unused accumulator `all_pos`.)
    k = 0
    for node in tree:
        for body_id in node_to_free_body_ids_map[node]:
            body = mbp.get_body(body_id)
            tf = mbp.GetFreeBodyPose(mbp_context, body)
            tf = RigidTransform(p=tf.translation() + np.array([0., 0., 1. + k*0.5]), R=tf.rotation())
            mbp.SetFreeBodyPose(mbp_context, body, tf)
            k += 1
    sim = Simulator(diagram, diagram_context)
    sim.set_target_realtime_rate(1000)
    sim.AdvanceTo(T)
    # Reload poses back into tree
    free_bodies = mbp.GetFloatingBaseBodies()
    for body_id, node in body_id_to_node_map.items():
        if body_id not in free_bodies:
            continue
        node.tf = drake_tf_to_torch_tf(mbp.GetFreeBodyPose(mbp_context, mbp.get_body(body_id)))
    return tree
def rejection_sample_structure_to_feasibility(
        tree, constraints=None, max_n_iters=100,
        do_forward_sim=False, timestep=0.001, T=1.):
    '''Resample the tree's continuous node poses until all constraints are
    satisfied, or until max_n_iters samples have been drawn.

    Returns (tree, success). On failure the tree is loaded with the best
    (least-violating) sample seen, not the last one drawn.

    NOTE: do_forward_sim and T are accepted for signature parity but unused.
    '''
    # Avoid a shared mutable default argument.
    constraints = constraints if constraints is not None else []
    # Pre-build prog to check ourselves against
    builder, mbp, sg, node_to_free_body_ids_map, body_id_to_node_map = \
        compile_scene_tree_to_mbp_and_sg(tree, timestep=timestep)
    mbp.Finalize()
    floating_base_bodies = mbp.GetFloatingBaseBodies()
    diagram = builder.Build()
    diagram_context = diagram.CreateDefaultContext()
    mbp_context = diagram.GetMutableSubsystemContext(mbp, diagram_context)
    q0 = mbp.GetPositions(mbp_context)
    nq = len(q0)
    # Set up the constraint program (used only for checking, not solving).
    ik = InverseKinematics(mbp, mbp_context)
    q_dec = ik.q()
    prog = ik.prog()
    # Nonpenetration constraint.
    ik.AddMinimumDistanceConstraint(0.001)
    # Other requested constraints.
    for constraint in constraints:
        constraint.add_to_ik_prog(tree, ik, mbp, mbp_context, node_to_free_body_ids_map)
    from pyro.contrib.autoname import scope
    best_q = q0
    best_violation = np.inf
    for k in range(max_n_iters):
        # Resample every node's pose top-down from its parent's rule.
        node_queue = [tree.get_root()]
        while len(node_queue) > 0:
            parent = node_queue.pop(0)
            children, rules = tree.get_children_and_rules(parent)
            for child, rule in zip(children, rules):
                with scope(prefix=parent.name):
                    rule.sample_child(parent, child)
                node_queue.append(child)
        # Push the sampled poses into the plant and check all constraints.
        for node, body_ids in node_to_free_body_ids_map.items():
            for body_id in body_ids:
                mbp.SetFreeBodyPose(mbp_context, mbp.get_body(body_id), torch_tf_to_drake_tf(node.tf))
        q = mbp.GetPositions(mbp_context)
        all_bindings = prog.GetAllConstraints()
        satisfied = prog.CheckSatisfied(all_bindings, q)
        if satisfied:
            return tree, True
        # Otherwise compute this sample's total constraint violation.
        evals = np.concatenate([binding.evaluator().Eval(q).flatten() for binding in all_bindings])
        lbs = np.concatenate([binding.evaluator().lower_bound().flatten() for binding in all_bindings])
        ubs = np.concatenate([binding.evaluator().upper_bound().flatten() for binding in all_bindings])
        viols = np.maximum(np.clip(lbs - evals, 0., np.inf), np.clip(evals - ubs, 0., np.inf))
        total_violation = np.sum(viols)
        if total_violation < best_violation:
            best_violation = total_violation
            best_q = q
            # Fixed: previously printed the stale best before updating it.
            print("Updating best viol to ", best_violation)
    # Load best q into tree.
    # Fixed: previously loaded `q` (the *last* sample) rather than `best_q`,
    # so the tracked best sample was silently discarded.
    mbp.SetPositions(mbp_context, best_q)
    for body_id, node in body_id_to_node_map.items():
        if body_id in floating_base_bodies:
            node.tf = drake_tf_to_torch_tf(mbp.GetFreeBodyPose(mbp_context, mbp.get_body(body_id)))
    return tree, False
def simulate_scene_tree(scene_tree, T, timestep=0.001, target_realtime_rate=1.0, meshcat=None):
    '''Forward-simulate the compiled scene tree for T seconds.

    Args:
        scene_tree: Scene tree to compile into an MBP/SceneGraph.
        T: Simulation end time in seconds.
        timestep: Plant timestep.
        target_realtime_rate: Simulator realtime-rate target.
        meshcat: Optional meshcat zmq url string; if set, connects a visualizer.
    '''
    builder, mbp, scene_graph, _, _ = compile_scene_tree_to_mbp_and_sg(
        scene_tree, timestep=timestep)
    mbp.Finalize()
    if meshcat:
        visualizer = ConnectMeshcatVisualizer(builder, scene_graph,
                                              zmq_url=meshcat)
    diagram = builder.Build()
    diag_context = diagram.CreateDefaultContext()
    # Fixed: diag_context was created but never handed to the Simulator,
    # leaving a dead local while the Simulator built its own context.
    sim = Simulator(diagram, diag_context)
    sim.set_target_realtime_rate(target_realtime_rate)
    sim.AdvanceTo(T)
|
nilq/baby-python
|
python
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TestModel(models.Model):
    """Simple fixture model used by the test suite."""
    field1 = models.CharField(max_length=255)
    field2 = models.IntegerField()

    def __str__(self):
        # e.g. field1='abc', field2=7 -> 'abc7'
        return '%s%d' % (self.field1, self.field2)
@python_2_unicode_compatible
class RelatedToTestModel(models.Model):
    """Fixture model holding a foreign key to TestModel."""
    field = models.ForeignKey(TestModel, on_delete=models.CASCADE)

    def __str__(self):
        # Fixed: __str__ must return a string; returning the related model
        # instance itself raises TypeError ("__str__ returned non-string").
        return str(self.field)
@python_2_unicode_compatible
class SearchableTestModel(models.Model):
    """Fixture model exposing an admin-autocomplete search-fields hook."""
    field1 = models.CharField(max_length=255)
    field2 = models.IntegerField()

    def __str__(self):
        # e.g. field1='abc', field2=7 -> 'abc7'
        return '%s%d' % (self.field1, self.field2)

    @staticmethod
    def autocomplete_search_fields():
        # Field(s) searched by the admin autocomplete widget.
        return 'field1'
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import torch.nn as nn
import torchvision
def densenet(n_classes, pretrained=False, n_layers=121, **kwargs):
    '''
    Creates a DenseNet based on the parameters

    Arguments:
        n_classes: The number of classes/labels
        pretrained: Boolean value indicating whether the pretrained densenet should be used
        n_layers: Network depth; one of 121, 161, 169 or 201

    Returns:
        A DenseNet model whose classifier head outputs `n_classes` sigmoid scores

    Raises:
        ValueError: If n_layers is not one of the supported depths
                    (previously an unsupported depth crashed later with
                    AttributeError on None).
    '''
    # Map the requested depth to the matching torchvision constructor.
    # Bug fix: n_layers == 161 previously built densenet169; 169 is now
    # also supported as its own depth.
    constructors = {
        121: torchvision.models.densenet121,
        161: torchvision.models.densenet161,
        169: torchvision.models.densenet169,
        201: torchvision.models.densenet201,
    }
    if n_layers not in constructors:
        raise ValueError("Unsupported DenseNet depth: %d" % n_layers)
    dnet = constructors[n_layers](pretrained=pretrained, **kwargs)
    # Replace the stock classifier with a small multi-label head
    # (sigmoid per class rather than a softmax).
    num_features = dnet.classifier.in_features
    dnet.classifier = nn.Sequential(
        nn.Linear(num_features, num_features),
        nn.Dropout(p=0.1),
        nn.Linear(num_features, n_classes),
        nn.Sigmoid(),
    )
    return dnet
def resnet(num_classes, pretrained=False, n_layers=50, **kwargs):
    '''
    Creates a ResNet based on the parameters

    Arguments:
        num_classes: The number of classes/labels
        pretrained: Boolean value indicating whether the pretrained resnet should be used
        n_layers: Network depth; one of 50, 101 or 152

    Returns:
        A ResNet model whose fully-connected head outputs `num_classes` sigmoid scores

    Raises:
        ValueError: If n_layers is not one of the supported depths
                    (previously any other depth silently fell back to resnet50).
    '''
    # Map the requested depth to the matching torchvision constructor.
    # Bug fix: the 101 and 152 branches previously built resnet50, and an
    # extra resnet50 was always constructed up front and thrown away.
    constructors = {
        50: torchvision.models.resnet50,
        101: torchvision.models.resnet101,
        152: torchvision.models.resnet152,
    }
    if n_layers not in constructors:
        raise ValueError("Unsupported ResNet depth: %d" % n_layers)
    rnet = constructors[n_layers](pretrained=pretrained, **kwargs)
    # Replace the stock fully-connected layer with a multi-label head.
    num_features = rnet.fc.in_features
    rnet.fc = nn.Sequential(
        nn.Linear(num_features, num_features),
        nn.ReLU(inplace=True),
        nn.Dropout(p=0.1),
        nn.Linear(num_features, num_classes),
        nn.Sigmoid(),
    )
    return rnet
|
nilq/baby-python
|
python
|
#hwIo.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2015-2018 NV Access Limited, Babbage B.V.
"""Raw input/output for braille displays via serial and HID.
See the L{Serial} and L{Hid} classes.
Braille display drivers must be thread-safe to use this, as it utilises a background thread.
See L{braille.BrailleDisplayDriver.isThreadSafe}.
"""
import sys
import ctypes
from ctypes import byref
from ctypes.wintypes import DWORD, USHORT
from typing import Optional, Any, Union, Tuple, Callable
import serial
from serial.win32 import OVERLAPPED, FILE_FLAG_OVERLAPPED, INVALID_HANDLE_VALUE, ERROR_IO_PENDING, COMMTIMEOUTS, CreateFile, SetCommTimeouts
import winKernel
import braille
from logHandler import log
import config
import time
# Prototype for Win32 FileIOCompletionRoutine callbacks passed to ReadFileEx:
# (dwErrorCode, dwNumberOfBytesTransfered, lpOverlapped) -> None.
LPOVERLAPPED_COMPLETION_ROUTINE = ctypes.WINFUNCTYPE(None, DWORD, DWORD, serial.win32.LPOVERLAPPED)
def _isDebug():
	# True when the user enabled hwIo debug logging in NVDA's configuration.
	return config.conf["debugLog"]["hwIo"]
class IoBase(object):
	"""Base class for raw I/O.
	This watches for data of a specified size and calls a callback when it is received.
	"""

	def __init__(
			self,
			fileHandle: Union[ctypes.wintypes.HANDLE],
			onReceive: Callable[[bytes], None],
			writeFileHandle: Optional[ctypes.wintypes.HANDLE] = None,
			onReceiveSize: int = 1
	):
		"""Constructor.
		@param fileHandle: A handle to an open I/O device opened for overlapped I/O.
			If L{writeFileHandle} is specified, this is only for input.
			The serial implementation uses a _port_handle member for this argument.
		@param onReceive: A callable taking the received data as its only argument.
		@param writeFileHandle: A handle to an open output device opened for overlapped I/O.
		@param onReceiveSize: The size (in bytes) of the data with which to call C{onReceive}.
		"""
		self._file = fileHandle
		# Fall back to the read handle for writing when no separate write handle is given.
		self._writeFile = writeFileHandle if writeFileHandle is not None else fileHandle
		self._onReceive = onReceive
		self._readSize = onReceiveSize
		self._readBuf = ctypes.create_string_buffer(onReceiveSize)
		self._readOl = OVERLAPPED()
		# Event signalled by _ioDone after each chunk is processed; see waitForRead.
		self._recvEvt = winKernel.createEvent()
		# Keep a reference to the completion routine wrapper so it is not
		# garbage collected while Windows may still invoke it.
		self._ioDoneInst = LPOVERLAPPED_COMPLETION_ROUTINE(self._ioDone)
		self._writeOl = OVERLAPPED()
		# Do the initial read.
		@winKernel.PAPCFUNC
		def init(param):
			self._initApc = None
			self._asyncRead()
		# Ensure the APC stays alive until it runs.
		self._initApc = init
		# Queue the first read on the braille background thread, which services APCs.
		braille._BgThread.queueApc(init)

	def waitForRead(self, timeout:Union[int, float]) -> bool:
		"""Wait for a chunk of data to be received and processed.
		This will return after L{onReceive} has been called or when the timeout elapses.
		@param timeout: The maximum time to wait in seconds.
		@return: C{True} if received data was processed before the timeout,
			C{False} if not.
		"""
		timeout= int(timeout*1000)
		while True:
			curTime = time.time()
			res = winKernel.waitForSingleObjectEx(self._recvEvt, timeout, True)
			if res==winKernel.WAIT_OBJECT_0:
				return True
			elif res==winKernel.WAIT_TIMEOUT:
				if _isDebug():
					log.debug("Wait timed out")
				return False
			elif res==winKernel.WAIT_IO_COMPLETION:
				if _isDebug():
					log.debug("Waiting interrupted by completed i/o")
				# An APC ran (alertable wait); deduct the elapsed time and keep waiting.
				timeout -= int((time.time()-curTime)*1000)

	def _prepareWriteBuffer(self, data: bytes) -> Tuple[int, ctypes.c_char_p]:
		""" Private helper method to allow derived classes to prepare buffers in different ways"""
		size = len(data)
		return (
			size,
			ctypes.create_string_buffer(data) # this will append a null char, which is intentional
		)

	def write(self, data: bytes):
		"""Write data to the device, blocking until the overlapped write completes."""
		if not isinstance(data, bytes):
			raise TypeError("Expected argument 'data' to be of type 'bytes'")
		if _isDebug():
			log.debug("Write: %r" % data)
		size, data = self._prepareWriteBuffer(data)
		if not ctypes.windll.kernel32.WriteFile(self._writeFile, data, size, None, byref(self._writeOl)):
			if ctypes.GetLastError() != ERROR_IO_PENDING:
				if _isDebug():
					log.debug("Write failed: %s" % ctypes.WinError())
				raise ctypes.WinError()
			byteData = DWORD()
			# Block until the pending overlapped write finishes.
			ctypes.windll.kernel32.GetOverlappedResult(self._writeFile, byref(self._writeOl), byref(byteData), True)

	def close(self):
		"""Cancel pending I/O and release the receive event.
		Note: the underlying file handles themselves are closed by subclasses.
		"""
		if _isDebug():
			log.debug("Closing")
		# Signals _ioDone that shutdown is in progress.
		self._onReceive = None
		if hasattr(self, "_file") and self._file is not INVALID_HANDLE_VALUE:
			ctypes.windll.kernel32.CancelIoEx(self._file, byref(self._readOl))
		# NOTE(review): this cancels using the *read* OVERLAPPED on the write
		# handle — possibly should be self._writeOl; confirm against usage.
		if hasattr(self, "_writeFile") and self._writeFile not in (self._file, INVALID_HANDLE_VALUE):
			ctypes.windll.kernel32.CancelIoEx(self._writeFile, byref(self._readOl))
		winKernel.closeHandle(self._recvEvt)

	def __del__(self):
		try:
			self.close()
		except AttributeError:
			if _isDebug():
				log.debugWarning("Couldn't delete object gracefully", exc_info=True)

	def _asyncRead(self):
		# Wait for _readSize bytes of data.
		# _ioDone will call onReceive once it is received.
		# onReceive can then optionally read additional bytes if it knows these are coming.
		ctypes.windll.kernel32.ReadFileEx(self._file, self._readBuf, self._readSize, byref(self._readOl), self._ioDoneInst)

	def _ioDone(self, error, numberOfBytes: int, overlapped):
		"""Completion routine invoked by Windows (via an APC) when a read finishes."""
		if not self._onReceive:
			# close has been called.
			# Drop the instance reference so the callback wrapper can be collected.
			self._ioDone = None
			return
		elif error != 0:
			raise ctypes.WinError(error)
		self._notifyReceive(self._readBuf[:numberOfBytes])
		winKernel.kernel32.SetEvent(self._recvEvt)
		# Queue the next read immediately so no data is missed.
		self._asyncRead()

	def _notifyReceive(self, data: bytes):
		"""Called when data is received.
		The base implementation just calls the onReceive callback provided to the constructor.
		This can be extended to perform tasks before/after the callback.
		@type data: bytes
		"""
		if not isinstance(data, bytes):
			raise TypeError("Expected argument 'data' to be of type 'bytes'")
		if _isDebug():
			log.debug("Read: %r" % data)
		try:
			self._onReceive(data)
		except:
			# Never let a driver callback exception kill the background thread.
			log.error("", exc_info=True)
class Serial(IoBase):
	"""Raw I/O for serial devices.
	This extends pyserial to call a callback when data is received.
	"""

	def __init__(
			self,
			*args,
			onReceive: Callable[[bytes], None],
			**kwargs):
		"""Constructor.
		Pass the arguments you would normally pass to L{serial.Serial}.
		There is also one additional required keyword argument.
		@param onReceive: A callable taking a byte of received data as its only argument.
			This callable can then call C{read} to get additional data if desired.
		"""
		self._ser = None
		# Port may be given positionally or as a keyword, mirroring serial.Serial.
		self.port = args[0] if len(args) >= 1 else kwargs["port"]
		if _isDebug():
			log.debug("Opening port %s" % self.port)
		try:
			self._ser = serial.Serial(*args, **kwargs)
		except Exception as e:
			if _isDebug():
				log.debug("Open failed: %s" % e)
			raise
		self._origTimeout = self._ser.timeout
		# We don't want a timeout while we're waiting for data.
		self._setTimeout(None)
		# Hand pyserial's underlying Win32 handle to the overlapped I/O machinery.
		super(Serial, self).__init__(self._ser._port_handle, onReceive)

	def read(self, size=1) -> bytes:
		"""Synchronously read up to C{size} bytes (for use from within onReceive)."""
		data = self._ser.read(size)
		if _isDebug():
			log.debug("Read: %r" % data)
		return data

	def write(self, data: bytes):
		"""Write via pyserial rather than the overlapped base implementation."""
		if _isDebug():
			log.debug("Write: %r" % data)
		self._ser.write(data)

	def close(self):
		if not self._ser:
			# Constructor failed before the port was opened.
			return
		super(Serial, self).close()
		self._ser.close()

	def _notifyReceive(self, data: bytes):
		# Set the timeout for onReceive in case it does a sync read.
		self._setTimeout(self._origTimeout)
		super(Serial, self)._notifyReceive(data)
		# Restore "no timeout" for the background overlapped reads.
		self._setTimeout(None)

	def _setTimeout(self, timeout: Optional[int]):
		# #6035: pyserial reconfigures all settings of the port when setting a timeout.
		# This can cause error 'Cannot configure port, some setting was wrong.'
		# Therefore, manually set the timeouts using the Win32 API.
		# Adapted from pyserial 3.4.
		timeouts = COMMTIMEOUTS()
		if timeout is not None:
			if timeout == 0:
				# Non-blocking: return immediately with whatever is buffered.
				timeouts.ReadIntervalTimeout = serial.win32.MAXDWORD
			else:
				timeouts.ReadTotalTimeoutConstant = max(int(timeout * 1000), 1)
		if timeout != 0 and self._ser._inter_byte_timeout is not None:
			timeouts.ReadIntervalTimeout = max(int(self._ser._inter_byte_timeout * 1000), 1)
		if self._ser._write_timeout is not None:
			if self._ser._write_timeout == 0:
				timeouts.WriteTotalTimeoutConstant = serial.win32.MAXDWORD
			else:
				timeouts.WriteTotalTimeoutConstant = max(int(self._ser._write_timeout * 1000), 1)
		SetCommTimeouts(self._ser._port_handle, ctypes.byref(timeouts))
class HIDP_CAPS (ctypes.Structure):
	# Mirrors the Windows HIDP_CAPS structure (hidpi.h) filled in by
	# HidP_GetCaps; field order and types must match the native layout exactly.
	_fields_ = (
		("Usage", USHORT),
		("UsagePage", USHORT),
		("InputReportByteLength", USHORT),
		("OutputReportByteLength", USHORT),
		("FeatureReportByteLength", USHORT),
		("Reserved", USHORT * 17),
		("NumberLinkCollectionNodes", USHORT),
		("NumberInputButtonCaps", USHORT),
		("NumberInputValueCaps", USHORT),
		("NumberInputDataIndices", USHORT),
		("NumberOutputButtonCaps", USHORT),
		("NumberOutputValueCaps", USHORT),
		("NumberOutputDataIndices", USHORT),
		("NumberFeatureButtonCaps", USHORT),
		("NumberFeatureValueCaps", USHORT),
		("NumberFeatureDataIndices", USHORT)
	)
class Hid(IoBase):
	"""Raw I/O for HID devices.
	"""
	# Byte length of feature reports, as reported by HidP_GetCaps.
	_featureSize: int

	def __init__(self, path: str, onReceive: Callable[[bytes], None], exclusive: bool = True):
		"""Constructor.
		@param path: The device path.
			This can be retrieved using L{hwPortUtils.listHidDevices}.
		@param onReceive: A callable taking a received input report as its only argument.
		@param exclusive: Whether to block other application's access to this device.
		"""
		if _isDebug():
			log.debug("Opening device %s" % path)
		handle = CreateFile(
			path,
			winKernel.GENERIC_READ | winKernel.GENERIC_WRITE,
			0 if exclusive else winKernel.FILE_SHARE_READ|winKernel.FILE_SHARE_WRITE,
			None,
			winKernel.OPEN_EXISTING,
			FILE_FLAG_OVERLAPPED,
			None
		)
		if handle == INVALID_HANDLE_VALUE:
			if _isDebug():
				log.debug("Open failed: %s" % ctypes.WinError())
			raise ctypes.WinError()
		# Query report sizes from the device's preparsed data, then free it.
		pd = ctypes.c_void_p()
		if not ctypes.windll.hid.HidD_GetPreparsedData(handle, byref(pd)):
			raise ctypes.WinError()
		caps = HIDP_CAPS()
		ctypes.windll.hid.HidP_GetCaps(pd, byref(caps))
		ctypes.windll.hid.HidD_FreePreparsedData(pd)
		if _isDebug():
			log.debug("Report byte lengths: input %d, output %d, feature %d"
				% (caps.InputReportByteLength, caps.OutputReportByteLength,
					caps.FeatureReportByteLength))
		self._featureSize = caps.FeatureReportByteLength
		self._writeSize = caps.OutputReportByteLength
		# Reading any less than caps.InputReportByteLength is an error.
		super(Hid, self).__init__(handle, onReceive,
			onReceiveSize=caps.InputReportByteLength
		)

	def _prepareWriteBuffer(self, data: bytes) -> Tuple[int, ctypes.c_char_p]:
		""" For HID devices, the buffer to be written must match the
		OutputReportByteLength fetched from HIDP_CAPS, to ensure this is the case
		we create a buffer of that size. We also check that data is not bigger than
		the write size, which we do not currently support. If it becomes necessary to
		support this, we could split the data and send it several chunks.
		"""
		# On Windows 7, writing any less than caps.OutputReportByteLength is also an error.
		# See also: http://www.onarm.com/forum/20152/
		if len(data) > self._writeSize:
			log.error(u"Attempting to send a buffer larger than supported.")
			raise RuntimeError("Unable to send buffer of: %d", len(data))
		return (
			self._writeSize,
			# Zero-padded up to the full output report length.
			ctypes.create_string_buffer(data, self._writeSize)
		)

	def getFeature(self, reportId: bytes) -> bytes:
		"""Get a feature report from this device.
		@param reportId: The report id.
		@return: The report, including the report id.
		"""
		# The buffer both carries the report id in and receives the report out.
		buf = ctypes.create_string_buffer(reportId, size=self._featureSize)
		if not ctypes.windll.hid.HidD_GetFeature(self._file, buf, self._featureSize):
			if _isDebug():
				log.debug("Get feature %r failed: %s"
					% (reportId, ctypes.WinError()))
			raise ctypes.WinError()
		if _isDebug():
			log.debug("Get feature: %r" % buf.raw)
		return buf.raw

	def setFeature(self, report: bytes) -> None:
		"""Send a feature report to this device.
		@param report: The report, including its id.
		"""
		buf = ctypes.create_string_buffer(report, size=len(report))
		bufSize = ctypes.sizeof(buf)
		if _isDebug():
			log.debug("Set feature: %r" % report)
		result = ctypes.windll.hid.HidD_SetFeature(
			self._file,
			buf,
			bufSize
		)
		if not result:
			if _isDebug():
				log.debug("Set feature failed: %s" % ctypes.WinError())
			raise ctypes.WinError()

	def setOutputReport(self, report: bytes) -> None:
		"""
		Write the given report to the device using HidD_SetOutputReport.
		This is instead of using the standard WriteFile which may freeze with some USB HID implementations.
		@param report: The report, including its id.
		"""
		buf = ctypes.create_string_buffer(report, size=len(report))
		bufSize = ctypes.sizeof(buf)
		if _isDebug():
			log.debug("Set output report: %r" % report)
		result = ctypes.windll.hid.HidD_SetOutputReport(
			self._writeFile,
			buf,
			bufSize
		)
		if not result:
			if _isDebug():
				log.debug("Set output report failed: %s" % ctypes.WinError())
			raise ctypes.WinError()

	def close(self):
		super(Hid, self).close()
		winKernel.closeHandle(self._file)
		self._file = None
class Bulk(IoBase):
	"""Raw I/O for bulk USB devices.
	This implementation assumes that the used Bulk device has two separate end points for input and output.
	"""

	def __init__(
			self, path: str, epIn: int, epOut: int,
			onReceive: Callable[[bytes], None],
			onReceiveSize: int = 1
	):
		"""Constructor.
		@param path: The device path.
		@param epIn: The endpoint to read data from.
		@param epOut: The endpoint to write data to.
		@param onReceive: A callable taking a received input report as its only argument.
		"""
		if _isDebug():
			log.debug("Opening device %s" % path)
		# Each endpoint is opened as its own file under the device path.
		readPath="{path}\\{endpoint}".format(path=path,endpoint=epIn)
		writePath="{path}\\{endpoint}".format(path=path,endpoint=epOut)
		readHandle = CreateFile(readPath, winKernel.GENERIC_READ,
			0, None, winKernel.OPEN_EXISTING, FILE_FLAG_OVERLAPPED, None)
		if readHandle == INVALID_HANDLE_VALUE:
			if _isDebug():
				log.debug("Open read handle failed: %s" % ctypes.WinError())
			raise ctypes.WinError()
		writeHandle = CreateFile(writePath, winKernel.GENERIC_WRITE,
			0, None, winKernel.OPEN_EXISTING, FILE_FLAG_OVERLAPPED, None)
		if writeHandle == INVALID_HANDLE_VALUE:
			if _isDebug():
				log.debug("Open write handle failed: %s" % ctypes.WinError())
			raise ctypes.WinError()
		super(Bulk, self).__init__(readHandle, onReceive,
			writeFileHandle=writeHandle, onReceiveSize=onReceiveSize)

	def close(self):
		super(Bulk, self).close()
		# Unlike Hid, both endpoint handles are owned here and must be closed.
		if hasattr(self, "_file") and self._file is not INVALID_HANDLE_VALUE:
			winKernel.closeHandle(self._file)
		if hasattr(self, "_writeFile") and self._writeFile is not INVALID_HANDLE_VALUE:
			winKernel.closeHandle(self._writeFile)
def boolToByte(arg: bool) -> bytes:
	"""Convert a bool to a single byte: b"\\x00" for False, b"\\x01" for True."""
	# Endianness is irrelevant for one byte; unsigned since this is a flag.
	return arg.to_bytes(1, sys.byteorder, signed=False)
def intToByte(arg: int) -> bytes:
	""" Convert an int (value < 256) to a single byte bytes object

	Raises OverflowError when the value does not fit in one unsigned byte.
	"""
	# Endianness is irrelevant for one byte; unsigned since this represents a length.
	return arg.to_bytes(1, sys.byteorder, signed=False)
def getByte(arg: bytes, index: int) -> bytes:
	""" Return the single byte at index"""
	# Slicing (rather than indexing) keeps the result a bytes object, not an int.
	return arg[index:index + 1]
|
nilq/baby-python
|
python
|
from finta import TA
import scipy as sp
from scipy.signal import argrelextrema
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import yfinance as yf
from collections import defaultdict
class Loss(object):
    """Records how many support/resistance levels one MA method matched."""

    def __init__(self, method, sup, res):
        # Keep the method name alongside its support and resistance scores.
        self.method, self.sup, self.res = method, sup, res

    def __repr__(self):
        return f"{self.method} sup:{self.sup} res:{self.res}"
class Kline(object):
    """Candlestick-pattern detectors, grouped into buy and sell signal lists."""

    def __init__(self):
        # Buy-side pattern checks; no sell-side patterns are implemented yet.
        self.buy = [self.double_bottom, self.hammer_head]
        self.sell = []

    def double_bottom(self, row):
        # TODO: detect a double-bottom pattern in `row`.
        pass

    def hammer_head(self, row):
        # TODO: detect a hammer candlestick while the price is decreasing.
        pass
class MA(object):  # using different combinations
    """Moving-average toolkit for one yfinance ticker.

    Downloads price history at several intervals, computes a family of
    SMA/WMA/HMA/EMA series, and scores each series as a potential
    support/resistance line.
    """

    def __init__(self, yahoo_tick):
        """
        @param yahoo_tick: a yfinance Ticker object to pull history from.
        """
        # Bug fix: keep the ticker on the instance so refresh() re-downloads
        # data for *this* instance instead of reading the module-level
        # `yahoo_tick` global (which broke any instance built on another ticker).
        self.yahoo_tick = yahoo_tick
        self.days = [5,10,20,50,80,120,180,200]
        self.fibdays = [8,21,34,55,89,144,233]
        self.madict = {} #key:{day:pddf} #everything in here is shown in the plot
        self.plotma = [] #ma lines to be plotted
        self.maxdaylength = max(max(self.days), max(self.fibdays))
        self.expected_num = 20
        self.shiftAmount = 7 #how many max to look back
        self.chosen_sup ={}
        self.chosen_res = {}
        self.half_hist = yahoo_tick.history(period=f"60d", interval="30m")
        self.hour_hist = yahoo_tick.history(period=f"60d", interval="1h")
        self.__hist = yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1d")
        self.week_hist = yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="5d")
        self.month_hist = yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1mo")
        self.get_MA(self.hist)

    def refresh(self, choice=None):
        """Re-download one of the cached history frames, or all when choice is None."""
        if choice=="hour":
            self.hour_hist = self.yahoo_tick.history(period=f"60d", interval="1h")
        elif choice == "half":
            self.half_hist = self.yahoo_tick.history(period=f"60d", interval="30m")
        elif choice == "week":
            self.week_hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="5d")
        elif choice == "month":
            self.month_hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1mo")
        elif choice == "day":
            # NOTE: assigns the mangled attribute directly, bypassing the
            # `hist` setter, so MAs are not recomputed here (original behavior).
            self.__hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1d")
        else:
            self.half_hist = self.yahoo_tick.history(period=f"60d", interval="30m")
            self.hour_hist = self.yahoo_tick.history(period=f"60d", interval="1h")
            self.__hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1d")
            self.week_hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="5d")
            self.month_hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1mo")

    @property
    def hist(self):
        return self.__hist

    @hist.setter
    def hist(self, value):
        # Setting a new daily history recomputes all moving averages.
        self.__hist = value
        self.get_MA(value)

    def get_MA(self, hist):
        """Compute SMA/WMA/HMA/EMA for every configured window into self.madict."""
        # finta expects lowercase OHLC column names.
        new_cols = [x.lower() for x in hist.columns]
        hist.columns = new_cols
        for day in self.days:
            self.madict[f"SMA_{day}"] = TA.SMA(hist, day)
            self.madict[f"SMA_{day}"].name = f"SMA_{day}"
        for day in self.days:
            self.madict[f"WMA_{day}"] = TA.WMA(hist, day)
        for day in self.days:
            self.madict[f"HMA_{day}"] = TA.HMA(hist, day)
        for day in self.days:
            self.madict[f"EMA_{day}"] = TA.EMA(hist, day)
        for day in self.fibdays:
            self.madict[f"SMA_{day}F"] = TA.SMA(hist, day)
        for day in self.fibdays:
            self.madict[f"WMA_{day}F"] = TA.WMA(hist, day)
        for day in self.fibdays:
            self.madict[f"HMA_{day}F"] = TA.HMA(hist, day)
        for day in self.fibdays:
            self.madict[f"EMA_{day}F"] = TA.EMA(hist, day)
        # Locate SMA20/SMA50 crossover points (excluding SMA50's NaN warm-up).
        mavalue = self.madict[f"SMA_50"]
        sma50 = np.argwhere(np.isnan(mavalue.values)).flatten()
        curve_point = np.setxor1d(np.argwhere(np.diff(np.sign(self.madict[f"SMA_20"].values - mavalue.values))).flatten(), sma50)
        indexs = mavalue.iloc[curve_point]
        dff = self.madict[f"SMA_50"].to_frame('50dfvalue')
        dff["inter"] = np.nan
        dff.loc[indexs.index,"inter"] = mavalue.loc[indexs.index]
        self.overlap_sma2050 = dff["inter"]
        #print(dff)
        '''
        plt.plot(self.madict[f"SMA_50"].index, self.madict[f"SMA_50"] , "-o")
        plt.plot(self.madict[f"SMA_20"].index, self.madict[f"SMA_20"] , "-o")
        plt.plot(self.overlap_sma2050.index, self.overlap_sma2050, "o")
        plt.show()
        exit()
        '''

    def findATR(self, hist):
        """Average True Range series with the NaN warm-up rows dropped."""
        result= TA.ATR(hist)
        return result.dropna()

    def findRSI(self, hist):
        """14-period RSI series with the NaN warm-up rows dropped."""
        result = TA.RSI(hist, 14)
        return result.dropna()

    def update_hists(self, row_data):
        #update half, hour, day, week, month
        pass

    def find_max_min(self, hist): #find all the max points and all the min points
        """Mark local extrema of hist.Bottom in a 'critical' column, then score sup/res."""
        arr_size = len(hist.Bottom)
        # Expect roughly self.expected_num peaks; size the CWT widths accordingly.
        expected_width = arr_size // self.expected_num // 2
        print('expected width of peaks: ', expected_width)
        maximaIdxs = sp.signal.find_peaks_cwt(hist.Bottom, np.linspace(2, expected_width, 10))
        minimaIdxs = sp.signal.find_peaks_cwt(-1*hist.Bottom, np.linspace(2, expected_width, 10))
        hist["critical"] = ""
        # find_peaks_cwt returns positional indices, so temporarily reset the index.
        old_index = hist.index.name
        hist = hist.reset_index()
        hist.loc[minimaIdxs, "critical"] = "min"
        hist.loc[maximaIdxs, "critical"] = "max"
        hist = hist.set_index(old_index)
        hist = self.findSupandRes(hist)
        return hist

    def findSupandRes(self, hist):
        """Score every MA series as support/resistance at the marked extrema.

        Picks the best-scoring method for each side, plots the matching points,
        and records the winning method names in self.plotma.
        """
        lossvalue = {}
        for method, series in self.madict.items():
            series.name = method
            # Align on calendar dates when the history is intraday.
            old_index = hist.index
            if hist.index.name == "Datetime":
                hist.index = hist.index.date
            hist = pd.merge(hist, series,left_index=True, right_index=True)
            hist.index = old_index
            # Shifted copies give each extremum its preceding neighbors to compare.
            min_df = hist[hist["critical"] == "min"]
            min_df_shift = pd.DataFrame(index=min_df.index)
            for i in range(1, self.shiftAmount+1):
                min_df_shift[[f'{method}_{i}', f'Bottom_{i}']] = min_df[[method, "Bottom"]].shift(i)
                #min_df_shift[[f'{method}_{-i}', f'Bottom_{-i}']] = min_df[[method, "Bottom"]].shift(-i, fill_value=0)
            max_df = hist[hist["critical"] == "max"]
            max_df_shift = pd.DataFrame(index=max_df.index)
            for i in range(1, self.shiftAmount+1):
                max_df_shift[[f'{method}_{i}', f'Bottom_{i}']] = max_df[[method, "Bottom"]].shift(i)
                #max_df_shift[[f'{method}_{-i}', f'Bottom_{-i}']] = max_df[[method, "Bottom"]].shift(-i, fill_value=0)
            sup_cond, res_cond = self.findcondition(hist, method, min_df_shift, max_df_shift)
            hist.loc[min_df.index, f'{method}valuemin'] = sup_cond
            hist.loc[max_df.index, f'{method}valuemax'] = res_cond
            sup_count = hist.loc[min_df.index, f'{method}valuemin'].sum()
            res_count = hist.loc[max_df.index, f'{method}valuemax'].sum()
            self.chosen_sup[method] = sup_cond
            self.chosen_res[method] = res_cond
            lossvalue[method] = Loss(method, sup_count, res_count)
        print(lossvalue)
        # Best method = the one whose line touched the most extrema on each side.
        key_min_sup = max(lossvalue, key=lambda k: lossvalue[k].sup)
        key_min_res = max(lossvalue, key=lambda k: lossvalue[k].res)
        supMethod = lossvalue[key_min_sup].method
        resMethod = lossvalue[key_min_res].method
        sup_cond = self.chosen_sup[supMethod]
        index_v = sup_cond[sup_cond==1].index
        plt.plot(sup_cond[sup_cond==1].index, hist.loc[index_v, "close"], "o", markersize=12, label="sup")
        res_cond = self.chosen_res[resMethod]
        index_v = res_cond[res_cond==1].index
        plt.plot(res_cond[res_cond==1].index, hist.loc[index_v, "close"], "o", markersize=12, label="res")
        print(f"{lossvalue[key_min_sup].method} sup:{lossvalue[key_min_sup].sup}")
        print(f"{lossvalue[key_min_res].method} res:{lossvalue[key_min_res].res}")
        if lossvalue[key_min_sup].sup != 0:
            self.plotma.append(lossvalue[key_min_sup].method)
        if lossvalue[key_min_res].res != 0:
            self.plotma.append(lossvalue[key_min_res].method)
        #self.plotma.append("HMA_80")
        return hist

    def findcondition(self, hist, method, min_df, max_df):
        """Return 0/1 series marking extrema where the MA line acted as sup/res."""
        col_names = min_df.columns
        min_df["all_met"] = min_df.apply(lambda row : self.filter_condition(row, "min", col_names), axis = 1)
        max_df["all_met"] = max_df.apply(lambda row : self.filter_condition(row, "max", col_names), axis = 1)
        # Support: the MA sits between the candle's low and the Bottom level.
        sup_cond = (hist.loc[min_df.index, method] <= hist.loc[min_df.index, 'Bottom']) & \
                   (hist.loc[min_df.index, method] >= hist.loc[min_df.index, 'low'])& \
                   min_df["all_met"]
        #(hist.loc[min_df.index, method] >= hist.loc[min_df.index, 'Bottom'] - hist.loc[min_df.index, 'Bottom']*self.threshold) & \
        # Resistance: the MA sits between the Bottom level and the candle's high.
        res_cond = (hist.loc[max_df.index, method] >= hist.loc[max_df.index, 'Bottom']) & \
                   (hist.loc[max_df.index, method] <= hist.loc[max_df.index, 'high']) & \
                   max_df["all_met"]
        #(hist.loc[max_df.index, method] <= hist.loc[max_df.index, 'Bottom'] + hist.loc[max_df.index, 'Bottom']*self.threshold) & \
        return sup_cond.astype(int), res_cond.astype(int)

    def filter_condition(self, row, target, col_names):
        """True when every shifted MA value stays on the correct side of its Bottom."""
        results = []
        if target == "min":
            for i, value in enumerate(row):
                if i % 2 == 0:
                    results.append(value <= row[col_names[i+1]])
        if target == "max":
            for i, value in enumerate(row):
                if i % 2 == 0:
                    results.append(value >= row[col_names[i+1]])
        return np.all(results)
if __name__ == "__main__":
    # NOTE(review): `MA` defines no `screener` method, so the call below raises
    # AttributeError at runtime; likewise the commented-out `get_max_min` (the
    # class defines `find_max_min`). Confirm the intended entry point.
    smoothing = 3
    window = 10
    yahoo_tick = yf.Ticker("SENS")
    myMA = MA(yahoo_tick)
    ticks = ["SENS", "GIK", "NNDM", "SPY"]
    ema_list = [5]
    window_list = [5]
    results = myMA.screener(ticks, ema_list, window_list, plot=True, results=True)
    print(results)
    plt.show()
    #minmax = myMA.get_max_min(smoothing, window)
    #print(minmax)
|
nilq/baby-python
|
python
|
# This file is Copyright (c) 2010 by the GPSD project
# BSD terms apply: see the file COPYING in the distribution root for details.
#
# Creates build/lib.linux-${arch}-${pyvers}/gpspacket.so,
# where ${arch} is an architecture and ${pyvers} is a Python version.
from distutils.core import setup, Extension
import os
import sys
# For VPATH builds, this script must be run from $(srcdir) with the
# abs_builddir environment variable set to the location of the build
# directory. This is necessary because Python's distutils package
# does not have built-in support for VPATH builds.
# These dependencies are enforced here and not in the Makefile to make
# it easier to build the Python parts without building everything else
# (the user can run 'python setup.py' without having to run 'make').
# Header files that must exist (generated by make) before the extensions build.
needed_files = ['gpsd.h', 'packet_names.h']
created_files = []

# Manual pages are only installed when a man-page generator was supplied via
# the (consumed) --mangenerator command line option.
manpages = []
try:
    where = sys.argv.index('--mangenerator')
    # Doesn't matter what it is, just that we have one
    if sys.argv[where+1]:
        manpages=[('share/man/man1', ['gpscat.1', 'gpsfake.1','gpsprof.1',
                                      'xgps.1', 'xgpsspeed.1'])]
        print("Installing manual pages, generator is %s" %( sys.argv[where+1]))
    # Strip the option and its argument so distutils does not see them.
    sys.argv = sys.argv[:where] + sys.argv[where+2:]
except ValueError:
    pass
if not manpages:
    print("No XML processor, omitting manual-page installation.")

# Honour $MAKE when set and non-empty; otherwise fall back to plain "make".
# (`get(...) or default` matches the old and/or idiom exactly, including the
# empty-string case.)
MAKE = os.environ.get("MAKE") or "make"

if 'clean' not in sys.argv:
    # For VPATH builds, abs_builddir points at the build directory (see the
    # header comment); empty means an in-tree build.
    abs_builddir = os.environ.get("abs_builddir") or ""
    if not os.path.exists(os.path.join(abs_builddir, 'gpsd_config.h')):
        sys.stderr.write('\nPlease run configure first!\n')
        sys.exit(1)
    cdcmd = ("cd '" + abs_builddir + "' && ") if abs_builddir else ""
    for f_name in needed_files:
        # TODO: Shouldn't make be run unconditionally in case a
        # dependency of f_name has been updated?
        if not os.path.exists(os.path.join(abs_builddir, f_name)):
            cmd = cdcmd + MAKE + " '" + f_name + "'"
            print(cmd)
            make_out = os.popen(cmd)
            print(make_out.read())
            if make_out.close():
                sys.exit(1)
            created_files.append(f_name)

gpspacket_sources = ["gpspacket.c", "packet.c", "isgps.c",
                     "driver_rtcm2.c", "strl.c", "hex.c", "crc24q.c"]
include_dirs = [ os.path.realpath(os.path.dirname(__file__)) ]

# Ask the Makefile for the release version string; it is the last non-empty
# line of the command output.
version_out = os.popen(MAKE + " -s version")
version = version_out.read()
print(version)
if version_out.close():
    sys.exit(1)
version = version.split('\n')[-2]
version = version.strip()

setup( name="gps",
       version=version,
       description='Python libraries for the gpsd service daemon',
       url="http://gpsd.berlios.de/",
       author='the GPSD project',
       author_email="gpsd-dev@lists.berlios.de",
       license="BSD",
       ext_modules=[
           Extension("gps.packet", gpspacket_sources, include_dirs=include_dirs),
           Extension("gps.clienthelpers", ["gpsclient.c", "geoid.c", "gpsdclient.c", "strl.c"], include_dirs=include_dirs)
       ],
       packages = ['gps'],
       scripts = ['gpscat','gpsfake','gpsprof', 'xgps', 'xgpsspeed'],
       data_files= manpages
     )
|
nilq/baby-python
|
python
|
"""
The :class:`Signature` object and associated functionality. This
provides a way to represent rich callable objects and type check
calls.
"""
from collections import defaultdict
from .error_code import ErrorCode
from .stacked_scopes import (
AndConstraint,
Composite,
Constraint,
ConstraintType,
NULL_CONSTRAINT,
AbstractConstraint,
Varname,
)
from .value import (
AnnotatedValue,
AsyncTaskIncompleteValue,
CanAssignContext,
GenericValue,
HasAttrExtension,
HasAttrGuardExtension,
KnownValue,
ParameterTypeGuardExtension,
SequenceIncompleteValue,
DictIncompleteValue,
TypeGuardExtension,
TypeVarValue,
TypedDictValue,
UNRESOLVED_VALUE,
Value,
TypeVarMap,
CanAssign,
CanAssignError,
extract_typevars,
stringify_object,
unify_typevar_maps,
unite_values,
)
import ast
import asynq
import collections.abc
from dataclasses import dataclass, field
from functools import reduce
from types import MethodType, FunctionType
import inspect
import qcore
from qcore.helpers import safe_str
from typing import (
Any,
Iterable,
NamedTuple,
Optional,
ClassVar,
Union,
Callable,
Dict,
List,
Set,
TypeVar,
Tuple,
TYPE_CHECKING,
)
from typing_extensions import Literal
if TYPE_CHECKING:
from .name_check_visitor import NameCheckVisitor
# Sentinel meaning "no default / no annotation", re-exported from inspect.
EMPTY = inspect.Parameter.empty
# Markers distinguishing *args / **kwargs entries in an Argument tuple.
ARGS = qcore.MarkerObject("*args")
KWARGS = qcore.MarkerObject("**kwargs")
# Representation of a single argument to a call. Second member is
# None for positional args, str for keyword args, ARGS for *args, KWARGS
# for **kwargs.
Argument = Tuple[Composite, Union[None, str, Literal[ARGS], Literal[KWARGS]]]
class ImplReturn(NamedTuple):
    """Return value of :term:`impl` functions.

    These functions return either a single :class:`pyanalyze.value.Value`
    object, indicating what the function returns, or an instance of this class,
    which additionally carries constraints implied by the call.
    """

    return_value: Value
    """The return value of the function."""
    constraint: AbstractConstraint = NULL_CONSTRAINT
    """A :class:`pyanalyze.stacked_scopes.Constraint` indicating things that are true
    if the function returns a truthy value."""
    no_return_unless: AbstractConstraint = NULL_CONSTRAINT
    """A :class:`pyanalyze.stacked_scopes.Constraint` indicating things that are true
    unless the function does not return."""
@dataclass
class CallContext:
    """The context passed to an :term:`impl` function."""

    vars: Dict[str, Value]
    """Dictionary of variable names passed to the function."""
    visitor: "NameCheckVisitor"
    """Using the visitor can allow various kinds of advanced logic
    in impl functions."""
    # Bound arguments for this call, used to look up per-argument Composites.
    bound_args: inspect.BoundArguments
    node: ast.AST
    """AST node corresponding to the function call. Useful for
    showing errors."""

    def ast_for_arg(self, arg: str) -> Optional[ast.AST]:
        """Return the AST node for the given argument, if known."""
        composite = self.composite_for_arg(arg)
        if composite is not None:
            return composite.node
        return None

    def varname_for_arg(self, arg: str) -> Optional[Varname]:
        """Return a :term:`varname` corresponding to the given function argument.

        This is useful for creating a :class:`pyanalyze.stacked_scopes.Constraint`
        referencing the argument.
        """
        composite = self.composite_for_arg(arg)
        if composite is not None:
            return composite.varname
        return None

    def composite_for_arg(self, arg: str) -> Optional[Composite]:
        """Return the :class:`Composite` bound to the given argument, if any."""
        composite = self.bound_args.arguments.get(arg)
        if isinstance(composite, Composite):
            return composite
        return None

    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.incompatible_call,
        *,
        arg: Optional[str] = None,
        node: Optional[ast.AST] = None,
        detail: Optional[str] = None,
    ) -> None:
        """Show an error.

        If the `arg` parameter is given, we attempt to find the
        AST for that argument to the function and point the error
        to it.
        """
        # Bug fix: the caller-supplied `node` used to be unconditionally
        # overwritten with None, so the parameter was dead. Respect it, and
        # only fall back to the argument's AST and then to the call node.
        if node is None and arg is not None:
            node = self.ast_for_arg(arg)
        if node is None:
            node = self.node
        self.visitor.show_error(node, message, error_code=error_code, detail=detail)
# Type of an impl callback: takes the CallContext for a call and returns either
# the call's return Value or an ImplReturn carrying extra constraints.
Impl = Callable[[CallContext], Union[Value, ImplReturn]]
class SigParameter(inspect.Parameter):
    """Wrapper around :class:`inspect.Parameter` that stores annotations
    as :class:`pyanalyze.value.Value` objects."""

    __slots__ = ()

    def __init__(
        self,
        name: str,
        kind: inspect._ParameterKind = inspect.Parameter.POSITIONAL_OR_KEYWORD,
        *,
        default: Union[None, Value, Literal[EMPTY]] = None,
        annotation: Union[None, Value, Literal[EMPTY]] = None,
    ) -> None:
        # None means "absent"; a bare Value default is wrapped in a Composite
        # so downstream code always sees a uniform default representation.
        if default is None:
            wrapped_default = EMPTY
        elif isinstance(default, Value):
            wrapped_default = Composite(default, None, None)
        else:
            wrapped_default = default
        wrapped_annotation = EMPTY if annotation is None else annotation
        super().__init__(name, kind, default=wrapped_default, annotation=wrapped_annotation)

    def substitute_typevars(self, typevars: TypeVarMap) -> "SigParameter":
        """Return a copy of this parameter with typevars in the annotation substituted."""
        new_annotation = (
            self._annotation
            if self._annotation is EMPTY
            else self._annotation.substitute_typevars(typevars)
        )
        return SigParameter(
            name=self._name,
            kind=self._kind,
            default=self._default,
            annotation=new_annotation,
        )

    def get_annotation(self) -> Value:
        """Return the annotation as a Value, defaulting to UNRESOLVED_VALUE when absent."""
        if self.annotation is EMPTY:
            return UNRESOLVED_VALUE
        return self.annotation

    def __str__(self) -> str:
        # Mirrors inspect.Parameter.__str__, but renders Value annotations
        # and Composite-wrapped defaults.
        rendered = self._name
        if self._annotation is not EMPTY:
            rendered = f"{rendered}: {self._annotation}"
        if self._default is not EMPTY:
            if self._annotation is not EMPTY:
                rendered = f"{rendered} = {self._default.value}"
            else:
                rendered = f"{rendered}={self._default.value}"
        if self._kind is SigParameter.VAR_POSITIONAL:
            rendered = "*" + rendered
        elif self._kind is SigParameter.VAR_KEYWORD:
            rendered = "**" + rendered
        return rendered
@dataclass
class Signature:
    """Represents the signature of a Python callable.
    This is used to type check function calls and it powers the
    :class:`pyanalyze.value.CallableValue` class.
    """
    # Pseudo-parameter name under which the return annotation's typevars are
    # recorded in typevars_of_params.
    _return_key: ClassVar[str] = "%return"
    signature: inspect.Signature
    """The underlying :class:`inspect.Signature`, storing the parameters
    and the return annotation."""
    impl: Optional[Impl] = field(default=None, compare=False)
    """:term:`impl` function for this signature."""
    callable: Optional[Callable[..., Any]] = field(default=None, compare=False)
    """The callable that this signature represents."""
    is_asynq: bool = False
    """Whether this signature represents an asynq function."""
    # False when the return annotation was defaulted rather than declared
    # (see Signature.make).
    has_return_annotation: bool = True
    is_ellipsis_args: bool = False
    """Whether this signature represents a ``Callable[..., T]`` callable. Such
    a callable is compatible with any other callable with a compatible return type."""
    allow_call: bool = False
    """Whether type checking can call the actual function to retrieve a precise return value."""
    # Caches computed in __post_init__; excluded from init/repr/comparison.
    typevars_of_params: Dict[str, List["TypeVar"]] = field(
        init=False, default_factory=dict, repr=False, compare=False
    )
    all_typevars: Set["TypeVar"] = field(
        init=False, default_factory=set, repr=False, compare=False
    )
    def __post_init__(self) -> None:
        # Precompute which typevars appear in each parameter annotation (and
        # in the return annotation) so check_call can solve for them cheaply.
        for param_name, param in self.signature.parameters.items():
            if param.annotation is EMPTY:
                continue
            typevars = list(extract_typevars(param.annotation))
            if typevars:
                self.typevars_of_params[param_name] = typevars
        if self.signature.return_annotation is not EMPTY:
            return_typevars = list(extract_typevars(self.signature.return_annotation))
            if return_typevars:
                self.typevars_of_params[self._return_key] = return_typevars
        self.all_typevars = {
            typevar
            for tv_list in self.typevars_of_params.values()
            for typevar in tv_list
        }
    def _check_param_type_compatibility(
        self,
        param: SigParameter,
        var_value: Value,
        visitor: "NameCheckVisitor",
        node: ast.AST,
        typevar_map: TypeVarMap,
    ) -> bool:
        """Check one argument value against its parameter's annotation.

        Reports an error through *visitor* and returns False on a mismatch;
        returns True otherwise. The parameter's own default value is exempt
        from the check (second half of the first condition).
        """
        if param.annotation is not EMPTY and not (
            isinstance(param.default, Composite) and var_value is param.default.value
        ):
            if typevar_map:
                param_typ = param.annotation.substitute_typevars(typevar_map)
            else:
                param_typ = param.annotation
            tv_map = param_typ.can_assign(var_value, visitor)
            if isinstance(tv_map, CanAssignError):
                visitor.show_error(
                    node,
                    f"Incompatible argument type for {param.name}: expected {param_typ}"
                    f" but got {var_value}",
                    ErrorCode.incompatible_argument,
                    detail=str(tv_map),
                )
                return False
        return True
    def _translate_bound_arg(self, argument: Any) -> Value:
        """Turn an entry of BoundArguments.arguments into a Value.

        Handles missing arguments (EMPTY), single Composites, and the
        tuple/dict containers that bind() produces for *args/**kwargs.
        """
        if argument is EMPTY:
            return UNRESOLVED_VALUE
        elif isinstance(argument, Composite):
            return argument.value
        elif isinstance(argument, tuple):
            return SequenceIncompleteValue(
                tuple, [composite.value for composite in argument]
            )
        elif isinstance(argument, dict):
            return DictIncompleteValue(
                [
                    (KnownValue(key), composite.value)
                    for key, composite in argument.items()
                ]
            )
        else:
            raise TypeError(repr(argument))
    def _apply_annotated_constraints(
        self, raw_return: Union[Value, ImplReturn], bound_args: inspect.BoundArguments
    ) -> ImplReturn:
        """Fold TypeGuard-style metadata on the return value into Constraints.

        Handles ParameterTypeGuardExtension, TypeGuardExtension, and
        HasAttrGuardExtension annotations, AND-ing the resulting constraints
        with any constraint already on the ImplReturn.
        """
        if isinstance(raw_return, Value):
            ret = ImplReturn(raw_return)
        else:
            ret = raw_return
        constraints = []
        if ret.constraint is not NULL_CONSTRAINT:
            constraints.append(ret.constraint)
        if isinstance(ret.return_value, AnnotatedValue):
            for guard in ret.return_value.get_metadata_of_type(
                ParameterTypeGuardExtension
            ):
                if guard.varname in bound_args.arguments:
                    composite = bound_args.arguments[guard.varname]
                    if (
                        isinstance(composite, Composite)
                        and composite.varname is not None
                    ):
                        constraint = Constraint(
                            composite.varname,
                            ConstraintType.is_value_object,
                            True,
                            guard.guarded_type,
                        )
                        constraints.append(constraint)
            for guard in ret.return_value.get_metadata_of_type(TypeGuardExtension):
                # This might miss some cases where we should use the second argument instead. We'll
                # have to come up with additional heuristics if that comes up.
                # Heuristic method detection: bound methods, or functions whose
                # qualname differs from their name (i.e. defined in a class),
                # guard their second argument (after self).
                if isinstance(self.callable, MethodType) or (
                    isinstance(self.callable, FunctionType)
                    and self.callable.__name__ != self.callable.__qualname__
                ):
                    index = 1
                else:
                    index = 0
                composite = bound_args.args[index]
                if isinstance(composite, Composite) and composite.varname is not None:
                    constraint = Constraint(
                        composite.varname,
                        ConstraintType.is_value_object,
                        True,
                        guard.guarded_type,
                    )
                    constraints.append(constraint)
            for guard in ret.return_value.get_metadata_of_type(HasAttrGuardExtension):
                if guard.varname in bound_args.arguments:
                    composite = bound_args.arguments[guard.varname]
                    if (
                        isinstance(composite, Composite)
                        and composite.varname is not None
                    ):
                        constraint = Constraint(
                            composite.varname,
                            ConstraintType.add_annotation,
                            True,
                            HasAttrExtension(
                                guard.attribute_name, guard.attribute_type
                            ),
                        )
                        constraints.append(constraint)
        if constraints:
            constraint = reduce(AndConstraint, constraints)
        else:
            constraint = NULL_CONSTRAINT
        return ImplReturn(ret.return_value, constraint, ret.no_return_unless)
    def check_call(
        self, args: Iterable[Argument], visitor: "NameCheckVisitor", node: ast.AST
    ) -> ImplReturn:
        """Type check a call to this Signature with the given arguments.
        This may call the :term:`impl` function or the underlying callable,
        but normally just uses :meth:`inspect.Signature.bind`.
        """
        # Split the flat (composite, label) argument list into positional and
        # keyword buckets.
        call_args = []
        call_kwargs = {}
        for composite, label in args:
            if label is None:
                call_args.append(composite)
            elif isinstance(label, str):
                call_kwargs[label] = composite
            elif label is ARGS or label is KWARGS:
                # TODO handle these:
                # - type check that they are iterables/mappings
                # - if it's a KnownValue or SequenceIncompleteValue, just add to call_args
                # - else do something smart to still typecheck the call
                return ImplReturn(UNRESOLVED_VALUE)
        # Callable[..., T]: accept any arguments, only the return type matters.
        if self.is_ellipsis_args:
            if self.allow_call:
                runtime_return = self._maybe_perform_call(
                    call_args, call_kwargs, visitor, node
                )
                if runtime_return is not None:
                    return ImplReturn(runtime_return)
            return_value = self.signature.return_annotation
            if return_value is EMPTY:
                return ImplReturn(UNRESOLVED_VALUE)
            return ImplReturn(return_value)
        try:
            bound_args = self.signature.bind(*call_args, **call_kwargs)
        except TypeError as e:
            if self.callable is not None:
                message = f"In call to {stringify_object(self.callable)}: {e}"
            else:
                message = str(e)
            visitor.show_error(node, message, ErrorCode.incompatible_call)
            return ImplReturn(UNRESOLVED_VALUE)
        bound_args.apply_defaults()
        variables = {
            name: self._translate_bound_arg(value)
            for name, value in bound_args.arguments.items()
        }
        return_value = self.signature.return_annotation
        # Solve for typevars from the actual arguments before checking
        # parameter compatibility, so the check sees concrete types.
        typevar_values: Dict[TypeVar, Value] = {}
        if self.all_typevars:
            tv_possible_values: Dict[TypeVar, List[Value]] = defaultdict(list)
            for param_name in self.typevars_of_params:
                if param_name == self._return_key:
                    continue
                var_value = variables[param_name]
                param = self.signature.parameters[param_name]
                if param.annotation is EMPTY:
                    continue
                tv_map = param.annotation.can_assign(var_value, visitor)
                if not isinstance(tv_map, CanAssignError):
                    # For now, the first assignment wins.
                    for typevar, value in tv_map.items():
                        tv_possible_values[typevar].append(value)
            typevar_values = {
                typevar: unite_values(
                    *tv_possible_values.get(typevar, [UNRESOLVED_VALUE])
                )
                for typevar in self.all_typevars
            }
            if self._return_key in self.typevars_of_params:
                return_value = return_value.substitute_typevars(typevar_values)
        had_error = False
        for name, var_value in variables.items():
            param = self.signature.parameters[name]
            if not self._check_param_type_compatibility(
                param, var_value, visitor, node, typevar_values
            ):
                had_error = True
        # don't call the implementation function if we had an error, so that
        # the implementation function doesn't have to worry about basic
        # type checking
        if not had_error and self.impl is not None:
            ctx = CallContext(
                vars=variables, visitor=visitor, bound_args=bound_args, node=node
            )
            return_value = self.impl(ctx)
        # Optionally execute the real callable to get a precise return value;
        # this only succeeds when every argument is a KnownValue.
        if self.allow_call:
            runtime_return = self._maybe_perform_call(
                call_args, call_kwargs, visitor, node
            )
            if runtime_return is not None:
                if isinstance(return_value, ImplReturn):
                    return_value = ImplReturn(
                        runtime_return,
                        return_value.constraint,
                        return_value.no_return_unless,
                    )
                else:
                    return_value = runtime_return
        if return_value is EMPTY:
            return ImplReturn(UNRESOLVED_VALUE)
        else:
            return self._apply_annotated_constraints(return_value, bound_args)
    def _maybe_perform_call(
        self,
        call_args: List[Composite],
        call_kwargs: Dict[str, Composite],
        visitor: "NameCheckVisitor",
        node: ast.AST,
    ) -> Optional[Value]:
        """Call the underlying callable with known argument values.

        Returns None when there is no callable, when any argument is not a
        KnownValue, or when the call itself raised (an error is reported in
        that case).
        """
        if self.callable is None:
            return None
        args = []
        for composite in call_args:
            if isinstance(composite.value, KnownValue):
                args.append(composite.value.val)
            else:
                return None
        kwargs = {}
        for key, composite in call_kwargs.items():
            if isinstance(composite.value, KnownValue):
                kwargs[key] = composite.value.val
            else:
                return None
        try:
            value = self.callable(*args, **kwargs)
        except Exception as e:
            message = f"Error calling {self}: {safe_str(e)}"
            visitor._show_error_if_checking(node, message, ErrorCode.incompatible_call)
            return None
        else:
            return KnownValue(value)
    def can_assign(self, other: "Signature", ctx: CanAssignContext) -> CanAssign:
        """Equivalent of :meth:`pyanalyze.value.Value.can_assign`. Checks
        whether another ``Signature`` is compatible with this ``Signature``.
        """
        if self.is_asynq and not other.is_asynq:
            return CanAssignError("callable is not asynq")
        # Return types are covariant: theirs must be assignable to ours.
        their_return = other.signature.return_annotation
        my_return = self.signature.return_annotation
        return_tv_map = my_return.can_assign(their_return, ctx)
        if isinstance(return_tv_map, CanAssignError):
            return CanAssignError(
                "return annotation is not compatible", [return_tv_map]
            )
        if self.is_ellipsis_args or other.is_ellipsis_args:
            return {}
        tv_maps = [return_tv_map]
        their_params = list(other.signature.parameters.values())
        # Locate the other signature's *args/**kwargs, if any, so positional
        # and keyword parameters of ours can fall through to them.
        their_args = other.get_param_of_kind(SigParameter.VAR_POSITIONAL)
        if their_args is not None:
            their_args_index = their_params.index(their_args)
            args_annotation = their_args.get_annotation()
        else:
            their_args_index = -1
            args_annotation = None
        their_kwargs = other.get_param_of_kind(SigParameter.VAR_KEYWORD)
        if their_kwargs is not None:
            kwargs_annotation = their_kwargs.get_annotation()
        else:
            kwargs_annotation = None
        # Parameters of theirs matched so far; leftovers are checked against
        # our *args/**kwargs below.
        consumed_positional = set()
        consumed_keyword = set()
        # Parameter types are contravariant: our annotation must be
        # assignable to theirs.
        for i, my_param in enumerate(self.signature.parameters.values()):
            my_annotation = my_param.get_annotation()
            if my_param.kind is SigParameter.POSITIONAL_ONLY:
                if i < len(their_params) and their_params[i].kind in (
                    SigParameter.POSITIONAL_ONLY,
                    SigParameter.POSITIONAL_OR_KEYWORD,
                ):
                    if (
                        my_param.default is not EMPTY
                        and their_params[i].default is EMPTY
                    ):
                        return CanAssignError(
                            f"positional-only param {my_param.name!r} has no default"
                        )
                    their_annotation = their_params[i].get_annotation()
                    tv_map = their_annotation.can_assign(my_annotation, ctx)
                    if isinstance(tv_map, CanAssignError):
                        return CanAssignError(
                            f"type of positional-only parameter {my_param.name!r} is"
                            " incompatible",
                            [tv_map],
                        )
                    tv_maps.append(tv_map)
                    consumed_positional.add(their_params[i].name)
                elif args_annotation is not None:
                    new_tv_maps = can_assign_var_positional(
                        my_param, args_annotation, i - their_args_index, ctx
                    )
                    if isinstance(new_tv_maps, CanAssignError):
                        return new_tv_maps
                    tv_maps += new_tv_maps
                else:
                    return CanAssignError(
                        f"positional-only parameter {i} is not accepted"
                    )
            elif my_param.kind is SigParameter.POSITIONAL_OR_KEYWORD:
                if (
                    i < len(their_params)
                    and their_params[i].kind is SigParameter.POSITIONAL_OR_KEYWORD
                ):
                    if my_param.name != their_params[i].name:
                        return CanAssignError(
                            f"param name {their_params[i].name!r} does not match"
                            f" {my_param.name!r}"
                        )
                    if (
                        my_param.default is not EMPTY
                        and their_params[i].default is EMPTY
                    ):
                        return CanAssignError(f"param {my_param.name!r} has no default")
                    their_annotation = their_params[i].get_annotation()
                    tv_map = their_annotation.can_assign(my_annotation, ctx)
                    if isinstance(tv_map, CanAssignError):
                        return CanAssignError(
                            f"type of parameter {my_param.name!r} is incompatible",
                            [tv_map],
                        )
                    tv_maps.append(tv_map)
                    consumed_positional.add(their_params[i].name)
                    consumed_keyword.add(their_params[i].name)
                elif (
                    i < len(their_params)
                    and their_params[i].kind is SigParameter.POSITIONAL_ONLY
                ):
                    return CanAssignError(
                        f"parameter {my_param.name!r} is not accepted as a keyword"
                        " argument"
                    )
                elif args_annotation is not None and kwargs_annotation is not None:
                    # A POSITIONAL_OR_KEYWORD param can be passed either way,
                    # so it must be accepted by both *args and **kwargs.
                    new_tv_maps = can_assign_var_positional(
                        my_param, args_annotation, i - their_args_index, ctx
                    )
                    if isinstance(new_tv_maps, CanAssignError):
                        return new_tv_maps
                    tv_maps += new_tv_maps
                    new_tv_maps = can_assign_var_keyword(
                        my_param, kwargs_annotation, ctx
                    )
                    if isinstance(new_tv_maps, CanAssignError):
                        return new_tv_maps
                    tv_maps += new_tv_maps
                else:
                    return CanAssignError(
                        f"parameter {my_param.name!r} is not accepted"
                    )
            elif my_param.kind is SigParameter.KEYWORD_ONLY:
                their_param = other.signature.parameters.get(my_param.name)
                if their_param is not None and their_param.kind in (
                    SigParameter.POSITIONAL_OR_KEYWORD,
                    SigParameter.KEYWORD_ONLY,
                ):
                    if my_param.default is not EMPTY and their_param.default is EMPTY:
                        return CanAssignError(
                            f"keyword-only param {my_param.name!r} has no default"
                        )
                    their_annotation = their_param.get_annotation()
                    tv_map = their_annotation.can_assign(my_annotation, ctx)
                    if isinstance(tv_map, CanAssignError):
                        return CanAssignError(
                            f"type of parameter {my_param.name!r} is incompatible",
                            [tv_map],
                        )
                    tv_maps.append(tv_map)
                    consumed_keyword.add(their_param.name)
                elif kwargs_annotation is not None:
                    new_tv_maps = can_assign_var_keyword(
                        my_param, kwargs_annotation, ctx
                    )
                    if isinstance(new_tv_maps, CanAssignError):
                        return new_tv_maps
                    tv_maps += new_tv_maps
                else:
                    return CanAssignError(
                        f"parameter {my_param.name!r} is not accepted"
                    )
            elif my_param.kind is SigParameter.VAR_POSITIONAL:
                if args_annotation is None:
                    return CanAssignError("*args are not accepted")
                tv_map = args_annotation.can_assign(my_annotation, ctx)
                if isinstance(tv_map, CanAssignError):
                    return CanAssignError("type of *args is incompatible", [tv_map])
                tv_maps.append(tv_map)
                # Any of their positional params we have not matched must be
                # fillable through our *args.
                extra_positional = [
                    param
                    for param in their_params
                    if param.name not in consumed_positional
                    and param.kind
                    in (
                        SigParameter.POSITIONAL_ONLY,
                        SigParameter.POSITIONAL_OR_KEYWORD,
                    )
                ]
                for extra_param in extra_positional:
                    tv_map = extra_param.get_annotation().can_assign(my_annotation, ctx)
                    if isinstance(tv_map, CanAssignError):
                        return CanAssignError(
                            f"type of param {extra_param.name!r} is incompatible with "
                            "*args type",
                            [tv_map],
                        )
                    tv_maps.append(tv_map)
            elif my_param.kind is SigParameter.VAR_KEYWORD:
                if kwargs_annotation is None:
                    return CanAssignError("**kwargs are not accepted")
                tv_map = kwargs_annotation.can_assign(my_annotation, ctx)
                if isinstance(tv_map, CanAssignError):
                    return CanAssignError("type of **kwargs is incompatible", [tv_map])
                tv_maps.append(tv_map)
                # Likewise, their unmatched keyword params must be fillable
                # through our **kwargs.
                extra_keyword = [
                    param
                    for param in their_params
                    if param.name not in consumed_keyword
                    and param.kind
                    in (SigParameter.KEYWORD_ONLY, SigParameter.POSITIONAL_OR_KEYWORD)
                ]
                for extra_param in extra_keyword:
                    tv_map = extra_param.get_annotation().can_assign(my_annotation, ctx)
                    if isinstance(tv_map, CanAssignError):
                        return CanAssignError(
                            f"type of param {extra_param.name!r} is incompatible with "
                            "**kwargs type",
                            [tv_map],
                        )
                    tv_maps.append(tv_map)
        return unify_typevar_maps(tv_maps)
    def get_param_of_kind(self, kind: inspect._ParameterKind) -> Optional[SigParameter]:
        """Return the first parameter of the given kind, or None."""
        for param in self.signature.parameters.values():
            if param.kind is kind:
                return param
        return None
    def substitute_typevars(self, typevars: TypeVarMap) -> "Signature":
        """Return a copy with typevars substituted in all annotations."""
        return Signature(
            signature=inspect.Signature(
                [
                    param.substitute_typevars(typevars)
                    for param in self.signature.parameters.values()
                ],
                return_annotation=self.signature.return_annotation.substitute_typevars(
                    typevars
                ),
            ),
            impl=self.impl,
            callable=self.callable,
            is_asynq=self.is_asynq,
            has_return_annotation=self.has_return_annotation,
            is_ellipsis_args=self.is_ellipsis_args,
            allow_call=self.allow_call,
        )
    def walk_values(self) -> Iterable[Value]:
        # Yield all Values reachable from this signature (return + parameters).
        yield from self.signature.return_annotation.walk_values()
        for param in self.signature.parameters.values():
            if param.annotation is not EMPTY:
                yield from param.annotation.walk_values()
    def get_asynq_value(self) -> "Signature":
        """Return the :class:`Signature` for the `.asynq` attribute of an
        :class:`pyanalyze.extensions.AsynqCallable`."""
        if not self.is_asynq:
            raise TypeError("get_asynq_value() is only supported for AsynqCallable")
        # The .asynq variant returns an AsyncTask wrapping the original
        # return type and is itself no longer asynq.
        return_annotation = AsyncTaskIncompleteValue(
            asynq.AsyncTask, self.signature.return_annotation
        )
        return Signature.make(
            self.signature.parameters.values(),
            return_annotation,
            impl=self.impl,
            callable=self.callable,
            has_return_annotation=self.has_return_annotation,
            is_ellipsis_args=self.is_ellipsis_args,
            is_asynq=False,
            allow_call=self.allow_call,
        )
    @classmethod
    def make(
        cls,
        parameters: Iterable[SigParameter],
        return_annotation: Optional[Value] = None,
        *,
        impl: Optional[Impl] = None,
        callable: Optional[object] = None,
        has_return_annotation: bool = True,
        is_ellipsis_args: bool = False,
        is_asynq: bool = False,
        allow_call: bool = False,
    ) -> "Signature":
        """Create a :class:`Signature` object.
        This is more convenient to use than the constructor
        because it abstracts away the creation of the underlying
        :class:`inspect.Signature`.
        """
        # A missing return annotation becomes UNRESOLVED_VALUE and is
        # remembered as such via has_return_annotation=False.
        if return_annotation is None:
            return_annotation = UNRESOLVED_VALUE
            has_return_annotation = False
        return cls(
            signature=inspect.Signature(
                parameters, return_annotation=return_annotation
            ),
            impl=impl,
            callable=callable,
            has_return_annotation=has_return_annotation,
            is_ellipsis_args=is_ellipsis_args,
            is_asynq=is_asynq,
            allow_call=allow_call,
        )
    def __str__(self) -> str:
        param_str = ", ".join(self._render_parameters())
        asynq_str = "@asynq " if self.is_asynq else ""
        rendered = f"{asynq_str}({param_str})"
        if self.signature.return_annotation is not EMPTY:
            rendered += f" -> {self.signature.return_annotation}"
        return rendered
    def _render_parameters(self) -> Iterable[str]:
        # Adapted from Signature's own __str__
        if self.is_ellipsis_args:
            yield "..."
            return
        render_pos_only_separator = False
        render_kw_only_separator = True
        for param in self.signature.parameters.values():
            formatted = str(param)
            kind = param.kind
            if kind == SigParameter.POSITIONAL_ONLY:
                render_pos_only_separator = True
            elif render_pos_only_separator:
                # The "/" separator goes after the last positional-only param.
                yield "/"
                render_pos_only_separator = False
            if kind == SigParameter.VAR_POSITIONAL:
                render_kw_only_separator = False
            elif kind == SigParameter.KEYWORD_ONLY and render_kw_only_separator:
                # "*" marks the start of keyword-only params when there is no *args.
                yield "*"
                render_kw_only_separator = False
            yield formatted
        if render_pos_only_separator:
            yield "/"
    # TODO: do we need these?
    def has_return_value(self) -> bool:
        return self.has_return_annotation
    @property
    def return_value(self) -> Value:
        return self.signature.return_annotation
# Catch-all signature: ellipsis args plus is_asynq, so it is accepted wherever
# any callable signature is expected.
ANY_SIGNATURE = Signature.make(
    [], UNRESOLVED_VALUE, is_ellipsis_args=True, is_asynq=True
)
""":class:`Signature` that should be compatible with any other
:class:`Signature`."""
@dataclass
class BoundMethodSignature:
    """Signature for a method bound to a particular value."""
    signature: Signature
    # Composite representing the receiver (the bound "self" value).
    self_composite: Composite
    # Optional replacement return value; used only when the underlying
    # signature has no return annotation of its own.
    return_override: Optional[Value] = None
    def check_call(
        self, args: Iterable[Argument], visitor: "NameCheckVisitor", node: ast.AST
    ) -> ImplReturn:
        # Prepend the bound receiver as the first positional argument and
        # defer to the underlying signature.
        ret = self.signature.check_call(
            [(self.self_composite, None), *args], visitor, node
        )
        if self.return_override is not None and not self.signature.has_return_value():
            return ImplReturn(
                self.return_override, ret.constraint, ret.no_return_unless
            )
        return ret
    def get_signature(self, *, preserve_impl: bool = False) -> Optional[Signature]:
        """Return an unbound Signature with the receiver parameter stripped.

        Returns None when the first parameter cannot be passed positionally.
        """
        if self.signature.is_ellipsis_args:
            return ANY_SIGNATURE
        params = list(self.signature.signature.parameters.values())
        if not params or params[0].kind not in (
            SigParameter.POSITIONAL_ONLY,
            SigParameter.POSITIONAL_OR_KEYWORD,
        ):
            return None
        return Signature(
            signature=inspect.Signature(
                params[1:], return_annotation=self.return_value
            ),
            # We don't carry over the implementation function by default, because it
            # may not work when passed different arguments.
            impl=self.signature.impl if preserve_impl else None,
            callable=self.signature.callable,
            is_asynq=self.signature.is_asynq,
            has_return_annotation=self.has_return_value(),
            is_ellipsis_args=self.signature.is_ellipsis_args,
            allow_call=self.signature.allow_call,
        )
    def has_return_value(self) -> bool:
        # True if either the override or the underlying signature provides one.
        if self.return_override is not None:
            return True
        return self.signature.has_return_value()
    @property
    def return_value(self) -> Value:
        # The underlying signature's annotation wins over the override.
        if self.signature.has_return_value():
            return self.signature.return_value
        if self.return_override is not None:
            return self.return_override
        return UNRESOLVED_VALUE
    def substitute_typevars(self, typevars: TypeVarMap) -> "BoundMethodSignature":
        return BoundMethodSignature(
            self.signature.substitute_typevars(typevars),
            self.self_composite.substitute_typevars(typevars),
            self.return_override.substitute_typevars(typevars)
            if self.return_override is not None
            else None,
        )
@dataclass
class PropertyArgSpec:
    """Pseudo-argspec for properties."""

    obj: object
    return_value: Value = UNRESOLVED_VALUE

    def check_call(
        self, args: Iterable[Argument], visitor: "NameCheckVisitor", node: ast.AST
    ) -> ImplReturn:
        # Properties are accessed, not called, so a call is always an error.
        raise TypeError("property object is not callable")

    def has_return_value(self) -> bool:
        """Whether an explicit return value was recorded for this property."""
        return self.return_value is not UNRESOLVED_VALUE

    def substitute_typevars(self, typevars: TypeVarMap) -> "PropertyArgSpec":
        substituted = self.return_value.substitute_typevars(typevars)
        return PropertyArgSpec(self.obj, substituted)
# Any of the argspec-like objects accepted by make_bound_method (or None).
MaybeSignature = Union[None, Signature, BoundMethodSignature, PropertyArgSpec]
def make_bound_method(
    argspec: MaybeSignature,
    self_composite: Composite,
    return_override: Optional[Value] = None,
) -> Optional[BoundMethodSignature]:
    """Bind *argspec* to *self_composite*, producing a BoundMethodSignature.

    Returns None when there is no argspec to bind. PropertyArgSpec (or any
    other unexpected type) trips the assertion.
    """
    if argspec is None:
        return None
    if isinstance(argspec, BoundMethodSignature):
        # Re-binding an already-bound method: keep its existing override
        # unless the caller supplied a new one.
        override = return_override if return_override is not None else argspec.return_override
        return BoundMethodSignature(argspec.signature, self_composite, override)
    if isinstance(argspec, Signature):
        return BoundMethodSignature(argspec, self_composite, return_override)
    assert False, f"invalid argspec {argspec}"
# Generic helper values used below to extract the element type of an *args
# annotation and the key/value types of a **kwargs annotation.
T = TypeVar("T")
IterableValue = GenericValue(collections.abc.Iterable, [TypeVarValue(T)])
K = TypeVar("K")
V = TypeVar("V")
MappingValue = GenericValue(collections.abc.Mapping, [TypeVarValue(K), TypeVarValue(V)])
def can_assign_var_positional(
    my_param: SigParameter, args_annotation: Value, idx: int, ctx: CanAssignContext
) -> Union[List[TypeVarMap], CanAssignError]:
    """Check whether parameter *my_param* can be consumed by a callee's *args.

    *idx* is the parameter's position relative to the *args parameter.
    Returns the typevar maps produced by the check, or a CanAssignError
    describing the mismatch.
    """
    param_annotation = my_param.get_annotation()
    if isinstance(args_annotation, SequenceIncompleteValue):
        # Fixed-length *args: the parameter must map onto element idx.
        members = args_annotation.members
        if idx >= len(members):
            return CanAssignError(
                f"parameter {my_param.name!r} is not accepted; {args_annotation} only"
                f" accepts {len(members)} values"
            )
        tv_map = members[idx].can_assign(param_annotation, ctx)
        if isinstance(tv_map, CanAssignError):
            return CanAssignError(
                f"type of parameter {my_param.name!r} is incompatible: *args[{idx}]"
                " type is incompatible",
                [tv_map],
            )
        return [tv_map]
    # General case: *args is some iterable; the parameter must be assignable
    # to its element type.
    iterable_tv_map = IterableValue.can_assign(args_annotation, ctx)
    if isinstance(iterable_tv_map, CanAssignError):
        return CanAssignError(
            f"{args_annotation} is not an iterable type", [iterable_tv_map]
        )
    element_type = iterable_tv_map.get(T, UNRESOLVED_VALUE)
    tv_map = element_type.can_assign(param_annotation, ctx)
    if isinstance(tv_map, CanAssignError):
        return CanAssignError(
            f"type of parameter {my_param.name!r} is incompatible: "
            "*args type is incompatible",
            [tv_map],
        )
    return [tv_map]
def can_assign_var_keyword(
    my_param: SigParameter, kwargs_annotation: Value, ctx: CanAssignContext
) -> Union[List[TypeVarMap], CanAssignError]:
    """Check whether parameter *my_param* can be consumed by a callee's **kwargs.

    Returns the typevar maps produced by the check, or a CanAssignError
    describing the mismatch.
    """
    param_annotation = my_param.get_annotation()
    if isinstance(kwargs_annotation, TypedDictValue):
        # TypedDict **kwargs: the parameter name must be a declared key.
        if my_param.name not in kwargs_annotation.items:
            return CanAssignError(
                f"parameter {my_param.name!r} is not accepted by {kwargs_annotation}"
            )
        tv_map = kwargs_annotation.items[my_param.name].can_assign(param_annotation, ctx)
        if isinstance(tv_map, CanAssignError):
            return CanAssignError(
                f"type of parameter {my_param.name!r} is incompatible:"
                f" *kwargs[{my_param.name!r}] type is incompatible",
                [tv_map],
            )
        return [tv_map]
    # General case: **kwargs is some mapping; the parameter name must be
    # assignable to its key type and the annotation to its value type.
    mapping_tv_map = MappingValue.can_assign(kwargs_annotation, ctx)
    if isinstance(mapping_tv_map, CanAssignError):
        return CanAssignError(
            f"{kwargs_annotation} is not a mapping type", [mapping_tv_map]
        )
    collected = []
    key_type = mapping_tv_map.get(K, UNRESOLVED_VALUE)
    key_tv_map = key_type.can_assign(KnownValue(my_param.name), ctx)
    if isinstance(key_tv_map, CanAssignError):
        return CanAssignError(
            f"parameter {my_param.name!r} is not accepted by **kwargs type",
            [key_tv_map],
        )
    collected.append(key_tv_map)
    value_type = mapping_tv_map.get(V, UNRESOLVED_VALUE)
    value_tv_map = value_type.can_assign(param_annotation, ctx)
    if isinstance(value_tv_map, CanAssignError):
        return CanAssignError(
            f"type of parameter {my_param.name!r} is incompatible: **kwargs type"
            " is incompatible",
            [value_tv_map],
        )
    collected.append(value_tv_map)
    return collected
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
"""PhoneHome server listening to requests from deployed instances to test E2E network connectivity.
Usage:
{prog}
Environment:
SANITY_CHECKS_SETTINGS (Optional) Path to settings file
TEST_PHONEHOME_LOGGING (Optional) Path to logging configuration file
TEST_PHONEHOME_ENDPOINT (Optional) PhoneHome service endpoint
Files:
etc/settings.json Default settings file
etc/logging_phonehome.conf Default logging configuration file
"""
from commons.constants import PROPERTIES_CONFIG_TEST, PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT, \
PHONEHOME_DBUS_OBJECT_PATH, PHONEHOME_DBUS_OBJECT_METADATA_PATH, PHONEHOME_TX_ID_HEADER, \
DEFAULT_PHONEHOME_LOGGING_CONF, DEFAULT_SETTINGS_FILE
from os import environ
from dbus_phonehome_service import DbusPhoneHomeServer
from cherrypy import _cperror
import cherrypy
import httplib
import logging
import json
import sys
import urlparse
import logging.config
import os.path
import uuid
# Global DBus server instance; None until the startup code assigns it
# (PhoneHome.POST reads it via ``global dbus_server``).
dbus_server = None
# Global logger; configured from the logging conf file at startup.
logger = None
class PhoneHome:
    # Expose this resource to CherryPy's MethodDispatcher.
    exposed = True
    @cherrypy.tools.accept(media='text/plain')
    def POST(self):
        """Manages a POST request. Phonehome service.
        Emits a new DBus signal to the PhoneHome object published.
        The request always will return 200 OK if some content is received; this content is emitted in the signal.
        Metadata requests additionally require a Hostname header identifying the sender.
        :return: None (200 OK) on success, or an error message string with 400/404 status.
        """
        global dbus_server
        content_length = int(cherrypy.request.headers['Content-Length'])
        content = cherrypy.request.body.read(content_length)
        logger.info("%s: %s - POST: ", PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
        path = cherrypy.request.path_info
        # Get data from body
        if content:
            if path == PHONEHOME_DBUS_OBJECT_METADATA_PATH:
                # Metadata signals are addressed to a specific host, taken
                # from the Hostname header.
                if "Hostname" in cherrypy.request.headers:
                    hostname = cherrypy.request.headers['Hostname']
                    dbus_server.logdebug("{0}: {1} - Sending signal to hostname: {2}".format(
                        PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id, hostname))
                    dbus_server.emit_phonehome_signal(str(content), PHONEHOME_DBUS_OBJECT_METADATA_PATH,
                                                      hostname, cherrypy.request.transaction_id)
                    cherrypy.response.status = httplib.OK
                    return
                else:
                    cherrypy.response.status = httplib.BAD_REQUEST
                    return "{0}: {1} - Hostname header is not present in HTTP PhoneHome request".format(
                        PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
            elif path == PHONEHOME_DBUS_OBJECT_PATH:
                # Plain phonehome signal: no target hostname.
                dbus_server.logdebug("{0}: {1} - Sending signal".format(PHONEHOME_TX_ID_HEADER,
                                                                        cherrypy.request.transaction_id))
                dbus_server.emit_phonehome_signal(str(content), PHONEHOME_DBUS_OBJECT_PATH, None,
                                                  cherrypy.request.transaction_id)
                cherrypy.response.status = httplib.OK
                return
            else:
                cherrypy.response.status = httplib.NOT_FOUND
                return "{0}: {1} - Path not found for HTTP PhoneHome request".format(
                    PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
        else:
            # Bad Request
            cherrypy.response.status = httplib.BAD_REQUEST
            return "{0}: {1} - Invalid data received in HTTP PhoneHome request".\
                format(PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
def handle_error():
    """CherryPy error hook: answer 500 and dump the traceback to stdout."""
    response = cherrypy.response
    response.status = httplib.INTERNAL_SERVER_ERROR
    response.body = "Internal Server Error"
    print(_cperror.format_exc())
class Root(object):
    # Route unhandled exceptions from any child resource to handle_error().
    _cp_config = {'request.error_response': handle_error}
    pass
def before_request_body():
    """
    CherryPy tool hook run before the request body is processed.

    Assigns a transaction id to the request, taken from the PhoneHome
    transaction header, then from the query params (clearing them), and
    otherwise freshly generated. Also installs a body processor for
    form-encoded content that only validates Content-Length (411 when
    missing, 400 when invalid) and deliberately leaves the raw body
    unread so the handler can read it itself.
    """
    logger.info("before_request_body: %s ", cherrypy.request.params)
    if PHONEHOME_TX_ID_HEADER in cherrypy.request.headers:
        transaction_id = cherrypy.request.headers[PHONEHOME_TX_ID_HEADER]
    elif PHONEHOME_TX_ID_HEADER in cherrypy.request.params:
        transaction_id = cherrypy.request.params[PHONEHOME_TX_ID_HEADER]
        # Drop the query params so the dispatcher does not pass them to POST().
        cherrypy.request.params = {}
    else:
        transaction_id = str(uuid.uuid1())
    cherrypy.request.transaction_id = transaction_id
    logger.info("%s: %s - before_request_body, path: %s", PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id,
                cherrypy.request.path_info)
    request = cherrypy.serving.request
    def processor(entity):
        """Important! Do nothing with body"""
        if not entity.headers.get("Content-Length", ""):
            raise cherrypy.HTTPError(411)
        try:
            content_length = int(cherrypy.request.headers['Content-Length'])
            logger.info("%s: %s - body - content_length: %s ", PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id,
                        content_length)
        except ValueError:
            raise cherrypy.HTTPError(400, 'Invalid Content-Length')
    request.body.processors['application/x-www-form-urlencoded'] = processor
def on_end_request():
    """
    CherryPy tool hook run after each request: log the transaction id.
    """
    # Removed a leftover ``print 'end'`` debug statement that wrote to
    # stdout on every request (and was Python-2-only syntax).
    logger.info("%s: %s - on_end_request", PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
class HttpPhoneHomeServer:
    """
    This Server will be waiting for POST requests. If some request is received to '/' resource (root), it will be
    processed. POST body is processed using a DBus PhoneHome Client and 200 OK is always returned.
    """
    def __init__(self, port, timeout=None):
        """Creates a PhoneHome server
        :param port: Listen port
        :param timeout: Timeout to wait for some request. Only is used when 'single request server' is configured.
        :return: None
        """
        logger.debug("Creating PhoneHome Server. Port %d; Timeout: %s", port, str(timeout))
        self.timeout = timeout
        self.port = port
    def start_forever(self):
        """Starts the server. Forever...
        :return: None
        """
        logger.debug("Waiting for calls...")
        # CherryPy configuration: listen on all interfaces and enable the
        # transaction-id tools registered below.
        conf = {
            'global': {
                'server.socket_host': '0.0.0.0',
                'server.socket_port': self.port,
                'tools.newprocessor_open.on': True,
                'tools.newprocessor_close.on': True,
            },
            '/': {
                'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
                'response.timeout': self.timeout,
                'tools.sessions.on': True,
                'tools.response_headers.on': True,
                'tools.response_headers.headers': [('Content-Type', 'text/plain')],
            }
        }
        root = Root()
        # /phonehome and /metadata share the same handler class; it branches
        # on cherrypy.request.path_info.
        root.phonehome = PhoneHome()
        root.metadata = PhoneHome()
        cherrypy.tools.newprocessor_open = cherrypy.Tool('before_request_body', before_request_body, priority=100)
        cherrypy.tools.newprocessor_close = cherrypy.Tool('on_end_request', on_end_request)
        # Silence CherryPy's own logging; the module logger is used instead.
        cherrypy.log.error_log.propagate = False
        cherrypy.log.access_log.propagate = False
        cherrypy.log.screen = None
        cherrypy.quickstart(root, '/', conf)
if __name__ == '__main__':
    # NOTE(review): `global` at module scope is a no-op; kept for byte-identity.
    global logger
    # Configuration files: settings and logging config are resolved relative
    # to the parent directory, overridable through environment variables.
    parentdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    settings_file = os.environ.get('SANITY_CHECKS_SETTINGS', os.path.join(parentdir, DEFAULT_SETTINGS_FILE))
    logging_conf = os.environ.get('TEST_PHONEHOME_LOGGING', os.path.join(parentdir, DEFAULT_PHONEHOME_LOGGING_CONF))
    # Configure logger
    logging.config.fileConfig(logging_conf)
    logger = logging.getLogger("HttpPhoneHomeServer")
    # Load properties (JSON settings file); abort on parse failure.
    logger.info("Loading test settings...")
    conf = dict()
    with open(settings_file) as settings:
        try:
            conf = json.load(settings)
        except Exception as e:
            print "Error parsing config file '{}': {}".format(settings_file, e)
            sys.exit(-1)
    # Check and load PhoneHome configuration: the TEST_PHONEHOME_ENDPOINT env
    # var takes precedence over the value from the settings file.
    default_phonehome_endpoint = conf[PROPERTIES_CONFIG_TEST][PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT]
    phonehome_endpoint = environ.get('TEST_PHONEHOME_ENDPOINT', default_phonehome_endpoint)
    env_conf = {
        PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT: phonehome_endpoint
    }
    conf[PROPERTIES_CONFIG_TEST].update(env_conf)
    if not phonehome_endpoint:
        logger.error("No value found for '%s.%s' setting. PhoneHome server will NOT be launched",
                     PROPERTIES_CONFIG_TEST, PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT)
        sys.exit(1)
    # The HTTP server listens on the port component of the configured endpoint.
    phonehome_port = urlparse.urlsplit(phonehome_endpoint).port
    logger.info("PhoneHome port to be used by server: %d", phonehome_port)
    # Create global DBus server and register both PhoneHome object paths.
    logger.info("Creating DBus PhoneHome service with object: %s", PHONEHOME_DBUS_OBJECT_PATH)
    logger.info("Creating DBus PhoneHome service with object: %s", PHONEHOME_DBUS_OBJECT_METADATA_PATH)
    dbus_server = DbusPhoneHomeServer(logger)
    dbus_server.register_phonehome_object(PHONEHOME_DBUS_OBJECT_PATH)
    dbus_server.register_phonehome_object(PHONEHOME_DBUS_OBJECT_METADATA_PATH)
    # Create and start server (blocks forever).
    logger.info("Creating and starting PhoneHome Server")
    server = HttpPhoneHomeServer(phonehome_port)
    server.start_forever()
|
nilq/baby-python
|
python
|
import math, statistics, random, time, sys
import numpy as np
import pandas as pd
import ray
import time
import holoviews as hv
from holoviews import opts
from holoviews.streams import Counter, Tap
from bokeh_util import square_circle_plot, two_lines_plot, means_stddevs_plot
hv.extension('bokeh')
from bokeh.layouts import gridplot, layout
from bokeh.models import Slider, Button
from bokeh.plotting import figure, output_file, show
from pi_calc import MonteCarloPi, compute_pi_for
# Sample counts for the three side-by-side simulations.
DEFAULT_NS = [1000, 10000, 100000]
# Unit circle inscribed in a square spanning (-R, -R) .. (R, R).
DEFAULT_RADIUS = 1.0
DEFAULT_BOUNDS = (-DEFAULT_RADIUS, -DEFAULT_RADIUS, DEFAULT_RADIUS, DEFAULT_RADIUS)
DEFAULT_MIN_N = 100
DEFAULT_MAX_N = 100000
# Points drawn per call to MonteCarloPi.sample (one dmap update).
DEFAULT_N_PER_PI_CALC = DEFAULT_MIN_N
# Overall plot area in pixels; each image raster is half that per side.
DEFAULT_PLOT_SIZE = 1200
DEFAULT_IMAGE_SIZE = round(DEFAULT_PLOT_SIZE/2)
DEFAULT_CMAP = 'Spectral'
# Palette indices: background fill vs. plotted sample points.
DEFAULT_IMAGE_COLOR_IDX = 2
DEFAULT_POINT_COLOR_IDX = 125
# Status label template: current estimate, relative error, points so far, target.
DEFAULT_PI_UPDATE_FORMAT = 'Pi ~= {:8.7f}\nerror = {:6.3f}%\nn = {:d}\n(N ~ {:d})'
# Shared Image styling applied to every raster/overlay element.
img_opts = opts.Image(cmap=DEFAULT_CMAP, toolbar=None,
                      height=DEFAULT_PLOT_SIZE, width=DEFAULT_PLOT_SIZE,
                      xaxis=None, yaxis=None)
def make_circle(radius=DEFAULT_RADIUS):
    """Return an ``hv.Path`` tracing a red circle of the given radius."""
    angles = np.linspace(-np.pi, np.pi, 200)
    points = (radius * np.sin(angles), radius * np.cos(angles), angles)
    return hv.Path([points]).opts(img_opts).opts(line_width=2, color='red')
def make_rect(bounds=DEFAULT_BOUNDS, color='blue'):
    """Return an ``hv.Path`` outlining the rectangle ``bounds`` in ``color``.

    :param bounds: (minX, minY, maxX, maxY) rectangle corners.
    :param color: outline color (default 'blue').

    Fix: the ``color`` parameter was previously ignored — the outline was
    hard-coded to ``'blue'`` regardless of the argument.
    """
    minX, minY, maxX, maxY = bounds
    corners = [(minX, minY), (maxX, minY), (maxX, maxY), (minX, maxY), (minX, minY)]
    return hv.Path(corners).opts(img_opts).opts(line_width=2, color=color)
def make_text(content):
    """Render ``content`` as a small fixed-size text panel on a grey background."""
    panel = hv.Text(0, 0, content)
    return panel.opts(img_opts).opts(
        toolbar=None, height=100, width=150, xaxis=None, yaxis=None,
        text_alpha=1.0, bgcolor='lightgrey')
def make_image(data=None, image_size=DEFAULT_IMAGE_SIZE, bounds=DEFAULT_BOUNDS, color_idx=DEFAULT_IMAGE_COLOR_IDX, label='Pi:'):
    """Return an ``hv.Image`` over ``bounds``, filled with ``color_idx`` if no data given.

    :param data: optional raster (2-D array); a uniform uint8 fill is created when None.
    :param image_size: side length in pixels of the generated fill.
    :param color_idx: palette index used for the background fill.

    Fix: the original tested ``data == None``. Once ``data`` is a NumPy array
    that comparison is elementwise and raises "truth value of an array is
    ambiguous"; identity comparison ``is None`` is the correct check.
    """
    if data is None:
        data = np.full((image_size, image_size), color_idx, dtype=np.uint8)
    return hv.Image(data, label=label, bounds=bounds).opts(img_opts)
def to_pixel(array, image_size=DEFAULT_IMAGE_SIZE):
    """Map real coordinates in [-1, 1] to integer pixel indices.

    Shifts and rescales ``array`` from the (-1, 1) coordinate system to the
    unit interval, then rounds onto the pixel grid. The scale factor is
    ``image_size - 1`` so results stay in [0, image_size - 1] inclusive.
    """
    unit = (array + 1.0) / 2.0
    return np.rint((image_size - 1) * unit).astype(int)
def make_overlay(items, width=DEFAULT_PLOT_SIZE, height=DEFAULT_PLOT_SIZE):
    """Combine ``items`` into a single ``hv.Overlay`` of the given pixel size."""
    combined = hv.Overlay(items=items)
    return combined.opts(width=width, height=height)
def make_update(k, N, counter_instance,
                n_per_pi_calc=DEFAULT_N_PER_PI_CALC, pi_update_format=DEFAULT_PI_UPDATE_FORMAT):
    """Returns a closure used as the update function for a dmap.

    :param k: index of this simulation (unused inside the closure).
    :param N: total number of points to sample before stopping.
    :param counter_instance: the hv Counter stream driving the dmap; cleared
        once N points have been drawn to stop further updates.
    :param n_per_pi_calc: points sampled per update call.
    :param pi_update_format: format string for the status label.
    """
    # State shared across updates: the running estimator and the static
    # overlay elements; `image` is mutated in place on every call.
    pi_calc = MonteCarloPi()
    image = make_image()
    rect = make_rect()
    circle = make_circle()
    text = make_text('Pi calculation')

    def update(counter):
        """
        Due to an apparent bug in Holoview's ``periodic`` class for
        DynamicMaps, the update gets called far more than the specified
        ``count`` value in ``run_simulations`` below. Unfortunately, we
        can't just "ignore" extra invocations (if we've already computed
        N values), because we have to return an overlay and there
        appears to be no reliable way to save the last one(?). That's
        why we call ``counter_instance.clear()``, which removes the
        dmap as a subscriber.
        """
        def updated_image(value, xys, img):
            # Paint each sampled (x, y) into the raster at palette `value`
            # (1 = inside the circle, 0 = outside).
            xs, ys = xys[:,0], xys[:,1]
            pxs, pys = to_pixel(xs), to_pixel(ys)
            for i in range(pxs.size):
                img.data[pxs[i]][pys[i]] = value
            return img
        # Draw a fresh batch of points and fold them into the image.
        pi, count_inside, count_total, xys_in, xys_out = pi_calc.sample(n_per_pi_calc)
        error = 100.0*abs(pi - math.pi)/math.pi
        label = pi_update_format.format(pi, error, count_total, N)
        img1 = updated_image(1, xys_in, image)
        img2 = updated_image(0, xys_out, img1)
        # Re-wrap to refresh the label shown with the raster.
        img3 = hv.Image(img2, label=label)
        text = make_text(label)
        overlay = make_overlay(items=[img3, rect, circle, text])
        if count_total >= N:
            counter_instance.clear() # basically stop further updates.
        return overlay
    return update
def make_dmaps(Ns = DEFAULT_NS):
    """Build one DynamicMap per target sample count in ``Ns``.

    Each dmap is driven by its own transient Counter stream and sized so the
    set of plots tiles the default plot area side by side.
    """
    dmaps = []
    for k in range(len(Ns)):
        N = Ns[k]
        # Transient stream: cleared by the update closure once N is reached.
        counter = Counter(transient=True)
        psize = int(DEFAULT_PLOT_SIZE/len(Ns))
        dmap_update = make_update(k, N, counter)
        dmap = hv.DynamicMap(dmap_update, streams=[counter]).opts(height=psize, width=psize)
        # We fetch DEFAULT_N_PER_PI_CALC points each pass through "update", so only count up to N/...
        dmaps.append(dmap)
    return dmaps
def run_simulations(dmaps, Ns = DEFAULT_NS, n_per_pi_calc=DEFAULT_N_PER_PI_CALC):
    """Start non-blocking periodic updates on every dmap.

    Each dmap fires every 0.01s for N/n_per_pi_calc - 1 ticks, i.e. until
    roughly its target sample count has been drawn.
    """
    for idx, dmap in enumerate(dmaps):
        tick_count = int(Ns[idx] / n_per_pi_calc) - 1
        dmap.periodic(0.01, count=tick_count, block=False)
def stop_simulations(dmaps):
    """Stop the periodic update callback on every DynamicMap in ``dmaps``.

    Fix: the original used a list comprehension purely for its side effects,
    allocating a throwaway list of Nones; a plain loop states the intent.
    """
    for dmap in dmaps:
        dmap.periodic.stop()
if __name__ == '__main__':
    # Build the three simulations, lay them out side by side, then start
    # their periodic updates (non-blocking).
    dmaps = make_dmaps(DEFAULT_NS)
    show(dmaps[0] + dmaps[1] + dmaps[2])
    run_simulations(dmaps)
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
OrderCloud
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0
Contact: ordercloud@four51.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class PaymentApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Bind this API to an ApiClient.

    Uses the supplied client when given; otherwise falls back to the shared
    client held by Configuration, creating it lazily on first use.
    """
    shared_config = Configuration()
    if api_client:
        self.api_client = api_client
    else:
        # Lazily create the shared default client.
        if not shared_config.api_client:
            shared_config.api_client = ApiClient()
        self.api_client = shared_config.api_client
def create(self, direction, order_id, payment, **kwargs):
    """Create a payment on an order.

    Synchronous by default; pass a ``callback`` function to run the request
    asynchronously (the callback receives the response and the call returns
    the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param Payment payment: (required)
    :return: Payment
    """
    # Both the sync and async paths return exactly what the
    # _with_http_info variant yields (data only, or the request thread).
    kwargs['_return_http_data_only'] = True
    return self.create_with_http_info(direction, order_id, payment, **kwargs)
def create_with_http_info(self, direction, order_id, payment, **kwargs):
    """POST /orders/{direction}/{orderID}/payments — create a payment.

    Synchronous by default; pass a ``callback`` function for an
    asynchronous request (returns the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param Payment payment: (required)
    :return: Payment
    """
    all_params = ['direction', 'order_id', 'payment',
                  'callback', '_return_http_data_only']
    params = locals()
    # Merge keyword arguments, rejecting anything not explicitly supported.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create" % key
            )
        params[key] = val
    del params['kwargs']
    # Every positional argument is required and must not be None.
    for required in ('direction', 'order_id', 'payment'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `create`" % required)

    resource_path = '/orders/{direction}/{orderID}/payments'.replace('{format}', 'json')
    path_params = {}
    for arg, segment in (('direction', 'direction'), ('order_id', 'orderID')):
        if arg in params:
            path_params[segment] = params[arg]

    header_params = {}
    # Only send Accept when the client negotiates one.
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'text/plain; charset=utf-8'])

    body_params = params.get('payment')

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    {},   # query params
                                    header_params,
                                    body=body_params,
                                    post_params=[],
                                    files={},
                                    response_type='Payment',
                                    auth_settings=['oauth2'],  # OAuth2 required
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def create_transaction(self, direction, order_id, payment_id, payment_transaction, **kwargs):
    """Create a transaction on a payment.

    Synchronous by default; pass a ``callback`` function to run the request
    asynchronously (the callback receives the response and the call returns
    the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :param PaymentTransaction payment_transaction: (required)
    :return: Payment
    """
    # Sync and async paths both return whatever the _with_http_info
    # variant yields (data only, or the request thread).
    kwargs['_return_http_data_only'] = True
    return self.create_transaction_with_http_info(direction, order_id, payment_id, payment_transaction, **kwargs)
def create_transaction_with_http_info(self, direction, order_id, payment_id, payment_transaction, **kwargs):
    """POST /orders/{direction}/{orderID}/payments/{paymentID}/transactions.

    Synchronous by default; pass a ``callback`` function for an
    asynchronous request (returns the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :param PaymentTransaction payment_transaction: (required)
    :return: Payment
    """
    all_params = ['direction', 'order_id', 'payment_id', 'payment_transaction',
                  'callback', '_return_http_data_only']
    params = locals()
    # Merge keyword arguments, rejecting anything not explicitly supported.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_transaction" % key
            )
        params[key] = val
    del params['kwargs']
    # Every positional argument is required and must not be None.
    for required in ('direction', 'order_id', 'payment_id', 'payment_transaction'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `create_transaction`" % required)

    resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}/transactions'.replace('{format}', 'json')
    path_params = {}
    for arg, segment in (('direction', 'direction'),
                         ('order_id', 'orderID'),
                         ('payment_id', 'paymentID')):
        if arg in params:
            path_params[segment] = params[arg]

    header_params = {}
    # Only send Accept when the client negotiates one.
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'text/plain; charset=utf-8'])

    body_params = params.get('payment_transaction')

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    {},   # query params
                                    header_params,
                                    body=body_params,
                                    post_params=[],
                                    files={},
                                    response_type='Payment',
                                    auth_settings=['oauth2'],  # OAuth2 required
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def delete(self, direction, order_id, payment_id, **kwargs):
    """Delete a payment from an order.

    Synchronous by default; pass a ``callback`` function to run the request
    asynchronously (the callback receives the response and the call returns
    the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :return: None
    """
    # Sync and async paths both return whatever the _with_http_info
    # variant yields (None, or the request thread).
    kwargs['_return_http_data_only'] = True
    return self.delete_with_http_info(direction, order_id, payment_id, **kwargs)
def delete_with_http_info(self, direction, order_id, payment_id, **kwargs):
    """DELETE /orders/{direction}/{orderID}/payments/{paymentID}.

    Synchronous by default; pass a ``callback`` function for an
    asynchronous request (returns the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :return: None
    """
    all_params = ['direction', 'order_id', 'payment_id',
                  'callback', '_return_http_data_only']
    params = locals()
    # Merge keyword arguments, rejecting anything not explicitly supported.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete" % key
            )
        params[key] = val
    del params['kwargs']
    # Every positional argument is required and must not be None.
    for required in ('direction', 'order_id', 'payment_id'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `delete`" % required)

    resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}'.replace('{format}', 'json')
    path_params = {}
    for arg, segment in (('direction', 'direction'),
                         ('order_id', 'orderID'),
                         ('payment_id', 'paymentID')):
        if arg in params:
            path_params[segment] = params[arg]

    header_params = {}
    # Only send Accept when the client negotiates one.
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'text/plain; charset=utf-8'])

    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    {},   # query params
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type=None,
                                    auth_settings=['oauth2'],  # OAuth2 required
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def delete_transaction(self, direction, order_id, payment_id, transaction_id, **kwargs):
    """Delete a transaction from a payment.

    Synchronous by default; pass a ``callback`` function to run the request
    asynchronously (the callback receives the response and the call returns
    the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :param str transaction_id: ID of the transaction. (required)
    :return: None
    """
    # Sync and async paths both return whatever the _with_http_info
    # variant yields (None, or the request thread).
    kwargs['_return_http_data_only'] = True
    return self.delete_transaction_with_http_info(direction, order_id, payment_id, transaction_id, **kwargs)
def delete_transaction_with_http_info(self, direction, order_id, payment_id, transaction_id, **kwargs):
    """DELETE /orders/{direction}/{orderID}/payments/{paymentID}/transactions/{transactionID}.

    Synchronous by default; pass a ``callback`` function for an
    asynchronous request (returns the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :param str transaction_id: ID of the transaction. (required)
    :return: None
    """
    all_params = ['direction', 'order_id', 'payment_id', 'transaction_id',
                  'callback', '_return_http_data_only']
    params = locals()
    # Merge keyword arguments, rejecting anything not explicitly supported.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_transaction" % key
            )
        params[key] = val
    del params['kwargs']
    # Every positional argument is required and must not be None.
    for required in ('direction', 'order_id', 'payment_id', 'transaction_id'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `delete_transaction`" % required)

    resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}/transactions/{transactionID}'.replace('{format}', 'json')
    path_params = {}
    for arg, segment in (('direction', 'direction'),
                         ('order_id', 'orderID'),
                         ('payment_id', 'paymentID'),
                         ('transaction_id', 'transactionID')):
        if arg in params:
            path_params[segment] = params[arg]

    header_params = {}
    # Only send Accept when the client negotiates one.
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'text/plain; charset=utf-8'])

    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    {},   # query params
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type=None,
                                    auth_settings=['oauth2'],  # OAuth2 required
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def get(self, direction, order_id, payment_id, **kwargs):
    """Retrieve a single payment.

    Synchronous by default; pass a ``callback`` function to run the request
    asynchronously (the callback receives the response and the call returns
    the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :return: Payment
    """
    # Sync and async paths both return whatever the _with_http_info
    # variant yields (data only, or the request thread).
    kwargs['_return_http_data_only'] = True
    return self.get_with_http_info(direction, order_id, payment_id, **kwargs)
def get_with_http_info(self, direction, order_id, payment_id, **kwargs):
    """GET /orders/{direction}/{orderID}/payments/{paymentID}.

    Synchronous by default; pass a ``callback`` function for an
    asynchronous request (returns the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :return: Payment
    """
    all_params = ['direction', 'order_id', 'payment_id',
                  'callback', '_return_http_data_only']
    params = locals()
    # Merge keyword arguments, rejecting anything not explicitly supported.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get" % key
            )
        params[key] = val
    del params['kwargs']
    # Every positional argument is required and must not be None.
    for required in ('direction', 'order_id', 'payment_id'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `get`" % required)

    resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}'.replace('{format}', 'json')
    path_params = {}
    for arg, segment in (('direction', 'direction'),
                         ('order_id', 'orderID'),
                         ('payment_id', 'paymentID')):
        if arg in params:
            path_params[segment] = params[arg]

    header_params = {}
    # Only send Accept when the client negotiates one.
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'text/plain; charset=utf-8'])

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    {},   # query params
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type='Payment',
                                    auth_settings=['oauth2'],  # OAuth2 required
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def list(self, direction, order_id, **kwargs):
    """List payments on an order.

    Synchronous by default; pass a ``callback`` function to run the request
    asynchronously (the callback receives the response and the call returns
    the request thread).

    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str search: Word or phrase to search for.
    :param str search_on: Comma-delimited list of fields to search on.
    :param str sort_by: Comma-delimited list of fields to sort by.
    :param int page: Page of results to return. Default: 1
    :param int page_size: Number of results to return per page. Default: 20, max: 100.
    :param dict(str, str) filters: Any additional key/value pairs passed in the query string are interpretted as filters. Valid keys are top-level properties of the returned model or 'xp.???'
    :return: ListPayment
    """
    # Sync and async paths both return whatever the _with_http_info
    # variant yields (data only, or the request thread).
    kwargs['_return_http_data_only'] = True
    return self.list_with_http_info(direction, order_id, **kwargs)
def list_with_http_info(self, direction, order_id, **kwargs):
    """
    List payments for an order (low-level variant that also returns HTTP info).
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.list_with_http_info(direction, order_id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str search: Word or phrase to search for.
    :param str search_on: Comma-delimited list of fields to search on.
    :param str sort_by: Comma-delimited list of fields to sort by.
    :param int page: Page of results to return. Default: 1
    :param int page_size: Number of results to return per page. Default: 20, max: 100.
    :param dict(str, str) filters: Any additional key/value pairs passed in the query string are interpretted as filters. Valid keys are top-level properties of the returned model or 'xp.???'
    :return: ListPayment
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when a required parameter is missing or None.
    """
    all_params = ['direction', 'order_id', 'search', 'search_on', 'sort_by', 'page', 'page_size', 'filters']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Reject unknown keyword arguments early, then fold the known ones into params.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'direction' is set
    if ('direction' not in params) or (params['direction'] is None):
        raise ValueError("Missing the required parameter `direction` when calling `list`")
    # verify the required parameter 'order_id' is set
    if ('order_id' not in params) or (params['order_id'] is None):
        raise ValueError("Missing the required parameter `order_id` when calling `list`")
    resource_path = '/orders/{direction}/{orderID}/payments'.replace('{format}', 'json')
    # Map snake_case Python parameters onto the camelCase path placeholders.
    path_params = {}
    if 'direction' in params:
        path_params['direction'] = params['direction']
    if 'order_id' in params:
        path_params['orderID'] = params['order_id']
    query_params = {}
    if 'search' in params:
        query_params['search'] = params['search']
    if 'search_on' in params:
        query_params['searchOn'] = params['search_on']
    if 'sort_by' in params:
        query_params['sortBy'] = params['sort_by']
    if 'page' in params:
        query_params['page'] = params['page']
    if 'page_size' in params:
        query_params['pageSize'] = params['page_size']
    if 'filters' in params:
        query_params['filters'] = params['filters']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'text/plain; charset=utf-8'])
    # Authentication setting
    auth_settings = ['oauth2']
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ListPayment',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
def patch(self, direction, order_id, payment_id, partial_payment, **kwargs):
    """
    Partially update a payment on an order. Synchronous by default; pass a
    `callback` keyword argument to make the request asynchronously:
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.patch(direction, order_id, payment_id, partial_payment, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :param Payment partial_payment: (required)
    :return: Payment
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The helper returns a request thread when a callback is supplied and the
    # response data otherwise, so its result can be returned unconditionally.
    return self.patch_with_http_info(direction, order_id, payment_id, partial_payment, **kwargs)
def patch_with_http_info(self, direction, order_id, payment_id, partial_payment, **kwargs):
    """
    Partially update a payment (low-level variant that also returns HTTP info).
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.patch_with_http_info(direction, order_id, payment_id, partial_payment, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
    :param str order_id: ID of the order. (required)
    :param str payment_id: ID of the payment. (required)
    :param Payment partial_payment: (required)
    :return: Payment
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when a required parameter is missing or None.
    """
    all_params = ['direction', 'order_id', 'payment_id', 'partial_payment']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Reject unknown keyword arguments early, then fold the known ones into params.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'direction' is set
    if ('direction' not in params) or (params['direction'] is None):
        raise ValueError("Missing the required parameter `direction` when calling `patch`")
    # verify the required parameter 'order_id' is set
    if ('order_id' not in params) or (params['order_id'] is None):
        raise ValueError("Missing the required parameter `order_id` when calling `patch`")
    # verify the required parameter 'payment_id' is set
    if ('payment_id' not in params) or (params['payment_id'] is None):
        raise ValueError("Missing the required parameter `payment_id` when calling `patch`")
    # verify the required parameter 'partial_payment' is set
    if ('partial_payment' not in params) or (params['partial_payment'] is None):
        raise ValueError("Missing the required parameter `partial_payment` when calling `patch`")
    resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}'.replace('{format}', 'json')
    # Map snake_case Python parameters onto the camelCase path placeholders.
    path_params = {}
    if 'direction' in params:
        path_params['direction'] = params['direction']
    if 'order_id' in params:
        path_params['orderID'] = params['order_id']
    if 'payment_id' in params:
        path_params['paymentID'] = params['payment_id']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    # The partial payment object is serialized as the PATCH request body.
    body_params = None
    if 'partial_payment' in params:
        body_params = params['partial_payment']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'text/plain; charset=utf-8'])
    # Authentication setting
    auth_settings = ['oauth2']
    return self.api_client.call_api(resource_path, 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Payment',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
|
nilq/baby-python
|
python
|
import calendar
import datetime as dt
import time
# Canonical timestamp layouts used throughout this module.
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.000Z'  # full RFC 3339 UTC timestamp (milliseconds fixed to .000)
DATE_FORMAT = '%Y-%m-%dT00:00:00.000Z'      # date pinned to midnight UTC
DPLUS_FORMAT = '%Y-%m-%dT00:01:00.000Z'     # date pinned to 00:01 UTC ("plus a minute")


def valid_rfcformat(potential):
    """Return True if *potential* is a string matching DATETIME_FORMAT.

    Catches only the exceptions ``strptime`` actually raises -- ValueError
    for a malformed string and TypeError for a non-string -- instead of the
    original bare ``except:``, which also swallowed KeyboardInterrupt and
    SystemExit.
    """
    try:
        dt.datetime.strptime(potential, DATETIME_FORMAT)
        return True
    except (ValueError, TypeError):
        return False
def to_rfc3339(unknown):
    """Convert *unknown* to an RFC 3339 UTC timestamp string (DATETIME_FORMAT).

    Accepts: a date/datetime-like object (anything with ``timetuple``), a
    POSIX timestamp as int/float, or a string already in RFC 3339 format
    (returned unchanged). Raises RFC3339ConversionError for anything else.
    """
    if hasattr(unknown, 'timetuple'):
        if hasattr(unknown, 'tzinfo') and unknown.tzinfo is not None:
            # Timezone-aware: utctimetuple() already expresses UTC wall time,
            # and timegm() interprets the tuple as UTC.
            utc_timestamp = calendar.timegm(unknown.utctimetuple())
        else:
            # Naive datetimes (and dates) are interpreted as *local* time:
            # mktime() converts a local-time tuple to a UTC epoch timestamp.
            utc_timestamp = time.mktime(unknown.timetuple())
        utc_datetime = dt.datetime.utcfromtimestamp(utc_timestamp)
        return utc_datetime.strftime(DATETIME_FORMAT)
    elif type(unknown) in (float, int):
        # Numeric input is a POSIX timestamp (seconds since the epoch, UTC).
        utc_datetime = dt.datetime.utcfromtimestamp(unknown)
        return utc_datetime.strftime(DATETIME_FORMAT)
    elif valid_rfcformat(unknown):
        return unknown
    else:
        raise RFC3339ConversionError(unknown)
def from_rfc3339(rfc3339):
    """Parse an RFC 3339 UTC timestamp string into a naive *local-time* datetime.

    The string is interpreted as UTC (timegm), then rendered in local time
    (fromtimestamp). This mirrors to_rfc3339(), which treats naive datetimes
    as local time, so the naive round-trip is consistent.
    """
    time_tuple = time.strptime(rfc3339, DATETIME_FORMAT)
    utc_timestamp = calendar.timegm(time_tuple)
    return dt.datetime.fromtimestamp(utc_timestamp)
def to_date_rfc3339(unknown, plus_a_min=False):
    """Convert *unknown* to a date-only RFC 3339 string.

    The time component is fixed to midnight (DATE_FORMAT), or to 00:01 when
    *plus_a_min* is true (DPLUS_FORMAT). Accepts anything with ``strftime``
    (date/datetime), a POSIX timestamp as int/float (interpreted in *local*
    time by date.fromtimestamp), or an RFC 3339 string.
    """
    if plus_a_min:
        rfc_format = DPLUS_FORMAT
    else:
        rfc_format = DATE_FORMAT
    if hasattr(unknown, 'strftime'):
        return unknown.strftime(rfc_format)
    elif type(unknown) in (float, int):
        return dt.date.fromtimestamp(unknown).strftime(rfc_format)
    elif valid_rfcformat(unknown):
        # NOTE(review): valid_rfcformat() accepts any DATETIME_FORMAT string,
        # but from_date_rfc3339() parses only DATE_FORMAT (midnight). A string
        # with a non-midnight time passes the check here and then raises
        # ValueError below -- confirm whether that is intended.
        return to_date_rfc3339(from_date_rfc3339(unknown), plus_a_min)
    else:
        raise RFC3339ConversionError(unknown)
def from_date_rfc3339(rfc3339):
    """Parse a midnight-UTC RFC 3339 string (DATE_FORMAT) into a datetime.date."""
    parsed = dt.datetime.strptime(rfc3339, DATE_FORMAT)
    return parsed.date()
class RFC3339ConversionError(Exception):
    """Raised when a value cannot be converted to an RFC 3339 timestamp."""

    def __init__(self, culprit):
        # Store the offending value so __str__ can report it. The original
        # defined __str__(self, culprit) -- an illegal signature for __str__,
        # so str(err) raised TypeError instead of producing the message.
        super().__init__(culprit)
        self.culprit = culprit

    def __str__(self):
        return 'Could not convert {} to RFC 3339 timestamp.'.format(self.culprit)
|
nilq/baby-python
|
python
|
"""Test the cli module."""
|
nilq/baby-python
|
python
|
import asyncio, re, json
from smsgateway.sources.sms import command_list
from smsgateway.config import *
from smsgateway.sources.utils import *
from smsgateway import sink_sms
from telethon import TelegramClient, utils
from telethon.tl.types import Chat, User, Channel, \
PeerUser, PeerChat, PeerChannel, \
MessageMediaGeo, MessageMediaContact, MessageMediaPhoto, \
MessageMediaDocument, MessageMediaWebPage, \
Document, DocumentAttributeFilename, DocumentAttributeSticker
def init():
    """Initialise module-level state: logger, gateway identifier, command regex
    and the Telegram API credentials/session path used by the client."""
    global app_log, IDENTIFIER, command_regex, api_id, api_hash, session_path
    app_log = setup_logging("telegram-send")
    IDENTIFIER = "TG"
    command_regex = re.compile('^(?P<command>[a-zA-Z ]+)$')
    # NOTE(review): hard-coded Telegram API credentials committed to source;
    # consider loading these from configuration instead.
    api_id = 242101
    api_hash = "80cbc97ce425aae38c1e0291ef2ab2a4"
    # `os` and CONFIG_DIR are presumably provided by the star imports at the
    # top of the file -- TODO confirm.
    session_path = os.path.join(CONFIG_DIR, 'telegram-send')
def check(cmd, multiline):
    """Return True when *cmd* selects this Telegram handler and the input is multi-line.

    :param cmd: the command word extracted from the incoming message.
    :param multiline: truthy when the message has a multi-line body.
    """
    init()
    # Collapsed from `if <cond>: return True / else: return False`; bool()
    # keeps the strict True/False return even for truthy non-bool `multiline`.
    return cmd.lower() == IDENTIFIER.lower() and bool(multiline)
def get_display_name(entity):
    """Return a human-readable name for a Telethon entity.

    Users get "first last" (skipping missing parts); chats and channels get
    their title; anything else yields None.
    """
    if isinstance(entity, User):
        name_parts = [entity.first_name, entity.last_name]
        return ' '.join(part for part in name_parts if part)
    if isinstance(entity, (Chat, Channel)):
        return entity.title
    return None
async def send_message(message, to_matched):
    """Send *message* to the Telegram dialog whose display name equals *to_matched*.

    Returns a (success, text) tuple: on failure the text is the error message,
    on success it is the formatted status SMS.
    """
    app_log.info("Starting client..")
    client = TelegramClient(session_path, api_id, api_hash)
    try:
        await client.start()
    except Exception as e:
        ret = "Could not connect! Run python3 -m smsgateway.sources.commands.send_telegram to authorize!\nError: %s" % e
        app_log.error(ret)
        return (False, ret)
    # Resolve the recipient by scanning the dialog list for a matching display name.
    to = None
    async for x in client.iter_dialogs():
        name = get_display_name(x.entity)
        if not to and name and name == to_matched:
            to = x.entity.id
            app_log.info("Found it via display_name: %s" % x.entity.stringify())
            break
    if not to:
        # BUGFIX: the original logged f"Couldn't find {to}" -- `to` is always
        # None on this path; the searched-for name is `to_matched`.
        app_log.warning(f"Couldn't find {to_matched}! Trying directly..")
        to = name = to_matched
    app_log.info("Sending Telegram msg:\n%s" % message)
    try:
        import getpass
        app_log.info("I am: %s" % getpass.getuser())
    except:
        pass
    await client.send_message(to, message)
    await client.disconnect()
    msg = format_sms(IDENTIFIER, message, {
        'to': name,
        'status': 'Processed'
    })
    app_log.info(msg)
    return True, msg
def run(lines):
    """Parse a forwarded multi-line command and send it as a Telegram message.

    Expected layout: the identifier line, `To: <name>` header line(s), a blank
    line, then the message body. Returns None on success or an error string.
    """
    init()
    app_log.info("Forwarding Telegram Message")
    messageStarted = False
    to_matched = None
    message = ""
    for line in lines[1:]:  # skip IDENTIFIER
        if messageStarted:
            if message:
                message += "\n"
            message += f"{line}"
        elif not line.strip():  # first empty line separates headers from body
            messageStarted = True
        else:
            mTo = re.match("^To: (.*)$", line)
            if mTo:
                to_matched = mTo.group(1).strip()
            else:
                # BUGFIX: "Unkown" -> "Unknown" in the warning message.
                app_log.warning(f"Unknown header: {line}!")
    if to_matched and message:
        loop = asyncio.get_event_loop()
        (success, ret) = loop.run_until_complete(send_message(message, to_matched))
        if success:
            ret = None
        loop.close()
    else:
        ret = f"Couldn't match To: {to_matched} or message {message}"
        app_log.error(ret)
    return ret
if __name__ == '__main__':
    # Running this module directly performs the interactive Telegram
    # authorization (Telethon prompts for the login code on first start).
    init()
    client = TelegramClient(session_path, api_id, api_hash)
    # NOTE(review): `sys` is not imported in this module's visible import
    # block -- presumably supplied by a star import; confirm. Also confirm
    # that client.start() is falsy on failure for the installed Telethon
    # version (in some versions it returns the client/coroutine).
    if not client.start():
        app_log.error(
            "Could not connect to Telegram!\nIf you haven't authorized this client, run python3 -m smsgateway.sources.commands.send_telegram!")
        sys.exit(1)
# Register this handler with the gateway's command dispatcher.
command_list.append({
    'name': 'TG-Forwarder',
    'check': check,
    'run': run
})
|
nilq/baby-python
|
python
|
from environs import Env
from lektor.pluginsystem import Plugin
# Plugin version (date-based scheme).
__version__ = "18.6.12.3"

# Prefix applied to environment variable names unless overridden in the
# plugin config or bypassed with no_prefix=True.
DEFAULT_PREFIX = "LEKTOR_"
class LektorEnv:
    """Wrapper around environs.Env that applies a configurable name prefix."""

    def __init__(self, config=None):
        self.env = Env()
        if config:
            # Plugin ini may override the prefix via `envvar.prefix`.
            self.prefix = config.get("envvar.prefix", DEFAULT_PREFIX)
        else:
            self.prefix = DEFAULT_PREFIX

    def envvars(self, name, var_type=None, no_prefix=False):
        """Read env var *name*, optionally cast via an environs parser name
        (e.g. 'int', 'bool'); no_prefix=True skips the configured prefix."""
        effective_prefix = "" if no_prefix else self.prefix
        with self.env.prefixed(effective_prefix):
            if not var_type:
                return self.env(name)
            caster = getattr(self.env, var_type)
            return caster(name)
class EnvvarsPlugin(Plugin):
    """Lektor plugin that exposes environment variables to Jinja templates."""

    name = "Environment Variables"
    description = "A plugin making environment variables available in templates."

    def on_setup_env(self, **extra):
        # Register the `envvars` helper as a Jinja global, configured from
        # this plugin's ini file (controls the envvar prefix).
        reader = LektorEnv(self.get_config())
        self.env.jinja_env.globals.update({"envvars": reader.envvars})
|
nilq/baby-python
|
python
|
import sys
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
import logging.handlers
import time
from pandas import DataFrame
# Report whether the interpreter is 64-bit; the Kiwoom OpenAPI OCX control
# below requires a 32-bit Python.
is_64bits = sys.maxsize > 2**32
if is_64bits:
    print('64bit 환경입니다.')
else:
    print('32bit 환경입니다.')
# Console logger for this module.
formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')
logger = logging.getLogger("crumbs")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# Minimum delay (seconds) between TR requests to respect the broker's rate limit.
TR_REQ_TIME_INTERVAL = 0.2
class Openapi(QAxWidget):
    """Qt ActiveX wrapper around the Kiwoom Securities OpenAPI OCX control.

    Connects to the broker on construction, wires the COM event callbacks, and
    exposes helpers to fetch daily OHLCV data (TR opt10081) and account info.
    NOTE(review): several methods referenced by the event handlers
    (_opw00001, _opt10074, is_all_item_db_check, get_chejan_data, ...) are not
    defined in this chunk -- presumably elsewhere in the project; confirm.
    """

    def __init__(self):
        print("openapi __name__:", __name__)
        super().__init__()
        self._create_open_api_instance()
        self._set_signal_slots()
        self.comm_connect()
        self.account_info()

    def _opt10081(self, rqname, trcode):
        """Drain one opt10081 (daily chart) response into self.ohlcv."""
        # Number of repeated rows available in this TR response.
        ohlcv_cnt = self._get_repeat_cnt(trcode, rqname)
        # Append the response row by row.
        for i in range(ohlcv_cnt):
            date = self._get_comm_data(trcode, rqname, i, "일자")
            open = self._get_comm_data(trcode, rqname, i, "시가")
            high = self._get_comm_data(trcode, rqname, i, "고가")
            low = self._get_comm_data(trcode, rqname, i, "저가")
            close = self._get_comm_data(trcode, rqname, i, "현재가")
            volume = self._get_comm_data(trcode, rqname, i, "거래량")
            self.ohlcv['date'].append(date)
            self.ohlcv['open'].append(int(open))
            self.ohlcv['high'].append(int(high))
            self.ohlcv['low'].append(int(low))
            self.ohlcv['close'].append(int(close))
            self.ohlcv['volume'].append(int(volume))

    def _receive_tr_data(self, screen_no, rqname, trcode, record_name, next, unused1, unused2, unused3, unused4):
        """OnReceiveTrData event handler: dispatch by request name and release
        the waiting event loop. `next == '2'` means more pages remain."""
        if next == '2':
            self.remained_data = True
        else:
            self.remained_data = False
        if rqname == "opt10081_req":
            self._opt10081(rqname, trcode)
        elif rqname == "opw00001_req":
            # deposit (예수금) details
            self._opw00001(rqname, trcode)
        elif rqname == "opw00018_req":
            # held positions
            self._opw00018(rqname, trcode)
        elif rqname == "opt10074_req":
            # realized profit
            self._opt10074(rqname, trcode)
        elif rqname == "opw00015_req":
            # transaction list
            self._opw00015(rqname, trcode)
        elif rqname == "opt10076_req":
            # order/fill (체결) list
            self._opt10076(rqname, trcode)
        elif rqname == "opt10073_req":
            # today's profit
            self._opt10073(rqname, trcode)
        elif rqname == "opt10080_req":
            # minute chart data
            self._opt10080(rqname, trcode)
        # Wake comm_rq_data(); the loop attribute may not exist yet on the
        # very first event, hence the AttributeError guard.
        try:
            self.tr_event_loop.exit()
        except AttributeError:
            pass

    # get_total_data : fetch the full daily OHLCV history for one stock.
    # usage:
    #   code  : stock code (e.g. '005930')
    #   start : base date (e.g. '20200424') -> returns all open/high/low/close/volume up to that date
    def get_total_data(self, code, start):
        """Return a DataFrame of all daily OHLCV rows for *code* up to *start*
        (empty list when no data came back)."""
        self.ohlcv = {'date': [], 'open': [], 'high': [], 'low': [], 'close': [], 'volume': []}
        self.set_input_value("종목코드", code)
        self.set_input_value("기준일자", start)
        self.set_input_value("수정주가구분", 1)
        self.comm_rq_data("opt10081_req", "opt10081", 0, "0101")
        # Keep paging (continuation flag 2) until the server reports no more data.
        while self.remained_data == True:
            # time.sleep(TR_REQ_TIME_INTERVAL)
            self.set_input_value("종목코드", code)
            self.set_input_value("기준일자", start)
            self.set_input_value("수정주가구분", 1)
            self.comm_rq_data("opt10081_req", "opt10081", 2, "0101")
            time.sleep(0.2)
        # Empty-response guards.
        if len(self.ohlcv) == 0:
            return []
        if self.ohlcv['date'] == '':
            return []
        df = DataFrame(self.ohlcv, columns=['open', 'high', 'low', 'close', 'volume'], index=self.ohlcv['date'])
        return df

    # get_one_day_option_data : fetch a single field of one stock for one day.
    # usage:
    #   code   : stock code
    #   start  : date to query
    #   option : one of open, high, low, close, volume
    def get_one_day_option_data(self, code, start, option):
        """Return the requested OHLCV field for *code* on *start*, or False
        when the data or the option name is invalid."""
        self.ohlcv = {'date': [], 'open': [], 'high': [], 'low': [], 'close': [], 'volume': []}
        self.set_input_value("종목코드", code)
        self.set_input_value("기준일자", start)
        self.set_input_value("수정주가구분", 1)
        self.comm_rq_data("opt10081_req", "opt10081", 0, "0101")
        if self.ohlcv['date'] == '':
            return False
        df = DataFrame(self.ohlcv, columns=['open', 'high', 'low', 'close', 'volume'], index=self.ohlcv['date'])
        if option == 'open':
            return df.iloc[0, 0]
        elif option == 'high':
            return df.iloc[0, 1]
        elif option == 'low':
            return df.iloc[0, 2]
        elif option == 'close':
            return df.iloc[0, 3]
        elif option == 'volume':
            return df.iloc[0, 4]
        else:
            return False

    def multi_601_get_ohlcv_daliy_craw(self, code, code_name, start):
        """Fetch one page of daily OHLCV for the daily-craw crawler.

        NOTE(review): self.ohlcv['date'][0] raises IndexError when the
        response is empty -- confirm callers guarantee at least one row.
        """
        self.ohlcv = {'index': [], 'date': [], 'open': [], 'high': [], 'low': [], 'close': [], 'volume': []}
        self.set_input_value("종목코드", code)
        self.set_input_value("기준일자", start)
        self.set_input_value("수정주가구분", 1)
        self.comm_rq_data("opt10081_req", "opt10081", 0, "0101")
        time.sleep(0.2)
        if self.ohlcv['date'][0] == '':
            return []
        if self.ohlcv['date'] == '':
            return []
        df = DataFrame(self.ohlcv, columns=['date', 'open', 'high', 'low', 'close', 'volume'])
        return df

    def account_info(self):
        """Cache the first account number from the login info."""
        account_number = self.get_login_info("ACCNO")
        self.account_number = account_number.split(';')[0]
        logger.debug("계좌번호: " + self.account_number)

    def get_login_info(self, tag):
        """Return login info for *tag* (e.g. "ACCNO") via the OCX control."""
        try:
            ret = self.dynamicCall("GetLoginInfo(QString)", tag)
            time.sleep(TR_REQ_TIME_INTERVAL)
            return ret
        except Exception as e:
            logger.critical(e)

    def _create_open_api_instance(self):
        """Bind this widget to the Kiwoom OpenAPI ActiveX control."""
        try:
            self.setControl("KHOPENAPI.KHOpenAPICtrl.1")
        except Exception as e:
            logger.critical(e)

    def _set_signal_slots(self):
        """Connect the OCX COM events to their Python handlers."""
        try:
            self.OnEventConnect.connect(self._event_connect)
            self.OnReceiveTrData.connect(self._receive_tr_data)
            self.OnReceiveMsg.connect(self._receive_msg)
            # Handler for OnReceiveChejanData, fired by the Kiwoom server at
            # order-execution time.
            self.OnReceiveChejanData.connect(self._receive_chejan_data)
        except Exception as e:
            # Connecting the 32-bit OCX fails under a 64-bit interpreter.
            is_64bits = sys.maxsize > 2**32
            if is_64bits:
                logger.critical('현재 Anaconda는 64bit 환경입니다. 32bit 환경으로 실행하여 주시기 바랍니다.')
            else:
                logger.critical(e)

    def _receive_chejan_data(self, gubun, item_cnt, fid_list):
        """OnReceiveChejanData handler. gubun "0" = execution data,
        "1" = balance data; FIDs are read with get_chejan_data()."""
        print("_receive_chejan_data!!!")
        print("gubun!!!")
        print(gubun)
        # Execution (체결) data
        if gubun == "0":
            print("in 체결 data!!!!!")
            order_num = self.get_chejan_data(9203)
            code_name_temp = self.get_chejan_data(302)
            code_name = self.change_format3(code_name_temp)
            code = self.codename_to_code(code_name)
            chegyul_fail_amount_temp = self.get_chejan_data(902)
            order_gubun = self.get_chejan_data(905)
            purchase_price = self.get_chejan_data(10)
            if code != False and code != "" and code != 0 and code != "0":
                if chegyul_fail_amount_temp != "":
                    # Not yet in all_item_db -> treat as a brand-new position.
                    if self.is_all_item_db_check(code) == False:
                        print("all_item_db에 매도 안 된 종목이 없음 ! 즉 신규다!!")
                        if chegyul_fail_amount_temp == "0":
                            # Fully filled.
                            print("완벽히 싹 다 체결됨!")
                            self.db_to_all_item(order_num, code, code_name, 0, purchase_price)
                        else:
                            # Partially filled.
                            print("체결 되었지만 덜 체결 됨!")
                            self.db_to_all_item(order_num, code, code_name, 1, purchase_price)
                    elif order_gubun == "+매수":
                        # Buy order for an already-tracked stock.
                        if chegyul_fail_amount_temp != "0" and self.stock_chegyul_check(code) == True:
                            print("재매수던 매수던 미체결 수량이 남아있고, stock_chegyul_check True인 놈 / 즉, 계속 사야되는 종목!")
                            pass
                        elif chegyul_fail_amount_temp == "0" and self.stock_chegyul_check(code) == True:
                            print("재매수던 매수던 미체결 수량이 없고, stock_chegyul_check True인 놈 / 즉, 매수 끝난 종목!")
                            self.end_invest_count_check(code)
                        elif self.stock_chegyul_check(code) == False:
                            print("현재 all db에 존재하고 체결 체크가 0인 종목, 재매수 하는 경우!")
                        else:
                            pass
                    elif order_gubun == "-매도":
                        # Sell order: full vs partial exit.
                        if chegyul_fail_amount_temp == "0":
                            print("all db에 존재하고 전량 매도하는 경우!")
                            self.sell_final_check(code)
                        else:
                            print("all db에 존재하고 수량 남겨 놓고 매도하는 경우!")
                            self.sell_chegyul_fail_check(code)
                    else:
                        pass
                else:
                    print("_receive_chejan_data 에서 code 가 불량은 아닌데 체결된 종목이 빈공간인 경우!")
            else:
                print("_receive_chejan_data 에서 code가 불량이다!!")
        elif gubun == "1":
            # Balance data
            print("잔고데이터!!!!!")
            chegyul_fail_amount_temp = self.get_chejan_data(902)
            print(chegyul_fail_amount_temp)
        else:
            pass

    def comm_connect(self):
        """Open the login dialog and block until OnEventConnect fires."""
        try:
            self.dynamicCall("CommConnect()")
            time.sleep(TR_REQ_TIME_INTERVAL)
            self.login_event_loop = QEventLoop()
            self.login_event_loop.exec_()
        except Exception as e:
            logger.critical(e)

    def _receive_msg(self, sScrNo, sRQName, sTrCode, sMsg):
        """OnReceiveMsg handler: print server messages."""
        print(sMsg)

    def _event_connect(self, err_code):
        """OnEventConnect handler: log the result and release comm_connect()."""
        try:
            if err_code == 0:
                logger.debug("connected")
            else:
                logger.debug(f"disconnected. err_code : {err_code}")
            self.login_event_loop.exit()
        except Exception as e:
            logger.critical(e)

    def get_connect_state(self):
        """Return the OCX connection state (1 = connected)."""
        try:
            ret = self.dynamicCall("GetConnectState()")
            time.sleep(TR_REQ_TIME_INTERVAL)
            return ret
        except Exception as e:
            logger.critical(e)

    def set_input_value(self, id, value):
        """Set a TR input field on the OCX control before comm_rq_data()."""
        try:
            self.dynamicCall("SetInputValue(QString, QString)", id, value)
        except Exception as e:
            logger.critical(e)

    def comm_rq_data(self, rqname, trcode, next, screen_no):
        """Send a TR request and block until _receive_tr_data exits the loop."""
        self.dynamicCall("CommRqData(QString, QString, int, QString)", rqname, trcode, next, screen_no)
        time.sleep(TR_REQ_TIME_INTERVAL)
        self.tr_event_loop = QEventLoop()
        self.tr_event_loop.exec_()

    def _get_comm_data(self, code, field_name, index, item_name):
        """Read one field from the current TR response (whitespace-stripped)."""
        ret = self.dynamicCall("GetCommData(QString, QString, int, QString)", code, field_name, index, item_name)
        return ret.strip()

    def _get_repeat_cnt(self, trcode, rqname):
        """Return the number of repeated rows in the current TR response."""
        try:
            ret = self.dynamicCall("GetRepeatCnt(QString, QString)", trcode, rqname)
            return ret
        except Exception as e:
            logger.critical(e)
if __name__ == "__main__":
    # Smoke test: construct the API wrapper, which logs in via comm_connect().
    # NOTE(review): app.exec_() is never called, so the Qt event loop only
    # runs inside the local QEventLoops and the process exits afterwards --
    # confirm this is intended.
    app = QApplication(sys.argv)
    Openapi()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A sample script showing how to start and stop Google Compute Engine instances.
"""
from google.cloud import compute_v1
# [START compute_start_instance]
def start_instance(project_id: str, zone: str, instance_name: str):
    """
    Starts a stopped Google Compute Engine instance (with unencrypted disks).
    Args:
        project_id: project ID or project number of the Cloud project your instance belongs to.
        zone: name of the zone your instance belongs to.
        instance_name: name of the instance your want to start.
    """
    instances = compute_v1.InstancesClient()
    operations = compute_v1.ZoneOperationsClient()
    operation = instances.start(project=project_id, zone=zone, instance=instance_name)
    # Poll the zone operation until the start completes.
    while operation.status != compute_v1.Operation.Status.DONE:
        operation = operations.wait(operation=operation.name, zone=zone, project=project_id)
    return
# [END compute_start_instance]
# [START compute_start_enc_instance]
def start_instance_with_encryption_key(project_id: str, zone: str, instance_name: str, key: bytes):
    """
    Starts a stopped Google Compute Engine instance whose boot disk is
    protected by a customer-supplied encryption key.
    Args:
        project_id: project ID or project number of the Cloud project your instance belongs to.
        zone: name of the zone your instance belongs to.
        instance_name: name of the instance your want to start.
        key: bytes object representing a raw base64 encoded key to your machines boot disk.
            For more information about disk encryption see:
            https://cloud.google.com/compute/docs/disks/customer-supplied-encryption#specifications
    """
    instances = compute_v1.InstancesClient()
    operations = compute_v1.ZoneOperationsClient()
    instance = instances.get(project=project_id, zone=zone, instance=instance_name)
    # Describe the protected boot disk (first attached disk) and attach the
    # raw key. To use a key stored in KMS, set `kms_key_name` and
    # `kms_key_service_account` instead of `raw_key`.
    protected_disk = compute_v1.CustomerEncryptionKeyProtectedDisk()
    protected_disk.source = instance.disks[0].source
    protected_disk.disk_encryption_key = compute_v1.CustomerEncryptionKey()
    protected_disk.disk_encryption_key.raw_key = key
    request = compute_v1.InstancesStartWithEncryptionKeyRequest()
    request.disks = [protected_disk]
    operation = instances.start_with_encryption_key(
        project=project_id, zone=zone, instance=instance_name,
        instances_start_with_encryption_key_request_resource=request)
    # Poll the zone operation until the start completes.
    while operation.status != compute_v1.Operation.Status.DONE:
        operation = operations.wait(operation=operation.name, zone=zone, project=project_id)
    return
# [END compute_start_enc_instance]
# [START compute_stop_instance]
def stop_instance(project_id: str, zone: str, instance_name: str):
    """
    Stops a running Google Compute Engine instance.
    Args:
        project_id: project ID or project number of the Cloud project your instance belongs to.
        zone: name of the zone your instance belongs to.
        instance_name: name of the instance your want to stop.
    """
    instance_client = compute_v1.InstancesClient()
    op_client = compute_v1.ZoneOperationsClient()
    op = instance_client.stop(project=project_id, zone=zone, instance=instance_name)
    # Poll the zone operation until the stop completes.
    while op.status != compute_v1.Operation.Status.DONE:
        op = op_client.wait(
            operation=op.name, zone=zone, project=project_id
        )
    return
# [END compute_stop_instance]
# [START compute_reset_instance]
def reset_instance(project_id: str, zone: str, instance_name: str):
    """
    Resets a Google Compute Engine instance (hard reset, like pressing the
    power button; the instance must be running).
    Args:
        project_id: project ID or project number of the Cloud project your instance belongs to.
        zone: name of the zone your instance belongs to.
        instance_name: name of the instance your want to reset.
    """
    instance_client = compute_v1.InstancesClient()
    op_client = compute_v1.ZoneOperationsClient()
    op = instance_client.reset(project=project_id, zone=zone, instance=instance_name)
    # Poll the zone operation until the reset completes.
    while op.status != compute_v1.Operation.Status.DONE:
        op = op_client.wait(
            operation=op.name, zone=zone, project=project_id
        )
    return
# [END compute_reset_instance]
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.