| content | origin | type |
|---|---|---|
| stringlengths 0–1.05M | stringclasses 2 values | stringclasses 2 values |
from MainWindow import Ui_MainWindow
from PyQt6 import QtWidgets
import sys
class CalcWindow(Ui_MainWindow, QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setupUi(self)
self.equalButton.clicked.connect(self.calculation)
self.cButton.clicked.connect(self.pressC)
self.plusorminusButton.clicked.connect(self.change_sign)
self.arrowButton.clicked.connect(self.remove_it)
self.percentButton.clicked.connect(self.percent)
        # Digit buttons share one handler; each connection binds its digit.
        digit_buttons = (
            self.zeroButton, self.oneButton, self.twoButton, self.threeButton,
            self.fourButton, self.fiveButton, self.sixButton, self.sevenButton,
            self.eightButton, self.nineButton,
        )
        for value, button in enumerate(digit_buttons):
            button.clicked.connect(lambda _=False, d=str(value): self.press_digit(d))
self.plusButton.clicked.connect(self.plus)
self.minusButton.clicked.connect(self.subtract)
self.divideButton.clicked.connect(self.divide)
self.multiplyButton.clicked.connect(self.multiply)
self.periodButton.clicked.connect(self.dot_it)
    def pressC(self):
        self.outputLabel.setText("0")
    def pressButton(self):
        screen = self.outputLabel.text()
        if screen in ("0", "NaN", "Incomplete"):
            screen = "0"
        self.outputLabel.setText(screen)
    # Numbers and symbols
    def press_digit(self, digit):
        text = self.outputLabel.text()
        if text == "0":  # replace the leading zero instead of appending to it
            text = ""
        self.outputLabel.setText(text + digit)
    def dot(self):  # unused: the period button is wired to dot_it below
        text = self.outputLabel.text()
        self.outputLabel.setText(text + ".")
def plus(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "+")
def multiply(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "*")
def divide(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "/")
def subtract(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "-")
def percent(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "%")
# Remove a symbol
    def remove_it(self):
        screen = self.outputLabel.text()
        if len(screen) > 1:
            screen = screen[:-1]
        else:
            screen = "0"
        self.outputLabel.setText(screen)
    def calculation(self):
        screen = self.outputLabel.text()
        try:
            # eval is tolerable only because the text comes from the
            # calculator's own buttons; never eval untrusted input.
            answer = eval(screen)
            self.outputLabel.setText(f"{answer}")
        except ZeroDivisionError:
            self.outputLabel.setText("NaN")
        except SyntaxError:
            self.outputLabel.setText("Incomplete")
        except NameError:
            self.pressC()
# Change sign of the number
    def change_sign(self):
        screen = self.outputLabel.text()
        if screen.startswith("-"):
            self.outputLabel.setText(screen[1:])
        else:
            self.outputLabel.setText(f"-{screen}")
    # Add a decimal point
    def check_symbol(self, s, arr):
        """Return the symbols from arr that occur in s."""
        return [i for i in arr if i in s]
    def dot_it(self):
        screen = self.outputLabel.text()
        if screen.endswith("."):
            return
        symbols = self.check_symbol(screen, ["+", "-", "/", "*"])
        # Position of the last operator; -1 when the screen is a single number.
        last_op = max((screen.rindex(s) for s in symbols), default=-1)
        if "." not in screen[last_op + 1:]:
            self.outputLabel.setText(f"{screen}.")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ui = CalcWindow()
ui.show()
sys.exit(app.exec())
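# Note: eval() will execute arbitrary Python, so it is only tolerable above
# because the screen text comes from the calculator's own buttons. A minimal
# hardening sketch (safe_eval is a hypothetical helper, not part of the
# original app) that accepts only numbers and basic arithmetic:
#
#     import ast, operator
#     _OPS = {ast.Add: operator.add, ast.Sub: operator.sub,
#             ast.Mult: operator.mul, ast.Div: operator.truediv,
#             ast.Mod: operator.mod, ast.USub: operator.neg}
#     def safe_eval(expr):
#         def _eval(node):
#             if isinstance(node, ast.Expression):
#                 return _eval(node.body)
#             if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
#                 return node.value
#             if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
#                 return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
#             if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
#                 return _OPS[type(node.op)](_eval(node.operand))
#             raise ValueError("unsupported expression")
#         return _eval(ast.parse(expr, mode="eval"))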
|
nilq/baby-python
|
python
|
from typing import Optional
from tornado.web import HTTPError
class APIError(HTTPError):
    def __init__(
        self,
        status_code: int,
        reason: str,
        message: Optional[str] = None,
        details: Optional[dict] = None,
    ):
log_message = ': '.join(map(str, filter(None, [message, details]))) or None
super().__init__(
status_code=status_code,
reason=reason,
log_message=log_message,
)
self.message = message
self.details = details
class InternalError(APIError):
def __init__(self):
super().__init__(500, 'Internal error')
class NotFoundError(APIError):
def __init__(self, message: str):
super().__init__(404, 'Not found', message)
class InvalidMethod(APIError):
def __init__(self):
super().__init__(405, 'Invalid method')
class ValidationError(APIError):
    def __init__(self, message: str, details: Optional[dict] = None):
super().__init__(400, 'Invalid request', message, details)
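# Usage sketch (hypothetical handler, relying on Tornado's normal HTTPError
# handling; `db` is a stand-in for whatever lookup the application uses):
#
#     from tornado.web import RequestHandler
#
#     class ItemHandler(RequestHandler):
#         def get(self, item_id):
#             item = db.get(item_id)
#             if item is None:
#                 raise NotFoundError(f"item {item_id} does not exist")
#             self.write(item)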
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Report generator
import api
import fn
import sys
import dumper
def main(argv):
config = {}
config["usagetext"] = ("repgen.py (-s SEARCHPREFIX|-a) [-c CONFIGFILE]\n"+
" This script generates a report for all servers if -a is used,\n"+
"or just the servers with SEARCHPREFIX in the server label if -s is used.\n\n"+
"Make sure you correctly configure config.conf.\n"+
"you can use -c to specify a different configuration file. Otherwise, ./config.conf is assumed.\n\n"
"In config.conf: search_field will determine the metadata field that SEARCHPREFIX is applied to\n"+
" to create the list of servers that will be reported on.\n"+
"The output configuration value will determine the output format for the information.\n"+
" Text is mainly for debugging and may not produce as much meaningful information as html or pdf.\n"+
" HTML and PDF files are placed in the ./outfile folder. If it doesn't exist, the script will fail.")
config["configfile"] = "config.conf"
serverolist = []
config = fn.set_config_items(config,argv)
serverolist = fn.build_server_list(config['host'], config['authtoken'], config['search_string'], config['search_field'], config['prox'])
serverolist = fn.enrich_server_data(config['host'], config['authtoken'], serverolist, config['prox'])
# Here we re-write the config if the logo file is on the local filesystem, because relative paths don't work well with PDF rendering.
if fn.where_is_img(config['logo_url'])[0] == 'local' and config['output'] == 'pdf':
try:
config['logo_url'] = fn.where_is_img(config['logo_url'])[1]
        except Exception:
            # Clear the field in case the data was bad.
            config['logo_url'] = ''
fn.handle_output(config, serverolist)
if __name__ == "__main__":
    print(sys.argv[1:])
main(sys.argv[1:])
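# Example invocations, following the usage text above (assuming a populated
# config.conf next to the script):
#     ./repgen.py -a
#     ./repgen.py -s SEARCHPREFIX -c /path/to/other.conf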
|
nilq/baby-python
|
python
|
import os
import logging
import json
from codecs import open
from collections import Counter
import numpy as np
import spacy
from tqdm import tqdm
"""
The content of this file is mostly copied from https://github.com/HKUST-KnowComp/R-Net/blob/master/prepro.py
"""
nlp = spacy.blank("en")
def word_tokenize(sent):
doc = nlp(sent)
return [token.text for token in doc]
def convert_idx(text, tokens):
current = 0
spans = []
for token in tokens:
current = text.find(token, current)
if current < 0:
print("Token {} cannot be found".format(token))
raise Exception()
spans.append((current, current + len(token)))
current += len(token)
return spans
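# Example: convert_idx("the cat", ["the", "cat"]) returns [(0, 3), (4, 7)],
# the character span of each token within the raw text.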
def _get_answer_span(answer, spans, texts):
text = answer["text"]
start = answer["answer_start"]
end = start + len(text)
texts.append(text)
answer_span = []
# this loop finds the overlap of answer and context
for idx, span in enumerate(spans):
if not (end <= span[0] or start >= span[1]):
answer_span.append(idx)
return answer_span[0], answer_span[-1]
def keep_unique_answers(y1, y2):
if len(y1) > 0:
a, b = zip(*list(set([(i, j) for i, j in zip(y1, y2)])))
return a, b
return y1, y2
def process_file(filename, data_type, word_counter, char_counter, version="v2.0"):
"""
filename: json file to read
data_type : 'train'/'test'/'dev'
word_counter: Just a counter for word occurence
char_counter: Just a counter for char
"""
print("Generating {} examples...\n".format(data_type))
examples = []
eval_examples = {}
total = 0
with open(filename, "r") as fh:
source = json.load(fh)
for article in tqdm(source["data"]):
for para in article["paragraphs"]:
# tokenize the para and store the span of each token in spans
# we store spans because we get position of answer start and the answer in the data
context = para["context"].replace("''", '" ').replace("``", '" ')
context_tokens = word_tokenize(context)
spans = convert_idx(context, context_tokens)
context_chars = [list(token) for token in context_tokens]
for token in context_tokens:
word_counter[token] += len(para["qas"])
for char in token:
char_counter[char] += len(para["qas"])
for qa in para["qas"]:
total += 1
ques = qa["question"].replace("''", '" ').replace("``", '" ')
ques_tokens = word_tokenize(ques)
ques_chars = [list(token) for token in ques_tokens]
for token in ques_tokens:
word_counter[token] += 1
for char in token:
char_counter[char] += 1
if version == "v2.0":
y1s, y2s = [], []
answer_texts = []
plausible_y1s, plausible_y2s = [], []
plausible_answer_texts = []
is_impossible = bool(qa["is_impossible"])
# if answering is impossible, some qas might have plausible answer and we record that.
if is_impossible:
for answer in qa["plausible_answers"]:
y1, y2 = _get_answer_span(
answer, spans, plausible_answer_texts
)
plausible_y1s.append(y1)
plausible_y2s.append(y2)
plausible_y1s, plausible_y2s = keep_unique_answers(
plausible_y1s, plausible_y2s
)
else:
for answer in qa["answers"]:
y1, y2 = _get_answer_span(answer, spans, answer_texts)
y1s.append(y1)
y2s.append(y2)
y1s, y2s = keep_unique_answers(y1s, y2s)
example = {
"context_tokens": context_tokens,
"context_chars": context_chars,
"ques_tokens": ques_tokens,
"ques_chars": ques_chars,
"y1s": y1s,
"y2s": y2s,
"plausible_y1s": plausible_y1s,
"plausible_y2s": plausible_y2s,
"id": total,
"uuid": qa["id"],
"is_impossible": is_impossible,
}
examples.append(example)
eval_examples[str(total)] = {
"context": context,
"spans": spans,
"answers": answer_texts,
"plausible_answers": plausible_answer_texts,
"uuid": qa["id"],
"is_impossible": is_impossible,
}
elif version == "v1.1": # v1.1 case
y1s, y2s = [], []
answer_texts = []
for answer in qa["answers"]:
y1, y2 = _get_answer_span(answer, spans, answer_texts)
y1s.append(y1)
y2s.append(y2)
y1s, y2s = keep_unique_answers(y1s, y2s)
example = {
"context_tokens": context_tokens,
"context_chars": context_chars,
"ques_tokens": ques_tokens,
"ques_chars": ques_chars,
"y1s": y1s,
"y2s": y2s,
"id": total,
"uuid": qa["id"],
}
examples.append(example)
                    # note: eval examples are keyed by the running example id (total)
eval_examples[str(total)] = {
"context": context,
"spans": spans,
"answers": answer_texts,
"uuid": qa["id"],
}
print(f"{len(examples)} questions in total")
return examples, eval_examples
def get_embedding(counter, data_type, limit=-1, emb_file=None, vec_size=None):
print("Generating {} embedding...".format(data_type))
embedding_dict = {}
filtered_elements = [k for k, v in counter.items() if v > limit]
# load from file if there is
if emb_file is not None:
assert vec_size is not None
with open(emb_file, "r") as fh:
for line in tqdm(fh):
array = line.split()
l = len(array)
word = "".join(array[0 : l - vec_size])
vector = list(map(float, array[l - vec_size : l]))
if word in counter and counter[word] > limit:
embedding_dict[word] = vector
print(
"{} / {} tokens have corresponding {} embedding vector".format(
len(embedding_dict), len(filtered_elements), data_type
)
)
# random embedding initialization
else:
assert vec_size is not None
for token in filtered_elements:
embedding_dict[token] = [
np.random.normal(scale=0.1) for _ in range(vec_size)
]
print(
"{} tokens have corresponding embedding vector".format(
len(filtered_elements)
)
)
# NULL and OOV are index 0 and 1 and zero vectors
NULL = "--NULL--"
OOV = "--OOV--"
token2idx_dict = {token: idx for idx, token in enumerate(embedding_dict.keys(), 2)}
token2idx_dict[NULL] = 0
token2idx_dict[OOV] = 1
embedding_dict[NULL] = [0.0 for _ in range(vec_size)]
embedding_dict[OOV] = [0.0 for _ in range(vec_size)]
idx2emb_dict = {idx: embedding_dict[token] for token, idx in token2idx_dict.items()}
emb_mat = [idx2emb_dict[idx] for idx in range(len(idx2emb_dict))]
return emb_mat, token2idx_dict
def convert_to_features(config, data, word2idx_dict, char2idx_dict):
def _get_word(word):
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in word2idx_dict:
return word2idx_dict[each]
return 1
def _get_char(char):
if char in char2idx_dict:
return char2idx_dict[char]
return 1
def filter_func(example):
return (
len(example["context_tokens"]) > para_limit
or len(example["ques_tokens"]) > ques_limit
)
example = {}
context, question = data
context = context.replace("''", '" ').replace("``", '" ')
question = question.replace("''", '" ').replace("``", '" ')
example["context_tokens"] = word_tokenize(context)
example["ques_tokens"] = word_tokenize(question)
example["context_chars"] = [list(token) for token in example["context_tokens"]]
example["ques_chars"] = [list(token) for token in example["ques_tokens"]]
spans = convert_idx(context, example["context_tokens"])
para_limit = config.para_limit
ques_limit = config.ques_limit
ans_limit = config.ans_limit
char_limit = config.char_limit
if filter_func(example):
print(" Warning: Context/Question length is over the limit")
context_idxs = np.zeros([para_limit], dtype=np.int32)
context_char_idxs = np.zeros([para_limit, char_limit], dtype=np.int32)
ques_idxs = np.zeros([ques_limit], dtype=np.int32)
ques_char_idxs = np.zeros([ques_limit, char_limit], dtype=np.int32)
y1 = np.zeros([para_limit], dtype=np.float32)
y2 = np.zeros([para_limit], dtype=np.float32)
for i, token in enumerate(example["context_tokens"][:para_limit]):
context_idxs[i] = _get_word(token)
for i, token in enumerate(example["ques_tokens"][:ques_limit]):
ques_idxs[i] = _get_word(token)
for i, token in enumerate(example["context_chars"][:para_limit]):
for j, char in enumerate(token[:char_limit]):
context_char_idxs[i, j] = _get_char(char)
for i, token in enumerate(example["ques_chars"][:ques_limit]):
for j, char in enumerate(token[:char_limit]):
ques_char_idxs[i, j] = _get_char(char)
return context_idxs, context_char_idxs, ques_idxs, ques_char_idxs, spans
def build_features(
config, examples, data_type, out_file, word2idx_dict, char2idx_dict, is_test=False
):
def _get_word(word):
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in word2idx_dict:
return word2idx_dict[each]
return word2idx_dict["--OOV--"]
def _get_char(char):
if char in char2idx_dict:
return char2idx_dict[char]
return char2idx_dict["--OOV--"]
def filter_func(example, is_test=False):
# in case of test filter nothing
if is_test:
return False
if version == "v2.0":
if example["is_impossible"]:
return (
len(example["context_tokens"]) > para_limit
or len(example["ques_tokens"]) > ques_limit
)
return (
len(example["context_tokens"]) > para_limit
or len(example["ques_tokens"]) > ques_limit
or (example["y2s"][-1] - example["y1s"][-1]) > ans_limit
)
para_limit = config.para_limit
ques_limit = config.ques_limit
ans_limit = config.ans_limit
char_limit = config.char_limit
version = config.version
print(f"Processing {data_type} examples...")
total = 0
meta = {}
N = len(examples)
context_idxs = []
context_char_idxs = []
ques_idxs = []
ques_char_idxs = []
y1s = []
y2s = []
ids = []
uuids = []
id_to_uuid = {}
if version == "v2.0":
impossibles = []
for n, example in tqdm(enumerate(examples)):
# if filter returns true, then move to next example
if filter_func(example, is_test):
continue
total += 1
context_idx = np.zeros([para_limit], dtype=np.int32)
context_char_idx = np.zeros([para_limit, char_limit], dtype=np.int32)
ques_idx = np.zeros([ques_limit], dtype=np.int32)
ques_char_idx = np.zeros([ques_limit, char_limit], dtype=np.int32)
for i, token in enumerate(example["context_tokens"][:para_limit]):
context_idx[i] = _get_word(token)
for i, token in enumerate(example["ques_tokens"][:ques_limit]):
ques_idx[i] = _get_word(token)
for i, token in enumerate(example["context_chars"][:para_limit]):
for j, char in enumerate(token[:char_limit]):
context_char_idx[i, j] = _get_char(char)
for i, token in enumerate(example["ques_chars"][:ques_limit]):
for j, char in enumerate(token[:char_limit]):
ques_char_idx[i, j] = _get_char(char)
if version == "v2.0":
if not example["is_impossible"]:
starts, ends = example["y1s"], example["y2s"]
elif config.use_plausible is True and len(example["plausible_y1s"]) > 0:
starts, ends = example["plausible_y1s"], example["plausible_y2s"]
else:
starts, ends = [-1], [-1]
# append one example for each possible answer
for start, end in zip(starts, ends):
ques_char_idxs.append(ques_char_idx)
context_idxs.append(context_idx)
ques_idxs.append(ques_idx)
context_char_idxs.append(context_char_idx)
y1s.append(start)
y2s.append(end)
ids.append(example["id"])
impossibles.append(example["is_impossible"])
uuids.append(example["uuid"])
id_to_uuid[example["id"]] = example["uuid"]
else:
starts, ends = example["y1s"], example["y2s"]
for start, end in zip(starts, ends):
ques_char_idxs.append(ques_char_idx)
context_idxs.append(context_idx)
ques_idxs.append(ques_idx)
context_char_idxs.append(context_char_idx)
y1s.append(start)
y2s.append(end)
ids.append(example["id"])
uuids.append(example["uuid"])
id_to_uuid[example["id"]] = example["uuid"]
if version == "v2.0":
np.savez(
out_file,
context_idxs=np.array(context_idxs),
context_char_idxs=np.array(context_char_idxs),
ques_idxs=np.array(ques_idxs),
ques_char_idxs=np.array(ques_char_idxs),
y1s=np.array(y1s),
y2s=np.array(y2s),
ids=np.array(ids),
impossibles=np.array(impossibles),
uuids=np.array(uuids),
)
else:
np.savez(
out_file,
context_idxs=np.array(context_idxs),
context_char_idxs=np.array(context_char_idxs),
ques_idxs=np.array(ques_idxs),
ques_char_idxs=np.array(ques_char_idxs),
y1s=np.array(y1s),
y2s=np.array(y2s),
ids=np.array(ids),
uuids=np.array(uuids),
)
print("Built {} / {} instances of features in total".format(len(y1s), N))
print("Processed {} instances of features in total".format(total))
meta["total"] = len(y1s)
meta["id_to_uuid"] = id_to_uuid
return meta
def save(filename, obj, message=None):
if message is not None:
print("Saving {}...".format(message))
with open(filename, "w") as fh:
json.dump(obj, fh, indent=4, sort_keys=True)
def preprocess(args, config):
word_counter, char_counter = Counter(), Counter()
# get embeddings
word_emb_file = config.glove_word_file
char_emb_file = config.glove_char_file if config.pretrained_char else None
# handle train file
train_examples, train_eval = process_file(
config.raw_train_file, "train", word_counter, char_counter, config.version
)
dev_examples, dev_eval = process_file(
config.raw_dev_file, "dev", word_counter, char_counter, config.version
)
if os.path.exists(config.raw_test_file):
test_examples, test_eval = process_file(
config.raw_test_file, "test", word_counter, char_counter
)
    # Note that we build embeddings from as much data as possible (train/dev/test) at training time.
word_emb_mat, word2idx_dict = get_embedding(
word_counter, "word", emb_file=word_emb_file, vec_size=config.word_emb_dim
)
char_emb_mat, char2idx_dict = get_embedding(
char_counter, "char", emb_file=char_emb_file, vec_size=config.char_emb_dim
)
build_features(
config, train_examples, "train", config.train_file, word2idx_dict, char2idx_dict
)
dev_meta = build_features(
config,
dev_examples,
"dev",
config.dev_file,
word2idx_dict,
char2idx_dict,
is_test=True,
)
if os.path.exists(config.raw_test_file):
test_meta = build_features(
config,
test_examples,
"test",
config.test_record_file,
word2idx_dict,
char2idx_dict,
is_test=True,
)
save(config.word_emb_file, word_emb_mat, message="word embedding")
save(config.char_emb_file, char_emb_mat, message="char embedding")
save(config.word2idx_file, word2idx_dict, message="word dictionary")
save(config.char2idx_file, char2idx_dict, message="char dictionary")
save(config.train_eval_file, train_eval, message="train eval")
save(config.dev_eval_file, dev_eval, message="dev eval")
save(config.dev_meta_file, dev_meta, message="dev meta")
if os.path.exists(config.raw_test_file):
save(config.test_eval_file, test_eval, message="test eval")
save(config.test_meta_file, test_meta, message="test meta")
|
nilq/baby-python
|
python
|
'''
Main module: contains the server's execution logic and the routes for consuming the API
'''
from entities.profile import Profile
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
from flask_httpauth import HTTPBasicAuth
from datetime import datetime
import pandas as pd
import numpy as np
from sessionManager import SessionManager as sm
from dbManager import Querys
from formManager import FormManager
from entities.user import User
from csv1.csvcleaner import Csvcleaner
from entities.opinionSheet import OpinionSheet
from entities.dataSheet import Datasheet
from entities.attribute import Attribute
from recommendationManger import RecommendationManager
from recommenderCore.contentBased import ContentBased
from entities.requestResult import RequestResult
from entities.history import History
from entities.automobile import Automobile
from dataExportManager import DataExportManager
from clusteringModel.kmodesManager import KmodesManager
from comprehend.analyzer import Analyzer
from sqlalchemy import create_engine
import pymysql
# VARIABLES
app = Flask(__name__)
api = Api(app)
auth = HTTPBasicAuth()
MyConnection = Querys(app)
# Password-verification callback for HTTP basic authentication
@auth.verify_password
def verify_password(username, password):
userQ=MyConnection.getUserByUsername(username)
if(userQ):
user = User("0",userQ[2],"0","0")
user.setPasswordHash(userQ[4])
if not user or not user.verify_password(password):
print("usuario '{0}' no autorizado".format(username))
return False
print("usuario '{0}' autorizado".format(username))
return userQ[1:3]
print("usuario '{0}' no autorizado".format(username))
return False
# Root endpoint
class home(Resource):
def get(self):
#lis=MyConnection.getCursorParams()
#db_connection_str = 'mysql+pymysql://'+lis[1]+':'+lis[2]+'@'+lis[0]+'/'+lis[3]
#db_connection = create_engine(db_connection_str)
return jsonify({"message": "Bienvenido a recommendautos"})
# User welcome
class wellcome(Resource):
@auth.login_required
def get(self):
return jsonify({"message":"{}".format(auth.current_user()[0])})
# Register new users
class addUser(Resource):
def post(self):
user1=User(request.json['personname'],request.json['username'],request.json['email'],request.json['password'])
user1.hash_password()
if(MyConnection.addNewUser(user1)):
print("El usuario '{}' se agrego satisfactoriamente".format(user1.getUserName()))
return jsonify({"message":"Usuario agregado satisfactoriamente", "user": user1.get_userBasic()})
print("Error al agregar al usuario '{}'".format(user1.getUserName()))
return jsonify({"message":"Error al agregar nuevo usuario", "user": user1.get_userBasic()})
# Check whether a username is already registered
class checkUser(Resource):
def get(self,user_name):
user=MyConnection.getUserByUsername(user_name)
if(user):
print("El nombre de usuario '{}' ya existe".format(user_name))
return jsonify({"message":"El usuario ya existe"})
print("El nombre de usuario '{}' no existe".format(user_name))
return jsonify({"message":"El usuario no existe"})
# Check whether an email address is already registered
class checkEmail(Resource):
def get(self):
pass
# Log a user in
class verifyUser(Resource):
def post(self):
fakeUser=User("person",request.json['username'],"email",request.json['password'])
GUID=request.json['id']
user=MyConnection.getUserByUsername(fakeUser.getUserName())
if(user):
fakeUser.setPasswordHash(user[4])
if(fakeUser.verify_password(request.json['password'])):
sk=sm.generateSessionkey(user[0],GUID)
if(MyConnection.addSk(user[0],sk,"ACTIVOS")):
fakeUser.setId(sk)
fakeUser.setPersonName(user[1])
fakeUser.setEmail(user[3])
print("El usuario {} accedio satisfactoriamente".format(fakeUser.getUserName()))
return jsonify({"message":"El usuario accedio satisfactoriamente", "user": fakeUser.get_user()})
print("Error al agregar sk en db")
print("el usuario no existe o contraseña icorecta")
return jsonify({"message":"Error de autenticación", "user": fakeUser.get_user()})
# Get or update a user's information
class dataUser(Resource):
@auth.login_required
def post(self):
fakeUser=User("0","0","0","0")
fakeUser.setId(request.json['id'])
        user=MyConnection.getUserBySessionKey(fakeUser.getId())  # the id the app handles is the session key (it changes)
if(user):
fakeUser.setPersonName(user[1])
fakeUser.setUserName(user[2])
fakeUser.setPassword("password")# El password nunca se envia como uan respuesta de servidor
fakeUser.setEmail(user[4])
print("Datos del usuario {} encontrados correctamente".format(fakeUser.getUserName()))
return jsonify({"message":"Autenticacion correcta, usuario encontrado", "user": fakeUser.get_user()})
print("Error al obtener los datos del usuario con sk: '{}'".format(fakeUser.getId()))
return jsonify({"message":"Error: No se autentico correctamente o el usuario no existe", "user": fakeUser.get_user()})
def patch(self):
fakeUser=User(request.json['personname'],request.json['username'],request.json['email'],request.json['password'])
fakeUser.hash_password()
        sk=request.json['id']  # in the app, the id is the session key
fakeUser.setId(sk)
id=MyConnection.getIdBySessionKey(sk)
if(id):
if(MyConnection.updateUser(fakeUser, id[0])):
user=MyConnection.getUserById(id[0])
if(user):
fakeUser.setPersonName(user[1])
fakeUser.setUserName(user[2])
fakeUser.setPassword("password") #el password nunca se envia como una respuesta
fakeUser.setEmail(user[3])
print("El usuario {},ha sido actualizado correctamente".format(fakeUser.getUserName()))
return jsonify({"message":"Usuario actualizado correctamente", "user": fakeUser.get_user()})
print("El usuario {},ha sido actualizado correctamente, error al retornar nuevos datos".format(fakeUser.getUserName()))
return jsonify({"message":"Usuario actualizado, error al retornar nuevo usuario", "user": fakeUser.get_user()})
print("Error al actualizar datos del usuario{}, id no encontrado".format(fakeUser.getUserName()))
return jsonify({"message":"Error al actualizar datos de usuario", "user": fakeUser.get_user()})
# Get the form
class getForm(Resource):
def get(self):
formulario=FormManager.buildForm(MyConnection)
return jsonify(formulario.getForm())
# Get a recommendation
class getRecom(Resource):
def post(self):
#myString = json.dumps(request.json, sort_keys=True, indent=4)
#print(myString)
now = datetime.now()
        id=MyConnection.getIdBySessionKey(request.json['user']['id'])  # resolve the user id from its session key
if (id):
            idReq=MyConnection.addRequest("FormA",now,id[0])  # create a new request
if(idReq):
result=RecommendationManager.getRecommendation(request.json['form'],idReq[0],MyConnection)
if(result):
return jsonify(result)
else:
return jsonify({"idRecommendation":"100"})
else:
return jsonify({"idRecommendation":"100"})
# Get request history
class getHistory(Resource):
@auth.login_required
def get(self):
idUser=MyConnection.getIdByUsername(auth.current_user()[1])
hRequests=MyConnection.getHistoryRequestByIdUser(idUser)
print(hRequests)
print(len(hRequests))
if(hRequests):
arrRequests=[]
for hRequest in hRequests:
data_Autos=MyConnection.getAutosByIdReq(hRequest[0])
dataProfile=MyConnection.getProfileById(hRequest[2])
userprofile=Profile(dataProfile[0],dataProfile[1],dataProfile[2])
arrAutos=[]
for data_Auto in data_Autos:
arrAutos.append(Automobile(data_Auto[1],data_Auto[2],data_Auto[3],data_Auto[4],data_Auto[5]))
form=FormManager.buildFormResponse(MyConnection,hRequest[0])
arrRequests.append(RequestResult(hRequest[0],hRequest[1],userprofile,hRequest[3],arrAutos,form))
response=History(len(arrRequests),arrRequests)
return jsonify(response.getHistory())
else:
#response=History(0,RequestResult(0,0,0,0,0,0))
return jsonify({"requests":0})
# Get vehicle details
class getCarDetails(Resource):
def post(self):
print(request.json['id'])
attribs=MyConnection.getAttributesByIdAuto(request.json['id'])
if(attribs):
print(attribs)
arrAttribs=[]
for attrib in attribs:
arrAttribs.append(Attribute(attrib[0],attrib[1],attrib[2]))
opinions=MyConnection.getOpinions(request.json['id'])
if(opinions):
opinionsheet=OpinionSheet(request.json['id'],opinions[0],opinions[1],opinions[2])
else:
urlA=MyConnection.getUrlAuto(request.json['id'])
opinionsheet=OpinionSheet(request.json['id'],'','',urlA[0])
datasheet=Datasheet(request.json['id'],arrAttribs,opinionsheet)
print(datasheet.getDataSheet())
return jsonify(datasheet.getDataSheet())
return jsonify({'message':'error'})
# Export data
class exportData(Resource):
def get(self):
msg='failed'
msg=DataExportManager.exportAttributes(MyConnection)
print('exportAttributes ok')
msg=ContentBased.generateOverview() #genera overview
print('generateOverview ok')
msg=DataExportManager.exportAutos(MyConnection)
print('exportAutos ok')
msg=DataExportManager.exportAutosAttributes(MyConnection)
print('exportAutosAttributes ok')
msg=DataExportManager.exportTags(MyConnection)
print('exportTags ok')
msg=DataExportManager.exportTagsAttributes(MyConnection)
print('exportTagsAttributes ok')
msg=DataExportManager.exportResponsesAttributes(MyConnection)
print('exportResponsesAttributes ok')
Csvcleaner.generateScoreSheet()
print('generateScoreSheet ok')
msg=DataExportManager.exportScoresheet(MyConnection)
print('exportScoresheet ok')
        msg=DataExportManager.exportForms(MyConnection)  # only converts to numeric, not persisted to the DB
print('exportForms ok')
msg=Csvcleaner.generateScoreSheet()
print('generateScoreSheet ok')
msg=DataExportManager.exportScoresheet(MyConnection)
print('exportScoresheet ok')
print('Datos exportados con exito!! ')
return jsonify('status: '+msg)
# Train the model
class trainModel(Resource):
def get(self):
msg='ok'
k=6
#msg=KmodesManager.generateModel(k,MyConnection,'Cao')
        msg=KmodesManager.defineProfiles(MyConnection,k)  # not executed automatically yet
        #ContentBased.generateOverview()  # only when the car data changes
return msg
# Update profiles
class updateProfiles(Resource):
def post(self):
msg='Error'
if(MyConnection.updateProfileByNcluster(request.json['nombrePerfil'],request.json['descripcionPerfil'],request.json['cluster'])):
msg='perfiles actualizados!'
return msg
# RESOURCE AND ROUTE REGISTRATION
api.add_resource(home,"/")
api.add_resource(wellcome,"/wellcome")
api.add_resource(addUser,"/signUp")
api.add_resource(checkUser,"/signUp/user/<string:user_name>")
api.add_resource(checkEmail,"/signUp/email/<string:user_email>")
api.add_resource(verifyUser,"/logIn")
api.add_resource(dataUser,"/user")
api.add_resource(getForm,"/form")
api.add_resource(getRecom,"/recom")
api.add_resource(getHistory,"/history")
api.add_resource(getCarDetails,"/details")
api.add_resource(exportData,"/exportData")
api.add_resource(trainModel,"/trainModel")
api.add_resource(updateProfiles,"/setProfile")
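# Hypothetical consumption examples (assuming the server runs on
# localhost:5000 and the JSON fields match the handlers above):
#     curl -u someuser:somepass http://localhost:5000/wellcome
#     curl -X POST http://localhost:5000/signUp \
#          -H "Content-Type: application/json" \
#          -d '{"personname":"Ana","username":"ana","email":"ana@example.com","password":"secret"}'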
# EXECUTION CONFIGURATION
if __name__ == "__main__":
app.run(host= '0.0.0.0',debug=True)
|
nilq/baby-python
|
python
|
from glitchtip.permissions import ScopedPermission
class ReleasePermission(ScopedPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin", "project:releases"],
"POST": ["project:write", "project:admin", "project:releases"],
"PUT": ["project:write", "project:admin", "project:releases"],
"DELETE": ["project:admin", "project:releases"],
}
def get_user_scopes(self, obj, user):
return obj.organization.get_user_scopes(user)
class ReleaseFilePermission(ReleasePermission):
def get_user_scopes(self, obj, user):
return obj.release.organization.get_user_scopes(user)
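# Example: with the scope_map above, a token carrying only "project:read"
# passes ReleasePermission for GET requests but is rejected for
# POST/PUT/DELETE, which require at least "project:write"
# (or "project:releases"/"project:admin").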
|
nilq/baby-python
|
python
|
# StandAlone Version
"""
Created on Thu Apr 2 19:28:15 2020
@author: Yao Tang
"""
import arcpy
import os
arcpy.env.overwriteOutput = True
arcpy.env.addOutputsToMap = True
## Specify workspace (usually the folder of the .mxd file)
arcpy.env.workspace = "L:/Projects/3020/006-01/6-0 DRAWINGS AND FIGURES/6-2 GIS/GIS/shp"
## Specify the input folder of the photos
PhotosFolder = r"L:\Projects\3020\006-01\8-0 DESIGN PHASE\DATA AND INFORMATION\Task 3 Data Collection\Geotag2ndRound\MissingInShp"
## Specify the name and the path of the output layer (GeoPhotosToPoint is the name of the layer)
## Create a geodatabase
## (A database file, only one database file is needed for the project)
database_name = "Photos_YT_2.gdb"
try:
arcpy.CreateFileGDB_management(arcpy.env.workspace, database_name)
except Exception:
    print("File already created")
    print("program proceeding")
GridFolderList = os.listdir(PhotosFolder)
print(GridFolderList)
photoOption = "ALL_PHOTOS"
fieldName3 = "FacilityID"
fieldName4 = "Note"
for grid in GridFolderList:
PhotoFolderList = os.listdir(PhotosFolder +"/" + grid)
    print(PhotoFolderList)
for folder in PhotoFolderList:
inFolder = PhotosFolder +"/" + grid + "/" + folder
outFeatures = database_name + "/" + grid + "_" + folder
badPhotosList = outFeatures + "_NoGPS"
arcpy.GeoTaggedPhotosToPoints_management(inFolder, outFeatures, badPhotosList, photoOption)
inFeatures = outFeatures
arcpy.AddXY_management(inFeatures)
arcpy.AddField_management(inFeatures, fieldName3, "TEXT")
arcpy.AddField_management(inFeatures, fieldName4, "TEXT")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="crudini",
version="0.9.3",
author="Pádraig Brady",
author_email="P@draigBrady.com",
description=("A utility for manipulating ini files"),
license="GPLv2",
keywords="ini config edit",
url="http://github.com/pixelb/crudini",
long_description=read('README'),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"Topic :: System :: Systems Administration",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Programming Language :: Python :: 2",
],
install_requires=['iniparse>=0.3.2'],
scripts=["crudini"]
)
|
nilq/baby-python
|
python
|
def abc209d():
from collections import deque
n, Q = map(int, input().split())
g = [list() for _ in range(n)]
for _ in range(n - 1):
a, b = map(int, input().split())
a, b = a - 1, b - 1
g[a].append(b)
g[b].append(a)
    # 2-color the vertices by BFS depth parity: c[v] in {0, 1}.
    c = [-1] * n
    q = deque([0])
    c[0] = 0
while len(q) > 0:
node = q.popleft()
for nxt in g[node]:
if c[nxt] != -1: continue
c[nxt] = 1 - c[node]
q.append(nxt)
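    # Why this works: the BFS above 2-colors the tree by depth parity, and
    # the path between a and b has even length exactly when c[a] == c[b];
    # an even distance means the two walkers meet at a vertex ("Town"),
    # an odd one means they meet in the middle of an edge ("Road").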
ans = []
for _ in range(Q):
a, b = map(int, input().split())
a, b = a - 1, b - 1
if c[a] == c[b]:
ans.append("Town")
else:
ans.append("Road")
for item in ans:
print(item)
abc209d()
|
nilq/baby-python
|
python
|
from sim.agents.agents import *
from sim.agents.multiagents import *
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, French National Center for Scientific Research (CNRS)
# Distributed under the (new) BSD License. See LICENSE for more info.
from pyqtgraph.Qt import QtCore
import pyqtgraph as pg
from pyqtgraph.util.mutex import Mutex
import numpy as np
from ..core import (Node, register_node_type, ThreadPollInput)
from ..core.stream.ringbuffer import RingBuffer
import distutils.version
try:
    import scipy.signal
    # scipy.signal.sosfilt was introduced in scipy 0.16
    assert distutils.version.LooseVersion(scipy.__version__)>'0.16'
    HAVE_SCIPY = True
except (ImportError, AssertionError):
    HAVE_SCIPY = False
try:
import pyopencl
mf = pyopencl.mem_flags
HAVE_PYOPENCL = True
except ImportError:
HAVE_PYOPENCL = False
class SosFiltfilt_Base:
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
self.coefficients = coefficients
        if self.coefficients.ndim == 2:
            self.nb_section = self.coefficients.shape[0]
if self.coefficients.ndim==3:
self.nb_section = self.coefficients.shape[1]
self.nb_channel = nb_channel
self.dtype = np.dtype(dtype)
self.chunksize = chunksize
self.overlapsize = overlapsize
shape = ((chunksize+overlapsize)*5, nb_channel)
self.forward_buffer = RingBuffer(shape, dtype, double=True)
self.backward_chunksize = self.chunksize+self.overlapsize
def compute_one_chunk(self, pos, data):
        assert self.chunksize == data.shape[0], 'Chunksize is bad: {} instead of {}'.format(data.shape[0], self.chunksize)
forward_chunk_filtered = self.compute_forward(data)
#~ forward_chunk_filtered = forward_chunk_filtered.astype(self.dtype)
self.forward_buffer.new_chunk(forward_chunk_filtered, index=pos)
start = pos-self.chunksize-self.overlapsize
if start>0:
backward_chunk = self.forward_buffer.get_data(start,pos)
backward_filtered = self.compute_backward(backward_chunk)
backward_filtered = backward_filtered[:self.chunksize]
return pos-self.overlapsize, backward_filtered
elif pos>self.overlapsize:
backward_chunk = self.forward_buffer.get_data(0,pos)
backward_filtered = self.compute_backward(backward_chunk)
backward_filtered = backward_filtered[:-self.overlapsize]
return pos-self.overlapsize, backward_filtered
else:
return None, None
def compute_forward(self, chunk):
raise NotImplementedError
def compute_backward(self, chunk):
raise NotImplementedError
class SosFiltfilt_Scipy(SosFiltfilt_Base):
"""
Implementation with scipy.
"""
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
SosFiltfilt_Base.__init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize)
self.zi = np.zeros((self.nb_section, 2, self.nb_channel), dtype= dtype)
def compute_forward(self, chunk):
forward_chunk_filtered, self.zi = scipy.signal.sosfilt(self.coefficients, chunk, zi=self.zi, axis=0)
forward_chunk_filtered = forward_chunk_filtered.astype(self.dtype)
return forward_chunk_filtered
def compute_backward(self, chunk):
backward_filtered = scipy.signal.sosfilt(self.coefficients, chunk[::-1, :], zi=None, axis=0)
backward_filtered = backward_filtered[::-1, :]
backward_filtered = backward_filtered.astype(self.dtype)
return backward_filtered
class SosFiltfilt_OpenCl_Base(SosFiltfilt_Base):
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
SosFiltfilt_Base.__init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize)
assert self.dtype == np.dtype('float32')
assert self.chunksize is not None, 'chunksize for opencl must be fixed'
self.coefficients = self.coefficients.astype(self.dtype)
if self.coefficients.ndim==2: #(nb_section, 6) to (nb_channel, nb_section, 6)
self.coefficients = np.tile(self.coefficients[None,:,:], (nb_channel, 1,1))
if not self.coefficients.flags['C_CONTIGUOUS']:
self.coefficients = self.coefficients.copy()
assert self.coefficients.shape[0]==self.nb_channel, 'wrong coefficients.shape'
assert self.coefficients.shape[2]==6, 'wrong coefficients.shape'
self.nb_section = self.coefficients.shape[1]
self.ctx = pyopencl.create_some_context()
#TODO : add arguments gpu_platform_index/gpu_device_index
#self.devices = [pyopencl.get_platforms()[self.gpu_platform_index].get_devices()[self.gpu_device_index] ]
#self.ctx = pyopencl.Context(self.devices)
self.queue = pyopencl.CommandQueue(self.ctx)
#host arrays
self.zi1 = np.zeros((nb_channel, self.nb_section, 2), dtype= self.dtype)
self.zi2 = np.zeros((nb_channel, self.nb_section, 2), dtype= self.dtype)
self.output1 = np.zeros((self.chunksize, self.nb_channel), dtype= self.dtype)
self.output2 = np.zeros((self.backward_chunksize, self.nb_channel), dtype= self.dtype)
#GPU buffers
self.coefficients_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.coefficients)
self.zi1_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi1)
self.zi2_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi2)
self.input1_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output1.nbytes)
self.output1_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output1.nbytes)
self.input2_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output2.nbytes)
self.output2_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output2.nbytes)
        # build the OpenCL program with the work sizes baked into the kernel
kernel = self.kernel%dict(forward_chunksize=self.chunksize, backward_chunksize=self.backward_chunksize,
nb_section=self.nb_section, nb_channel=self.nb_channel)
prg = pyopencl.Program(self.ctx, kernel)
self.opencl_prg = prg.build(options='-cl-mad-enable')
class SosFilfilt_OpenCL_V1(SosFiltfilt_OpenCl_Base):
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
SosFiltfilt_OpenCl_Base.__init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize)
self.global_size = (self.nb_channel, )
self.local_size = (self.nb_channel, )
def compute_forward(self, chunk):
if not chunk.flags['C_CONTIGUOUS']:
chunk = chunk.copy()
pyopencl.enqueue_copy(self.queue, self.input1_cl, chunk)
kern_call = getattr(self.opencl_prg, 'forward_filter')
event = kern_call(self.queue, self.global_size, self.local_size,
self.input1_cl, self.output1_cl, self.coefficients_cl, self.zi1_cl)
event.wait()
pyopencl.enqueue_copy(self.queue, self.output1, self.output1_cl)
forward_chunk_filtered = self.output1
return forward_chunk_filtered
def compute_backward(self, chunk):
if not chunk.flags['C_CONTIGUOUS']:
chunk = chunk.copy()
self.zi2[:]=0
pyopencl.enqueue_copy(self.queue, self.zi2_cl, self.zi2)
if chunk.shape[0]==self.backward_chunksize:
pyopencl.enqueue_copy(self.queue, self.input2_cl, chunk)
else:
            # edge effect at the beginning of the stream
chunk2 = np.zeros((self.backward_chunksize, self.nb_channel), dtype=self.dtype)
chunk2[-chunk.shape[0]:, :] = chunk
pyopencl.enqueue_copy(self.queue, self.input2_cl, chunk2)
kern_call = getattr(self.opencl_prg, 'backward_filter')
event = kern_call(self.queue, self.global_size, self.local_size,
self.input2_cl, self.output2_cl, self.coefficients_cl, self.zi2_cl)
event.wait()
pyopencl.enqueue_copy(self.queue, self.output2, self.output2_cl)
        if chunk.shape[0] == self.backward_chunksize:
            backward_chunk_filtered = self.output2
        else:
            # edge effect at the beginning of the stream
            backward_chunk_filtered = self.output2[-chunk.shape[0]:, :]
        return backward_chunk_filtered
kernel = """
#define forward_chunksize %(forward_chunksize)d
#define backward_chunksize %(backward_chunksize)d
#define nb_section %(nb_section)d
#define nb_channel %(nb_channel)d
__kernel void sos_filter(__global float *input, __global float *output, __constant float *coefficients,
__global float *zi, int chunksize, int direction) {
int chan = get_global_id(0); //channel indice
int offset_filt2; //offset channel within section
int offset_zi = chan*nb_section*2;
int idx;
float w0, w1,w2;
float res;
for (int section=0; section<nb_section; section++){
offset_filt2 = chan*nb_section*6+section*6;
w1 = zi[offset_zi+section*2+0];
w2 = zi[offset_zi+section*2+1];
for (int s=0; s<chunksize;s++){
if (direction==1) {idx = s*nb_channel+chan;}
else if (direction==-1) {idx = (chunksize-s-1)*nb_channel+chan;}
if (section==0) {w0 = input[idx];}
else {w0 = output[idx];}
w0 -= coefficients[offset_filt2+4] * w1;
w0 -= coefficients[offset_filt2+5] * w2;
res = coefficients[offset_filt2+0] * w0 + coefficients[offset_filt2+1] * w1 + coefficients[offset_filt2+2] * w2;
w2 = w1; w1 =w0;
output[idx] = res;
}
zi[offset_zi+section*2+0] = w1;
zi[offset_zi+section*2+1] = w2;
}
}
__kernel void forward_filter(__global float *input, __global float *output, __constant float *coefficients, __global float *zi){
sos_filter(input, output, coefficients, zi, forward_chunksize, 1);
}
__kernel void backward_filter(__global float *input, __global float *output, __constant float *coefficients, __global float *zi) {
sos_filter(input, output, coefficients, zi, backward_chunksize, -1);
}
"""
class SosFilfilt_OpenCL_V3(SosFiltfilt_OpenCl_Base):
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
SosFiltfilt_OpenCl_Base.__init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize)
self.global_size = (self.nb_channel, self.nb_section)
self.local_size = (1, self.nb_section)
def compute_forward(self, chunk):
if not chunk.flags['C_CONTIGUOUS']:
chunk = chunk.copy()
pyopencl.enqueue_copy(self.queue, self.input1_cl, chunk)
kern_call = getattr(self.opencl_prg, 'forward_filter')
event = kern_call(self.queue, self.global_size, self.local_size,
self.input1_cl, self.output1_cl, self.coefficients_cl, self.zi1_cl)
event.wait()
pyopencl.enqueue_copy(self.queue, self.output1, self.output1_cl)
forward_chunk_filtered = self.output1
return forward_chunk_filtered
def compute_backward(self, chunk):
if not chunk.flags['C_CONTIGUOUS']:
chunk = chunk.copy()
self.zi2[:]=0
pyopencl.enqueue_copy(self.queue, self.zi2_cl, self.zi2)
if chunk.shape[0]==self.backward_chunksize:
pyopencl.enqueue_copy(self.queue, self.input2_cl, chunk)
else:
            # edge effect at the beginning of the stream
chunk2 = np.zeros((self.backward_chunksize, self.nb_channel), dtype=self.dtype)
chunk2[-chunk.shape[0]:, :] = chunk
pyopencl.enqueue_copy(self.queue, self.input2_cl, chunk2)
kern_call = getattr(self.opencl_prg, 'backward_filter')
event = kern_call(self.queue, self.global_size, self.local_size,
self.input2_cl, self.output2_cl, self.coefficients_cl, self.zi2_cl)
event.wait()
pyopencl.enqueue_copy(self.queue, self.output2, self.output2_cl)
        if chunk.shape[0] == self.backward_chunksize:
            backward_chunk_filtered = self.output2
        else:
            # edge effect at the beginning of the stream
            backward_chunk_filtered = self.output2[-chunk.shape[0]:, :]
        return backward_chunk_filtered
kernel = """
#define forward_chunksize %(forward_chunksize)d
#define backward_chunksize %(backward_chunksize)d
#define nb_section %(nb_section)d
#define nb_channel %(nb_channel)d
__kernel void sos_filter(__global float *input, __global float *output, __constant float *coefficients,
__global float *zi, int chunksize, int direction) {
int chan = get_global_id(0); //channel indice
int section = get_global_id(1); //section indice
int offset_filt2; //offset channel within section
int offset_zi = chan*nb_section*2;
int idx;
float w0, w1,w2;
float res;
int s2;
w1 = zi[offset_zi+section*2+0];
w2 = zi[offset_zi+section*2+1];
for (int s=0; s<chunksize+(3*nb_section);s++){
barrier(CLK_GLOBAL_MEM_FENCE);
s2 = s-section*3;
if (s2>=0 && (s2<chunksize)){
offset_filt2 = chan*nb_section*6+section*6;
if (direction==1) {idx = s2*nb_channel+chan;}
else if (direction==-1) {idx = (chunksize-s2-1)*nb_channel+chan;}
if (section==0) {w0 = input[idx];}
else {w0 = output[idx];}
w0 -= coefficients[offset_filt2+4] * w1;
w0 -= coefficients[offset_filt2+5] * w2;
res = coefficients[offset_filt2+0] * w0 + coefficients[offset_filt2+1] * w1 + coefficients[offset_filt2+2] * w2;
w2 = w1; w1 =w0;
output[idx] = res;
}
}
zi[offset_zi+section*2+0] = w1;
zi[offset_zi+section*2+1] = w2;
}
__kernel void forward_filter(__global float *input, __global float *output, __constant float *coefficients, __global float *zi){
sos_filter(input, output, coefficients, zi, forward_chunksize, 1);
}
__kernel void backward_filter(__global float *input, __global float *output, __constant float *coefficients, __global float *zi) {
sos_filter(input, output, coefficients, zi, backward_chunksize, -1);
}
"""
sosfiltfilt_engines = { 'scipy' : SosFiltfilt_Scipy, 'opencl' : SosFilfilt_OpenCL_V1, 'opencl3' : SosFilfilt_OpenCL_V3 }
class SosFiltfiltThread(ThreadPollInput):
def __init__(self, input_stream, output_stream, timeout = 200, parent = None):
ThreadPollInput.__init__(self, input_stream, timeout = timeout, return_data=True, parent = parent)
self.output_stream = output_stream
self.mutex = Mutex()
def process_data(self, pos, data):
with self.mutex:
pos2, chunk_filtered = self.filter_engine.compute_one_chunk(pos, data)
if pos2 is not None:
self.output_stream.send(chunk_filtered, index=pos2)
def set_params(self, engine, coefficients, nb_channel, dtype, chunksize, overlapsize):
assert engine in sosfiltfilt_engines
EngineClass = sosfiltfilt_engines[engine]
with self.mutex:
self.filter_engine = EngineClass(coefficients, nb_channel, dtype, chunksize, overlapsize)
class OverlapFiltfilt(Node, QtCore.QObject):
"""
Node for filtering with forward-backward method (filtfilt).
This use sliding overlap technics.
The chunksize and the overlapsize are important for the accuracy of filtering.
You need to study them carfully, otherwise the result should be the same as a
real filtfilt ona long term signal. You must check the residual between real offline filtfitl
and this online OverlapFiltfilt.
Note that the chunksize have a strong effect on low frequency.
This uses Second Order (sos) coeeficient.
It internally use scipy.signal.sosfilt which is available only on scipy >0.16
The chunksize need to be fixed.
For overlapsize there are 2 cases:
1- overlapsize<chunksize/2 : natural case. each chunk partailly overlap.
The overlap are on sides, the central part come from one chunk.
2 - overlapsize>chunksize/2: chunk are fully averlapping. There is no central part.
In the 2 cases, for each arrival of new chunk at [-chunksize:],
the computed chunk at [-(chunksize+overlapsize):-overlapsize] is released.
The coefficients.shape must be (nb_section, 6).
If pyopencl is avaible you can do SosFilter.configure(engine='opencl')
In that cases the coefficients.shape can also be (nb_channel, nb_section, 6)
this help for having different filter on each channels.
The opencl engine prefer inernally (channel, sample) ordered.
In case not a copy is done. So the input ordering do impact performences.
"""
_input_specs = {'signals' : dict(streamtype = 'signals')}
_output_specs = {'signals' : dict(streamtype = 'signals')}
def __init__(self, parent = None, **kargs):
QtCore.QObject.__init__(self, parent)
Node.__init__(self, **kargs)
        assert HAVE_SCIPY, "OverlapFiltfilt needs scipy >= 0.16"
def _configure(self, chunksize=1024, overlapsize=512, coefficients = None, engine='scipy'):
"""
Set the coefficient of the filter.
See http://scipy.github.io/devdocs/generated/scipy.signal.sosfilt.html for details.
"""
self.chunksize = chunksize
self.overlapsize = overlapsize
self.engine = engine
self.set_coefficients(coefficients)
def after_input_connect(self, inputname):
self.nb_channel = self.input.params['shape'][1]
for k in ['sample_rate', 'dtype', 'shape', ]:
self.output.spec[k] = self.input.params[k]
def _initialize(self):
self.thread = SosFiltfiltThread(self.input, self.output)
self.thread.set_params(self.engine, self.coefficients, self.nb_channel,
self.output.params['dtype'], self.chunksize, self.overlapsize)
def _start(self):
self.thread.last_pos = None
self.thread.start()
def _stop(self):
self.thread.stop()
self.thread.wait()
def set_coefficients(self, coefficients):
self.coefficients = coefficients
if self.initialized():
self.thread.set_params(self.engine, self.coefficients, self.nb_channel,
self.output.params['dtype'], self.chunksize, self.overlapsize)
register_node_type(OverlapFiltfilt)
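# Configuration sketch (stream wiring omitted since it depends on the host
# graph; the bandpass design below is just an assumed example):
#
#     import scipy.signal
#     sos = scipy.signal.iirfilter(5, [0.01, 0.4], btype='bandpass',
#                                  ftype='butter', output='sos')
#     node = OverlapFiltfilt()
#     node.configure(chunksize=1024, overlapsize=512,
#                    coefficients=sos, engine='scipy')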
|
nilq/baby-python
|
python
|
"""
Copyright 2018 The Johns Hopkins University Applied Physics Laboratory.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Trains a dense (per-pixel) classifier on EM data.
"""
from __future__ import print_function
__author__ = 'mjp, Nov 2016'
__license__ = 'Apache 2.0'
import os
import sys
import time
import json
import numpy as np
np.random.seed(9999)
from keras import backend as K
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
from cnn_tools import *
from data_tools import *
K.set_image_dim_ordering('th')
if __name__ == '__main__':
with open('/jobs/train_job_params.json') as f:
params = json.load(f)
# -------------------------------------------------------------------------
rmt = BossRemote('/jobs/boss_config.cfg')
img_chan = ChannelResource(params['img_channel'],
params['collection'],
params['experiment'],
type='image',
datatype='uint8')
lbl_chan = ChannelResource(params['lbl_channel'],
params['collection'],
params['experiment'],
type='annotation',
datatype='uint64')
# Get the image data from the BOSS
x_train = rmt.get_cutout(img_chan, params['resolution'],
params['x_rng'],
params['y_rng'],
params['z_rng'])
y_train = rmt.get_cutout(lbl_chan, params['resolution'],
params['x_rng'],
params['y_rng'],
params['z_rng'])
# Data must be [slices, chan, row, col] (i.e., [Z, chan, Y, X])
x_train = x_train[:, np.newaxis, :, :].astype(np.float32)
y_train = y_train[:, np.newaxis, :, :].astype(np.float32)
# Pixel values must be in [0,1]
x_train /= 255.
y_train = (y_train > 0).astype('float32')
tile_size = tuple(params['tile_size'])
train_pct = params['train_pct']
# -------------------------------------------------------------------------
    # Data must be [slices, chan, row, col] (i.e., [Z, chan, Y, X]).
    # Split into train and valid; slice the validation set *before*
    # overwriting x_train/y_train, otherwise it would come out empty.
    n_train = int(train_pct * x_train.shape[0])
    x_valid = x_train[n_train:, ...]
    y_valid = y_train[n_train:, ...]
    x_train = x_train[:n_train, ...]
    y_train = y_train[:n_train, ...]
print('[info]: training data has shape: %s' % str(x_train.shape))
print('[info]: training labels has shape: %s' % str(y_train.shape))
print('[info]: validation data has shape: %s' % str(x_valid.shape))
print('[info]: validation labels has shape: %s' % str(y_valid.shape))
print('[info]: tile size: %s' % str(tile_size))
# train model
tic = time.time()
model = create_unet((1, tile_size[0], tile_size[1]))
if params['do_synapse']:
model.compile(optimizer=Adam(lr=1e-4),
loss=pixelwise_crossentropy_loss_w,
metrics=[f1_score])
else:
model.compile(optimizer=Adam(lr=1e-4),
loss=pixelwise_crossentropy_loss,
metrics=[f1_score])
# if weights_file:
# model.load_weights(weights_file)
train_model(x_train, y_train, x_valid, y_valid, model,
params['output_dir'], do_augment=params['do_augment'],
n_epochs=params['n_epochs'], mb_size=params['mb_size'],
n_mb_per_epoch=params['n_mb_per_epoch'],
save_freq=params['save_freq'])
print('[info]: total time to train model: %0.2f min' %
((time.time() - tic)/60.))
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
|
nilq/baby-python
|
python
|
"Thread safe RLock defined for lru cache."
# https://stackoverflow.com/questions/16567958/when-and-how-to-use-pythons-rlock
def RLock():
"""
Make the container thread safe if running in a threaded context.
"""
import threading
return threading.RLock()
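# Intended use (assumed caller, not part of this module): create one lock per
# cache and take it around every read/update of the shared state.
#
#     _lock = RLock()
#     with _lock:
#         ...  # mutate the cache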
|
nilq/baby-python
|
python
|
import logging
_LOGGER = logging.getLogger(__name__)
def decode(packet):
"""
https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolEverflourish.cpp
"""
data = packet["data"]
house = data & 0xFFFC00
house >>= 10
unit = data & 0x300
unit >>= 8
unit += 1
method = data & 0xF
# _LOGGER.debug("Everflourish (data=%x, house=%d, "
# "unit=%d, method=%d)",
# data, house, unit, method)
if house > 16383 or unit < 1 or unit > 4:
# not everflourish
return
if method == 0:
method = "turnoff"
elif method == 15:
method = "turnon"
elif method == 10:
method = "learn"
else:
# not everflourish
return
return dict(
packet,
_class="command",
model="selflearning",
house=house,
unit=unit,
method=method,
)
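# Worked example: for house=42, unit=2 and an "on" command the raw word is
# (42 << 10) | ((2 - 1) << 8) | 15 == 43279, so
# decode({"data": 43279}) yields house=42, unit=2, method="turnon".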
def encode(method):
"""
https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolEverflourish.cpp
"""
raise NotImplementedError()
|
nilq/baby-python
|
python
|
# script to copy history from a FITS table to the FITS header
# FITS images only, works in current directory
# Argument:
# 1) Name of input FITS
# example:
# Python scriptHi2Header.py myImage.fits
import sys, Obit, Image, History, OSystem, OErr
# Init Obit
err=OErr.OErr()
ObitSys=OSystem.OSystem ("Hi2Header", 1, 100, 1, ["None"], 1, ["./"], 1, 0, err)
OErr.printErrMsg(err, "Error with Obit startup")
# Files (FITS)
inFile = sys.argv[1]
inDisk = 0
# Set data
inImage = Image.newPFImage("Input image", inFile, inDisk, 1, err)
OErr.printErrMsg(err, "Error initializing")
# For debugging
#Obit.Bomb()
# Make history
inInfo = Image.PGetList(inImage)
outInfo = Image.PGetList(inImage)
inHistory = History.History("history", inInfo, err)
outHistory = History.History("history", outInfo, err)
OErr.printErrMsg(err, "Error initializing history")
History.PCopy2Header(inHistory, outHistory, err)
OErr.printErrMsg(err, "Error copying history to FITS header")
# Say something
print "Copied History table to FITS header for",inFile
# Shutdown Obit
OErr.printErr(err)
|
nilq/baby-python
|
python
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'superDigit' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. STRING n
# 2. INTEGER k
#
def superDigit(n, k):
    # Once n has been reduced to a single digit d, fold in the k repetitions:
    # the super digit (digital root) of n repeated k times equals that of d * k.
    if len(n) == 1 and k > 1:
        n, k = str(int(n) * k), 1
    len_n = len(n)
    if len_n == 1:
        return int(n)
    # Sum digits pairwise from both ends, halving the number of iterations.
    suma = 0
    is_odd = len_n % 2
    for i in range(len_n // 2 + is_odd):
        pos_f = i
        pos_b = len_n - i - 1
        if pos_f != pos_b:
            suma += int(n[pos_f]) + int(n[pos_b])
        else:
            suma += int(n[pos_f])
    return superDigit(str(suma), k)
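# Worked example: superDigit("148", 3) == 3, since 148148148 -> 39 -> 12 -> 3.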
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = first_multiple_input[0]
k = int(first_multiple_input[1])
result = superDigit(n, k)
fptr.write(str(result) + '\n')
fptr.close()
|
nilq/baby-python
|
python
|
#
# Copyright 2015-2020 Andrey Galkin <andrey@futoin.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function, absolute_import
import unittest
import subprocess
import os
import sys
import stat
import shutil
import json
import platform
from collections import OrderedDict
from futoin.cid.util import executil
CIDTEST_BIN = os.environ.get('CIDTEST_BIN', None)
if CIDTEST_BIN:
CIDTEST_BIN_EXT = False
else:
CIDTEST_BIN_EXT = True
CIDTEST_BIN = os.path.dirname( __file__ ) + '/../bin/cid'
class cid_UTBase ( unittest.TestCase ) :
IS_LINUX = platform.system() == 'Linux'
IS_MACOS = platform.system() == 'Darwin'
NO_COMPILE = os.environ.get('CIDTEST_NO_COMPILE', '0') == '1'
ALLOW_SRC_BUILDS = not NO_COMPILE
CIDTEST_BIN = CIDTEST_BIN
TEST_DIR = 'invalid'
TEST_RUN_DIR = os.environ.get('CIDTEST_RUN_DIR', os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', 'testrun')
))
_create_test_dir = False
__test__ = False
_dev_null = open(os.devnull, 'w')
_stdout_log = open(os.path.join(TEST_RUN_DIR, 'stdout.log'), 'a+')
#_stderr_log = open(os.path.join(TEST_RUN_DIR, 'stderr.log'), 'a+')
_stderr_log = _stdout_log
@classmethod
def setUpClass( cls ):
print('Python: ' + sys.executable)
try:
os.makedirs( cls.TEST_RUN_DIR )
        except OSError:
            # the run directory may already exist
            pass
os.chdir( cls.TEST_RUN_DIR )
os.environ['HOME'] = cls.TEST_RUN_DIR
cache_dir = os.path.join(os.environ['HOME'], '.cache', 'futoin-cid')
for cleanup_dir in (cache_dir, cls.TEST_DIR):
if os.path.exists( cleanup_dir ) :
for ( path, dirs, files ) in os.walk( cleanup_dir ) :
                for name in dirs + files :
                    try:
                        os.chmod( os.path.join( path, name ), stat.S_IRWXU )
                    except OSError:
                        pass
shutil.rmtree( cleanup_dir )
if cls._create_test_dir:
os.mkdir(cls.TEST_DIR)
os.chdir(cls.TEST_DIR)
def _goToBase( self ):
os.chdir( self.TEST_DIR )
def setUp( self ):
self._goToBase()
@classmethod
def _call_cid( cls, args, stdin=None, stdout=None, returncode=0, ignore=False, retout=False, merge_stderr=False ) :
cmd = []
if CIDTEST_BIN_EXT:
cmd.append(sys.executable)
if retout:
(r, w) = os.pipe()
stdout = w
cmd.append( CIDTEST_BIN )
cmd += args
if stdout is None:
stdout = cls._stdout_log
stderr = cls._stderr_log
if merge_stderr:
stderr=subprocess.STDOUT
print( 'Test Call: ' + subprocess.list2cmdline(cmd), file=cls._stderr_log )
cls._stderr_log.flush()
p = subprocess.Popen(
cmd,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr
)
if stdin is not None:
p.stdin.write( stdin )
p.wait()
if retout:
os.close(w)
res = os.read(r, 32*1024)
os.close(r)
if ignore:
return p.returncode == returncode
        if p.returncode != returncode:
            raise RuntimeError( "cid call failed: returncode %s, expected %s"
                                % (p.returncode, returncode) )
if retout:
return executil.toString(res)
return True
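    # e.g. cls._call_cid(['tool', 'list'], retout=True) returns the captured
    # stdout as a string (arguments illustrative); with ignore=True it returns
    # a boolean success flag instead of raising.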
@classmethod
def _writeFile( cls, file_name, content ):
with open(file_name, 'w') as content_file:
content_file.write( content )
content_file.write( "\n" )
@classmethod
def _writeJSON( cls, file_name, content ):
cls._writeFile( file_name, json.dumps( content ) )
@classmethod
def _readFile( cls, file_name ):
with open(file_name, 'r') as content_file:
content = content_file.read()
return content
@classmethod
def _readJSON( cls, file_name ):
content = cls._readFile(file_name)
object_pairs_hook = lambda pairs: OrderedDict( pairs )
return json.loads( content, object_pairs_hook=object_pairs_hook )
@classmethod
def _redirectAsyncStdIO( cls ):
os.dup2(cls._dev_null.fileno(), 0)
os.dup2(cls._stdout_log.fileno(), 1)
os.dup2(cls._stderr_log.fileno(), 2)
def _firstGet(self, url):
import requests, time
for i in range(15):
try:
res = requests.get(url, timeout=3)
if res.ok:
return res
else:
time.sleep(1)
            except requests.RequestException:
                time.sleep(1)
else:
            self.fail('URL never became reachable: ' + url)
class cid_Tool_UTBase ( cid_UTBase ) :
__test__ = False
TOOL_NAME = 'invalid'
TOOL_ENV = {}
_env_backup = None
@classmethod
def setUpClass( cls ):
cls._env_backup = {}
cls.TEST_DIR = os.path.join(cls.TEST_RUN_DIR, 'tool_'+cls.TOOL_NAME)
super(cid_Tool_UTBase, cls).setUpClass()
os.mkdir( cls.TEST_DIR )
os.chdir( cls.TEST_DIR )
for k, v in cls.TOOL_ENV.items():
cls._env_backup[k] = os.environ.get(k, None)
os.environ[k] = v
@classmethod
def tearDownClass( cls ):
for k, v in cls._env_backup.items():
            if v is not None:
os.environ[k] = v
else:
del os.environ[k]
|
nilq/baby-python
|
python
|
import stringcase
from importlib import import_module
from .metadata import Metadata
from .resource import Resource
from .package import Package
from . import helpers
from . import errors
class Pipeline(Metadata):
"""Pipeline representation
API | Usage
-------- | --------
Public | `from frictionless import Pipeline`
    For now, only the `package` type is supported, where `steps` should
    conform to the `dataflows` processors. The Pipeline class inherits
    all of the metadata functionality from the Metadata class.
```python
pipeline = Pipeline(
{
"type": "package",
"steps": [
{"type": "load", "spec": {"loadSource": "data/table.csv"}},
{"type": "set_type", "spec": {"name": "id", "type": "string"}},
{"type": "dump_to_path", "spec": {"outPath": tmpdir}},
],
}
)
pipeline.run()
```
Parameters:
descriptor (str|dict): pipeline descriptor
name? (str): pipeline name
type? (str): pipeline type
steps? (dict[]): pipeline steps
"""
def __init__(self, descriptor=None, *, name=None, type=None, source=None, steps=None):
self.setinitial("name", name)
self.setinitial("type", type)
self.setinitial("source", source)
self.setinitial("steps", steps)
super().__init__(descriptor)
@Metadata.property
def name(self):
"""
Returns:
str?: pipeline name
"""
return self.get("name")
@Metadata.property
def type(self):
"""
Returns:
str?: pipeline type
"""
return self.get("type", "resource")
@Metadata.property
def source(self):
"""
Returns:
dict[]?: pipeline source
"""
return self.get("source")
@Metadata.property
def steps(self):
"""
Returns:
dict[]?: pipeline steps
"""
return self.get("steps")
# Run
def run(self):
"""Run the pipeline"""
steps = import_module("frictionless.steps")
transforms = import_module("frictionless.transform")
# TODO: it will not work for nested steps like steps.resource_transform
items = []
for step in self.steps:
func = getattr(steps, stringcase.snakecase(step["type"]))
items.append(func(**helpers.create_options(step["spec"])))
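        # e.g. a step {"type": "set_type", "spec": {...}} resolves to
        # frictionless.steps.set_type(**spec) via the snakecase lookup above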
if self.type == "resource":
source = Resource(self.source)
return transforms.transform_resource(source, steps=items)
else:
source = Package(self.source)
return transforms.transform_package(source, steps=items)
# Metadata
metadata_Error = errors.PipelineError
metadata_profile = { # type: ignore
"type": "object",
"required": ["type", "source", "steps"],
"properties": {
"name": {"type": "string"},
"type": {"type": "string"},
"source": {"type": "object"},
"steps": {
"type": "array",
"items": {"type": "object", "required": ["type", "spec"]},
},
},
}
|
nilq/baby-python
|
python
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from contextlib import contextmanager
from dataclasses import dataclass
from textwrap import dedent
from typing import Any
from pants.engine.internals.engine_testutil import (
assert_equal_with_printing,
remove_locations_from_traceback,
)
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import Get, rule
from pants.engine.unions import UnionRule, union
from pants.testutil.rule_runner import QueryRule
from pants.testutil.test_base import TestBase
@dataclass(frozen=True)
class A:
pass
@dataclass(frozen=True)
class B:
pass
def fn_raises(x):
raise Exception(f"An exception for {type(x).__name__}")
@rule(desc="Nested raise")
def nested_raise(b: B) -> A:
fn_raises(b)
return A()
@rule
def consumes_a_and_b(a: A, b: B) -> str:
return str(f"{a} and {b}")
@dataclass(frozen=True)
class C:
pass
@rule
def transitive_b_c(c: C) -> B:
return B()
@dataclass(frozen=True)
class D:
b: B
@rule
async def transitive_coroutine_rule(c: C) -> D:
b = await Get(B, C, c)
return D(b)
@union
class UnionBase:
pass
@union
class UnionWithNonMemberErrorMsg:
@staticmethod
def non_member_error_message(subject):
return f"specific error message for {type(subject).__name__} instance"
class UnionWrapper:
def __init__(self, inner):
self.inner = inner
class UnionA:
@staticmethod
def a() -> A:
return A()
@rule
def select_union_a(union_a: UnionA) -> A:
return union_a.a()
class UnionB:
@staticmethod
def a() -> A:
return A()
@rule
def select_union_b(union_b: UnionB) -> A:
return union_b.a()
# TODO: add MultiGet testing for unions!
@rule
async def a_union_test(union_wrapper: UnionWrapper) -> A:
union_a = await Get(A, UnionBase, union_wrapper.inner)
return union_a
class UnionX:
pass
@rule
async def error_msg_test_rule(union_wrapper: UnionWrapper) -> UnionX:
# NB: We install a UnionRule to make UnionWrapper a member of this union, but then we pass the
# inner value, which is _not_ registered.
_ = await Get(A, UnionWithNonMemberErrorMsg, union_wrapper.inner)
raise AssertionError("The statement above this one should have failed!")
class TypeCheckFailWrapper:
"""This object wraps another object which will be used to demonstrate a type check failure when
the engine processes an `await Get(...)` statement."""
def __init__(self, inner):
self.inner = inner
@rule
async def a_typecheck_fail_test(wrapper: TypeCheckFailWrapper) -> A:
# This `await` would use the `nested_raise` rule, but it won't get to the point of raising since
# the type check will fail at the Get.
_ = await Get(A, B, wrapper.inner) # noqa: F841
return A()
@dataclass(frozen=True)
class CollectionType:
# NB: We pass an unhashable type when we want this to fail at the root, and a hashable type
# when we'd like it to succeed.
items: Any
@rule
async def c_unhashable(_: CollectionType) -> C:
# This `await` would use the `nested_raise` rule, but it won't get to the point of raising since
# the hashability check will fail.
_result = await Get(A, B, list()) # noqa: F841
return C()
@rule
def boolean_and_int(i: int, b: bool) -> A:
return A()
@contextmanager
def assert_execution_error(test_case, expected_msg):
with test_case.assertRaises(ExecutionError) as cm:
yield
test_case.assertIn(expected_msg, remove_locations_from_traceback(str(cm.exception)))
class SchedulerTest(TestBase):
@classmethod
def rules(cls):
return (
*super().rules(),
consumes_a_and_b,
QueryRule(str, (A, B)),
transitive_b_c,
QueryRule(str, (A, C)),
transitive_coroutine_rule,
QueryRule(D, (C,)),
UnionRule(UnionBase, UnionA),
UnionRule(UnionWithNonMemberErrorMsg, UnionWrapper),
select_union_a,
UnionRule(union_base=UnionBase, union_member=UnionB),
select_union_b,
a_union_test,
QueryRule(A, (UnionWrapper,)),
error_msg_test_rule,
QueryRule(UnionX, (UnionWrapper,)),
boolean_and_int,
QueryRule(A, (int, bool)),
)
def test_use_params(self):
# Confirm that we can pass in Params in order to provide multiple inputs to an execution.
a, b = A(), B()
result_str = self.request(str, [a, b])
self.assertEqual(result_str, consumes_a_and_b(a, b))
# And confirm that a superset of Params is also accepted.
result_str = self.request(str, [a, b, self])
self.assertEqual(result_str, consumes_a_and_b(a, b))
# But not a subset.
expected_msg = "No installed QueryRules can compute str given input Params(A), but"
with self.assertRaisesRegex(Exception, re.escape(expected_msg)):
self.request(str, [a])
def test_transitive_params(self):
# Test that C can be provided and implicitly converted into a B with transitive_b_c() to satisfy
# the selectors of consumes_a_and_b().
a, c = A(), C()
result_str = self.request(str, [a, c])
self.assertEqual(
remove_locations_from_traceback(result_str),
remove_locations_from_traceback(consumes_a_and_b(a, transitive_b_c(c))),
)
# Test that an inner Get in transitive_coroutine_rule() is able to resolve B from C due to
# the existence of transitive_b_c().
self.request(D, [c])
def test_consumed_types(self):
assert {A, B, C, str} == set(
self.scheduler.scheduler.rule_graph_consumed_types([A, C], str)
)
def test_strict_equals(self):
# With the default implementation of `__eq__` for boolean and int, `1 == True`. But in the
# engine that behavior would be surprising, and would cause both of these Params to intern
# to the same value, triggering an error. Instead, the engine additionally includes the
# type of a value in equality.
assert A() == self.request(A, [1, True])
@contextmanager
def _assert_execution_error(self, expected_msg):
with assert_execution_error(self, expected_msg):
yield
def test_union_rules(self):
self.request(A, [UnionWrapper(UnionA())])
self.request(A, [UnionWrapper(UnionB())])
# Fails due to no union relationship from A -> UnionBase.
with self._assert_execution_error("Type A is not a member of the UnionBase @union"):
self.request(A, [UnionWrapper(A())])
def test_union_rules_no_docstring(self):
with self._assert_execution_error("specific error message for UnionA instance"):
self.request(UnionX, [UnionWrapper(UnionA())])
class SchedulerWithNestedRaiseTest(TestBase):
@classmethod
def rules(cls):
return (
*super().rules(),
a_typecheck_fail_test,
c_unhashable,
nested_raise,
QueryRule(A, (TypeCheckFailWrapper,)),
QueryRule(A, (B,)),
QueryRule(C, (CollectionType,)),
)
def test_get_type_match_failure(self):
"""Test that Get(...)s are now type-checked during rule execution, to allow for union
types."""
with self.assertRaises(ExecutionError) as cm:
# `a_typecheck_fail_test` above expects `wrapper.inner` to be a `B`.
self.request(A, [TypeCheckFailWrapper(A())])
expected_regex = "WithDeps.*did not declare a dependency on JustGet"
self.assertRegex(str(cm.exception), expected_regex)
def test_unhashable_root_params_failure(self):
"""Test that unhashable root params result in a structured error."""
# This will fail at the rust boundary, before even entering the engine.
with self.assertRaisesRegex(TypeError, "unhashable type: 'list'"):
self.request(C, [CollectionType([1, 2, 3])])
def test_unhashable_get_params_failure(self):
"""Test that unhashable Get(...) params result in a structured error."""
# This will fail inside of `c_unhashable_dataclass`.
with self.assertRaisesRegex(ExecutionError, "unhashable type: 'list'"):
self.request(C, [CollectionType(tuple())])
def test_trace_includes_rule_exception_traceback(self):
# Execute a request that will trigger the nested raise, and then directly inspect its trace.
request = self.scheduler.execution_request([A], [B()])
_, throws = self.scheduler.execute(request)
with self.assertRaises(ExecutionError) as cm:
self.scheduler._raise_on_error([t for _, t in throws])
trace = remove_locations_from_traceback(str(cm.exception))
assert_equal_with_printing(
self,
dedent(
f"""\
1 Exception encountered:
Engine traceback:
in select
in {self.__module__}.{nested_raise.__name__}
Traceback (most recent call last):
File LOCATION-INFO, in nested_raise
fn_raises(b)
File LOCATION-INFO, in fn_raises
raise Exception(f"An exception for {{type(x).__name__}}")
Exception: An exception for B
"""
),
trace,
)
|
nilq/baby-python
|
python
|
# Copyright 2016 Pavle Jonoski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from troup.node import Node
import logging
def configure_node_parser():
parser = ArgumentParser(prog='troup', description='Run single node')
# Node
parser.add_argument('--node', help='Node ID')
parser.add_argument('--neighbours', default='', nargs='+', help='Neighbour nodes')
# Async IO server props
parser.add_argument('--host', default='', help='Async IO server hostname')
    parser.add_argument('--port', default=7000, type=int, help='Async IO server port')
# Store
parser.add_argument('--storage-root', default='.data', help='Root path of the storage directory')
# System statistics
    parser.add_argument('--stats-update-interval', default=30000, type=int, help='Statistics update interval in milliseconds')
parser.add_argument('--log-level', '-l', default='info', help='Logging level')
parser.add_argument('--lock', action='store_true', help='Write node info in global lock file')
parser.add_argument('--debug', action='store_true', help='Activate the debug command-line interactive interface')
parser.add_argument('-v', '--version', action='store_true', help='Print version and exit')
return parser
def run_node():
import signal
parser = configure_node_parser()
args = parser.parse_args()
if args.version:
from troup.metadata import __version__
print(__version__)
return
logging.basicConfig(level=getattr(logging, args.log_level.upper()))
config = {
'store': {
'path': args.storage_root
},
'server': {
'hostname': args.host,
'port': args.port
},
'stats': {
'update_interval': args.stats_update_interval
},
'neighbours': args.neighbours,
'lock': args.lock
}
node = Node(node_id=args.node, config=config)
def handle_node_shutdown(signal, frame):
node.stop()
signal.signal(signal.SIGINT, handle_node_shutdown)
if args.debug:
from troup.debug import run_debug_cli
run_debug_cli()
node.start()
return node
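# Hypothetical invocation (entry point name and argument values illustrative):
#   troup --node node-1 --host 0.0.0.0 --port 7001 --log-level debug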
|
nilq/baby-python
|
python
|
f = open('3_input.txt').read().splitlines()
def life_support_rating(o2, co2):
    return o2 * co2
def filter_readings(readings, i=0, co2=False):  # where `i` is the bit position
    if len(readings) == 1:
        return readings[0]
    count = 0
    for item in readings:
        count += int(item[i])
    dom_num = 1 if count >= len(readings) / 2 else 0  # prefers 1 in case of ties by default
    pref_num = dom_num if not co2 else 1 - dom_num  # opposite bit if the CO2 reading is sought
    readings = [x for x in readings if int(x[i]) == pref_num]
    return filter_readings(readings, i + 1, co2)
o2_rating = filter_readings(f)  # oxygen generator rating
co2_rating = filter_readings(f, co2=True)  # CO2 scrubber rating
answer = life_support_rating(int(o2_rating, 2), int(co2_rating, 2))
print(answer)
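# Self-check against the published AoC 2021 day 3 sample report:
sample = ['00100', '11110', '10110', '10111', '10101', '01111',
          '00111', '11100', '10000', '11001', '00010', '01010']
assert int(filter_readings(sample), 2) == 23             # oxygen generator rating
assert int(filter_readings(sample, co2=True), 2) == 10   # CO2 scrubber rating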
|
nilq/baby-python
|
python
|
import pytest
import numpy
from thinc.layers import Embed
from ...layers.uniqued import uniqued
from numpy.testing import assert_allclose
from hypothesis import given
from hypothesis.strategies import integers, lists, composite
ROWS = 10
# This test uses a newer hypothesis feature than the skanky flatmap-style
# I used previously. This is much nicer, although it still takes some getting
# used to. The key feature is this composite decorator. It injects a function,
# 'draw'.
@composite
def lists_of_integers(draw, columns=2, lo=0, hi=ROWS - 1):
# We call draw to get example values, which we can manipulate.
# Here we get a list of integers, where each member of the list
# should be between a min and max value.
int_list = draw(lists(integers(min_value=lo, max_value=hi)))
# Now we can use this int list to make an array, and it'll be the arrays
# that our functions receive.
# We trim the list, so we're of length divisible by columns.
int_list = int_list[len(int_list) % columns :]
# And make the array and reshape it.
array = numpy.array(int_list, dtype="uint64")
return array.reshape((-1, columns))
@pytest.fixture
def model(nO=128):
return Embed(nO, ROWS, column=0).initialize()
def test_uniqued_calls_init():
calls = []
embed = Embed(5, 5, column=0)
embed.init = lambda *args, **kwargs: calls.append(True)
embed.initialize()
assert calls == [True]
uembed = uniqued(embed)
uembed.initialize()
assert calls == [True, True]
@given(X=lists_of_integers(lo=0, hi=ROWS - 1))
def test_uniqued_doesnt_change_result(model, X):
umodel = uniqued(model, column=model.attrs["column"]).initialize()
Y, bp_Y = model(X, is_train=True)
Yu, bp_Yu = umodel(X, is_train=True)
assert_allclose(Y, Yu)
dX = bp_Y(Y)
dXu = bp_Yu(Yu)
assert_allclose(dX, dXu)
if X.size:
pass
# TODO: This test is a problem, because we exceed the embedding table.
# Fix it with a better cap.
# Check that different inputs do give different results
# Z, bp_Z = model(X + 1, is_train=True)
# with pytest.raises(AssertionError):
# assert_allclose(Y, Z)
|
nilq/baby-python
|
python
|
from datasets.SOT.dataset import SingleObjectTrackingDatasetSequence_MemoryMapped
from ._common import _check_bounding_box_validity
class SOTSequenceSequentialSampler:
def __init__(self, sequence: SingleObjectTrackingDatasetSequence_MemoryMapped):
assert len(sequence) > 0
self.sequence = sequence
self.index = 0
def get_name(self):
return self.sequence.get_name()
def move_next(self):
if self.index + 1 >= len(self.sequence):
return False
self.index += 1
return True
def current(self):
frame = self.sequence[self.index]
assert any(v > 0 for v in frame.get_image_size())
image_path = frame.get_image_path()
bounding_box = frame.get_bounding_box()
bounding_box_validity_flag = frame.get_bounding_box_validity_flag()
bounding_box = _check_bounding_box_validity(bounding_box, bounding_box_validity_flag, frame.get_image_size())
return image_path, bounding_box
def reset(self):
self.index = 0
def length(self):
return len(self.sequence)
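# Hypothetical iteration sketch over one sequence (dataset construction elided):
#   sampler = SOTSequenceSequentialSampler(sequence)
#   while True:
#       image_path, bounding_box = sampler.current()
#       ...  # consume the frame
#       if not sampler.move_next():
#           break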
|
nilq/baby-python
|
python
|
from django.db import models
from django.urls import reverse
class ImportantDate(models.Model):
date = models.DateField()
desc = models.CharField(max_length=100)
def __str__(self):
return "{} - {}".format(self.date, self.desc)
def get_absolute_url(self):
return reverse('formschapter:impdate_detail', args=[str(self.pk)])
class Meta:
ordering = ('-date',)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from purchasing.models import PurchasedOrder
class PurchasedOrderAdmin(admin.ModelAdmin):
readonly_fields = ['expiration_date']
admin.site.register(PurchasedOrder, PurchasedOrderAdmin)
|
nilq/baby-python
|
python
|
from typing import Dict, Optional
from sqlalchemy import column, literal_column, select
from panoramic.cli.husky.core.sql_alchemy_util import (
quote_identifier,
safe_identifier,
sort_columns,
)
from panoramic.cli.husky.service.blending.blending_taxon_manager import (
BlendingTaxonManager,
)
from panoramic.cli.husky.service.blending.dataframe_joins import blend_dataframes
from panoramic.cli.husky.service.blending.dimension_phase_builder import (
DimensionPhaseBuilder,
)
from panoramic.cli.husky.service.blending.features.override_mapping.manager import (
OverrideMappingManager,
)
from panoramic.cli.husky.service.blending.metric_phase_builder import MetricPhaseBuilder
from panoramic.cli.husky.service.blending.tel_planner import TelPlanner
from panoramic.cli.husky.service.context import HuskyQueryContext
from panoramic.cli.husky.service.filter_builder.enums import (
FilterClauseType,
SimpleFilterOperator,
)
from panoramic.cli.husky.service.filter_builder.filter_clauses import (
TaxonValueFilterClause,
)
from panoramic.cli.husky.service.query_builder import QueryBuilder
from panoramic.cli.husky.service.select_builder.exceptions import (
UnsupportedAggregationType,
)
from panoramic.cli.husky.service.types.api_data_request_types import (
ApiDataRequest,
BlendingDataRequest,
ComparisonConfig,
InternalDataRequest,
)
from panoramic.cli.husky.service.types.api_scope_types import ComparisonScopeType
from panoramic.cli.husky.service.types.types import (
BlendingQueryInfo,
Dataframe,
DataframeColumn,
QueryInfo,
)
from panoramic.cli.husky.service.utils.taxon_slug_expression import TaxonExpressionStr
class ComparisonRequestBuilder:
"""
Helper class for building Husky comparison subrequests.
"""
@classmethod
def _build_comparison_subrequest(
cls, original_subrequest: ApiDataRequest, comparison: ComparisonConfig, taxon_manager: BlendingTaxonManager
) -> InternalDataRequest:
subrequest: InternalDataRequest = original_subrequest.to_internal_model()
# Reset all filters. Getting comparison can only be filtered by project filters or company id.
subrequest.preaggregation_filters = None
# Reset limit and order by. Does not make sense for comparison.
subrequest.limit = None
subrequest.order_by = []
# Get taxon slugs we need for comparison subrequest.
subrequest.taxons = sorted(list(taxon_manager.get_comparison_subrequest_raw_taxons(subrequest, comparison)))
if comparison.scope == ComparisonScopeType.company:
# If company scope, we add a filter on the company id and remove project filters and accounts
# Eventually, we could fetch list of all accounts under a company and filter on that, since that will
# probably be faster.
subrequest.scope.preaggregation_filters = TaxonValueFilterClause(
{
'type': FilterClauseType.TAXON_VALUE.value,
'taxon': 'company_id',
'operator': SimpleFilterOperator.EQ.value,
'value': subrequest.scope.company_id,
}
)
return subrequest
@classmethod
def _build_comparison_blend_query(
cls,
ctx: HuskyQueryContext,
config_arg: BlendingDataRequest,
taxon_manager: BlendingTaxonManager,
query_info: BlendingQueryInfo,
) -> Optional[Dataframe]:
"""
Builds comparison query for each subrequest and then blends them all into one comparison dataframe.
"""
dataframes = []
        config = BlendingDataRequest(config_arg.to_native())  # Clone, because we will be modifying subqueries
        assert config.comparison, 'Comparison must be defined when trying to build comparison query.'
comparison: ComparisonConfig = config.comparison
for _subrequest in config.data_subrequests:
subrequest = cls._build_comparison_subrequest(_subrequest, comparison, taxon_manager)
data_source = subrequest.properties.data_source
# if no comparison taxons were found for this subrequest, skip creating comparison query for it as well
if len(subrequest.taxons) == 0:
continue
bm_sub_query_info = QueryInfo.create(subrequest)
query_info.comparison_subrequests_info.append(bm_sub_query_info)
# Build comparison dataframe and add it to a list.
# TODO pass down TelPlan for comparisons
            # ComparisonRequestBuilder might have added filters (typically for company id or project id).
            # We create new filter templates for this comparison subrequest.
filter_templates = TelPlanner.get_preaggregation_filter_templates(
ctx,
[subrequest.preaggregation_filters, subrequest.scope.preaggregation_filters],
taxon_manager.taxon_map,
data_source,
)
dataframes.append(
QueryBuilder.build_query(
ctx,
subrequest,
bm_sub_query_info,
taxon_manager.used_taxons,
dimension_templates=taxon_manager.plan.comparison_data_source_formula_templates[data_source],
filter_templates=filter_templates,
)
)
# if no comparison subrequests were created, there is no need to blend data frames
if len(dataframes) == 0:
return None
# Blend all comparison dataframes into one
# TODO pass down TelPlan for comparisons
data_source_formula_templates = taxon_manager.plan.comparison_data_source_formula_templates
dataframe = blend_dataframes(ctx, dataframes, data_source_formula_templates)
# Prefix all comparison metric columns with 'comparison@' and create comparison taxon for it.
query = dataframe.query
final_columns = []
aliased_taxon_by_slug: Dict[TaxonExpressionStr, DataframeColumn] = dict()
for slug, df_column in dataframe.slug_to_column.items():
            # Alias metrics with the comparison@ prefix, and select dimensions as-is.
if df_column.taxon.is_dimension:
new_taxon = df_column.taxon.copy(deep=True)
new_slug = TaxonExpressionStr(f'{slug}')
else:
new_slug, new_taxon = BlendingTaxonManager.create_comparison_taxon(df_column.taxon)
final_columns.append(query.c[safe_identifier(slug)].label(new_taxon.slug_safe_sql_identifier))
aliased_taxon_by_slug[new_slug] = DataframeColumn(new_slug, new_taxon, df_column.quantity_type)
for pre_formulas in data_source_formula_templates.values():
# and also select the dim columns from dim templates.
for pre_formula in pre_formulas:
final_columns.append(literal_column(quote_identifier(pre_formula.label, ctx.dialect)))
renamed_cols_query = select(sort_columns(final_columns)).select_from(dataframe.query)
return Dataframe(renamed_cols_query, aliased_taxon_by_slug, dataframe.used_model_names)
@classmethod
def build_comparison_query(
cls,
ctx: HuskyQueryContext,
config_arg: BlendingDataRequest,
taxon_manager: BlendingTaxonManager,
override_mapping_manager: OverrideMappingManager,
query_info: BlendingQueryInfo,
) -> Optional[Dataframe]:
comp_df = cls._build_comparison_blend_query(ctx, config_arg, taxon_manager, query_info)
if comp_df is None or len(taxon_manager.plan.comparison_dimension_formulas) == 0:
# There are no comparison dim formulas, means the rows are already grouped correctly
return comp_df
comp_df = DimensionPhaseBuilder.calculate_dataframe(
taxon_manager.plan.comparison_dimension_formulas,
override_mapping_manager.comparison_override_mapping_tel_data,
override_mapping_manager.cte_map,
comp_df,
)
        # After the dimension join, there could have been a merge (coalesce). We need to group by the
        # merged column once more, to keep a single row per dimension; otherwise we will get row fanout
        # when left joining with the data dataframe.
group_by_cols = []
selectors = []
for dim_formula in taxon_manager.plan.comparison_dimension_formulas:
group_by_cols.append(column(dim_formula.label))
for df_column in comp_df.slug_to_column.values():
taxon = df_column.taxon
col = column(df_column.name)
if taxon.is_dimension:
group_by_cols.append(col)
else:
agg_type = taxon.tel_metadata_aggregation_type
agg_fn = None
if agg_type:
agg_fn = MetricPhaseBuilder.AGGREGATION_FUNCTIONS_MAP.get(agg_type)
if agg_fn is None:
raise UnsupportedAggregationType(taxon)
col = agg_fn(col).label(df_column.name)
selectors.append(col)
selectors.extend(group_by_cols)
query = select(sort_columns(selectors)).select_from(comp_df.query).group_by(*group_by_cols)
return Dataframe(query, comp_df.slug_to_column, comp_df.used_model_names)
|
nilq/baby-python
|
python
|
import theano
import theano.tensor as T
import treeano
from treeano.sandbox.nodes import bttf_mean
fX = theano.config.floatX
@treeano.register_node("bachelor_normalization")
class BachelorNormalizationNode(treeano.NodeImpl):
hyperparameter_names = ("bttf_alpha",
"alpha",
"epsilon",
"normalization_axes",
"update_averages",
"deterministic")
def compute_output(self, network, in_vw):
alpha = network.find_hyperparameter(["bttf_alpha", "alpha"], 0.95)
epsilon = network.find_hyperparameter(["epsilon"], 1e-4)
normalization_axes = network.find_hyperparameter(["normalization_axes"],
(1,))
# HACK: using "deterministic" to mean test time
deterministic = network.find_hyperparameter(["deterministic"], False)
update_averages = network.find_hyperparameter(["update_averages"],
not deterministic)
alpha = treeano.utils.as_fX(alpha)
if update_averages:
backprop_to_the_future_mean = bttf_mean.backprop_to_the_future_mean_with_updates
else:
backprop_to_the_future_mean = bttf_mean.backprop_to_the_future_mean_no_updates
state_shape = tuple([in_vw.shape[axis] for axis in normalization_axes])
state_pattern = ["x"] * in_vw.ndim
for idx, axis in enumerate(normalization_axes):
state_pattern[axis] = idx
def make_state(name, tags, default_inits=None):
if default_inits is None:
default_inits = []
return network.create_vw(
name=name,
is_shared=True,
shape=state_shape,
tags=tags,
default_inits=default_inits,
).variable
gamma = make_state("gamma", {"parameter", "weight"})
beta = make_state("beta", {"parameter", "bias"})
# mean of input
mean = make_state("mean", {"state"})
# gradient of mean of input
mean_grad = make_state("mean_grad", {"state"})
# mean of input^2
squared_mean = make_state("squared_mean", {"state"},
# initializing to 1, so that std = 1
default_inits=[treeano.inits.ConstantInit(1.)])
# gradient of mean of input^2
squared_mean_grad = make_state("squared_mean_grad", {"state"})
in_var = in_vw.variable
mean_axes = tuple([axis for axis in range(in_var.ndim)
if axis not in normalization_axes])
batch_mean = in_var.mean(axis=mean_axes)
squared_batch_mean = T.sqr(in_var).mean(axis=mean_axes)
# expectation of input (x)
E_x = backprop_to_the_future_mean(batch_mean,
mean,
mean_grad,
alpha)
# TODO try mixing batch mean with E_x
# expectation of input squared
E_x_squared = backprop_to_the_future_mean(squared_batch_mean,
squared_mean,
squared_mean_grad,
alpha)
# HACK mixing batch and rolling means
# E_x = 0.5 * E_x + 0.5 * batch_mean
# E_x_squared = 0.5 * E_x_squared + 0.5 * squared_batch_mean
if 1:
mu = E_x
sigma = T.sqrt(E_x_squared - T.sqr(E_x) + epsilon)
mu = mu.dimshuffle(state_pattern)
sigma = sigma.dimshuffle(state_pattern)
gamma = gamma.dimshuffle(state_pattern)
beta = beta.dimshuffle(state_pattern)
else:
# HACK mixing current value
E_x = E_x.dimshuffle(state_pattern)
E_x_squared = E_x_squared.dimshuffle(state_pattern)
gamma = gamma.dimshuffle(state_pattern)
beta = beta.dimshuffle(state_pattern)
E_x = 0.1 * in_var + 0.9 * E_x
E_x_squared = 0.1 * T.sqr(in_var) + 0.9 * E_x_squared
mu = E_x
sigma = T.sqrt(E_x_squared - T.sqr(E_x) + epsilon)
if 0:
# HACK don't backprop through sigma
sigma = T.consider_constant(sigma)
if 1:
# HACK using batch mean
mu = batch_mean
mu = mu.dimshuffle(state_pattern)
if 0:
# HACK using batch variance
sigma = T.sqrt(in_var.var(axis=mean_axes) + epsilon)
sigma = sigma.dimshuffle(state_pattern)
out_var = (in_var - mu) * (T.exp(gamma) / sigma) + beta
network.create_vw(
name="default",
variable=out_var,
shape=in_vw.shape,
tags={"output"},
)
if 1:
# HACK monitoring state
network.create_vw(
name="mu_mean",
variable=mu.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="sigma_mean",
variable=sigma.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="gamma_mean",
variable=gamma.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="beta_mean",
variable=beta.mean(),
shape=(),
tags={"monitor"},
)
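# A plain-numpy sketch of the normalization arithmetic above (the function and
# argument names are illustrative, not part of treeano's API; treeano handles
# the running moments and symbolic broadcasting itself):
import numpy as np
def _bachelor_normalize_sketch(x, E_x, E_x_sq, gamma, beta, epsilon=1e-4):
    # x: (batch, channels, ...); moments and parameters are per-channel vectors.
    shape = (1, -1) + (1,) * (x.ndim - 2)
    mu = E_x.reshape(shape)
    sigma = np.sqrt(E_x_sq.reshape(shape) - mu ** 2 + epsilon)
    return (x - mu) * (np.exp(gamma.reshape(shape)) / sigma) + beta.reshape(shape)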
@treeano.register_node("bachelor_normalization2")
class BachelorNormalization2Node(treeano.NodeImpl):
hyperparameter_names = ("bttf_alpha",
"alpha",
"epsilon",
"normalization_axes",
"update_averages",
"deterministic")
def compute_output(self, network, in_vw):
alpha = network.find_hyperparameter(["bttf_alpha", "alpha"], 0.95)
epsilon = network.find_hyperparameter(["epsilon"], 1e-4)
normalization_axes = network.find_hyperparameter(["normalization_axes"],
(1,))
# HACK: using "deterministic" to mean test time
deterministic = network.find_hyperparameter(["deterministic"], False)
update_averages = network.find_hyperparameter(["update_averages"],
not deterministic)
alpha = treeano.utils.as_fX(alpha)
if update_averages:
backprop_to_the_future_mean = bttf_mean.backprop_to_the_future_mean_with_updates
else:
backprop_to_the_future_mean = bttf_mean.backprop_to_the_future_mean_no_updates
state_shape = tuple([in_vw.shape[axis] for axis in normalization_axes])
state_pattern = ["x"] * in_vw.ndim
for idx, axis in enumerate(normalization_axes):
state_pattern[axis] = idx
def make_state(name, tags, default_inits=None):
if default_inits is None:
default_inits = []
return network.create_vw(
name=name,
is_shared=True,
shape=state_shape,
tags=tags,
default_inits=default_inits,
).variable
gamma = make_state("gamma", {"parameter", "weight"})
beta = make_state("beta", {"parameter", "bias"})
# mean of input
mean = make_state("mean", {"state"})
# gradient of mean of input
mean_grad = make_state("mean_grad", {"state"})
var_state_mean = make_state("var_state_mean", {"state"},
# initializing to 1, so that std = 1
default_inits=[treeano.inits.ConstantInit(1.)])
var_state_mean_grad = make_state("var_state_mean_grad", {"state"})
in_var = in_vw.variable
mean_axes = tuple([axis for axis in range(in_var.ndim)
if axis not in normalization_axes])
batch_mean = in_var.mean(axis=mean_axes)
# expectation of input (x)
E_x = backprop_to_the_future_mean(batch_mean,
mean,
mean_grad,
alpha)
# TODO try mixing batch mean with E_x
if 1:
batch_var_state = 1. / T.sqrt(in_var.var(axis=mean_axes) + epsilon)
var_state = backprop_to_the_future_mean(batch_var_state,
var_state_mean,
var_state_mean_grad,
alpha)
inv_std = var_state
# HACK mixing batch and rolling means
# E_x = 0.5 * E_x + 0.5 * batch_mean
# E_x_squared = 0.5 * E_x_squared + 0.5 * squared_batch_mean
mu = E_x
mu = mu.dimshuffle(state_pattern)
inv_std = inv_std.dimshuffle(state_pattern)
gamma = gamma.dimshuffle(state_pattern)
beta = beta.dimshuffle(state_pattern)
out_var = (in_var - mu) * (T.exp(gamma) * inv_std) + beta
network.create_vw(
name="default",
variable=out_var,
shape=in_vw.shape,
tags={"output"},
)
if 1:
# HACK monitoring state
network.create_vw(
name="mu_mean",
variable=mu.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="var_state_effective_mean",
variable=var_state.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="gamma_mean",
variable=gamma.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="beta_mean",
variable=beta.mean(),
shape=(),
tags={"monitor"},
)
|
nilq/baby-python
|
python
|
print("My name is John")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2013 Simonas Kazlauskas
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation
from os import path, makedirs
from hashlib import sha1
from gi.repository import GObject, GLib
from quodlibet.util.path import escape_filename, xdg_get_cache_home
class CoverSourcePlugin(GObject.Object):
"""
Plugins that given a song should provide a cover art.
The plugin should override following methods and properties:
@staticmethod priority()
@property cover_path(self)
fetch_cover(self)
Refer to default function implementation's documentation in order to
understand their role.
"""
__gsignals__ = {
'fetch-success': (GObject.SignalFlags.RUN_LAST, None, (object,)),
'fetch-failure': (GObject.SignalFlags.RUN_LAST, None, (object,)),
'search-complete': (GObject.SignalFlags.RUN_LAST, None, (object,))
}
def __init__(self, song, cancellable=None):
self.song = song
self.cancellable = cancellable
super(CoverSourcePlugin, self).__init__()
@staticmethod
def priority():
"""
Should return float in range [0.0, 1.0] suggesting priority of the
cover source. Whether value returned by this method is respected or
not is not guaranteed.
As a rule of thumb, source's reliability and quality should be
compared with other sources and given score between two sources that
come close in quality and reliability.
There's a table of value ranges sources should respect:
* (0.9, 1.0] - user's preferred methods (set by configuration; example:
preferring embed cover art);
* (0.7, 0.9] - local covers;
* (0.4, 0.7] - accurate (> 99%) source of high quality (>= 200x200)
covers;
* (0.2, 0.4] - accurate (> 99%) source of low quality (< 200x200)
covers;
* (0.0, 0.2] - not very accurate (<= 99%) source of covers, even if
they're high quality;
* 0.0 - reserved for the fallback cover source.
"""
return 0.0
@property
def cover_directory(self):
return cover_dir
@property
def cover_filename(self):
"""
Return the filename of the cover which hopefully should not change
between songs in the same album and still be unique enough to
uniquely identify most (or even better – all) of the albums.
The string returned must not contain any characters illegal in
        most common filesystems. These include /, ?, <, >, \, :, *, |, " and ^.
Staying in the bounds of ASCII is highly encouraged.
Perchance the song lacks data to generate the filename of cover for
this provider, None shall be returned.
"""
key = sha1()
# Should be fine as long as the same interpreter is used.
key.update(repr(self.song.album_key))
return escape_filename(key.hexdigest())
@property
def cover_path(self):
"""
Should return the path where cover is expected to be cached. The
location should be based in common cache location available in variable
`cover_dir` of this module.
It doesn't necessarily mean the cover is actually at the returned
location neither that it will be stored there at any later time.
"""
return path.join(self.cover_directory, self.cover_filename)
@property
def cover(self):
"""
Method to get cover file from cover provider for a specific song.
Should always return a file-like object opened as read-only if any
and None otherwise.
"""
        cp = self.cover_path
        try:
            return open(cp, 'rb') if cp and path.isfile(cp) else None
        except IOError:
            print_w('Failed reading album art "{0}"'.format(cp))
def search(self):
"""
Start searching for cover art from a source.
After search is completed the `search-complete` event must be emitted
regardless of search outcome with a list of dictionaries containing
`album`, `artist` and `cover` keys as an argument. If search was
unsuccessful, empty list should be returned.
By convention better quality and more accurate covers are expected to
appear first in the list.
"""
self.emit('search-complete', [])
def fetch_cover(self):
"""
Method to ask source fetch the cover from its source into location at
`self.cover_path`.
If this method succeeds in putting the image from its source into
`self.cover_path`, `fetch-success` signal shall be emitted and
`fetch-failure` otherwise.
Return value of this function doesn't have any meaning whatsoever.
"""
self.fail('This source is incapable of fetching covers')
def fail(self, message):
"""
Shorthand method for emitting `fetch-failure` signals.
Most common use pattern would be:
return self.fail("Failure message")
"""
self.emit('fetch-failure', message)
cover_dir = path.join(xdg_get_cache_home(), 'quodlibet', 'covers')
try:
makedirs(cover_dir)
except OSError:
pass
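# A minimal subclass sketch showing the expected signal protocol (the class
# and its behaviour are illustrative, not a real Quod Libet cover source):
class FallbackCoverSource(CoverSourcePlugin):
    """Illustrative fallback source that only serves an already-cached file."""
    @staticmethod
    def priority():
        return 0.0  # reserved fallback score, per the table above
    def fetch_cover(self):
        # A real source would download into self.cover_path before emitting.
        cover = self.cover
        if cover is not None:
            self.emit('fetch-success', cover)
        else:
            self.fail('no cached cover to serve')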
|
nilq/baby-python
|
python
|
# Edit by Tianyu Ma
# coding: utf-8
"""
=====
Third step: merge csv files
=====
"""
|
nilq/baby-python
|
python
|
import json
import string
import csv
def count_lines_and_words(fname):
    fhand = open(fname, 'r')
    text = fhand.read()
    lines = text.split('\n')
    line_count = len(lines)
    # split() never yields an empty token, so every token counts as a word
    word_count = sum(len(line.split()) for line in lines)
    print(f"File name: {fname}")
    print(f"Line Count: {line_count}")
    print(f"Word Count: {word_count}")
for fname in ['./data/obama_speech.txt', './data/michelle_obama_speech.txt',
              './data/donald_speech.txt', './data/melina_trump_speech.txt']:
    count_lines_and_words(fname)
def most_spoken_languages(fname, n):
fhand = open(fname, 'r')
data = fhand.read()
countries = json.loads(data)
count_dic = {}
output = []
for country in countries:
languages = country["languages"]
for language in languages:
if language not in count_dic:
count_dic[language] = 1
else:
count_dic[language] += 1
for k, v in count_dic.items():
tup = (v, k)
output.append(tup)
    output.sort(key=lambda x: x[0], reverse=True)
    return output[:n]
print(most_spoken_languages('./data/countries_data.json', 10))
print(most_spoken_languages('./data/countries_data.json', 3))
def most_populated_countries(fname, n):
fhand = open(fname, 'r')
data = fhand.read()
countries = json.loads(data)
output = []
for country in countries:
new_dic = {}
new_dic['country'] = country['name']
new_dic['population'] = country['population']
output.append(new_dic)
    output.sort(key=lambda x: x['population'], reverse=True)
    return output[:n]
print(most_populated_countries('./data/countries_data.json', 10))
print(most_populated_countries('./data/countries_data.json', 3))
fname = './data/email_exchanges_big.txt'
fhand = open(fname, 'r')
data = fhand.read()
lst = data.split('\n')
count = 0
for line in lst:
if line.startswith('From'):
count += 1
print(f"There are {count} incoming email addresses")
def find_most_common_words(fname, n):
fhand = open(fname, 'r')
data = fhand.read()
lines = data.split('\n')
word_dic = {}
output = []
for line in lines:
words = line.split()
for word in words:
if word == ' ':
continue
if word in word_dic:
word_dic[word] += 1
else:
word_dic[word] = 1
for k, v in word_dic.items():
tup = (v, k)
output.append(tup)
    output.sort(key=lambda x: x[0], reverse=True)
    return output[:n]
print(
f"10 most frequent words in obama_speech.txt are: \n{find_most_common_words('./data/obama_speech.txt', 10)} ")
print(
f"10 most frequent words in michelle_obama_speech.txt are: \n{find_most_common_words('./data/michelle_obama_speech.txt', 10)} ")
print(
f"10 most frequent words in donald_speech.txt are: \n{find_most_common_words('./data/donald_speech.txt', 10)} ")
print(
f"10 most frequent words in melina_trump_speech.txt are: \n{find_most_common_words('./data/melina_trump_speech.txt', 10)} ")
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up',
'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"]
def clean_text(fname):
fhand = open(fname, 'r')
data = fhand.read()
lines = data.split('\n')
word_lst = []
for line in lines:
words = line.split()
for word in words:
if word in stop_words or word in string.punctuation:
continue
else:
word_lst.append(word)
return word_lst
def check_text_similarity(lst1, lst2):
output = []
for word in lst1:
if word in lst2:
output.append(word)
print(f"Total number of similar words are {len(output)}")
print(f"Similar words are: \n{output}")
michelle_lst = clean_text('./data/michelle_obama_speech.txt')
melina_lst = clean_text('./data/melina_trump_speech.txt')
check_text_similarity(michelle_lst, melina_lst)
print(
f"10 most frequent words in romeo_and_juliet.txt are: \n{find_most_common_words('./data/romeo_and_juliet.txt', 10)} ")
fname = './data/hacker_news.csv'
fhand = open(fname, 'r')
lines = csv.reader(fhand, delimiter=',')
python_count = 0
js_count = 0
java_count = 0
for line in lines:
for item in line:
words = item.split()
if 'python' in words or 'Python' in words:
python_count += 1
if 'javascript' in words or 'Javascript' in words or 'JavaScript' in words:
js_count += 1
if 'java' in words or 'Java' in words:
java_count += 1
print(f"Number of Lines having python are {python_count}")
print(f"Number of Lines having javascript are {js_count}")
print(f"Number of Lines having java are {java_count}")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-23 21:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Words',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('color', models.CharField(blank=True, max_length=10)),
('words', models.TextField()),
('countwords', models.IntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
]
|
nilq/baby-python
|
python
|
# A for loop is an efficient way to traverse a list, but the list should not be
# modified inside the loop, or Python will have trouble tracking its elements.
# To modify a list while traversing it, use a while loop instead.
# Moving items between lists
unconfirmed_users = ['alice', 'brian', 'candace']  # users awaiting verification
confirmed_users = []  # verified users
# Verify users until the unconfirmed list is empty
while unconfirmed_users:  # a non-empty list evaluates to True, an empty one to False
    current_user = unconfirmed_users.pop()  # take the next user to verify
    print('Verifying user: ' + current_user.title())
    confirmed_users.append(current_user)  # move the verified user to the confirmed list
print('\nThe following users have been verified: ')
for user in confirmed_users:
    print('\t' + user)
print('\nUsers still awaiting verification: ')
print(unconfirmed_users)
# Why a for loop cannot replace the while loop above
print('\n')
print('Attempting the same operation with a for loop: ')
unconfirmed_users = ['alice', 'brian', 'candace']  # users awaiting verification
confirmed_users = []  # verified users
for unconfirmed_user in unconfirmed_users:
    # current_user = unconfirmed_users.pop()  # pop() must not be used inside a for loop; it breaks the traversal
    print('Verifying user: ' + unconfirmed_user.title())
    confirmed_users.append(unconfirmed_user)  # move the verified user to the confirmed list
    # unconfirmed_users.remove(unconfirmed_user)  # remove() must not be used inside a for loop either
print('\nThe following users have been verified: ')
for user in confirmed_users:
    print('\t' + user)
print('\nUsers still awaiting verification: ')
print(unconfirmed_users)
# Removing all list elements with a particular value
# A while loop keeps checking whether the value is still in the list and removes it
print('\n')
pets = ['dog', 'cat', 'dog', 'goldfish', 'cat', 'rabbit', 'cat']
print('Initial data: ')
print(pets)
cat_name = 'cat'
while cat_name in pets:
    pets.remove(cat_name)
print('After removing all pets named ' + cat_name + ', the pet list is: ')
print(pets)
# Filling a dictionary with user input
responses = {}
polling_active = True
while polling_active:
    name = input("\nWhat is your name? ")  # get the respondent's name
    response = input("Which mountain would you like to climb someday? ")  # get the mountain they want to climb
    responses[name] = response  # store the answer in the dictionary
    repeat = input('Would you like to let another person respond? (yes/ no) ')
    if repeat == 'no':  # check whether the poll is over
        polling_active = False
print('\n---Poll Result---')  # print the poll results
for name, response in responses.items():
    print(name + ' would like to climb ' + response + '.')
|
nilq/baby-python
|
python
|
from lib import d, t
def main():
    r = t.t()
    r = r + ' ' + d.d()
    print(f'{r} :)')
    return 1
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Script to help in managing Usenet hierarchies. It generates control
# articles and handles PGP keys (generation and management).
#
# signcontrol.py -- v. 1.4.0 -- 2014/10/26
#
# Written and maintained by Julien ÉLIE.
#
# This script is distributed under the MIT License. Please see the LICENSE
# section below for more information.
#
# Feel free to use it. I would be glad to know whether you find it useful for
# your hierarchy. Any bug reports, bug fixes, and improvements are very much
# welcome.
#
# Contact:
# <http://www.trigofacile.com/maths/contact/index.htm>
# Issue tracker:
# <https://github.com/Julien-Elie/usenet-signcontrol/issues>
#
# Upstream web site:
# <http://www.trigofacile.com/divers/usenet/clefs/signcontrol.htm>
# Github repository:
# <https://github.com/Julien-Elie/usenet-signcontrol>
# Please also read:
# <http://www.eyrie.org/~eagle/faqs/usenet-hier.html>
#
# History:
#
# v. 1.4.0: 2014/10/26 -- add the --no-tty flag to gpg when --passphrase is
# also used. Otherwise, an error occurs when running signcontrol
# from cron. Thanks to Matija Nalis for the bug report.
# - Add the PGP2_COMPATIBILITY parameter to generate control
# articles compatible with MIT PGP 2.6.2 (or equivalent).
# - When managing PGP keys, their full uid is now expected, instead
# of only a subpart.
# - Listing secret keys now also shows their fingerprint.
# - Improve documentation, along with the creation of a Git
# repository on Github.
#
# v. 1.3.3: 2011/07/11 -- automatically generate an Injection-Date: header
# field, and sign it. It will prevent control articles from being
# maliciously reinjected into Usenet, and replayed by news servers
# compliant with RFC 5537 (that is to say without cutoff on the
# Date: header field when an Injection-Date: header field exists).
#
# v. 1.3.2: 2009/12/23 -- use local time instead of UTC (thanks to Adam
# H. Kerman for the suggestion).
# - Add flags to gpg when called: --emit-version, --no-comments,
# --no-escape-from-lines and --no-throw-keyids. Otherwise, the
# signature may not be valid (thanks to Robert Spier for the
# bug report).
#
# v. 1.3.1: 2009/12/20 -- compliance with RFC 5322 (Internet Message Format):
# use "-0000" instead of "+0000" to indicate a time zone at Universal
# Time ("-0000" means that the time is generated on a system that
# may be in a local time zone other than Universal Time); also remove
# the Sender: header field.
# - When a line in the body of a control article started with
# "Sender", a bug in signcontrol prevented the article from being
# properly signed.
#
# v. 1.3.0: 2009/07/28 -- remove the charset for a multipart/mixed block
# in newgroup articles, change the default serial number from 0 to 1
# in checkgroups articles, allow the user to interactively modify
# his message (thanks to Matija Nalis for the idea).
#
# v. 1.2.1: 2008/12/07 -- ask for confirmation when "(Moderated)" is misplaced
# in a newsgroup description.
#
# v. 1.2.0: 2008/11/17 -- support for USEPRO: checkgroups scope, checkgroups
# serial numbers and accurate Content-Type: header fields.
#
# v. 1.1.0: 2007/05/09 -- fix the newgroups line when creating a newsgroup,
# use a separate config file, possibility to import signcontrol from
# other scripts and use its functions.
#
# v. 1.0.0: 2007/05/01 -- initial release.
# THERE IS NOTHING USEFUL TO PARAMETER IN THIS FILE.
# The file "signcontrol.conf" contains all your parameters
# and it will be parsed.
CONFIGURATION_FILE = 'signcontrol.conf'
import os
import re
import sys, traceback
import time
import shlex
# Current time.
TIME = time.localtime()
def treat_exceptions(type, value, stacktrace):
""" Pretty print stack traces of this script, in case an error occurs.
Arguments: type (the type of the exception)
value (the value of the exception)
stacktrace (the traceback of the exception)
No return value (the script exits with status 2)
"""
print "-----------------------------------------------------------"
print "\n".join(traceback.format_exception(type, value, stacktrace))
print "-----------------------------------------------------------"
raw_input('An error has just occurred.')
sys.exit(2)
sys.excepthook = treat_exceptions
def print_error(error):
""" Pretty print error messages.
Argument: error (the error to print)
No return value
"""
print
print '--> ' + error + ' <--'
print
def pretty_time(localtime):
""" Return the Date: header field.
Argument: localtime (a time value, representing local time)
Return value: a string suitable to be used in a Date: header field
"""
# As "%z" does not work on every platform with strftime(), we compute
# the time zone offset.
# You might want to use UTC with either "+0000" or "-0000", also changing
# time.localtime() to time.gmtime() for the definition of TIME above.
if localtime.tm_isdst > 0 and time.daylight:
offsetMinutes = - int(time.altzone / 60)
else:
offsetMinutes = - int(time.timezone / 60)
offset = "%+03d%02d" % (offsetMinutes / 60.0, offsetMinutes % 60)
return time.strftime('%a, %d %b %Y %H:%M:%S ' + offset, localtime)
def serial_time(localtime):
""" Return a checkgroups serial number.
Argument: localtime (a time value, representing local time)
Return value: a string suitable to be used as a serial number
"""
# Note that there is only one serial per day.
return time.strftime('%Y%m%d', localtime)
def epoch_time(localtime):
""" Return the number of seconds since epoch.
Argument: localtime (a time value, representing local time)
Return value: the number of seconds since epoch, as a string
"""
return str(int(time.mktime(localtime)))
def read_configuration(file):
""" Parse the configuration file.
Argument: file (path to the signcontrol.conf configuration file)
Return value: a dictionary {parameter: value} representing
the contents of the configuration file
"""
TOKENS = ['PROGRAM_GPG', 'PGP2_COMPATIBILITY', 'ID', 'MAIL', 'HOST',
'ADMIN_GROUP', 'NAME',
'CHECKGROUPS_SCOPE', 'URL',
'NEWGROUP_MESSAGE_MODERATED', 'NEWGROUP_MESSAGE_UNMODERATED',
'RMGROUP_MESSAGE', 'PRIVATE_HIERARCHY', 'CHECKGROUPS_FILE',
'ENCODING']
if not os.path.isfile(file):
print 'The configuration file is absent.'
raw_input('Please install it before using this script.')
sys.exit(2)
config_file = shlex.shlex(open(file, 'r'), posix=True)
config = dict()
parameter = None
while True:
token = config_file.get_token()
if not token:
break
if token[0] in '"\'':
token = token[1:-1]
if token in TOKENS:
parameter = token
elif token != '=' and parameter:
if parameter == 'PGP2_COMPATIBILITY':
if token == 'True' or token == 'true':
config[parameter] = [('--pgp2', '-pgp2'), ('', '')]
elif token == 'Only' or token == 'only':
config[parameter] = [('--pgp2', '-pgp2')]
else:
config[parameter] = [('', '')]
elif parameter == 'PRIVATE_HIERARCHY':
if token == 'True' or token == 'true':
config[parameter] = True
else:
config[parameter] = False
else:
config[parameter] = token
parameter = None
for token in TOKENS:
if not config.has_key(token):
print 'You must update the configuration file.'
print 'The parameter ' + token + ' is missing.'
            raw_input('Please download the latest version of the configuration file and configure it before using this script.')
sys.exit(2)
return config
def read_checkgroups(path):
""" Parse a checkgroups file.
Argument: path (path of the checkgroups file)
Return value: a dictionary {newsgroup: description} representing
the contents of the checkgroups
"""
# Usually for the first use of the script.
if not os.path.isfile(path):
print 'No checkgroups file found.'
print 'Creating an empty checkgroups file...'
write_checkgroups(dict(), path)
groups = dict()
for line in file(path):
line2 = line.strip()
while line2.find('\t\t') != -1:
line2 = line2.replace('\t\t', '\t')
try:
group, description = line2.split('\t')
groups[group] = description
except:
print_error('The current checkgroups is badly formed.')
print 'The offending line is:'
print line
print
raw_input('Please correct it before using this script.')
sys.exit(2)
return groups
def write_checkgroups(groups, path):
""" Write the current checkgroups file.
Arguments: groups (a dictionary representing a checkgroups)
path (path of the checkgroups file)
No return value
"""
keys = groups.keys()
keys.sort()
checkgroups_file = file(path, 'wb')
for key in keys:
if len(key) < 8:
checkgroups_file.write(key + '\t\t\t' + groups[key] + '\n')
elif len(key) < 16:
checkgroups_file.write(key + '\t\t' + groups[key] + '\n')
else:
checkgroups_file.write(key + '\t' + groups[key] + '\n')
checkgroups_file.close()
print 'Checkgroups file written.'
print
def choice_menu():
""" Print the initial menu, and waits for the user to make a choice.
Return value: the number representing the user's choice
"""
while True:
print
print 'What do you want to do?'
print '-----------------------'
print '1. Generate a newgroup control article (create or change a newsgroup)'
print '2. Generate an rmgroup control article (remove a newsgroup)'
print '3. Generate a checkgroups control article (list of newsgroups)'
print '4. Manage my PGP keys (generate/import/export/remove/revoke)'
print '5. Quit'
print
try:
choice = int(raw_input('Your choice (1-5): '))
            if choice not in range(1, 6):
raise ValueError()
print
return choice
except:
print_error('Please enter a number between 1 and 5.')
def manage_menu():
""" Print the menu related to the management of PGP keys, and waits
for the user to make a choice.
Return value: the number representing the user's choice
"""
while True:
print
print 'What do you want to do?'
print '-----------------------'
print '1. See the current installed keys'
print '2. Generate a new pair of secret/public keys'
print '3. Export a public key'
print '4. Export a secret key'
print '5. Import a secret key'
print '6. Remove a pair of secret/public keys'
print '7. Revoke a secret key'
print '8. Quit'
print
try:
choice = int(raw_input('Your choice (1-8): '))
            if choice not in range(1, 9):
raise ValueError()
print
return choice
except:
print_error('Please enter a number between 1 and 8.')
def generate_signed_message(config, file_message, group, message_id, type, passphrase=None, flag=''):
""" Generate signed control articles.
Arguments: config (the dictionary of parameters from signcontrol.conf)
file_message (the file name of the message to sign)
group (the name of the newsgroup)
message_id (the Message-ID of the message)
type (the type of the control article)
passphrase (if given, the passphrase of the private key)
flag (if given, the additional flag(s) to pass to gpg)
No return value
"""
signatureWritten = False
if passphrase:
os.system(config['PROGRAM_GPG'] + ' --emit-version --no-comments --no-escape-from-lines --no-throw-keyids --armor --detach-sign --local-user "='+ config['ID'] + '" --no-tty --passphrase "' + passphrase + '" --output ' + file_message + '.pgp ' + flag + ' ' + file_message + '.txt')
else:
os.system(config['PROGRAM_GPG'] + ' --emit-version --no-comments --no-escape-from-lines --no-throw-keyids --armor --detach-sign --local-user "='+ config['ID'] + '" --output ' + file_message + '.pgp ' + flag + ' ' + file_message + '.txt')
if not os.path.isfile(file_message + '.pgp'):
print_error('Signature generation failed.')
print 'Please verify the availability of the secret key.'
return
result = file(file_message + '.sig', 'wb')
for line in file(file_message + '.txt', 'rb'):
if signatureWritten:
result.write(line)
continue
if not line.startswith('X-Signed-Headers'):
# From: is the last signed header field.
if not line.startswith('From'):
result.write(line)
else:
# Rewrite the From: line exactly as we already wrote it.
result.write('From: ' + config['NAME'] + ' <' + config['MAIL'] + '>\n')
result.write('Approved: ' + config['MAIL'] + '\n')
if type == 'checkgroups' and not config['PRIVATE_HIERARCHY']:
result.write('Newsgroups: ' + group + ',news.admin.hierarchies\n')
result.write('Followup-To: ' + group + '\n')
else:
result.write('Newsgroups: ' + group + '\n')
result.write('Path: not-for-mail\n')
result.write('X-Info: ' + config['URL'] + '\n')
result.write('\tftp://ftp.isc.org/pub/pgpcontrol/README.html\n')
result.write('MIME-Version: 1.0\n')
if type == 'newgroup':
result.write('Content-Type: multipart/mixed; boundary="signcontrol"\n')
elif type == 'checkgroups':
result.write('Content-Type: application/news-checkgroups; charset=' + config['ENCODING'] + '\n')
else: # if type == 'rmgroup':
result.write('Content-Type: text/plain; charset=' + config['ENCODING'] + '\n')
result.write('Content-Transfer-Encoding: 8bit\n')
for line2 in file(file_message + '.pgp', 'r'):
if line2.startswith('-'):
continue
if line2.startswith('Version:'):
version = line2.replace('Version: ', '')
version = version.replace(' ', '_')
result.write('X-PGP-Sig: ' + version.rstrip() + ' Subject,Control,Message-ID,Date,Injection-Date,From\n')
elif len(line2) > 2:
result.write('\t' + line2.rstrip() + '\n')
signatureWritten = True
result.close()
os.remove(file_message + '.pgp')
print
if flag:
print 'Do not worry if the program complains about detached signatures or MD5.'
print 'You can now post the file ' + file_message + '.sig using rnews'
print 'or a similar tool.'
print
#print 'Or you can also try to send it with IHAVE. If it fails, it means that the article'
#print 'has not been sent. You will then have to manually use rnews or a similar program.'
#if raw_input('Do you want to try? (y/n) ') == 'y':
# import nntplib
# news_server = nntplib.NNTP(HOST, PORT, USER, PASSWORD)
# news_server.ihave(message_id, file_message + '.sig')
# news_server.quit()
# print 'The control article has just been sent!'
def sign_message(config, file_message, group, message_id, type, passphrase=None):
""" Sign a control article.
Arguments: config (the dictionary of parameters from signcontrol.conf)
file_message (the file name of the message to sign)
group (the name of the newsgroup)
message_id (the Message-ID of the message)
type (the type of the control article)
passphrase (if given, the passphrase of the private key)
No return value
"""
articles_to_generate = len(config['PGP2_COMPATIBILITY'])
i = 1
for (flag, suffix) in config['PGP2_COMPATIBILITY']:
if articles_to_generate > 1:
print
print 'Generation of control article ' + str(i) + '/' + str(articles_to_generate)
i += 1
if suffix:
additional_file = file(file_message + suffix + '.txt', 'wb')
additional_message_id = message_id.replace('@', suffix + '@', 1)
for line in file(file_message + '.txt', 'rb'):
if line == 'Message-ID: ' + message_id + '\n':
line = 'Message-ID: ' + additional_message_id + '\n'
additional_file.write(line)
additional_file.close()
generate_signed_message(config, file_message + suffix, group, additional_message_id, type, passphrase, flag)
os.remove(file_message + suffix + '.txt')
else:
generate_signed_message(config, file_message, group, message_id, type, passphrase, flag)
def generate_newgroup(groups, config, group=None, moderated=None, description=None, message=None, passphrase=None):
""" Create a new group.
Arguments: groups (the dictionary representing the checkgroups)
config (the dictionary of parameters from signcontrol.conf)
group (if given, the name of the newsgroup)
moderated (if given, whether the newsgroup is moderated)
description (if given, the description of the newsgroup)
message (if given, the text to write in the control article)
passphrase (if given, the passphrase of the private key)
No return value
"""
while not group:
group = raw_input('Name of the newsgroup to create: ').lower()
components = group.split('.')
if len(components) < 2:
group = None
print_error('The group must have at least two components.')
elif not components[0][0:1].isalpha():
group = None
print_error('The first component must start with a letter.')
elif components[0] in ['control', 'example', 'to']:
group = None
print_error('The first component must not be "control", "example" or "to".')
elif re.search('[^a-z0-9+_.-]', group):
group = None
print_error('The group must not contain characters other than [a-z0-9+_.-].')
for component in components:
if component in ['all', 'ctl']:
group = None
print_error('Sequences "all" and "ctl" must not be used as components.')
elif not component[0:1].isalnum():
group = None
print_error('Each component must start with a letter or a digit.')
elif component.isdigit():
group = None
print_error('Each component must contain at least one non-digit character.')
if groups.has_key(group):
print
print 'The newsgroup ' + group + ' already exists.'
print 'These new settings (status and description) will override the current ones.'
print
if moderated is None:
if raw_input('Is ' + group + ' a moderated newsgroup? (y/n) ' ) == 'y':
moderated = True
print
print 'There is no need to add " (Moderated)" at the very end of the description.'
print 'It will be automatically added, if not already present.'
print
else:
moderated = False
while not description:
print
print 'The description should start with a capital and end in a period.'
description = raw_input("Description of " + group + ": ")
if len(description) > 56:
print_error('The description is too long. You should shorten it.')
if raw_input('Do you want to continue despite this recommendation? (y/n) ') != 'y':
description = None
continue
moderated_count = description.count('(Moderated)')
if moderated_count > 0:
if not moderated:
if description.endswith(' (Moderated)'):
description = None
print_error('The description must not end with " (Moderated)".')
continue
else:
print_error('The description must not contain "(Moderated)".')
if raw_input('Do you want to continue despite this recommendation? (y/n) ') != 'y':
description = None
continue
elif moderated_count > 1 or not description.endswith(' (Moderated)'):
print_error('The description must not contain "(Moderated)".')
if raw_input('Do you want to continue despite this recommendation? (y/n) ') != 'y':
description = None
continue
if not message:
print
print 'The current message which will be sent is:'
print
if moderated:
message = config['NEWGROUP_MESSAGE_MODERATED'].replace('$GROUP$', group)
else:
message = config['NEWGROUP_MESSAGE_UNMODERATED'].replace('$GROUP$', group)
print message
print
if raw_input('Do you want to change it? (y/n) ') == 'y':
print
print 'Please enter the message you want to send.'
print 'End it with a line containing only "." (a dot).'
print
message = ''
buffer = raw_input('Message: ') + '\n'
while buffer != '.\n':
message += buffer.rstrip() + '\n'
buffer = raw_input('Message: ') + '\n'
print
print
print 'Here is the information about the newsgroup:'
print 'Name: ' + group
if moderated:
print 'Status: moderated'
if not description.endswith(' (Moderated)'):
description += ' (Moderated)'
else:
print 'Status: unmoderated'
print 'Description: ' + description
print 'Message: '
print
print message
print
if raw_input('Do you want to generate a control article for ' + group + '? (y/n) ') == 'y':
print
file_newgroup = group + '-' + epoch_time(TIME)
result = file(file_newgroup + '.txt', 'wb')
result.write('X-Signed-Headers: Subject,Control,Message-ID,Date,Injection-Date,From\n')
if moderated:
result.write('Subject: cmsg newgroup ' + group + ' moderated\n')
result.write('Control: newgroup ' + group + ' moderated\n')
else:
result.write('Subject: cmsg newgroup ' + group + '\n')
result.write('Control: newgroup ' + group + '\n')
message_id = '<newgroup-' + group + '-' + epoch_time(TIME) + '@' + config['HOST'] + '>'
result.write('Message-ID: ' + message_id + '\n')
result.write('Date: ' + pretty_time(TIME) + '\n')
result.write('Injection-Date: ' + pretty_time(TIME) + '\n')
result.write('From: ' + config['NAME'] + ' <' + config['MAIL'] + '>\n\n')
result.write('This is a MIME NetNews control message.\n')
result.write('--signcontrol\n')
result.write('Content-Type: text/plain; charset=' + config['ENCODING'] + '\n\n')
result.write(message + '\n')
result.write('\n\n--signcontrol\n')
result.write('Content-Type: application/news-groupinfo; charset=' + config['ENCODING'] + '\n\n')
result.write('For your newsgroups file:\n')
if len(group) < 8:
result.write(group + '\t\t\t' + description + '\n')
elif len(group) < 16:
result.write(group + '\t\t' + description + '\n')
else:
result.write(group + '\t' + description + '\n')
result.write('\n--signcontrol--\n')
result.close()
sign_message(config, file_newgroup, group, message_id, 'newgroup', passphrase)
os.remove(file_newgroup + '.txt')
if raw_input('Do you want to update the current checkgroups file? (y/n) ') == 'y':
groups[group] = description
write_checkgroups(groups, config['CHECKGROUPS_FILE'])
def generate_rmgroup(groups, config, group=None, message=None, passphrase=None):
""" Remove a group.
Arguments: groups (the dictionary representing the checkgroups)
config (the dictionary of parameters from signcontrol.conf)
group (if given, the name of the newsgroup)
message (if given, the text to write in the control article)
passphrase (if given, the passphrase of the private key)
No return value
"""
while not group:
group = raw_input('Name of the newsgroup to remove: ' ).lower()
if not groups.has_key(group):
print
print 'The newsgroup ' + group + ' does not exist.'
print 'Yet, you can send an rmgroup message for it if you want.'
print
if raw_input('Do you want to generate a control article to *remove* ' + group + '? (y/n) ') == 'y':
print
if not message:
print 'The current message which will be sent is:'
print
message = config['RMGROUP_MESSAGE'].replace('$GROUP$', group)
print message
print
if raw_input('Do you want to change it? (y/n) ') == 'y':
print
print 'Please enter the message you want to send.'
print 'End it with a line containing only "." (a dot).'
print
message = ''
buffer = raw_input('Message: ') + '\n'
while buffer != '.\n':
message += buffer.rstrip() + '\n'
buffer = raw_input('Message: ') + '\n'
print
file_rmgroup = group + '-' + epoch_time(TIME)
result = file(file_rmgroup + '.txt', 'wb')
result.write('X-Signed-Headers: Subject,Control,Message-ID,Date,Injection-Date,From\n')
result.write('Subject: cmsg rmgroup ' + group + '\n')
result.write('Control: rmgroup ' + group + '\n')
message_id = '<rmgroup-' + group + '-' + epoch_time(TIME) + '@' + config['HOST'] + '>'
result.write('Message-ID: ' + message_id + '\n')
result.write('Date: ' + pretty_time(TIME) + '\n')
result.write('Injection-Date: ' + pretty_time(TIME) + '\n')
result.write('From: ' + config['NAME'] + ' <' + config['MAIL'] + '>\n\n')
result.write(message + '\n')
result.close()
sign_message(config, file_rmgroup, group, message_id, 'rmgroup', passphrase)
os.remove(file_rmgroup + '.txt')
if groups.has_key(group):
if raw_input('Do you want to update the current checkgroups file? (y/n) ') == 'y':
del groups[group]
write_checkgroups(groups, config['CHECKGROUPS_FILE'])
def generate_checkgroups(config, passphrase=None, serial=None):
""" List the groups of the hierarchy.
Arguments: config (the dictionary of parameters from signcontrol.conf)
passphrase (if given, the passphrase of the private key)
serial (if given, the serial value to use)
No return value
"""
    while serial not in range(1, 100):
try:
print 'If it is your first checkgroups for today, leave it blank (default is 1).'
print 'Otherwise, increment this revision number by one.'
serial = int(raw_input('Revision to use (1-99): '))
print
except:
serial = 1
serial = '%02d' % serial
file_checkgroups = 'checkgroups-' + epoch_time(TIME)
result = file(file_checkgroups + '.txt', 'wb')
result.write('X-Signed-Headers: Subject,Control,Message-ID,Date,Injection-Date,From\n')
result.write('Subject: cmsg checkgroups ' + config['CHECKGROUPS_SCOPE'] + ' #' + serial_time(TIME) + serial + '\n')
result.write('Control: checkgroups ' + config['CHECKGROUPS_SCOPE'] + ' #' + serial_time(TIME) + serial + '\n')
message_id = '<checkgroups-' + epoch_time(TIME) + '@' + config['HOST'] + '>'
result.write('Message-ID: ' + message_id + '\n')
result.write('Date: ' + pretty_time(TIME) + '\n')
result.write('Injection-Date: ' + pretty_time(TIME) + '\n')
result.write('From: ' + config['NAME'] + ' <' + config['MAIL'] + '>\n\n')
for line in file(config['CHECKGROUPS_FILE'], 'r'):
result.write(line.rstrip() + '\n')
result.close()
sign_message(config, file_checkgroups, config['ADMIN_GROUP'], message_id, 'checkgroups', passphrase)
os.remove(file_checkgroups + '.txt')
def manage_keys(config):
""" Useful wrappers around the gpg program to manage PGP keys
(generate, import, export, remove, and revoke).
Argument: config (the dictionary of parameters from signcontrol.conf)
No return value
"""
choice = 0
while choice != 8:
choice = manage_menu()
if choice == 1:
print 'You currently have the following secret keys installed:'
print
os.system(config['PROGRAM_GPG'] + ' --list-secret-keys --with-fingerprint')
print 'Please note that the uid of your secret key and the value of'
print 'the ID parameter set in signcontrol.conf should be the same.'
elif choice == 2:
print
print '-----------------------------------------------------------------------'
print 'Please put the e-mail address from which you will send control articles'
print 'in the key ID (the real name field). And leave the other fields blank,'
print 'for better compatibility with Usenet software.'
print 'Choose a 2048-bit RSA key which never expires.'
print 'You should also provide a passphrase, for security reasons.'
print 'There is no need to edit the key after it has been generated.'
print
print 'Please note that the key generation may not finish if it is launched'
print 'on a remote server, owing to a lack of enough entropy. Use your own'
print 'computer instead and import the key on the remote one afterwards.'
print '-----------------------------------------------------------------------'
print
os.system(config['PROGRAM_GPG'] + ' --gen-key --allow-freeform-uid')
print
print 'After having generated these keys, you should export your PUBLIC key'
print 'and make it public (in the web site of your hierarchy, along with'
print 'a current checkgroups, and also announce it in news.admin.hierarchies).'
print 'You can also export your PRIVATE key for backup only.'
elif choice == 3:
print 'The key will be written to the file public-key.asc.'
key_name = raw_input('Please enter the uid of the public key to export: ')
os.system(config['PROGRAM_GPG'] + ' --armor --output public-key.asc --export "=' + key_name + '"')
elif choice == 4:
print 'The key will be written to the file private-key.asc.'
key_name = raw_input('Please enter the uid of the secret key to export: ')
os.system(config['PROGRAM_GPG'] + ' --armor --output private-key.asc --export-secret-keys "=' + key_name + '"')
if os.path.isfile('private-key.asc'):
os.chmod('private-key.asc', 0400)
print
print 'Be careful: it is a security risk to export your private key.'
print 'Please make sure that nobody has access to it.'
elif choice == 5:
raw_input('Please put it in a file named secret-key.asc and press enter.')
os.system(config['PROGRAM_GPG'] + ' --import secret-key.asc')
print
print 'Make sure that both the secret and public keys have properly been imported.'
print 'Their uid should be put as the value of the ID parameter set in signcontrol.conf.'
elif choice == 6:
key_name = raw_input('Please enter the uid of the key to *remove*: ')
os.system(config['PROGRAM_GPG'] + ' --delete-secret-and-public-key "=' + key_name + '"')
elif choice == 7:
key_name = raw_input('Please enter the uid of the secret key to revoke: ')
            os.system(config['PROGRAM_GPG'] + ' --gen-revoke "=' + key_name + '"')
print
if __name__ == "__main__":
""" The main function.
"""
config = read_configuration(CONFIGURATION_FILE)
if not os.path.isfile(config['PROGRAM_GPG']):
print 'You must install GnuPG <http://www.gnupg.org/> and edit this script to put'
print 'the path to the gpg binary.'
raw_input('Please install it before using this script.')
sys.exit(2)
choice = 0
while choice != 5:
groups = read_checkgroups(config['CHECKGROUPS_FILE'])
# Update time whenever we come back to the main menu.
TIME = time.localtime()
choice = choice_menu()
if choice == 1:
generate_newgroup(groups, config)
elif choice == 2:
generate_rmgroup(groups, config)
elif choice == 3:
generate_checkgroups(config)
elif choice == 4:
manage_keys(config)
# Embedded documentation.
POD = """
=head1 NAME
signcontrol.py - Generate PGP-signed control articles for Usenet hierarchies
=head1 SYNOPSIS
B<python signcontrol.py>
=head1 DESCRIPTION
B<signcontrol.py> is a Python script aimed at Usenet hierarchy
administrators, helping them maintain the canonical lists of newsgroups
in the hierarchies they administer.
This script is also useful to manage PGP keys: generation, import,
export, removal, and revocation. It works on every platform on which
Python and GnuPG are available (Windows, Linux, etc.).
It enforces best practices regarding the syntax of Usenet control
articles.
Getting started is as simple as:
=over 4
=item 1.
Downloading and installing Python (L<http://www.python.org/>). However,
make sure to use S<Python 2.x> because B<signcontrol.py> is not compatible
yet with S<Python 3.x>.
=item 2.
Downloading and installing GnuPG (L<http://www.gnupg.org/>).
=item 3.
Downloading both the B<signcontrol.py> script and its F<signcontrol.conf>
configuration file.
=item 4.
Editing the F<signcontrol.conf> configuration file so that the parameters
it defines properly fit your installation.
=item 5.
Running C<python signcontrol.py>.
=back
=head1 SUPPORT
The B<signcontrol.py> home page is:
http://www.trigofacile.com/divers/usenet/clefs/signcontrol.htm
It will always point to the current version of the script, and contains
instructions written in French.
For bug tracking, please use the issue tracker provided by GitHub:
https://github.com/Julien-Elie/usenet-signcontrol
=head1 SOURCE REPOSITORY
B<signcontrol.py> is maintained using Git. You can access the current
source by cloning the repository at:
https://github.com/Julien-Elie/usenet-signcontrol.git
or access it via the web at:
https://github.com/Julien-Elie/usenet-signcontrol
When contributing modifications, either patches or Git pull requests
are welcome.
=head1 CONFIGURATION FILE
The following parameters can be modified in the F<signcontrol.conf>
configuration file:
=over 4
=item B<PROGRAM_GPG>
The path to the GPG executable. It is usually
C<C:\Progra~1\GNU\GnuPG\gpg.exe> or C</usr/bin/gpg>.
=item B<PGP2_COMPATIBILITY>
Whether compatibility with MIT S<PGP 2.6.2> (or equivalent) should
be kept. Though this is now fairly obsolete, a few news servers still
haven't been updated and therefore only recognize signatures made with
the old, insecure MD5 algorithm; conversely, current news servers may
refuse to process messages signed with MD5.
Possible values are C<True>, C<False> or C<Only> (default is C<False>).
When set to C<True>, B<signcontrol> will generate two control articles:
one in a format compatible with MIT S<PGP 2.6.2> (or equivalent) and
another in a newer and more secure format. Sending both control
articles then ensures the widest possible processing.
When set to C<False>, B<signcontrol> will generate control articles in
only a newer and more secure format.
When set to C<Only>, B<signcontrol> will generate control articles in
only a format compatible with MIT S<PGP 2.6.2> (or equivalent).
=item B<ID>
The ID of the PGP key used to sign control articles. Note that if you
do not already have a PGP key, it can be generated by B<signcontrol.py>.
As far as Usenet hierarchy management is concerned, the ID is usually
a mere e-mail address.
=item B<MAIL>
The e-mail address from which control articles are sent. It is usually the ID
of the PGP key used to sign them.
=item B<HOST>
The host which appears in the second part of the Message-ID of control
articles generated. It is usually the name of a news server.
=item B<ADMIN_GROUP>
An existing newsgroup of the hierarchy (where checkgroups control
articles will be fed). If an administrative newsgroup exists, use it.
Otherwise, any other newsgroup of the hierarchy will be fine.
=item B<NAME>
The name which appears in the From: header field. You should only use
ASCII characters. Otherwise, you have to MIME-encode it (for instance:
C<=?ISO-8859-15?Q?Julien_=C9LIE?=>).
=item B<CHECKGROUPS_SCOPE>
The scope of the hierarchy according to Section
5.2.3 of RFC 5537 (also known as USEPRO, available at
L<https://tools.ietf.org/html/rfc5537#section-5.2.3>). For instance:
C<fr> (for fr.*), C<de !de.alt> (for de.* excepting de.alt.*) or
C<de.alt> (for de.alt.*).
=item B<URL>
The URL where the public PGP key can be found. If you do not have any,
leave C<ftp://ftp.isc.org/pub/pgpcontrol/README>. If you want to add
more URLs (like the home page of the hierarchy), use a multi-line text
where each line, except for the first, begins with a tabulation.
=item B<NEWGROUP_MESSAGE_MODERATED>, B<NEWGROUP_MESSAGE_UNMODERATED>,
B<RMGROUP_MESSAGE>
The message which will be written in the corresponding control article.
All occurrences of C<$GROUP$> will be replaced by the name of the
newsgroup.
=item B<PRIVATE_HIERARCHY>
Whether the hierarchy is public or private. If it is private (that is
to say if it is intended to remain on a local server with private access
and if it is not fed to other Usenet news servers), the value should
be C<True>, so that checkgroups control articles are not crossposted
to the news.admin.hierarchies newsgroup. Possible values are C<True>
or C<False> (default is C<False>).
=item B<CHECKGROUPS_FILE>
The file which contains the current checkgroups.
=item B<ENCODING>
The encoding of control articles. The default value is C<ISO-8859-1>.
=back
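As an illustration, here is a partial sketch of such a configuration
(every value is a hypothetical placeholder, and the multi-line message
parameters are omitted, although the real file must define every
parameter listed above):
    PROGRAM_GPG        = "/usr/bin/gpg"
    PGP2_COMPATIBILITY = "False"
    ID                 = "control@news.example.org"
    MAIL               = "control@news.example.org"
    HOST               = "news.example.org"
    ADMIN_GROUP        = "example.admin"
    NAME               = "Example Hierarchy Administration"
    CHECKGROUPS_SCOPE  = "example"
    URL                = "ftp://ftp.isc.org/pub/pgpcontrol/README"
    PRIVATE_HIERARCHY  = "False"
    CHECKGROUPS_FILE   = "checkgroups.txt"
    ENCODING           = "ISO-8859-1"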
=head1 USEFUL RESOURCES
Here are some resources worth being aware of:
=over 4
=item Usenet Hierarchy Administration FAQ
L<http://www.eyrie.org/~eagle/faqs/usenet-hier.html>
=item Usenet hierarchy information
L<http://usenet.trigofacile.com/hierarchies/>
=item Hosting service for hierarchy administrators
L<http://www.news-admin.org/>
=back
=head1 LICENSE
The B<signcontrol.py> package as a whole is covered by the following
copyright statement and license:
Copyright (c) 2007, 2008, 2009, 2011, 2014 Julien ÉLIE
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
=head1 HISTORY
B<signcontrol.py> was written by Julien ÉLIE.
=head1 SEE ALSO
gpg(1).
=cut
"""
|
nilq/baby-python
|
python
|
with open('p11_grid.txt', 'r') as file:
lines = file.readlines()
n = []
for line in lines:
a = line.split(' ')
b = []
for i in a:
b.append(int(i))
n.append(b)
N = 0
for i in range(20):
for j in range(20):
horizontal, vertical, diag1, diag2 = 0, 0, 0, 0
if j < 17:
horizontal = n[i][j]*n[i][j+1]*n[i][j+2]*n[i][j+3]
if horizontal > N:
N = horizontal
if i < 17:
vertical = n[i][j]*n[i+1][j]*n[i+2][j]*n[i+3][j]
if vertical > N:
N = vertical
if i < 17 and j < 17:
diag1 = n[i][j]*n[i+1][j+1]*n[i+2][j+2]*n[i+3][j+3]
if diag1 > N:
N = diag1
        if i >= 3 and j < 17:
            # product along the anti-diagonal whose bottom-left cell is (i, j)
            diag2 = n[i][j]*n[i-1][j+1]*n[i-2][j+2]*n[i-3][j+3]
            if diag2 > N:
                N = diag2
print(N)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# COMMON
#
page_action_basket = "Корзина"
page_action_enter = "Войти"
page_action_add = "Добавить"
page_action_cancel = "Отмена"
page_action_yes = "Да"
page_action_save = "Сохранить"
page_action_action = "Действие"
page_action_modify = "изменить"
page_action_remove = "удалить"
page_message_error = "Ошибка!"
page_remove_question = "Вы действительно хотите удалить"
admin_options_manage_category = "Редактировать категории книг"
admin_options_manage_cover = "Редактировать переплёт"
admin_options_manage_quality = "Редактировать качество"
admin_options_manage_language = "Редактировать язык издания"
admin_options_manage_books = "Редактировать книги"
admin_options_statistics = "Статистические данные"
#
# CATEGORY PAGE
#
page_manage_category_title = "Название твоего сайта: Редактирование категорий книг"
page_manage_category_banner = "Название сайта"
page_manage_category_sub_title = "Редактирование категорий книг"
page_manage_category_modal_title_add = "Добавить новую категорию"
page_manage_category_modal_title_edit = "Изменить категорию"
page_manage_category_name = "Категория"
page_manage_category_desc = "Описание категории"
page_manage_category_super_category = "Главная категория"
page_manage_category_it_is_main = "это главная категория"
page_manage_category_remove_object_name = "категорию"
page_manage_category_remove_success = "Категория успешно удалена."
page_manage_category_remove_error = "Не удалось удалить категорию."
page_manage_category_add_exists_alert = "Категория с таким именем уже существует!"
page_manage_category_add_name_input = "Название категории"
page_manage_category_add_desc_input = "Краткое описание категории"
page_manage_category_add_name_chose_super_cat = "Выберите главную категорию"
page_manage_category_add_note_1 = "Если данная категория является главной, то оставьте это поле пустым."
#
# COVER PAGE
#
page_manage_cover_title = "Название твоего сайта: Редактирование типов переплёта"
page_manage_cover_banner = "Название сайта"
page_manage_cover_sub_title = "Редактирование типов переплёта"
page_manage_cover_modal_title_add = "Добавить новый переплёт"
page_manage_cover_modal_title_edit = "Изменить переплёт"
page_manage_cover_name = "Переплёт"
page_manage_cover_remove_object_name = "переплёт"
page_manage_cover_remove_success = "Переплёт успешно удалён."
page_manage_cover_remove_error = "Не удалось удалить переплёт."
page_manage_cover_add_exists_alert = "Переплёт с таким именем уже существует!"
page_manage_cover_add_name_input = "Переплёт"
#
# QUALITY PAGE
#
page_manage_quality_title = "Название твоего сайта: Редактирование качества"
page_manage_quality_banner = "Название сайта"
page_manage_quality_sub_title = "Редактирование качества"
page_manage_quality_modal_title_add = "Добавить новое качество"
page_manage_quality_modal_title_edit = "Изменить качество"
page_manage_quality_name = "Качество"
page_manage_quality_desc = "Описание качества"
page_manage_quality_remove_object_name = "качество"
page_manage_quality_remove_success = "Качество успешно удалено."
page_manage_quality_remove_error = "Не удалось удалить качество."
page_manage_quality_add_exists_alert = "Качество с таким именем уже существует!"
page_manage_quality_add_name_input = "Качество"
page_manage_quality_add_desc_input = "Краткое описание качества"
#
# LANGUAGE PAGE
#
page_manage_language_title = "Название твоего сайта: Редактирование языка"
page_manage_language_banner = "Название сайта"
page_manage_language_sub_title = "Редактирование языка"
page_manage_language_modal_title_add = "Добавить новый язык"
page_manage_language_modal_title_edit = "Изменить язык"
page_manage_language_name = "Язык"
page_manage_language_remove_object_name = "язык"
page_manage_language_remove_success = "Язык успешно удалён."
page_manage_language_remove_error = "Не удалось удалить язык."
page_manage_language_add_exists_alert = "Язык с таким именем уже существует!"
page_manage_language_add_name_input = "Язык"
#
# BOOKS PAGE
#
page_manage_book_title = "Название твоего сайта: Редактирование книг"
page_manage_book_banner = "Название сайта"
page_manage_book_sub_title = "Редактирование книг"
page_manage_book_title_add = "Добавить новую книгу"
page_manage_book_title_edit = "Изменить данные книги"
page_manage_book_add_name_input = "Название книги"
page_manage_book_add_author_input = "Автор"
page_manage_book_add_desc_input = "Описание книги"
page_manage_book_add_name_chose_category = "Категория"
page_manage_book_add_name_chose_cover = "Переплёт"
page_manage_book_add_name_chose_quality = "Качество"
page_manage_book_add_name_chose_language = "Язык"
page_manage_book_add_price_input = "Цена книги в формате ####.##"
page_manage_book_add_price_label = "Цена"
page_manage_book_add_discount_input = "Скидка на книгу в формате ####.##"
page_manage_book_add_currency_input = "грн."
page_manage_book_add_priory_check = "Выделить эту книгу"
page_manage_book_add_upload_files = "Загрузить фотографии книг"
page_manage_book_name = "Список книг"
page_manage_book_remove_object_name = "книгу"
page_manage_book_remove_success = "Книга успешно удалена."
page_manage_book_remove_error = "Не удалось удалить книгу."
page_manage_book_reference_num_label = "Номер ссылки"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*-coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on October 14, 2014
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import os
from veles.config import root
from veles.tests import timeout
from veles.znicz.tests.functional import StandardTest
import veles.znicz.tests.research.SpamKohonen.spam_kohonen as spam_kohonen
# FIXME(v.markovtsev): remove this when Kohonen is ported to CUDA
root.common.engine.backend = "ocl"
class TestSpamKohonen(StandardTest):
@classmethod
def setUpClass(cls):
root.spam_kohonen.loader.validation_ratio = 0.0
root.spam_kohonen.update({
"forward": {"shape": (8, 8),
"weights_stddev": 0.05,
"weights_filling": "uniform"},
"decision": {"epochs": 5},
"downloader": {
"url":
"https://s3-eu-west-1.amazonaws.com/veles.forge/"
"SpamKohonen/spam.tar",
"directory": root.common.dirs.datasets,
"files": [os.path.join("spam", "spam.txt.xz")]},
"loader": {"minibatch_size": 80,
"force_numpy": True,
"ids": True,
"classes": False,
"file":
os.path.join(root.common.dirs.datasets,
"spam", "spam.txt.xz")},
"train": {"gradient_decay": lambda t: 0.001 / (1.0 + t * 0.0002),
"radius_decay": lambda t: 1.0 / (1.0 + t * 0.0002)},
"exporter": {"file": "classified_fast4.txt"}})
@timeout(700)
def test_spamkohonen(self):
self.info("Will test spam kohonen workflow")
workflow = spam_kohonen.SpamKohonenWorkflow(self.parent)
workflow.initialize(device=self.device)
workflow.run()
self.assertIsNone(workflow.thread_pool.failure)
diff = workflow.decision.weights_diff
self.assertAlmostEqual(diff, 3.577783, places=6)
self.assertEqual(5, workflow.loader.epoch_number)
self.info("All Ok")
if __name__ == "__main__":
StandardTest.main()
|
nilq/baby-python
|
python
|
from typing import List
# ------------------------------- solution begin -------------------------------
class Solution:
    def canWinNim(self, n: int) -> bool:
        # You can force a win unless n is a multiple of 4: whenever you face
        # a multiple of 4, any move of 1-3 stones lets the opponent restore
        # a multiple of 4, so the last stone is eventually theirs.
        return n % 4 != 0
# -------------------------------- solution end --------------------------------
if __name__ == '__main__':
    n = 4
    print("Input: {}".format(n))
    solution = Solution()
    print("Output: {}".format(solution.canWinNim(n)))
|
nilq/baby-python
|
python
|
bat = int(input('bateria = '))
def batery (bat):
if bat == 0:
print('morri')
elif bat > 0 and bat < 21:
        print('conecte o carregador')
elif bat > 20 and bat < 80:
print('carregando...')
elif bat > 79 and bat < 100:
print('estou de boa')
elif bat == 100:
print('pode tirar o carregador')
elif bat > 100:
print('estou ligadasso')
return bat
print(batery(bat))
|
nilq/baby-python
|
python
|
#coding:utf8
'''
Created on 2016年4月20日
@author: wb-zhaohaibo
'''
import MySQLdb
print MySQLdb
conn = MySQLdb.Connect(
host="127.0.0.1",
port=3306,
user="root",
passwd="admin",
db="testsql",
charset="utf8"
)
cursor = conn.cursor()
sql = "select * from student"
cursor.execute(sql)
print cursor.rowcount     # number of rows in the result set
rs = cursor.fetchone()    # fetch the first row; the cursor advances by one
print rs
rs = cursor.fetchmany(3)  # fetch the next three rows
print rs
rs = cursor.fetchall()    # fetch all remaining rows
print rs
cursor.close()
conn.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script for updating pymoprhy2 dictionaries (Russian and Ukrainian).
Please note that it is resource-heavy: it requires > 3GB free RAM and about
1GB on HDD for temporary files.
Usage:
update.py (ru|uk) (download|compile|package|cleanup) ...
update.py (ru|uk) all
update.py -h | --help
"""
from __future__ import print_function
import os
import time
import shutil
import subprocess
from docopt import docopt
from cookiecutter.main import cookiecutter
from pymorphy2 import opencorpora_dict
OUT_PATH = "compiled-dicts"
RU_DICT_URL = "http://opencorpora.org/files/export/dict/dict.opcorpora.xml.bz2"
RU_CORPORA_URL = "http://opencorpora.org/files/export/annot/annot.opcorpora.xml.bz2"
RU_DICT_XML = "dict.opcorpora.xml"
RU_CORPORA_XML = "annot.corpus.xml"
UK_DICT_URL = "https://drive.google.com/uc?id=0B4mUAylazDVbUXFIRGJ2S01ibGM&export=download"
UK_DICT_XML = "full-uk.xml"
def _download_bz2(url, out_name):
subprocess.check_call("curl --progress-bar '%s' | bunzip2 > '%s'" % (url, out_name), shell=True)
class RussianBuilder(object):
def download(self):
print("Downloading OpenCorpora dictionary...")
_download_bz2(RU_DICT_URL, RU_DICT_XML)
print("Downloading OpenCorpora corpus...")
_download_bz2(RU_CORPORA_URL, RU_CORPORA_XML)
print("")
def compile(self):
print("Compiling the dictionary")
subprocess.check_call(["./build-dict.py", RU_DICT_XML, OUT_PATH,
"--lang", "ru",
"--corpus", RU_CORPORA_XML,
"--clear"])
print("")
def package(self):
print("Creating Python package")
cookiecutter(
template="cookiecutter-pymorphy2-dicts",
no_input=True,
overwrite_if_exists=True,
extra_context={
'lang': 'ru',
'lang_full': 'Russian',
'version': get_version(corpus=True, timestamp=False),
}
)
def cleanup(self):
shutil.rmtree(OUT_PATH, ignore_errors=True)
if os.path.exists(RU_DICT_XML):
os.unlink(RU_DICT_XML)
if os.path.exists(RU_CORPORA_XML):
os.unlink(RU_CORPORA_XML)
class UkrainianBuilder(object):
def download(self):
print("Downloading and converting LanguageTool dictionary...")
subprocess.check_call(['lt_convert.py', UK_DICT_URL, UK_DICT_XML])
print("")
def compile(self):
print("Compiling the dictionary")
subprocess.check_call(["./build-dict.py", UK_DICT_XML, OUT_PATH,
"--lang", "uk",
"--clear"])
print("")
def package(self):
print("Creating Python package")
cookiecutter("cookiecutter-pymorphy2-dicts", no_input=True, extra_context={
'lang': 'uk',
'lang_full': 'Ukrainian',
'version': get_version(corpus=False, timestamp=True),
})
def cleanup(self):
shutil.rmtree(OUT_PATH, ignore_errors=True)
        if os.path.exists(UK_DICT_XML):
            os.unlink(UK_DICT_XML)
def get_version(corpus=False, timestamp=False):
meta = dict(opencorpora_dict.load(OUT_PATH).meta)
if corpus:
tpl = "{format_version}.{source_revision}.{corpus_revision}"
else:
tpl = "{format_version}.{source_revision}.1"
if timestamp:
tpl += ".%s" % (int(time.time()))
return tpl.format(**meta)
if __name__ == '__main__':
args = docopt(__doc__)
if args['all']:
args['download'] = args['compile'] = args['package'] = True
if args['ru']:
builder = RussianBuilder()
elif args['uk']:
builder = UkrainianBuilder()
else:
raise ValueError("Language is not known")
if args['download']:
builder.download()
if args['compile']:
builder.compile()
if args['package']:
builder.package()
if args['cleanup']:
builder.cleanup()
|
nilq/baby-python
|
python
|
import Selenium_module as zm
print("Zillow Downloader")
url = input("URL: ")
image_links, title = zm.get_links(url)
zm.get_images(image_links, title)
zm.cleanup_exit()
|
nilq/baby-python
|
python
|
from flask_wtf import FlaskForm
from datetime import datetime
from wtforms import BooleanField, DateTimeField, HiddenField, SelectField, StringField, SubmitField, ValidationError
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import Required, Optional
from .. models import Element, EventFrame
class EventFrameForm(FlaskForm):
element = QuerySelectField("Element", validators = [Required()], get_label = "Name")
eventFrameTemplate = QuerySelectField("Event Frame Template", validators = [Required()], get_label = "Name")
sourceEventFrameTemplate = SelectField("Source Event Frame Template Filter", validators = [Optional()], coerce = int)
activeSourceEventFramesOnly = BooleanField("Active Event Frames Sources Only")
sourceEventFrame = SelectField("Source Event Frame", validators = [Optional()], coerce = int)
startTimestamp = DateTimeField("Start Timestamp", default = datetime.utcnow, validators = [Required()])
startUtcTimestamp = HiddenField()
endTimestamp = DateTimeField("End Timestamp", validators = [Optional()])
endUtcTimestamp = HiddenField()
name = StringField("Name", default = lambda : int(datetime.utcnow().timestamp()), validators = [Required()])
eventFrameId = HiddenField()
eventFrameTemplateId = HiddenField()
parentEventFrameId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_endTimestamp(self, field):
if self.startTimestamp.data is not None and self.endTimestamp.data is not None:
if self.endTimestamp.data < self.startTimestamp.data:
raise ValidationError("The End Timestamp must occur after the Start Timestamp.")
if self.parentEventFrameId.data:
parentEventFrame = EventFrame.query.get_or_404(self.parentEventFrameId.data)
if parentEventFrame.EndTimestamp:
endUtcTimestamp = datetime.strptime(self.endUtcTimestamp.data, "%Y-%m-%d %H:%M:%S")
if endUtcTimestamp > parentEventFrame.EndTimestamp:
raise ValidationError("This timestamp is outside of the parent event frame.")
def validate_startTimestamp(self, field):
if self.startTimestamp.data is not None:
startUtcTimestamp = datetime.strptime(self.startUtcTimestamp.data, "%Y-%m-%d %H:%M:%S")
if self.parentEventFrameId.data:
parentEventFrame = EventFrame.query.get_or_404(self.parentEventFrameId.data)
error = False
if parentEventFrame.EndTimestamp:
if startUtcTimestamp < parentEventFrame.StartTimestamp or startUtcTimestamp > parentEventFrame.EndTimestamp:
error = True
else:
if startUtcTimestamp < parentEventFrame.StartTimestamp:
error = True
if error:
raise ValidationError("This timestamp is outside of the parent event frame.")
else:
validationError = False
eventFrame = EventFrame.query.filter_by(ElementId = self.element.data.ElementId,
EventFrameTemplateId = self.eventFrameTemplateId.data, StartTimestamp = self.startUtcTimestamp.data).first()
if eventFrame:
if self.eventFrameId.data == "":
# Trying to add a new eventFrame using a startTimestamp that already exists.
validationError = True
else:
if int(self.eventFrameId.data) != eventFrame.EventFrameId:
# Trying to change the startTimestamp of an eventFrame to a startTimestamp that already exists.
validationError = True
if validationError:
raise ValidationError('The start timestamp "{}" already exists.'.format(field.data))
|
nilq/baby-python
|
python
|
import numpy as np
import os
import cv2
import sys
import time
import dlib
import glob
import voronoi as v
def checkDeepFake(regions):
return True
def initialize_predictor():
# Predictor
if len(sys.argv) > 1:
predictor_path = sys.argv[1]
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
return predictor,detector
else:
print("ERROR : Please give the model as argument.")
return None,None
def extract_features(fileDirectory,videos,labels,show_results = False,frame_rate = 50):
predictor,detector = initialize_predictor()
if predictor is None:
return
for filename in videos:
currentfile = os.path.join(fileDirectory,filename)
if currentfile:
print('Opening the file with name ' + currentfile)
cap = cv2.VideoCapture(currentfile)
face_id = 0
while(cap.isOpened() and not(cv2.waitKey(1) & 0xFF == ord('q'))):
ret, frame = cap.read()
features = []
if frame is None:
break
img = v.preprocessing(frame)
regions = detector(img, 0)
if regions:
# loop over the face detections
for (i, rect) in enumerate(regions):
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
vor_features = v.createVoronoi(img,predictor,rect,face_id + i,show_results=show_results)
features.append(vor_features)
if show_results and cv2.waitKey(1) & 0xFF == ord('q'):
break
                face_id += 1
if show_results:
cv2.imshow("Frame",img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
            cap.release()
            if show_results:
                cv2.destroyAllWindows()
else:
print("Could not find the directory")
def pad_images(fileDirectory):
max_w = 80
max_h = 100
# for filename in os.listdir(fileDirectory):
# currentfile = os.path.join(fileDirectory,filename)
# if currentfile:
# img = cv2.imread(currentfile)
# ht, wd, cc = img.shape
# if ht > max_h:
# max_h = ht
# if wd > max_w:
# max_w = wd
print("Max_w #{} Max_h #{}",max_w, max_h)
for filename in os.listdir(fileDirectory):
currentfile = os.path.join(fileDirectory,filename)
if currentfile:
img = cv2.imread(currentfile)
            ht, wd, cc = img.shape
result = np.full((max_h,max_w,cc), (0,0,0), dtype=np.uint8)
# compute center offset
xx = (max_w - wd) // 2
yy = (max_h - ht) // 2
# copy img image into center of result image
result[yy:yy+ht, xx:xx+wd] = img
cv2.imwrite("features/"+ filename, result)
|
nilq/baby-python
|
python
|
r""" FSS-1000 few-shot semantic segmentation dataset """
import os
import glob
from torch.utils.data import Dataset
import torch.nn.functional as F
import torch
import PIL.Image as Image
import numpy as np
class DatasetFSS(Dataset):
def __init__(self, datapath, fold, transform, split, shot, use_original_imgsize):
self.split = split
self.benchmark = 'fss'
self.shot = shot
self.base_path = os.path.join(datapath, 'FSS-1000')
# Given predefined test split, load randomly generated training/val splits:
        # (reference regarding trn/val/test splits: https://github.com/HKUSTCV/FSS-1000/issues/7)
with open('./data/splits/fss/%s.txt' % split, 'r') as f:
self.categories = f.read().split('\n')[:-1]
self.categories = sorted(self.categories)
self.class_ids = self.build_class_ids()
self.img_metadata = self.build_img_metadata()
self.transform = transform
def __len__(self):
return len(self.img_metadata)
def __getitem__(self, idx):
query_name, support_names, class_sample = self.sample_episode(idx)
query_img, query_mask, support_imgs, support_masks = self.load_frame(query_name, support_names)
query_img, query_mask = self.transform(query_img, query_mask)
query_mask = F.interpolate(query_mask.unsqueeze(0).unsqueeze(0).float(), query_img.size()[-2:], mode='nearest').squeeze()
support_transformed = [self.transform(support_img, support_cmask) for support_img, support_cmask in zip(support_imgs, support_masks)]
support_masks = [x[1] for x in support_transformed]
support_imgs = torch.stack([x[0] for x in support_transformed])
support_masks_tmp = []
for smask in support_masks:
smask = F.interpolate(smask.unsqueeze(0).unsqueeze(0).float(), support_imgs.size()[-2:], mode='nearest').squeeze()
support_masks_tmp.append(smask)
support_masks = torch.stack(support_masks_tmp)
batch = {'query_img': query_img,
'query_mask': query_mask,
'query_name': query_name,
'support_imgs': support_imgs,
'support_masks': support_masks,
'support_names': support_names,
'class_id': torch.tensor(class_sample)}
return batch
def load_frame(self, query_name, support_names):
query_img = Image.open(query_name).convert('RGB')
support_imgs = [Image.open(name).convert('RGB') for name in support_names]
query_id = query_name.split('/')[-1].split('.')[0]
query_name = os.path.join(os.path.dirname(query_name), query_id) + '.png'
support_ids = [name.split('/')[-1].split('.')[0] for name in support_names]
support_names = [os.path.join(os.path.dirname(name), sid) + '.png' for name, sid in zip(support_names, support_ids)]
query_mask = self.read_mask(query_name)
support_masks = [self.read_mask(name) for name in support_names]
return query_img, query_mask, support_imgs, support_masks
def read_mask(self, img_name):
mask = torch.tensor(np.array(Image.open(img_name).convert('L')))
mask[mask < 128] = 0
mask[mask >= 128] = 1
return mask
def sample_episode(self, idx):
query_name = self.img_metadata[idx]
class_sample = self.categories.index(query_name.split('/')[-2])
if self.split == 'val':
class_sample += 520
elif self.split == 'test':
class_sample += 760
support_names = []
while True: # keep sampling support set if query == support
support_name = np.random.choice(range(1, 11), 1, replace=False)[0]
support_name = os.path.join(os.path.dirname(query_name), str(support_name)) + '.jpg'
if query_name != support_name: support_names.append(support_name)
if len(support_names) == self.shot: break
return query_name, support_names, class_sample
def build_class_ids(self):
if self.split == 'trn':
class_ids = range(0, 520)
elif self.split == 'val':
class_ids = range(520, 760)
elif self.split == 'test':
class_ids = range(760, 1000)
return class_ids
def build_img_metadata(self):
img_metadata = []
for cat in self.categories:
img_paths = sorted([path for path in glob.glob('%s/*' % os.path.join(self.base_path, cat))])
for img_path in img_paths:
if os.path.basename(img_path).split('.')[1] == 'jpg':
img_metadata.append(img_path)
return img_metadata
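# A minimal usage sketch (an addition, not part of the original module): it
# assumes the script runs from the repository root, with the split lists under
# ./data/splits/fss/ and the FSS-1000 images under ./datasets/FSS-1000/.
# The joint transform below is a toy stand-in for the project's own transform.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    import torchvision.transforms.functional as TF
    def toy_transform(img, mask):
        # Resize the image and convert it to a CxHxW tensor; the mask is
        # returned unchanged because __getitem__ resizes it with F.interpolate.
        return TF.to_tensor(TF.resize(img, [400, 400])), mask
    dataset = DatasetFSS(datapath='./datasets', fold=0, transform=toy_transform,
                         split='test', shot=1, use_original_imgsize=False)
    loader = DataLoader(dataset, batch_size=1, shuffle=False)
    batch = next(iter(loader))
    print(batch['query_img'].shape, batch['support_masks'].shape)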
|
nilq/baby-python
|
python
|
# The Python print statement is often used to output variables.
# To combine both text and a variable, Python uses the '+' character:
x = "awesome"
print("Python is " + x)
# You can also use the '+' character to add a variable to another variable:
x = "Python is "
y = "awesome"
z = x + y
print(z)
# For numbers, the '+' character works as a mathematical operator:
x = 5
y = 10
print(x + y)
# If you try to combine a string and a number, Python will give you an error:
x = 5
y = "John"
print(x + y)
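# One way to avoid the error is to convert the number with the str() function:
x = 5
y = "John"
print(str(x) + y)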
|
nilq/baby-python
|
python
|
from keras.applications.resnet50 import ResNet50 as RN50
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Flatten
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
from keras import optimizers
from keras import backend
import matplotlib.pyplot as plt
import os
end='activation_37'
#end='activation_'+str(idx)
BOARD_PATH = 'boards/'
EXPERIMENT_NAME = 'training_50epoch_LRFull'
MODEL_FNAME = f'models/modelRN50_{EXPERIMENT_NAME}.h5'
EPOCH_ARR=[50, 100, 200]
train_data_dir='../datasets/MIT_split/train'
val_data_dir='../datasets/MIT_split/test'
test_data_dir='../datasets/MIT_split/test'
img_width = 224
img_height=224
batch_size=32
validation_samples=807
def preprocess_input(x, dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_data_format()
assert dim_ordering in {'channels_first', 'channels_last'}
if dim_ordering == 'channels_first':
# 'RGB'->'BGR'
x = x[ ::-1, :, :]
# Zero-center by mean pixel
x[ 0, :, :] -= 103.939
x[ 1, :, :] -= 116.779
x[ 2, :, :] -= 123.68
else:
# 'RGB'->'BGR'
x = x[:, :, ::-1]
# Zero-center by mean pixel
x[:, :, 0] -= 103.939
x[:, :, 1] -= 116.779
x[:, :, 2] -= 123.68
return x
LR_list = [0.1, 0.01, 0.0001]
# LR_list = [0.1]
LR_results_dict = {}
for EPOCHS in EPOCH_ARR:
for LR in LR_list:
results_dir=f'learningRateDiffs/epochs_{EPOCHS}_LR_{LR}'
results_txt_file = f"{results_dir}/results_{EPOCHS}_LR_{LR}"
if not os.path.exists(results_dir):
os.makedirs(results_dir)
with open(f"{results_txt_file}.txt", "a") as fi:
fi.write("Epochs\tLearning_Rate\tAccuracy\tValidation_accuracy\tLoss\tValidation_loss\n")
# create the base pre-trained model
base_model = RN50(weights='imagenet')
plot_model(base_model, to_file=f'{results_dir}/RN50_base.png', show_shapes=True, show_layer_names=True)
# base_model.summary()
#cropping the model
x = base_model.layers[-2].output
intermediate = 'inter'
x = Dense(8, activation='softmax',name=intermediate)(x)
model = Model(base_model.input, x)
plot_model(model, to_file=f'{results_dir}/modelRN50_{EXPERIMENT_NAME}.png', show_shapes=True, show_layer_names=True)
#Freezing layers
#for layer in base_model.layers:
# layer.trainable = False
#Unfreezeing layers
#for idx in range(-2,end,-1):
# base_model.layers[idx].trainable=True
new_opt = optimizers.Adadelta(learning_rate= LR)
model.compile(loss='categorical_crossentropy',optimizer=new_opt, metrics=['accuracy'])
for layer in model.layers:
print(layer.name, layer.trainable)
#preprocessing_function=preprocess_input,
datagen = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
preprocessing_function=preprocess_input,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None)
train_generator = datagen.flow_from_directory(train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
test_generator = datagen.flow_from_directory(test_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
validation_generator = datagen.flow_from_directory(val_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
tbCallBack = TensorBoard(log_dir=BOARD_PATH+EXPERIMENT_NAME, histogram_freq=0, write_graph=True)
history=model.fit_generator(train_generator,
steps_per_epoch=(int(1881//batch_size)+1),
epochs=EPOCHS,
validation_data=validation_generator,
validation_steps= (int(validation_samples//batch_size)+1), callbacks=[tbCallBack])
        # evaluate_generator's second argument counts batches, not samples
        result = model.evaluate_generator(test_generator, steps=(validation_samples // batch_size) + 1)
print( result)
#saving model
model.save(f'{results_dir}/modelRN50_{EXPERIMENT_NAME}.h5')
# list all data in history
if True:
# summarize history for accuracy
print(history.history.keys())
accuracy = history.history['accuracy']
validation_accuracy = history.history['val_accuracy']
loss = history.history['loss']
validation_loss = history.history['val_loss']
LR_results_structured = [accuracy, validation_accuracy, loss, validation_loss]
LR_results_dict[f'{LR}'] = LR_results_structured
print(LR_results_dict)
with open(f"{results_txt_file}.txt", "a") as fi:
fi.write(f'{EPOCHS}\t{LR}\t{accuracy[-1]}\t{validation_accuracy[-1]}\t{loss[-1]}\t{validation_loss[-1]}\n')
with open(f"{results_txt_file}_raw.txt", "a") as fi:
fi.write(f'accuracy\tvalidation_accuracy\tloss\tvalidation_loss\n')
for a, va, l, vl in zip(accuracy, validation_accuracy, loss, validation_loss):
fi.write(f'{a}\t{va}\t{l}\t{vl}\n')
plt.plot(accuracy)
plt.plot(validation_accuracy)
plt.title(f'Learning_rate = {LR} accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(f'{results_dir}/acc_{LR}.jpg')
plt.close()
# summarize history for loss
plt.plot(loss)
plt.plot(validation_loss)
plt.title(f'Learning_rate = {LR} model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(f'{results_dir}/loss_{LR}.jpg')
plt.close()
backend.clear_session()
for tmpLR in LR_list:
plt.plot(LR_results_dict[f'{tmpLR}'][0])
plt.plot(LR_results_dict[f'{tmpLR}'][1])
plt.title(f'{EPOCHS} Epochs Accuracy Aggregate')
plt.ylabel('accuracy')
plt.xlabel('epoch')
    # build the legend labels from LR_list so they always match the plotted curves
    legend_labels = []
    for tmpLR in LR_list:
        legend_labels += [f'train_{tmpLR}', f'validation_{tmpLR}']
    plt.legend(legend_labels, loc='upper left')
plt.savefig(f'learningRateDiffs/graph_{EPOCHS}.jpg')
plt.close()
|
nilq/baby-python
|
python
|
import ctypes
# Implements the Array ADT using array capabilities of the ctypes module.
class Array :
# Creates an array with size elements.
def __init__( self, size ):
assert size > 0, "Array size must be > 0"
self._size = size
# Create the array structure using the ctypes module.
PyArrayType = ctypes.py_object * size
self._elements = PyArrayType()
# Initialize each element.
self.clear(None)
# Returns the size of the array.
def __len__( self ):
return self._size
# Gets the contents of the index element.
def __getitem__( self, index ):
assert index >= 0 and index < len(self), "Array subscript out of range"
return self._elements[ index ]
# Puts the value in the array element at index position.
def __setitem__( self, index, value ):
assert index >= 0 and index < len(self), "Array subscript out of range"
self._elements[ index ] = value
# Clears the array by setting each element to the given value.
def clear( self, value ):
for i in range( len(self) ) :
self._elements[i] = value
# Returns the array's iterator for traversing the elements.
def __iter__( self ):
        return _ArrayIterator( self._elements )
# An iterator for the Array ADT.
class _ArrayIterator :
def __init__( self, the_array ):
self._array_ref = the_array
self._cur_index = 0
def __iter__( self ):
return self
def __next__( self ):
if self._cur_index < len( self._array_ref ) :
entry = self._array_ref[ self._cur_index ]
self._cur_index += 1
return entry
else:
raise StopIteration
# Implementation of the Array2D ADT using an array of arrays.
class Array2D :
# Creates a 2 -D array of size numRows x numCols.
def __init__( self, num_rows, num_cols ):
# Create a 1 -D array to store an array reference for each row.
self.rows = Array( num_rows )
# Create the 1 -D arrays for each row of the 2 -D array.
for i in range( num_rows ) :
self.rows[i] = Array( num_cols )
# Returns the number of rows in the 2 -D array.
def num_rows( self ):
return len( self.rows )
# Returns the number of columns in the 2 -D array.
def num_cols( self ):
return len( self.rows[0] )
# Clears the array by setting every element to the given value.
def clear( self, value ):
for row in range( self.num_rows() ):
            self.rows[row].clear( value )
# Gets the contents of the element at position [i, j]
def __getitem__( self, index_tuple ):
assert len(index_tuple) == 2, "Invalid number of array subscripts."
row = index_tuple[0]
col = index_tuple[1]
assert row >= 0 and row < self.num_rows() \
and col >= 0 and col < self.num_cols(), \
"Array subscript out of range."
array_1d = self.rows[row]
return array_1d[col]
# Sets the contents of the element at position [i,j] to value.
def __setitem__( self, index_tuple, value ):
assert len(index_tuple) == 2, "Invalid number of array subscripts."
row = index_tuple[0]
col = index_tuple[1]
assert row >= 0 and row < self.num_rows() \
and col >= 0 and col < self.num_cols(), \
"Array subscript out of range."
array_1d = self.rows[row]
array_1d[col] = value
class DynamicArray:
"""A dynamic array class akin to a simplified Python list."""
def __init__(self):
"""Create an empty array."""
self._n = 0 # count actual elements
self._capacity = 1 # default array capacity
self._A = self._make_array(self._capacity) # low-level array
def __len__(self):
"""Return number of elements stored in the array."""
return self._n
def __getitem__(self, k):
"""Return element at index k."""
        if not 0 <= k < self._n:
raise IndexError( 'invalid index' )
return self._A[k] # retrieve from array
def append(self, obj):
"""Add object to end of the array."""
if self._n == self._capacity: # not enough room
self._resize(2 * self._capacity) # so double capacity
self._A[self._n] = obj
self._n += 1
    def _resize(self, c): # nonpublic utility
"""Resize internal array to capacity c."""
B = self._make_array(c) # new (bigger) array
for k in range(self._n): # for each existing value
B[k] = self._A[k]
self._A = B # use the bigger array
self._capacity = c
    def _make_array(self, c): # nonpublic utility
"""Return new array with capacity c."""
return (c * ctypes.py_object)( ) # see ctypes documentation
def insert(self, k, value):
"""Insert value at index k, shifting subsequent values rightward."""
        # (for simplicity, we assume 0 <= k <= n in this version)
        if self._n == self._capacity: # not enough room
self._resize(2 * self._capacity) # so double capacity
for j in range(self._n, k, -1): # shift rightmost first
self._A[j] = self._A[j - 1]
self._A[k] = value # store newest element
self._n += 1
def remove(self, value):
"""Remove first occurrence of value( or raise ValueError)."""
# note: we do not consider shrinking the dynamic array in this version
for k in range(self._n):
if self._A[k] == value: # found a match!
for j in range(k, self._n - 1): # shift others to fill gap
self._A[j] = self._A[j + 1]
self._A[self._n - 1] = None # help garbage collection
self._n -= 1 # we have one less item
return # exit immediately
raise ValueError( "value not found" ) # only reached if no match
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""format manifest with more metadata."""
import argparse
import functools
import json
import jsonlines
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.frontend.utility import load_cmvn
from paddlespeech.s2t.io.utility import feat_type
from paddlespeech.s2t.utils.utility import add_arguments
from paddlespeech.s2t.utils.utility import print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('cmvn_path', str,
'examples/librispeech/data/mean_std.json',
"Filepath of cmvn.")
add_arg('unit_type', str, "char", "Unit type, e.g. char, word, spm")
add_arg('vocab_path', str,
'examples/librispeech/data/vocab.txt',
"Filepath of the vocabulary.")
add_arg('manifest_paths', str,
None,
"Filepaths of manifests for building vocabulary. "
"You can provide multiple manifest files.",
nargs='+',
required=True)
# bpe
add_arg('spm_model_prefix', str, None,
        "spm model prefix, spm_model_%(bpe_mode)_%(count_threshold), only needed when `unit_type` is spm")
add_arg('output_path', str, None, "filepath of formatted manifest.", required=True)
# yapf: disable
args = parser.parse_args()
def main():
print_arguments(args, globals())
fout = open(args.output_path, 'w', encoding='utf-8')
# get feat dim
filetype = args.cmvn_path.split(".")[-1]
mean, istd = load_cmvn(args.cmvn_path, filetype=filetype)
feat_dim = mean.shape[0] #(D)
print(f"Feature dim: {feat_dim}")
text_feature = TextFeaturizer(args.unit_type, args.vocab_path, args.spm_model_prefix)
vocab_size = text_feature.vocab_size
print(f"Vocab size: {vocab_size}")
    # jsonlines entries look like this:
# {
# "input": [{"name": "input1", "shape": (100, 83), "feat": "xxx.ark:123"}],
# "output": [{"name":"target1", "shape": (40, 5002), "text": "a b c de"}],
# "utt2spk": "111-2222",
# "utt": "111-2222-333"
# }
count = 0
for manifest_path in args.manifest_paths:
with jsonlines.open(str(manifest_path), 'r') as reader:
manifest_jsons = list(reader)
for line_json in manifest_jsons:
output_json = {
"input": [],
"output": [],
'utt': line_json['utt'],
'utt2spk': line_json.get('utt2spk', 'global'),
}
# output
line = line_json['text']
if isinstance(line, str):
# only one target
tokens = text_feature.tokenize(line)
tokenids = text_feature.featurize(line)
output_json['output'].append({
'name': 'target1',
'shape': (len(tokenids), vocab_size),
'text': line,
'token': ' '.join(tokens),
'tokenid': ' '.join(map(str, tokenids)),
})
else:
# isinstance(line, list), multi target in one vocab
for i, item in enumerate(line, 1):
tokens = text_feature.tokenize(item)
tokenids = text_feature.featurize(item)
output_json['output'].append({
'name': f'target{i}',
'shape': (len(tokenids), vocab_size),
'text': item,
'token': ' '.join(tokens),
'tokenid': ' '.join(map(str, tokenids)),
})
# input
line = line_json['feat']
if isinstance(line, str):
# only one input
feat_shape = line_json['feat_shape']
assert isinstance(feat_shape, (list, tuple)), type(feat_shape)
filetype = feat_type(line)
if filetype == 'sound':
feat_shape.append(feat_dim)
else: # kaldi
                    raise NotImplementedError('kaldi features are not supported yet!')
output_json['input'].append({
"name": "input1",
"shape": feat_shape,
"feat": line,
"filetype": filetype,
})
else:
# isinstance(line, list), multi input
raise NotImplementedError("not support multi input now!")
fout.write(json.dumps(output_json) + '\n')
count += 1
print(f"{args.manifest_paths} Examples number: {count}")
fout.close()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# Unit test predict_interval ForecasterAutoreg
# ==============================================================================
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from sklearn.linear_model import LinearRegression
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_True():
'''
Test output when regressor is LinearRegression and one step ahead is predicted
using in sample residuals.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10., 20., 20.]]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=11, step=1)
)
results = forecaster.predict_interval(steps=1, in_sample_residuals=True)
pd.testing.assert_frame_equal(results, expected)
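# Note on the expected values above (a sketch of the interval mechanics): every
# in-sample residual is forced to 10, so each bootstrapped path is shifted by the
# same constant and the interval collapses, giving lower_bound == upper_bound.
# These tests check the plumbing of predict_interval, not a realistic interval.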
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_True():
'''
    Test output when regressor is LinearRegression and two steps ahead are predicted
using in sample residuals.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
                  np.array([[10., 20., 20.],
[11., 24.33333333, 24.33333333]
]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=12, step=1)
)
results = forecaster.predict_interval(steps=2, in_sample_residuals=True)
pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_False():
'''
Test output when regressor is LinearRegression and one step ahead is predicted
using out sample residuals.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10., 20., 20.]]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=11, step=1)
)
results = forecaster.predict_interval(steps=1, in_sample_residuals=False)
pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_False():
'''
    Test output when regressor is LinearRegression and two steps ahead are predicted
using out sample residuals.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
                  np.array([[10., 20., 20.],
[11., 24.33333333, 24.33333333]
]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=12, step=1)
)
results = forecaster.predict_interval(steps=2, in_sample_residuals=False)
pd.testing.assert_frame_equal(results, expected)
|
nilq/baby-python
|
python
|
import os
import gzip
import cPickle
from config import config
for fold in range(5):
filename = os.path.join(config.data_dir, 'atis.fold' + str(fold) + '.pkl.gz')
with gzip.open(filename, 'rb') as f:
train_set, valid_set, test_set, dicts = cPickle.load(f)
labels2idx_, tables2idx_, words2idx_ = dicts['labels2idx'], dicts['tables2idx'], dicts['words2idx']
idx2labels = {v: k for k, v in labels2idx_.items()}
idx2tables = {v: k for k, v in tables2idx_.items()}
idx2words = {v: k for k, v in words2idx_.items()}
train_x, train_ne, train_label = train_set
for sentence, ne, label in zip(train_x, train_ne, train_label):
print(sentence, ne, label)
        print(' '.join([idx2labels[i] for i in label]))
        print('\n')
        print(' '.join([idx2tables[i] for i in ne]))
        print('\n')
        print(' '.join([idx2words[i] for i in sentence]))
        print('\n')
exit()
|
nilq/baby-python
|
python
|
import unittest
from bsim.connection import *
class TestConnectionMethods(unittest.TestCase):
def test_data(self):
c = Connection(debug=False)
c.delay_start = [0, 0, 3, 0, 1, 0, 0, 0, 0, 2, 0, 0]
c.delay_num = [1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0]
c.rev_delay_start = [0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 3]
c.rev_delay_num = [0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]
c.rev_map2sid = [0, 3, 1, 2]
gpu = c.to_gpu()
cpu = c.from_gpu(gpu, only_struct=False)
self.assertListEqual(c.delay_start, list(cast(cpu.delay_start, POINTER(c_int*cpu.n_len)).contents))
self.assertListEqual(c.delay_num, list(cast(cpu.delay_num, POINTER(c_int*cpu.n_len)).contents))
self.assertListEqual(c.rev_delay_start, list(cast(cpu.rev_delay_start, POINTER(c_int*cpu.r_n_len)).contents))
self.assertListEqual(c.rev_delay_num, list(cast(cpu.rev_delay_num, POINTER(c_int*cpu.r_n_len)).contents))
self.assertListEqual(c.rev_map2sid, list(cast(cpu.rev_map2sid, POINTER(c_int*cpu.s_len)).contents))
if __name__ == '__main__':
print('Testing {}: '.format(__file__[:-3]))
unittest.main()
print('\n')
|
nilq/baby-python
|
python
|
class History(object):
def __init__(self, name, userID):
self.name = name
self.userID = userID
self.history = []
def logMessage(self, lastMessage):
        # drop the oldest entry so only the most recent messages are kept
        if len(self.history) > 10:
            self.history.pop(0)
        self.history.append(lastMessage)
def getLastMessages(self, num):
historyWanted = int(num)
if historyWanted > len(self.history):
historyWanted = len(self.history)
        lastMessages = self.history[-historyWanted:]
return lastMessages
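# A minimal usage sketch (added here, not part of the original listing): log a
# dozen messages and fetch the three most recent ones.
if __name__ == '__main__':
    h = History("general", userID=1)
    for i in range(12):
        h.logMessage("message %d" % i)
    print(h.getLastMessages(3))  # ['message 9', 'message 10', 'message 11']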
|
nilq/baby-python
|
python
|
import logging
__title__ = 'django_nine.tests.base'
__author__ = 'Artur Barseghyan'
__copyright__ = '2015-2019 Artur Barseghyan'
__license__ = 'GPL-2.0-only OR LGPL-2.1-or-later'
__all__ = (
'LOG_INFO',
'log_info',
)
logger = logging.getLogger(__name__)
LOG_INFO = True
def log_info(func):
"""Logs some useful info."""
if not LOG_INFO:
return func
def inner(self, *args, **kwargs):
result = func(self, *args, **kwargs)
logger.info('\n\n%s' % func.__name__)
logger.info('============================')
if func.__doc__:
logger.info('""" %s """' % func.__doc__.strip())
logger.info('----------------------------')
if result is not None:
logger.info(result)
logger.info('\n++++++++++++++++++++++++++++')
return result
return inner
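# A minimal usage sketch (added here, not part of the original module): decorate
# a method so its name, docstring, and return value are logged.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    class DummyTest(object):
        @log_info
        def test_addition(self):
            """Check that addition works."""
            return 2 + 2
    print(DummyTest().test_addition())  # logs the info banner, then prints 4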
|
nilq/baby-python
|
python
|
import os
import datetime
import MySQLdb
con = MySQLdb.connect(host='DBSERVER', user='DBUSER', passwd='DBPASSWD', db='DB')
cur = con.cursor()
cur.execute("SHOW TABLES")
data = "SET FOREIGN_KEY_CHECKS = 0; \n"
tables = []
for table in cur.fetchall():
tables.append(table[0])
for table in tables:
if table != "fos_user" and table != 'udala':
        # Check whether the udala_id column exists
cur.execute("SHOW columns from `" + str(table) + "` where field='udala_id' \n")
badu = cur.rowcount
if badu == 1:
data += "-- BADU!! \n \n \n"
data += "DELETE FROM `" + str(table) + "` WHERE udala_id=64; \n"
cur.execute("SELECT * FROM `" + str(table) + "` WHERE udala_id=64;")
else:
data += "-- EZ DU !! \n \n"
data += "DELETE FROM `" + str(table) + "`; \n"
cur.execute("SELECT * FROM `" + str(table) + "`;")
for row in cur.fetchall():
data += "INSERT INTO `" + str(table) + "` VALUES("
first = True
for field in row:
if not first:
data += ', '
                if isinstance(field, (long, int, float)):
data += str(field)
first = False
elif field is None:
data += str('NULL')
first = False
                else:
                    # naive quoting: double quotes inside values are swapped for single quotes
                    data += '"' + str(field).replace("\"", "\'") + '"'
                    first = False
data += ");\n"
data += "\n\n"
data += "SET FOREIGN_KEY_CHECKS = 1; \n"
with open("export_zerbikat.sql", "w") as sql_file:
    sql_file.write(data)
|
nilq/baby-python
|
python
|
from .data_parallel import CustomDetDataParallel
from .sync_batchnorm import convert_model
|
nilq/baby-python
|
python
|
import os
from setuptools import setup
setup(name='NNApp01',
version='0.1.0',
description='NN Programming Assignment ',
author='Dzmitry Buhryk',
author_email='dzmitry.buhryk@gmail.com',
license='MIT',
install_requires=['flask', 'werkzeug'],
tests_require=['requests', 'flask', 'werkzeug', 'urllib3'],
packages=['app01', 'test'],
include_package_data=True,
package_data={
'app01': ['static/index_t.html', 'resources/Keyword.txt'],
'test': ['resources/*']
},
package_dir={
'app01': 'app01',
'test': 'test'
},
zip_safe=False)
|
nilq/baby-python
|
python
|
from multiprocessing import Process
import envServer
from distutils.dir_util import copy_tree
from random import shuffle
import sys
sys.path.append("../pyutil")
sys.path.append("..")
import signal
import parseNNArgs
import traceback
import threading
import pickle
import shutil
import glob
import os
import random
import time
import json
import math
import numpy as np
import scipy.ndimage
from dqnQNN import DQN
from replay_buffer import ReplayBuffer
from environment import Environment
import logDqn
import outDir
import tensorflow as tf
from tensorflow.python.framework import ops
# from tensorflow.python import debug as tf_debug
def printT(s):
sys.stdout.write(s + '\n')
class dqnRunner():
def __init__(self, sess, params, out_dir=None, agentB_sess= None):
self.params = params
self.sess = sess
self.agentB_sess = agentB_sess
self.lock = threading.Lock()
self.modelStoreIntv = 150
self.bufferStoreIntv = 150
self.annealSteps = params['annealSteps']
self.state_dim = params['pxRes']
if self.params['verbose']:
printT("tensorflow version: {}".format(tf.__version__))
# create environment
self.env = Environment(sess, params, self)
self.numActions = self.env.numActions
# load classifier for reward calculation
if self.params['classNN'] is not None:
with tf.device("/device:CPU:0"):
self.rewardClassNet = ClassConvNetEval(self.sess, params)
self.env.rewardClassNet = self.rewardClassNet
# just gets or resets global_step
self.global_step = None
variables = tf.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
for v in variables:
if "global_step" in v.name:
self.global_step = v
if self.global_step is None:
self.global_step = tf.Variable(0, name='global_step',
trainable=False)
self.resetGlStep = tf.assign(self.global_step, 0)
# load actual dqn
self.q = DQN(self.sess, self.params['out_dir'],
self.global_step, self.params, self.numActions)
self.evalMethods= ["agent","random"]
self.evalMethod="agent"
self.qAgentB=None
if (not self.params['agentB'] is None) and self.params['interEval']:
self.qAgentB = DQN(self.agentB_sess, self.params['out_dir'],
self.global_step, self.params, self.numActions,agentB=True)
self.evalMethod="agentA"
self.evalMethods= ["agentA","random", "fixed","agentB"]
self.sess.as_default()
# replay buffer (size and type)
if self.params['replaySz'] is None:
self.replayBufferSize = 1000000
else:
self.replayBufferSize = self.params['replaySz']
self.replay = ReplayBuffer(self.replayBufferSize)
# variables for exploration decay
self.action_step = tf.Variable(0, name='action_step',
trainable=False, dtype=tf.int32)
self.increment_ac_step_op = tf.assign(self.action_step,
self.action_step+1)
self.global_action_step = tf.Variable(0, name='global_action_step',
trainable=False, dtype=tf.int32)
self.increment_gac_step_op = tf.assign(self.global_action_step,
self.global_action_step+1)
self.episode_step = tf.Variable(0, name='episode_step',
trainable=False, dtype=tf.int32)
self.increment_ep_step_op = tf.assign(self.episode_step,
self.episode_step+1)
self.resetEpStep = tf.assign(self.episode_step, 0)
self.resetAcStep = tf.assign(self.action_step, 0)
self.resetGAcStep = tf.assign(self.global_action_step, 0)
# save state
self.saver = tf.train.Saver(max_to_keep=self.params['keepNewestModels'] )
fn = os.path.join(self.params['out_dir'], "mainLoopTime.txt")
self.mainLoopTimeFile = open(fn, "a")
fn_ = os.path.join(self.params['out_dir'], "learnLoopTime.txt")
self.learnLoopTimeFile = open(fn_, "a")
# main function, runs the learning process
def run(self):
# debugging variables, for tensorboard
if self.params['evaluation']:
# evaluation episodes, no exploration
eval_reward = tf.Variable(0., name="evalReward")
eval_reward_op = tf.summary.scalar("Eval-Reward", eval_reward)
eval_disc_reward = tf.Variable(0., name="evalDiscReward")
eval_disc_reward_op = tf.summary.scalar("Eval-Reward_discounted",
eval_disc_reward)
eval_stepCount = tf.Variable(0., name="evalStepCount")
eval_stepCount_op = tf.summary.scalar("Eval-StepCount", eval_stepCount)
eval_sum_vars = [eval_reward, eval_disc_reward, eval_stepCount]
eval_sum_op = tf.summary.merge([eval_reward_op,
eval_disc_reward_op,
eval_stepCount_op])
# (discounted) reward per episode
episode_reward = tf.Variable(0., name="episodeReward")
episode_reward_op = tf.summary.scalar("Reward", episode_reward)
episode_disc_reward = tf.Variable(0., name="episodeDiscReward")
episode_disc_reward_op = tf.summary.scalar("Reward_discounted",
episode_disc_reward)
# average (max q)
        episode_ave_max_q = tf.Variable(0., name='episodeAvgMaxQ')
episode_ave_max_q_op = tf.summary.scalar("Qmax_Value",
episode_ave_max_q)
# number of steps for episode
stepCount = tf.Variable(0., name="stepCount")
stepCount_op = tf.summary.scalar("StepCount", stepCount)
# number of learning iterations(total number of mini batches so far)
global_step_op = tf.summary.scalar("GlobalStep", self.global_step)
# current exploration epsilon
epsilonVar = tf.Variable(0., name="epsilon")
epsilonVar_op = tf.summary.scalar("Epsilon", epsilonVar)
summary_vars = [episode_reward, episode_disc_reward, episode_ave_max_q,
stepCount, epsilonVar]
summary_ops = tf.summary.merge([episode_reward_op,
episode_disc_reward_op,
episode_ave_max_q_op,
stepCount_op, epsilonVar_op])
self.writer = tf.summary.FileWriter(os.path.join(self.params['out_dir'], "train"),
self.sess.graph)
self.action_vars = []
self.action_ops = []
for a in range(self.numActions):
action = tf.Variable(0., name="qval_action_" + str(a))
action_op = tf.summary.scalar("Q-Value_Action_"+str(a), action)
self.action_vars.append(action)
self.action_ops.append(action_op)
self.action_ops = tf.summary.merge(self.action_ops)
# initialize all tensorflow variables
# and finalize graph (cannot be modified anymore)
self.sess.run(tf.initialize_all_variables())
self.sess.graph.finalize()
# for debugging, variable values before and after
if self.params['veryveryverbose']:
variables = tf.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope="DQN")
for v in variables:
if v.name.endswith("conv1_2/weights:0"):
print(v.name, self.sess.run(v))
# do we want to use pretrained weights for the dqn
# from the classifier or a pretrained agent?
if self.params['resume']:
pass
elif self.params['useClassNN']:
print("restoring dqn net from classNN: {}".format(
self.params['classNN']))
if "ckpt" in self.params['classNN']:
self.q.saver.restore(
self.sess,
self.params['classNN'])
else:
self.q.saver.restore(
self.sess,
tf.train.latest_checkpoint(self.params['classNN']))
elif self.params['dqnNN'] is not None:
print("restoring dqn net from dqnNN: {}".format(
self.params['dqnNN']))
if "ckpt" in self.params['dqnNN']:
self.q.saver.restore(
self.sess,
self.params['dqnNN'])
else:
self.q.saver.restore(
self.sess,
tf.train.latest_checkpoint(self.params['dqnNN']))
# main network weights are set, now run target init op
self.sess.run(self.q.target_nn_init_op)
if (self.params['agentB'] is not None) and self.params['interEval']:
print("restoring agentB net from {}".format(
self.params['agentB']))
if "ckpt" in self.params['agentB']:
self.qAgentB.saver.restore(
self.agentB_sess,
self.params['agentB'])
else:
self.qAgentB.saver.restore(
self.agentB_sess,
tf.train.latest_checkpoint(self.params['agentB']))
# for debugging, variable values before and after
if self.params['veryveryverbose']:
variables = tf.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope="DQN")
for v in variables:
if v.name.endswith("conv1_2/weights:0"):
print(v.name, self.sess.run(v))
print("initialize classifier network")
if self.params['classNN'] is not None:
print("restoring reward class net from classNN: {}".format(
self.params['classNN']))
if "ckpt" in self.params['classNN']:
self.rewardClassNet.saver.restore(
self.sess,
self.params['classNN'])
else:
self.rewardClassNet.saver.restore(
self.sess,
tf.train.latest_checkpoint(self.params['classNN']))
# load previously trained model
if not self.params['resume'] and self.params['loadModel']:
if "ckpt" in self.params['loadModel']:
self.saver.restore(
self.sess,
self.params['loadModel'])
else:
self.saver.restore(
self.sess,
tf.train.latest_checkpoint(self.params['loadModel']))
printT("Model {} restored.".format(self.params['loadModel']))
# load previously filled replay buffer
if not self.params['resume'] and self.params['loadReplay'] is not None:
self.replay.load(self.params['loadReplay'])
printT("Buffer {} restored.".format(self.params['loadReplay']))
# resume old run
if self.params['resume']:
self.saver.restore(sess, tf.train.latest_checkpoint(
os.path.join(self.params['out_dir'], "models")))
printT("Model {} restored.".format(tf.train.latest_checkpoint(
os.path.join(self.params['out_dir'], "models"))))
# if not self.params['interEval'] :
self.replay.load(os.path.join(self.params['out_dir'],
"replayBuffer"))
printT("Buffer {} restored.".format(self.params['out_dir']))
else:
self.sess.run(self.resetGlStep)
# start immediately for interactive test runs
try:
if os.environ['IS_INTERACTIVE'] == 'true' \
and \
not self.params['sleep']:
self.params['startLearning'] = 1
except KeyError:
pass
# exploration variables
self.startEpsilon = self.params['epsilonStart']
self.endEpsilon = self.params['epsilonStop']
self.epsilon = sess.run(epsilonVar)
# evaluation/learning/exploration
self.evalEp = False
self.learning = True
self.pauseLearning = False
self.pauseExploring = False
self.stopLearning = False
self.stopExploring = False
self.qValFileExpl = open(os.path.join(self.params['out_dir'], "qValExpl.txt"), "a")
self.qValFileEval = open(os.path.join(self.params['out_dir'], "qValEval.txt"), "a")
self.actionLogFile = open(os.path.join(self.params['out_dir'], "actionLog.txt"), "a")
self.episodeLogFile = open(os.path.join(self.params['out_dir'], "episodeLog.txt"), "a")
self.episodeEvalLogFile = open(os.path.join(self.params['out_dir'], "episodeEvalLog.txt"), "a")
# remove stop/termination file
if os.path.exists("stop"):
os.remove(os.path.join(params['out_dir'], "stop"))
# reset
if self.params['onlyLearn']:
sess.run(self.resetEpStep)
sess.run(self.resetAcStep)
if self.params['onlyLearn']:
self.learn()
exit()
# multi-threaded
# learning and exploration threads act independently?
if self.params['async']:
t = threading.Thread(target=self.learnWrap)
t.daemon = True
t.start()
if self.params['evaluation']:
# evaluate this often
evalEpReward = 0
evalEpDiscReward = 0
evalEpStepCount = 0
evalIntv = 25
evalCnt = 40
evalOc = 0
# start exploration
self.episode = sess.run(self.episode_step)
if self.params['verbose']:
printT("start Episode: {}".format(self.episode))
acs = sess.run(self.action_step)
if self.params['verbose']:
printT("start action step: {}".format(acs))
self.globActStep = acs
gacs = sess.run(self.global_action_step)
if self.params['verbose']:
printT("start global action step: {}".format(gacs))
self.gac = gacs
while self.episode<self.params['numEpisodes']:
self.episode = sess.run(self.episode_step)
sess.run(self.increment_ep_step_op)
if self.params['verbose']:
print ("STARTING NEW EPISODE:"+ str(self.episode))
# do we want to explore/gather samples?
while self.stopExploring:
time.sleep(1)
# evaluation episode (no exploration?)
if self.params['evaluation'] and self.episode % (evalIntv+evalCnt) < evalCnt:
self.evalEp = True
if self.episode % (evalIntv+evalCnt) == 0:
if self.params['verbose']:
printT("Start Eval Episodes!")
evalOc += 1
elif self.params['onlyLearn'] or \
(self.params['limitExploring'] is not None \
and self.replay.size() >= self.params['limitExploring']):
self.pauseExploring = True
self.evalEp = False
else:
self.evalEp = False
# reset simulation/episode state
terminal = False
ep_reward = 0
ep_disc_reward = 0
ep_ave_max_q = 0
self.inEpStep = 0
if self.params['interEval']:
self.evalMethod = self.evalMethods[self.episode % (len(self.evalMethods))]
# reset environment
# set start state and allowed actions
nextState, allowedActions, terminal = self.env.reset(self.episode, self.evalEp, globActStep=self.globActStep)
allowedV=self.calcAllowedActionsVector(allowedActions)
if nextState is None:
# unable to get state
# restart with new episode
continue
lastTime=time.time()
# step forward until terminal
while not terminal:
if os.path.exists(os.path.join(params['out_dir'], "stop")):
self.terminate()
if self.params['async']:
if not t.isAlive():
printT("alive {}".format(t.isAlive()))
printT("Exception in user code:")
printT('-'*60)
traceback.print_exc(file=sys.stdout)
printT('-'*60)
sys.stdout.flush()
t.join(timeout=None)
os._exit(-1)
# state <- nextstate
state = nextState
# choose action
# random or according to dqn (depending on epsilon)
self.inEpStep += 1
if not self.evalEp:
sess.run(self.increment_ac_step_op)
self.globActStep += 1
sess.run(self.increment_gac_step_op)
self.gac += 1
epsStep=max(0,self.globActStep-(self.params['startLearning'] /4.0) )
tmp_step = min(epsStep, self.annealSteps)
self.epsilon = (self.startEpsilon - self.endEpsilon) * \
(1 - tmp_step / self.annealSteps) + \
self.endEpsilon
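                    # Linear epsilon annealing, e.g. (assumed values) with
                    # startEpsilon=1.0, endEpsilon=0.1 and annealSteps=1000,
                    # step 500 gives (1.0 - 0.1) * (1 - 500/1000) + 0.1 = 0.55.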
action = self.getActionID(state, allowedV)
if self.evalMethod=="fixed":
action=self.params['fixedAction']
# We choose a random action in these cases
rnm=np.random.rand()
if self.params['veryveryverbose']:
printT("rnm:"+str(rnm)+ " self.epsilon:"+ str(self.epsilon)+" |self.params['randomEps']:"+str(self.params['randomEps'])+" e:"+str(self.episode))
if (self.evalMethod == "random") or (not self.pauseExploring) and (not self.evalEp) and (self.episode < self.params['randomEps'] or rnm < self.epsilon):
if self.params['verbose']:
printT("randomly selecting action")
action = np.random.choice(allowedActions)
if self.params['verbose']:
printT("\nEpisode: {}, Step: {}, Time:{}, Next action (e-greedy {}): {}".format(
self.episode,
self.globActStep,
time.ctime(),
self.epsilon,
action))
else: # We let the DQN choose the action
if self.params['verbose']:
printT("Greedyly selecting action:")
if self.params['verbose']:
printT("\nEpisode: {}, Step: {}, Time:{}, Next action: {}".format(
self.episode, self.globActStep, time.ctime(), action))
# perform selected action and
# get new state, reward, and termination-info
nextState, reward, terminal, terminalP, allowedActions = self.env.act(action, self.episode, self.inEpStep , self.globActStep, self.evalEp)
if self.params['veryveryverbose']:
print('ACTIONLOG:',str(self.globActStep),str(self.episode), str(self.inEpStep), action, self.evalEp, terminal, terminalP, reward, self.epsilon, self.evalMethod)
self.actionLogFile.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), str(self.globActStep),str(self.episode), str(self.inEpStep),
action, self.evalEp, terminal, terminalP, reward, self.epsilon, self.evalMethod))
self.actionLogFile.flush()
allowedV=self.calcAllowedActionsVector(allowedActions)
# accumulate episode reward
ep_disc_reward += pow(self.params['gamma'], self.inEpStep-1) * reward
ep_reward += reward
if (self.evalMethod == "agent") and not self.evalEp and not self.pauseExploring:
self.insertSamples(np.copy(state),
action, reward, terminal,
np.copy(nextState),
np.copy(allowedV))
# do logging inside of one episode
# we do not want to lose any data
if self.params['storeModel'] and \
((self.globActStep+1) % self.modelStoreIntv) == 0:
logDqn.logModel(self)
if self.params['storeBuffer'] and \
((self.globActStep+1) % self.bufferStoreIntv) == 0:
logDqn.logBuffer(self)
# if training/exploration not decoupled, do one learning step
if not self.params['async']:
for i in range(8):
self.learn()
sys.stdout.flush()
cTime=time.time()
usedTime=cTime-lastTime
# do we want to pause exploration thread?
# (to simulate slower stm)
if not self.pauseExploring and \
not self.evalEp and \
self.params['sleep'] and \
self.params['async'] and \
(self.replay.size() >= self.params['startLearning']) and \
(self.replay.size() >= self.params['miniBatchSize']):
if self.params['sleepA'] is not None:
sleepingTime=self.params['sleepA'] - usedTime
if sleepingTime >0:
time.sleep(sleepingTime)
else:
time.sleep(60)
cTime=time.time()
usedTime=cTime-lastTime
lastTime=cTime
self.mainLoopTimeFile.write(str(cTime)+" "+str(usedTime)+ "\n")
self.mainLoopTimeFile.flush()
# terminate episode after x steps
# even if no good state has been reached
if self.inEpStep == self.params['stepsTillTerm']:
self.env.switchApproachArea()
break
# end episode
# otherwise store episode summaries and print log
if self.evalEp:
evalEpReward += ep_reward
evalEpDiscReward += ep_disc_reward
evalEpStepCount += self.inEpStep
if self.episode % (evalIntv+evalCnt) == (evalCnt-1):
summary_str = self.sess.run(eval_sum_op, feed_dict={
eval_sum_vars[0]: evalEpReward/float(evalCnt),
eval_sum_vars[1]: evalEpDiscReward/float(evalCnt),
eval_sum_vars[2]: evalEpStepCount/float(evalCnt)
})
self.writer.add_summary(summary_str, evalOc-1)
evalEpReward = 0.0
evalEpDiscReward = 0.0
evalEpStepCount = 0.0
if self.params['veryveryverbose']:
printT("step count-eval: {}".format(self.inEpStep))
if self.params['veryverbose']:
printT('Time: {} | Reward: {} | Discounted Reward: {} | Eval-Episode {}'.
format(time.ctime(), ep_reward, ep_disc_reward, self.episode))
self.episodeEvalLogFile.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), self.episode,
ep_reward, ep_disc_reward, self.inEpStep, self.epsilon))
self.episodeEvalLogFile.flush()
else:
if self.params['evaluation']:
et = self.episode - (evalOc * evalCnt)
else:
et = self.episode
summary_str = self.sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_disc_reward,
summary_vars[2]: ep_ave_max_q / float(max(self.inEpStep,1)),
summary_vars[3]: self.inEpStep,
summary_vars[4]: self.epsilon
})
self.writer.add_summary(summary_str, et)
self.writer.flush()
if self.params['veryveryverbose']:
printT("step count: {}".format(self.inEpStep))
if self.params['veryveryverbose']:
printT('Time: {} | Reward: {} | Discounted Reward: {} | Episode {} | Buffersize: {}'.
format(time.ctime(), ep_reward, ep_disc_reward, self.episode,
self.replay.size()))
self.episodeLogFile.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), self.episode,
ep_reward, ep_disc_reward, self.inEpStep, self.epsilon, self.evalMethod))
self.episodeLogFile.flush()
# log some stuff
if self.params['storeModel'] and \
((self.episode+1) % self.modelStoreIntv) == 0:
logDqn.logModel(self)
if self.params['storeBuffer'] and \
((self.episode+1) % self.bufferStoreIntv) == 0:
logDqn.logBuffer(self)
statsIntv = 100
sys.stdout.flush()
# stop learning after last episode
self.learning = False
sys.stdout.flush()
def terminate(self):
printT("terminating...........")
sys.stdout.flush()
self.logStuff()
sys.stdout.flush()
printT("EXIT NOW!")
sys.stdout.flush()
exit(0)
def learnWrap(self):
try:
self.learn()
except:
printT("learn wrap failed")
printT("Exception in user code:")
printT('-'*60)
traceback.print_exc(file=sys.stdout)
printT('-'*60)
sys.stdout.flush()
os._exit(-1)
def learn(self):
y_batch = np.zeros((self.params['miniBatchSize'], 1))
tmp = np.zeros((self.params['miniBatchSize'], self.numActions))
lastTime=time.time()
count=0
while self.learning:
            # Throttling to give the other thread a chance to run
count+=1
cTime=time.time()
loopTime=cTime-lastTime
lastTime=cTime
self.learnLoopTimeFile.write(str(cTime)+" "+str(loopTime)+ "\n")
self.learnLoopTimeFile.flush()
if self.stopLearning:
time.sleep(5.0)
continue
if self.replay.size() < self.params['startLearning'] or \
self.replay.size() < self.params['miniBatchSize'] or \
self.evalEp:
if self.params['async']:
time.sleep(5.0)
continue
else:
return
s_batch, a_batch, r_batch, t_batch, ns_batch, allowed_batch = \
self.replay.sample_batch(self.params['miniBatchSize'])
if self.params['doubleDQN']:
qValsNewState = self.estimate_ddqn(ns_batch, allowed_batch, p=False, mem=tmp)
else:
qValsNewState = self.predict_target_nn(ns_batch)
for i in range(self.params['miniBatchSize']):
if t_batch[i]:
y_batch[i] = r_batch[i]
else:
y_batch[i] = r_batch[i] + self.params['gamma'] * qValsNewState[i]
gS, qs, delta = self.update(s_batch, a_batch, y_batch)
if self.params['noHardResetDQN']:
self.update_targets()
elif (gS+1) % self.params['resetFreq'] == 0:
self.update_targets()
if not self.params['async']:
return
if self.params['onlyLearn']:
if (gS+1) % 1000 == 0:
logDqn.logModel(self)
# Returns vector of length 'self.numActions' containing
# Zeros for allowed actions
# '-inf' for forbidden actions
def calcAllowedActionsVector(self, allowedActions):
allowedV=np.zeros(shape=(self.numActions))
        allowedV[:] = float("-inf")  # init all actions as forbidden
for i in allowedActions:
allowedV[i]=0 # mark actions as allowed
return allowedV
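    # e.g. numActions=4 and allowedActions=[0, 2] give [0., -inf, 0., -inf];
    # adding this vector to the q values masks forbidden actions out of argmax.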
# get action id for max q
def getActionID(self, state, allowedActionsV):
if self.params['interEval'] and self.evalMethod == 'agentB':
if self.params['verbose']:
print("PREDICTING WITH AGENTB:")
qs = self.qAgentB.run_predict(state)
print(qs)
else:
if self.params['verbose']:
print("PREDICTING WITH AGENT:")
qs = self.q.run_predict(state)
if self.evalEp:
self.qValFileEval.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), str(self.globActStep),str(self.episode), str(self.inEpStep), qs[0], allowedActionsV))
self.qValFileEval.flush()
else:
self.qValFileExpl.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), str(self.globActStep),str(self.episode), str(self.inEpStep), qs[0], allowedActionsV))
self.qValFileExpl.flush()
var_dict = {}
for a in range(self.numActions):
var_dict[self.action_vars[a]] = qs[0][a]
summary_str = self.sess.run(self.action_ops, feed_dict=var_dict)
self.writer.add_summary(summary_str, self.gac)
self.writer.flush()
printT("Q-values:" + str(qs))
qs = qs + allowedActionsV
return np.argmax(qs, axis=1)[0]
# update dqn main network
def update(self, states, actionIDs, targets):
step, out, delta, loss = self.q.run_train(states, actionIDs, targets)
# network diverged?
if np.isnan(loss):
printT("ABORT: NaN")
sys.stdout.flush()
os._exit(-1)
return step, out, delta
# update dqn target network
def update_targets(self):
self.q.run_update_target_nn()
# estimate q values using double dqn
# get values of target network for actions where main network is max
def estimate_ddqn(self, states, allowedActionsV, p=False, mem=None):
qs = self.q.run_predict(states)
if p:
if self.params['veryveryverbose']:
print("allowedActionsV.shape"+ str(allowedActionsV.shape))
print("qs.shape"+ str(qs.shape))
qs+=allowedActionsV # add '-inf' to the q values of forbidden actions
if p:
if self.params['veryveryverbose']:
print(states)
print(qs.shape)
print(states.shape)
printT("qs: {}".format(qs))
maxA = np.argmax(qs, axis=1)
qs = self.q.run_predict_target(states)
mem.fill(0)
mem[np.arange(maxA.size), maxA] = 1
mem = mem * qs
mem = np.sum(mem, axis=1)
return mem
# predict dqns
def predict_target_nn(self, states):
qs = self.q.run_predict_target(states)
return np.max(qs, axis=1)
def predict_nn(self, states):
qs = self.q.run_predict(states)
return np.max(qs, axis=1)
# insert samples into replay buffer
def insertSamples(self, stateScaled, action, reward, terminal,
newStateScaled, allowedActionsV):
stateScaled.shape = (stateScaled.shape[1],
stateScaled.shape[2],
stateScaled.shape[3])
newStateScaled.shape = (newStateScaled.shape[1],
newStateScaled.shape[2],
newStateScaled.shape[3])
states=(stateScaled,np.rot90(stateScaled, 2),np.fliplr(stateScaled), np.flipud(stateScaled) )
newStates=(newStateScaled,np.rot90(newStateScaled, 2),np.fliplr(newStateScaled), np.flipud(newStateScaled) )
if(self.params['fullAugmentation']):
self.lock.acquire()
for i in range(4):
for j in range(4):
self.replay.add(states[i], action, reward, terminal, allowedActionsV,
newStates[j])
self.lock.release()
else:
self.lock.acquire()
self.replay.add(stateScaled, action, reward, terminal, allowedActionsV,
newStateScaled)
self.replay.add(
np.ascontiguousarray(np.rot90(stateScaled, 2)),
action, reward, terminal, allowedActionsV,
np.ascontiguousarray(np.rot90(newStateScaled, 2)))
self.replay.add(
np.ascontiguousarray(np.fliplr(stateScaled)),
action, reward, terminal, allowedActionsV,
np.ascontiguousarray(np.fliplr(newStateScaled)))
self.replay.add(
np.ascontiguousarray(np.flipud(stateScaled)),
action, reward, terminal, allowedActionsV,
np.ascontiguousarray(np.flipud(newStateScaled)))
self.lock.release()
# if we want to stop if buffer is full
# or limit exploration
if self.pauseExploring == False and \
self.replay.size() == self.replayBufferSize:
if self.params['termAtFull']:
printT("Buffer FULL!")
self.logStuff()
self.pauseExploring = True
# exit()
elif self.pauseExploring == False and \
self.params['limitExploring'] is not None and \
self.replay.size() >= self.params['limitExploring']:
if self.params['termAtFull']:
printT("Buffer FULL!")
self.logStuff()
self.pauseExploring = True
def logStuff(self):
logDqn.logModel(self)
logDqn.logBuffer(self)
if __name__ == "__main__":
np.set_printoptions(linewidth=np.inf)
# load parameters from command line and config file
params = parseNNArgs.parseArgs()
if params['onlyLearn'] and \
not params['loadReplay'] and \
not params['loadModel']:
print("invalid parameters! onlyLearn only avaiable in combination with loadReplay and loadModel")
exit(-232)
params['type'] = "agent"
# resuming previous run?
if params['resume']:
out_dir = os.getcwd()
print("resuming... {}".format(out_dir))
newRun = False
else:
out_dir = outDir.setOutDir(params)
# copy all scripts to out_dir (for potential later reuse)
copy_tree(os.getcwd(), out_dir)
os.makedirs(os.path.join(out_dir, "models"))
os.makedirs(os.path.join(out_dir, "imgs"))
os.makedirs(os.path.join(out_dir, "imgsCollect"))
print("new start... {}".format(out_dir))
config = json.dumps(params)
with open(os.path.join(out_dir, "config"), 'w') as f:
f.write(config)
newRun = True
params['out_dir'] = out_dir
print("Results/Summaries/Logs will be written to: {}\n".format(out_dir))
#pipe log to file if not in interactive mode
interactive=False
try:
if os.environ['IS_INTERACTIVE'] == 'true':
interactive=True
except KeyError:
pass
if not interactive:
print("LogFile="+ os.path.join(out_dir, "log"))
sys.stdout.flush()
logFile = open(os.path.join(out_dir, "log"), 'a')
sys.stdout = sys.stderr = logFile
if params['startServer']:
p = Process(target=envServer.main, args=(params,))
p.start()
time.sleep(15)
# add paths to load classifier later on (reward calculation)
if params['classNN']:
if "ckpt" not in params['classNN']:
sys.path.insert(1, params['classNN'])
else:
sys.path.insert(1, os.path.dirname(params['classNN']))
try:
from classifierEval import ClassConvNetEval
except:
print("Failed to import form 'classifierEval.'")
print("Maybe the path to your classifier net is specified wrong?")
print(str(os.path.dirname(params['classNN'])))
exit(-1)
# start tensorflow session and start learning
if params['noGPU']:
tfconfig = tf.ConfigProto(
device_count = {'GPU': 0}
)
else:
tfconfig = None
if params['agentB'] is not None:
agentB_sess_ = tf.Session()
else:
agentB_sess_= None
with tf.Session(config=tfconfig) as sess:
rl = dqnRunner(sess, params, out_dir=out_dir, agentB_sess = agentB_sess_)
rl.run()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#-*- encoding: UTF-8 -*-
def main():
    try:
        nota1 = float(input("1ª nota: "))
        nota2 = float(input("2ª nota: "))
    except ValueError:
        # without this early return, nota1/nota2 would be undefined below
        print("Apenas valores numéricos devem ser informados!")
        return
if(nota1 < 0 or nota1 > 10 or nota2 < 0 or nota2 > 10):
print("Notas inválidas!")
else:
print(f"1ª Nota: {nota1}\n2ª Nota: {nota2}\nMédia aritmética simples: {(nota1 + nota2)/2}")
if(__name__ == "__main__"):
main()
|
nilq/baby-python
|
python
|
from . import util
ut = util.Util()
reload(util)
class ViewerMarlin():
def open_file(self, path):
with open(path) as f:
l = f.readlines()
# print(type(l))
# print(len(l))
# print(l)
return l
def get_value_move(self, str_):
### Split Elements
        ### Remove newline characters
        str_ = str_.replace("\n", "")
### Remove Comments
if ";" in str_:
str_rm_comment = str_.split(";")
new_str = str_rm_comment[0]
else:
new_str = str_
### Gcode (per Line)
# print(new_str)
### Split Space
elements = new_str.split()
### init
xx = None
yy = None
zz = None
ee = None
for i in xrange(len(elements)):
elm = elements[i]
### Get Value
if ("X" in elm):
tmp_x = elm.split("X")
xx = float(tmp_x[1])
elif ("Y" in elm):
tmp_y = elm.split("Y")
yy = float(tmp_y[1])
elif ("Z" in elm):
tmp_z = elm.split("Z")
zz = float(tmp_z[1])
elif ("E" in elm):
tmp_e = elm.split("E")
ee = float(tmp_e[1])
return [xx, yy, zz, ee]
def gcode_operate_move(self, gcode_line):
none_list = [None, None, None, None]
### Move
if ("G0" in gcode_line) or \
("G1" in gcode_line) or \
("G00" in gcode_line) or \
("G01" in gcode_line) or \
("G92 E0" in gcode_line):
### get position
return self.get_value_move(gcode_line)
        ### Comment Out
        elif gcode_line.startswith(";") or (gcode_line == "\n"):
return none_list
### Setting G
elif ("G4" in gcode_line) or \
("G04" in gcode_line) or \
("G21" in gcode_line) or \
("G28" in gcode_line) or \
("G90" in gcode_line) or \
("G91" in gcode_line) or \
("G92" in gcode_line):
return none_list
### Setting M
elif ("M82" in gcode_line) or \
("M84" in gcode_line) or \
("M104" in gcode_line) or \
("M106" in gcode_line) or \
("M107" in gcode_line) or \
("M109" in gcode_line) or \
("M140" in gcode_line) or \
("M190" in gcode_line) or \
("M204" in gcode_line) or \
("M205" in gcode_line):
return none_list
### Setting T
elif ("T0" in gcode_line) or \
("T1" in gcode_line):
return none_list
else:
# return none_list
return "bug!"
def gcode_to_array(self, path):
### open gcode
gcode = self.open_file(path)
        ### Get Values from gcode
values = []
for i in xrange(len(gcode)):
gcode_line = gcode[i]
### XYZE
elements = self.gcode_operate_move(gcode_line)
## DEBUG ALL
# print(i, gcode_line)
### DEBUG bug
if (elements == "bug!"):
print(i, gcode_line)
## DEBUG
values.append(elements)
### Padding Previous Value(None)
values_zip = ut.zip_matrix(values)
# print(len(values_zip))
new_values = []
for j in xrange(len(values_zip)):
list_ = values_zip[j]
list_pad = ut.padding_previous_value(list_)
new_values.append(list_pad)
gcode_values = ut.zip_matrix(new_values)
# print(len(values))
# print(len(gcode_values), len(gcode_values[0]))
return gcode_values
def segment_extrude(self, xyze):
### Segment Print / Travel
### https://docs.google.com/spreadsheets/d/1S4SQ-NT09Nh8sb3Lg6FSauKB1rZPMwLSDjvnrKerXFs/edit?usp=sharing
array_seg = []
list_seg = []
for j in xrange(len(xyze)):
xxx, yyy, zzz, eee = xyze[j]
item = [xxx, yyy, zzz]
# print(j)
# print(j, xyze[j])
### Index[0]
if (j == 0):
x1, y1, z1, e1 = xyze[j]
x2, y2, z2, e2 = xyze[j + 1]
bool_b = e1 < e2
if (bool_b == True):
list_seg = []
list_seg.append(item)
### Index[0] - Index[Last - 1]
elif (j > 0) and (j < (len(xyze) - 1)):
x0, y0, z0, e0 = xyze[j - 1]
x1, y1, z1, e1 = xyze[j]
x2, y2, z2, e2 = xyze[j + 1]
bool_a = e0 < e1
bool_b = e1 < e2
if (bool_a == False) and (bool_b == True):
list_seg = []
list_seg.append(item)
elif (bool_a == True) and (bool_b == True):
list_seg.append(item)
elif (bool_a == True) and (bool_b == False):
list_seg.append(item)
array_seg.append(list_seg)
elif (bool_a == False) and (bool_b == False):
pass
else:
print("Error!!")
### Index[Last]
elif (j == (len(xyze) - 1)):
x0, y0, z0, e0 = xyze[j - 1]
x1, y1, z1, e1 = xyze[j]
bool_a = e0 < e1
if (bool_a == True):
list_seg.append(item)
array_seg.append(list_seg)
# print(array_out)
return array_seg
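    # Worked example (a sketch): with E values [0, 1, 2, 2, 3] the pairwise
    # comparisons close one printed polyline over indices 0-2 (E rising, then
    # flat) and open a second one over indices 3-4 where E rises again.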
def remove_invalid_polylines(self, array_seg):
### Remove Invalid Polylines (Remove Same Element as the Previous One)
layers = []
for k in xrange(len(array_seg)):
tmp_layer = array_seg[k]
tmp_removed = ut.remove_previous_elements(tmp_layer)
if len(tmp_removed) != 1:
layers.append(tmp_removed)
return layers
def draw_path(self, values_4):
### Remove Same Element as the Previous One
xyze = ut.remove_previous_elements(values_4)
### print(len(values_4), len(xyze))
### Segment Print / Travel
array_seg = self.segment_extrude(xyze)
### Remove Invalid Polylines (Remove Same Element as the Previous One)
layers = self.remove_invalid_polylines(array_seg)
"""
### Draw All Path
pts = []
for i in xrange(len(xyze)):
x, y, z, e = values_4[i]
pt = [x, y, z]
pts.append(pt)
"""
return layers
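# A minimal usage sketch (assumed, Python 2 style to match the xrange/reload
# calls above); "model.gcode" is a hypothetical input file:
#   viewer = ViewerMarlin()
#   xyze = viewer.gcode_to_array("model.gcode")
#   layers = viewer.draw_path(xyze)  # list of polylines, one per extrusion run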
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tofbot, a friendly IRC bot.
# You may redistribute it under the Simplified BSD License.
# If we meet some day, and you think this stuff is worth it,
# you can buy us a beer in return.
#
# Copyright (c) 2011,2015 Etienne Millon <etienne.millon@gmail.com>
# Martin Kirchgessner <martin.kirch@gmail.com>
# Nicolas Dumazet <nicdumz.commits@gmail.com>
# Quentin Sabah <quentin.sabah@gmail.com>
# Christophe-Marie Duquesne <chm.duquesne@gmail.com>
"""
./bot.py [options] [legacy-arguments]
Legacy-arguments:
NICK CHANNEL [CHANNEL...]
Don't prepend a # to chan names
Tofbot will connect to freenode.net
"""
from datetime import datetime
from irc import Bot
import time
import random
import sys
import os
import plugins
import types
from toflib import *
from toflib import _simple_dispatch, _simple_conf_dispatch, urls_in
import re
from optparse import OptionParser
import json
import atexit
import socket
import traceback
import plugins.euler
import plugins.lolrate
import plugins.donnezmoi
import plugins.jokes
import plugins.twitter
import plugins.dassin
import plugins.eightball
import plugins.sed
import plugins.rick
import plugins.expand
import plugins.like
import plugins.ponce
import plugins.lag
random.seed()
class AutosaveEvent(CronEvent):
def __init__(self, bot, filename):
CronEvent.__init__(self, None)
self.filename = filename
self.bot = bot
def fire(self):
self.bot.save(self.filename)
class Tofbot(Bot):
# Those attributes are published and can be changed by irc users
# value is a str to object converter. It could do sanitization:
# if value is incorrect, raise ValueError
_mutable_attributes = {
"TGtime":int,
"memoryDepth":int
}
def __init__(self, nick=None, name=None, channels=None, password=None, debug=True):
Bot.__init__(self, nick, name, channels, password)
self.joined = False
self.autoTofadeThreshold = 98
self.riddleMaxDist = 2
self.debug = debug
self.TGtime = 5
self.pings = {}
self.memoryDepth = 20
self.lolRateDepth = 8
self.msgMemory = []
self.cron = Cron()
self.plugins = self.load_plugins()
self.startMsgs = []
self.msgHandled = False
def run(self, host=None):
        if host is None and not hasattr(self, 'host'):
            raise Exception("run: no host set or given")
        if self.nick is None:
            raise Exception("run: no nick set")
        if self.name is None:
            raise Exception("run: no name set")
self.host = host or self.host
Bot.run(self, self.host)
def load_plugins(self):
d = os.path.dirname(__file__)
plugindir = os.path.join(d, 'plugins')
plugin_instances = {}
for m in dir(plugins):
if type(getattr(plugins,m)) != types.ModuleType:
continue
plugin = getattr(plugins, m)
for n in dir(plugin):
c = getattr(plugin, n)
if type(c) not in [types.ClassType, types.TypeType]:
continue
name = c.__name__
if name.startswith('Plugin'):
instance = c(self)
plugin_name = name[6:].lower()
plugin_instances[plugin_name] = instance
return plugin_instances
# line-feed-safe
def msg(self, chan, msg):
self.msgHandled = True
for m in msg.split("\n"):
Bot.msg(self, chan, m)
def log(self, msg):
if self.debug:
print(msg)
def try_join(self, args):
if (args[0] in ['End of /MOTD command.',
"This server was created ... I don't know"]
):
for chan in self.channels:
self.write(('JOIN', chan))
self.joined = True
def dispatch(self, origin, args):
self.log("o=%s n=%s a=%s" % (origin.sender, origin.nick, args))
is_config = False
senderNick = origin.nick
commandType = args[1]
# if command type is 'BOTCONFIG', bypass the try_join
# because we are configuring the bot before any
# connection.
if commandType != 'BOTCONFIG':
if not self.joined:
self.try_join(args)
return
else:
            is_config = True
args.remove('BOTCONFIG')
commandType = args[1]
if commandType == 'JOIN':
for m in self.startMsgs:
self.msg(self.channels[0], m)
self.startMsgs = []
for p in self.plugins.values():
p.on_join(args[0], senderNick)
elif commandType == 'KICK' and args[3] == self.nick:
reason = args[0]
chan = args[2]
self.write(('JOIN', chan))
for p in self.plugins.values():
p.on_kick(chan, reason)
elif commandType == 'PRIVMSG':
msg_text = args[0]
msg = msg_text.split(" ")
cmd = msg[0]
chan = args[2]
self.pings[senderNick] = datetime.now()
if is_config == False:
self.cron.tick()
if len(cmd) == 0:
return
urls = urls_in(msg_text)
self.msgHandled = False
            # We only allow one plugin to answer: the first plugin that marks
            # the message as handled wins.
for p in self.plugins.values():
if not self.msgHandled:
p.handle_msg(msg_text, chan, senderNick)
for url in urls:
p.on_url(url)
if chan == self.channels[0] and cmd[0] != '!':
self.msgMemory.append("<" + senderNick + "> " + msg_text)
if len(self.msgMemory) > self.memoryDepth:
del self.msgMemory[0]
if len(cmd) == 0 or cmd[0] != '!':
return
cmd = cmd[1:]
chan = None
if len(self.channels) == 0:
chan = 'config'
else:
chan = self.channels[0]
if cmd in _simple_dispatch:
act = self.find_cmd_action("cmd_" + cmd)
act(chan, msg[1:], senderNick)
elif is_config and (cmd in _simple_conf_dispatch):
act = self.find_cmd_action("confcmd_" + cmd)
act(chan, msg[1:], senderNick)
elif cmd == 'context':
self.send_context(senderNick)
elif cmd == 'help':
self.send_help(senderNick)
elif commandType == 'PING':
self.log('PING received in bot.py')
elif commandType == 'ERROR':
traceback.print_exc(file=sys.stdout)
else: # Unknown command type
            self.log('Unknown command type: %s' % commandType)
def find_cmd_action(self, cmd_name):
targets = self.plugins.values()
targets.insert(0, self)
for t in targets:
if (hasattr(t, cmd_name)):
action = getattr(t, cmd_name)
return action
def nop(self, chan, args):
pass
return nop
def safe_getattr(self, key):
if key not in self._mutable_attributes:
return None
if not hasattr(self, key):
return "(None)"
else:
return str(getattr(self, key))
    def safe_setattr(self, key, value):
        try:
            converter = self._mutable_attributes.get(key)
            if converter is None:
                return False
            value = converter(value)
            setattr(self, key, value)
            return True
        except ValueError:
            return False
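    # Example (sketch): "!set memoryDepth 50" reaches cmd_set, which calls
    # safe_setattr("memoryDepth", "50"); int("50") succeeds, so the
    # attribute is updated. "!set memoryDepth abc" raises ValueError in the
    # converter and the change is refused.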
@confcmd(1)
def confcmd_chan(self, chan, args):
new_chan = args[0]
if self.channels.count(new_chan) == 0:
self.channels.append(new_chan)
@confcmd(1)
def confcmd_server(self, chan, args):
host = args[0].strip()
self.host = host
@confcmd(1)
def confcmd_port(self, chan, args):
port = int(args[0].strip())
self.port = port
@confcmd(1)
def confcmd_nick(self, chan, args):
nick = args[0].strip()
self.nick = nick
self.user = nick
@confcmd(1)
def confcmd_name(self, chan, args):
name = args[0].strip()
self.name = name
@confcmd(1)
def confcmd_loadchanges(self, chan, args):
filename = args[0].strip()
if not os.path.exists(filename):
return
with open(filename) as f:
changes = f.readlines()
self.startMsgs += changes
@cmd(1)
def cmd_ping(self, chan, args):
"Find when X was last online"
who = args[0]
if who in self.pings:
self.msg(chan,
"Last message from %s was on %s (btw my local time is %s)" %
(who, self.pings[who].__str__(), datetime.now().__str__() ))
else:
            self.msg(chan, "I haven't seen any message from " + who)
@cmd(1)
def cmd_get(self, chan, args):
"Retrieve a configuration variable's value"
key = args[0]
value = self.safe_getattr(key)
if value is None:
self.msg(chan, "Ne touche pas à mes parties privées !")
else:
self.msg(chan, "%s = %s" % (key, value))
@cmd(2)
def cmd_set(self, chan, args):
"Set a configuration variable's value"
key = args[0]
value = args[1]
ok = self.safe_setattr(key, value)
if not ok:
self.msg(chan, "N'écris pas sur mes parties privées !")
def send_context(self, to):
"Gives you last messages from the channel"
intro = "Last " + str(len(self.msgMemory)) + " messages sent on " + self.channels[0] + " :"
self.msg(to, intro)
for msg in self.msgMemory:
self.msg(to, msg)
def send_help(self, to):
"Show this help message"
maxlen = 1 + max(map(len, _simple_dispatch))
self.msg(to, "Commands should be entered in the channel or by private message")
self.msg(to, '%*s - %s' % (maxlen, "!help", self.send_help.__doc__))
self.msg(to, '%*s - %s' % (maxlen, "!context", self.send_context.__doc__))
for cmd in _simple_dispatch:
f = self.find_cmd_action("cmd_" + cmd)
self.msg(to, '%*s - %s' % (maxlen, "!"+cmd, f.__doc__))
self.msg(to, "you can also !get or !set " + ", ".join(self._mutable_attributes.keys()))
self.msg(to, "If random-tofades are boring you, enter 'TG " + self.nick + "' (but can be cancelled by GG " + self.nick + ")")
def load(self, filename):
try:
with open(filename) as f:
state = json.load(f)
if state['version'] != 1:
return False
for name, plugin_state in state['plugins'].items():
try:
plugin = self.plugins[name]
plugin.load(plugin_state)
except KeyError:
pass
except IOError as e:
print "Can't load state. Error: ", e
def save(self, filename):
try:
with open(filename, 'w') as f:
state = { 'version': 1
, 'plugins': {}
}
for name, plugin in self.plugins.items():
plugin_state = plugin.save()
state['plugins'][name] = plugin_state
json.dump(state, indent=4, fp=f)
except IOError as e:
print "Can't save state. Error: ", e
def __main():
class FakeOrigin:
pass
def bot_config(b, cmd):
o = FakeOrigin
o.sender = 'bot_config'
o.nick = 'bot_config'
b.dispatch(o, [cmd.strip(), 'BOTCONFIG','PRIVMSG','#bot_config'])
# default timeout for urllib2, in seconds
socket.setdefaulttimeout(15)
# option parser
parser = OptionParser(__doc__)
parser.add_option("-x","--execute", dest="cmds",action="append",help="File to execute prior connection. Can be used several times.")
parser.add_option("-s","--host", dest="host",help="IRC server hostname")
parser.add_option("-p","--port", dest="port",help="IRC server port")
parser.add_option("-k","--nick", dest="nick",help="Bot nickname",default='Tofbot')
parser.add_option("-n","--name", dest="name",help="Bot name",default='Tofbot')
parser.add_option("-c","--channel",dest="channel",action="append",help="Channel to join (without # prefix). Can be used several times.")
parser.add_option("--password", dest="password")
parser.add_option("-d","--debug", action="store_true", dest="debug", default=False)
    (options, args) = parser.parse_args()
# legacy arguments handled first
# (new-style arguments prevail)
if len(args) > 0:
options.nick = options.nick or args[0]
options.channel = options.channel or []
for chan in args[1:]:
if options.channel.count(chan) == 0:
options.channel.append(chan)
# initialize Tofbot
# using command-line arguments
b = Tofbot(options.nick, options.name, options.channel, options.password, options.debug)
# execute command files
# these commands may override command-line arguments
options.cmds = options.cmds or []
for filename in options.cmds:
cmdsfile = open(filename,'r')
for line in cmdsfile:
bot_config(b, line)
# Restore serialized data
state_file = "state.json"
b.load(state_file)
# Perform auto-save periodically
autosaveEvent = AutosaveEvent(b, state_file)
b.cron.schedule(autosaveEvent)
# ... and save at exit
@atexit.register
def save_atexit():
print("Exiting, saving state...")
b.save(state_file)
print("Done !")
# default host when legacy-mode
    if options.host is None and len(options.cmds) == 0 and len(args) > 0:
options.host = 'irc.freenode.net'
b.run(options.host)
if __name__ == "__main__":
try:
__main()
except Exception, ex:
import traceback
dumpFile = open("_TOFDUMP.txt","w")
traceback.print_exc(None, dumpFile)
dumpFile.close()
raise ex
# Copyright 2016 Peter Dymkar Brandt All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
PortfolioReport generates visualizations of past performance of a portfolio of
financial instruments.
Example:
# See historical_data documentation for more info.
data = historical_data.HistoricalData(historical_data_config,
tor_scraper_config)
daily = data.get_daily()
if daily is None:
return
print portfolio_report.PortfolioReport({
'subject_format': 'Portfolio Report -- {}',
}, daily).get_report()
"""
import io
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import PIL
import plot_utils
class PortfolioReport(object):
"""Contains all functionality for the portfolio_report module.
"""
_FILENAME = 'report.png'
_STYLE_SHEET = 'ggplot'
_TEXT_COLOR = (.3, .3, .3, 1.0)
_BAR_ALPHA = .67
_TITLE_DOLLAR_FORMAT = '${:,.2f}'
_REPORT_COLS = 2
def __init__(self, portfolio_report_config, daily):
"""PortfolioReport must be initialized with args similar to those shown
in the example at the top of this file.
Args:
portfolio_report_config: Determines the behavior of this instance.
daily: pandas.DataFrame of prices of the same type returned by
historical_data.get_daily(). Rows represent dates in ascending
order, and columns represent financial instruments.
"""
self._config = portfolio_report_config
self._daily = daily
def _get_percent_returns(self, cumulative=False):
"""Calculate percent returns for the entire time period, either
cumulative from the beginning or separately for each day.
"""
if cumulative is True:
return self._daily['adj_close'] / (
self._daily['adj_close'].ix[0, :]) - 1.0
else:
return self._daily['adj_close'].pct_change()
def _get_dollar_values(self, group=False):
"""Calculate the value of portfolio holdings using closing prices.
Optionally aggregate the values into groups provided in config.
"""
dates = sorted(self._config['dates'])
# Copy dataframe and zero data before earliest portfolio date.
dollar_values = self._daily['close'].copy()
dollar_values.ix[
dollar_values.index < pd.to_datetime(str(dates[0])), :] = 0.0
        # Loop through the dates and calculate each date range using a boolean-mask index.
for i, item in enumerate(dates):
index = dollar_values.index >= pd.to_datetime(str(item))
if i < (len(dates) - 1):
index = index & (
dollar_values.index < pd.to_datetime(str(dates[i + 1])))
for key in list(dollar_values.columns.values):
value = self._config['dates'][item]['symbols'].get(key)
if value is None:
dollar_values.ix[index, key] = 0.0
else:
dollar_values.ix[index, key] *= value * self._config[
'value_ratio']
if group is True:
dollar_values = self._sum_symbol_groups(dollar_values)
return dollar_values
def _get_dollar_returns(self, group=False):
"""Calculate the dollar returns for portfolio holdings. Optionally
aggregate the returns into groups provided in config.
"""
dollar_values = self._get_dollar_values()
percent_returns = self._get_percent_returns()
dollar_returns = dollar_values * percent_returns
if group is True:
dollar_returns = self._sum_symbol_groups(dollar_returns)
return dollar_returns
def _get_profit_and_loss(self):
"""Calculate the profit and loss of the portfolio over time.
"""
profit_and_loss = self._get_dollar_values().sum(1)
dates = sorted(self._config['dates'])
# Correct spike on first portfolio date.
first_date = np.argmax(
profit_and_loss.index >= pd.to_datetime(str(dates[0])))
profit_and_loss.ix[first_date:] -= profit_and_loss.ix[first_date]
# Adjust for capital changes.
for i, item in enumerate(dates):
if i > 0:
index = profit_and_loss.index >= pd.to_datetime(str(item))
profit_and_loss.ix[index] -= self._config[
'dates'][item]['capital_change'] * self._config[
'value_ratio']
return profit_and_loss
def _sum_symbol_groups(self, data_frame):
"""Sum columns of dataframe using symbol_groups in config.
"""
sum_data_frame = pd.DataFrame()
for key, value in sorted(self._config['symbol_groups'].iteritems()):
sum_data_frame[key] = data_frame[value].sum(1)
return sum_data_frame
def plot_dollar_change_bars(self, group=False):
"""Plot the change in dollars for the most recent day as a bar plot.
Args:
group: Whether to aggregate based on symbol_groups in config.
"""
dollar_values = self._get_dollar_values(group).ix[-1, :]
dollar_returns = self._get_dollar_returns(group).ix[-1, :]
percent_returns = dollar_returns / dollar_values
labels = plot_utils.get_percent_strings(percent_returns)
bar_colors = plot_utils.get_conditional_colors(
percent_returns, self._BAR_ALPHA)
title = ('1-Day Change | ' + self._TITLE_DOLLAR_FORMAT + (
'\n')).format(np.sum(dollar_returns))
plot = dollar_returns.plot(kind='bar', color=bar_colors)
plot.set_title(title, color=self._TEXT_COLOR)
plot.set_xticklabels(dollar_returns.index, rotation=0)
plot_utils.format_y_ticks_as_dollars(plot)
plot_utils.add_bar_labels(plot, labels, self._TEXT_COLOR)
return plot
def plot_percent_return_lines(self):
"""Plot percent returns for each symbol for the entire time period as a
line plot.
"""
percent_returns = self._get_percent_returns(True)
title = 'Symbol Returns\n'
plot = percent_returns.plot(kind='line', ax=plt.gca())
plot.set_title(title, color=self._TEXT_COLOR)
plot_utils.format_x_ticks_as_dates(plot)
plot_utils.format_y_ticks_as_percents(plot)
plot_utils.format_legend(plot, self._TEXT_COLOR)
return plot
def plot_dollar_value_bars(self, group=False):
"""Plot the dollar value of portfolio holdings for the most recent day
as a bar plot.
Args:
group: Whether to aggregate based on symbol_groups in config.
"""
dollar_values = self._get_dollar_values(group).ix[-1, :]
percents = dollar_values / np.sum(dollar_values)
labels = plot_utils.get_percent_strings(percents)
title = 'Portfolio Weights\n'
plot = dollar_values.plot(kind='bar', alpha=self._BAR_ALPHA)
plot.set_title(title, color=self._TEXT_COLOR)
plot.set_xticklabels(dollar_values.index, rotation=0)
plot_utils.format_y_ticks_as_dollars(plot)
plot_utils.add_bar_labels(plot, labels, self._TEXT_COLOR)
return plot
def plot_dollar_value_lines(self, group=False):
"""Plot the dollar value of portfolio holdings for the entire time
period as a line plot.
Args:
group: Whether to aggregate based on symbol_groups in config.
"""
dollar_values = self._get_dollar_values(group)
dollar_values['TOTAL'] = dollar_values.sum(1)
title = ('Portfolio Value | ' + self._TITLE_DOLLAR_FORMAT + (
'\n')).format(dollar_values['TOTAL'].ix[-1])
plot = dollar_values.plot(kind='line', ax=plt.gca())
plot.set_title(title, color=self._TEXT_COLOR)
plot_utils.format_x_ticks_as_dates(plot)
plot_utils.format_y_ticks_as_dollars(plot)
plot_utils.format_legend(plot, self._TEXT_COLOR)
return plot
    def plot_profit_and_loss_lines(self):
        """Plot the profit and loss of the portfolio for the entire time
        period as a line plot.
        """
profit_and_loss = self._get_profit_and_loss()
title = ('Cumulative P&L | ' + self._TITLE_DOLLAR_FORMAT + (
'\n')).format(profit_and_loss[-1])
plot = profit_and_loss.plot(kind='line', ax=plt.gca())
plot.set_title(title, color=self._TEXT_COLOR)
plot_utils.format_x_ticks_as_dates(plot)
plot_utils.format_y_ticks_as_dollars(plot)
return plot
def get_report(self):
"""Creates the entire report composed of individual plots.
"""
subject = self._config['subject_format'].format(str(
self._daily['adj_close'].index[-1].date()))
plain_body = ''
plt.style.use(self._STYLE_SHEET)
# Create list of plot images to include in the report image.
plot_images = []
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_change_bars, group=True))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_change_bars))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_value_bars, group=True))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_value_bars))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_value_lines, group=True))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_value_lines))
plot_images.append(plot_utils.get_plot_image(
self.plot_profit_and_loss_lines))
plot_images.append(plot_utils.get_plot_image(
self.plot_percent_return_lines))
plot_images = [PIL.Image.open(x) for x in plot_images]
# Arrange plot images in a grid in the report image.
plot_width = plot_images[0].size[0]
plot_height = plot_images[0].size[1]
report_image = PIL.Image.new('RGB', (
plot_width * self._REPORT_COLS, plot_height * int(
np.ceil(len(plot_images) / self._REPORT_COLS))), 'white')
for i, item in enumerate(plot_images):
report_image.paste(item, ((i % self._REPORT_COLS) * plot_width, int(
np.floor(i / self._REPORT_COLS)) * plot_height))
# Convert report image to bytes in PNG format.
report_image_bytes = io.BytesIO()
report_image.save(report_image_bytes, format='png')
report_image_bytes.seek(0)
return {'subject': subject,
'plain_body': plain_body,
'files': {self._FILENAME: report_image_bytes}}
from numpy import absolute, isnan, where
from scipy.spatial.distance import correlation
def compute_correlation_distance(x, y):
correlation_distance = correlation(x, y)
if isnan(correlation_distance):
return 2
else:
return where(absolute(correlation_distance) < 1e-8, 0, correlation_distance)
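# Minimal usage sketch (inputs are illustrative): scipy's correlation
# distance is 1 - Pearson r, so perfectly correlated vectors yield 0,
# while a constant vector (undefined correlation, i.e. NaN) maps to 2.
if __name__ == '__main__':
    print(compute_correlation_distance([1, 2, 3], [2, 4, 6]))  # ~0.0
    print(compute_correlation_distance([1, 1, 1], [2, 4, 6]))  # 2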
# This sample tests the case where a subclass of Dict uses
# a dictionary literal as an argument to the constructor call.
from collections import Counter, defaultdict
from typing import Callable, Generic, Mapping, Optional, TypeVar
c1 = Counter({0, 1})
reveal_type(c1, expected_text="Counter[int]")
for i in range(256):
c1 = Counter({0: c1[1]})
reveal_type(c1, expected_text="Counter[int]")
reveal_type(c1, expected_text="Counter[int]")
K = TypeVar("K")
V = TypeVar("V")
MyFuncType = Callable[[Callable[[K], V]], V]
class MyFunc(Generic[K, V]):
def __init__(self, g: MyFuncType[K, V]) -> None:
self.g = g
MyFuncMapping = Mapping[K, Optional[MyFunc[K, V]]]
my_func_defaultdict: MyFuncMapping[str, int] = defaultdict(
lambda: None, {"x": MyFunc(lambda f: f("a"))}
)
# Sequence of the numeric terms of an arbitrary function.
# Prints the sequence of terms of the function X^2 up to a chosen term.
n = int(input())
for i in range(0, n):
    print(i * i)
# Prints the sequence of terms of the function X^3 up to a chosen term.
y = int(input())
for i in range(0, y):
    print(i * i * i)
# This code could be repeated generically for every possible exponent in the print call.
# How should we choose how many times print should exponentiate the argument X^k,
# with X the base and K an integer exponent? (See the sketch below.)
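# One possible generic answer (a sketch): read the exponent k once and use
# Python's ** operator instead of writing a separate loop per exponent.
k = int(input())
m = int(input())
for i in range(0, m):
    print(i ** k)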
from bisect import bisect
from contextlib import closing, contextmanager
from itertools import accumulate, chain, islice, zip_longest
from multiprocessing import Lock, RawValue, Process
from os import cpu_count
from re import sub
from sys import argv, stdout
output_file = open("bench_output-fasta_bg.txt", mode="wb", buffering=0)
write = output_file.write
def acquired_lock():
lock = Lock()
lock.acquire()
return lock
def started_process(target, args):
process = Process(target=target, args=args)
process.start()
return process
@contextmanager
def lock_pair(pre_lock=None, post_lock=None, locks=None):
pre, post = locks if locks else (pre_lock, post_lock)
if pre:
pre.acquire()
yield
if post:
post.release()
def write_lines(
sequence, n, width, lines_per_block=10000, newline=b'\n', table=None):
i = 0
blocks = (n - width) // width // lines_per_block
if blocks:
for _ in range(blocks):
output = bytearray()
for i in range(i, i + width * lines_per_block, width):
output += sequence[i:i + width] + newline
else:
i += width
if table:
write(output.translate(table))
else:
write(output)
output = bytearray()
if i < n - width:
for i in range(i, n - width, width):
output += sequence[i:i + width] + newline
else:
i += width
output += sequence[i:n] + newline
if table:
write(output.translate(table))
else:
write(output)
stdout.buffer.flush()
def cumulative_probabilities(alphabet, factor=1.0):
probabilities = tuple(accumulate(p * factor for _, p in alphabet))
table = bytearray.maketrans(
bytes(chain(range(len(alphabet)), [255])),
bytes(chain((ord(c) for c, _ in alphabet), [10]))
)
return probabilities, table
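# Example (a sketch): for alphabet [('a', .5), ('b', .5)] and factor 1.0
# this returns probabilities (0.5, 1.0) and a translation table mapping
# byte 0 to b'a', byte 1 to b'b' and byte 255 to b'\n', which write_lines
# later applies via bytearray.translate().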
def copy_from_sequence(header, sequence, n, width, locks=None):
sequence = bytearray(sequence, encoding='utf8')
while len(sequence) < n:
sequence.extend(sequence)
with lock_pair(locks=locks):
write(header)
write_lines(sequence, n, width)
def lcg(seed, im, ia, ic):
local_seed = seed.value
try:
while True:
local_seed = (local_seed * ia + ic) % im
yield local_seed
finally:
seed.value = local_seed
def lookup(probabilities, values):
for value in values:
yield bisect(probabilities, value)
def lcg_lookup_slow(probabilities, seed, im, ia, ic):
with closing(lcg(seed, im, ia, ic)) as prng:
yield from lookup(probabilities, prng)
def lcg_lookup_fast(probabilities, seed, im, ia, ic):
local_seed = seed.value
try:
while True:
local_seed = (local_seed * ia + ic) % im
yield bisect(probabilities, local_seed)
finally:
seed.value = local_seed
def lookup_and_write(
header, probabilities, table, values, start, stop, width, locks=None):
if isinstance(values, bytearray):
output = values
else:
output = bytearray()
output[:stop - start] = lookup(probabilities, values)
with lock_pair(locks=locks):
if start == 0:
write(header)
write_lines(output, len(output), width, newline=b'\xff', table=table)
def random_selection(header, alphabet, n, width, seed, locks=None):
im = 139968.0
ia = 3877.0
ic = 29573.0
probabilities, table = cumulative_probabilities(alphabet, im)
if not locks:
with closing(lcg_lookup_fast(probabilities, seed, im, ia, ic)) as prng:
output = bytearray(islice(prng, n))
lookup_and_write(header, probabilities, table, output, 0, n, width)
else:
pre_seed, post_seed, pre_write, post_write = locks
m = cpu_count() * 3 if n > width * 15 else 1
partitions = [n // (width * m) * width * i for i in range(1, m)]
processes = []
pre = pre_write
with lock_pair(locks=(pre_seed, post_seed)):
with closing(lcg(seed, im, ia, ic)) as prng:
for start, stop in zip([0] + partitions, partitions + [n]):
values = list(islice(prng, stop - start))
post = acquired_lock() if stop < n else post_write
processes.append(started_process(
lookup_and_write,
(header, probabilities, table, values,
start, stop, width, (pre, post))
))
pre = post
for p in processes:
p.join()
def fasta(n):
alu = sub(r'\s+', '', """
GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA
TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT
AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG
GCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG
CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA
""")
iub = list(zip_longest('acgtBDHKMNRSVWY',
(.27, .12, .12, .27), fillvalue=.02))
homosapiens = list(zip('acgt', (0.3029549426680, 0.1979883004921,
0.1975473066391, 0.3015094502008)))
seed = RawValue('f', 42)
width = 60
tasks = [
(copy_from_sequence,
[b'>ONE Homo sapiens alu\n', alu, n * 2, width]),
(random_selection,
[b'>TWO IUB ambiguity codes\n', iub, n * 3, width, seed]),
(random_selection,
[b'>THREE Homo sapiens frequency\n', homosapiens, n * 5, width, seed]),
]
if cpu_count() < 2:
for func, args in tasks:
func(*args)
else:
written_1 = acquired_lock()
seeded_2 = acquired_lock()
written_2 = acquired_lock()
locks_sets = [
(None, written_1),
(None, seeded_2, written_1, written_2),
(seeded_2, None, written_2, None),
]
processes = [
started_process(target, args + [locks_sets[i]])
for i, (target, args) in enumerate(tasks)
]
for p in processes:
p.join()
output_file.close()
if __name__ == "__main__":
if len(argv) > 1:
fasta(int(argv[1]))
else:
fasta(1000000)
# ______ _ _ _ _ _ _ _
# | ___ \ | | | | (_) (_) | | (_)
# | |_/ / __ ___ | |__ __ _| |__ _| |_ ___| |_ _ ___
# | __/ '__/ _ \| '_ \ / _` | '_ \| | | / __| __| |/ __|
# | | | | | (_) | |_) | (_| | |_) | | | \__ \ |_| | (__
# \_| |_| \___/|_.__/ \__,_|_.__/|_|_|_|___/\__|_|\___|
# ___ ___ _ _
# | \/ | | | (_)
# | . . | ___ ___| |__ __ _ _ __ _ ___ ___
# | |\/| |/ _ \/ __| '_ \ / _` | '_ \| |/ __/ __|
# | | | | __/ (__| | | | (_| | | | | | (__\__ \
# \_| |_/\___|\___|_| |_|\__,_|_| |_|_|\___|___/
# _ _ _
# | | | | | |
# | | __ _| |__ ___ _ __ __ _| |_ ___ _ __ _ _
# | | / _` | '_ \ / _ \| '__/ _` | __/ _ \| '__| | | |
# | |___| (_| | |_) | (_) | | | (_| | || (_) | | | |_| |
# \_____/\__,_|_.__/ \___/|_| \__,_|\__\___/|_| \__, |
# __/ |
# |___/
#
# MIT License
#
# Copyright (c) 2019 Probabilistic Mechanics Laboratory
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
""" Custom layers """
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.keras.constraints import MinMaxNorm
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import constraints
from tensorflow.python.framework import tensor_shape
class DOrC(Layer):
""" Discrete ordinal classifier layer
"""
def __init__(self,
kernel_initializer = 'glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(DOrC, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
def build(self, input_shape, **kwargs):
self.threshold1 = self.add_weight("threshold1",
shape = [1],
initializer = self.kernel_initializer,
constraint = MinMaxNorm(min_value=0.0, max_value=0.3, rate=1.0),
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.threshold2 = self.add_weight("threshold2",
shape = [1],
initializer = self.kernel_initializer,
constraint = MinMaxNorm(min_value=0.2, max_value=0.5, rate=1.0),
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.threshold3 = self.add_weight("threshold3",
shape = [1],
initializer = self.kernel_initializer,
constraint = MinMaxNorm(min_value=0.4, max_value=0.8, rate=1.0),
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.threshold4 = self.add_weight("threshold4",
shape = [1],
initializer = self.kernel_initializer,
constraint = MinMaxNorm(min_value=0.8, max_value=2.0, rate=1.0),
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.built = True
def call(self, inputs):
first_threshold = 1/(1 + gen_math_ops.exp(-5e1*(inputs-self.threshold1)))
second_threshold = 1/(1 + gen_math_ops.exp(-5e1*(inputs*first_threshold-self.threshold2)))
third_threshold = 1/(1 + gen_math_ops.exp(-5e1*(inputs*second_threshold-self.threshold3)))
fourth_threshold = 1/(1 + gen_math_ops.exp(-5e1*(inputs*third_threshold-self.threshold4)))
        output = 1 + first_threshold + second_threshold + third_threshold + fourth_threshold
return output
def compute_output_shape(self, input_shape):
aux_shape = tensor_shape.TensorShape((None,1))
return aux_shape[:-1].concatenate(1)
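# Minimal smoke test (a sketch, assuming TensorFlow 2.x and NumPy; the input
# shape and sample values below are illustrative). The layer maps a
# continuous scalar score onto an ordinal value in (1, 5) through four
# smooth sigmoid thresholds.
if __name__ == '__main__':
    import numpy as np
    import tensorflow as tf
    model = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=(1,)),
                                 DOrC()])
    print(model(np.array([[0.05], [0.45], [1.5]], dtype='float32')))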
# -*- coding: utf-8 -*-
"""
drift - Logging setup code
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Set up logging based on config dict.
"""
from __future__ import absolute_import
import os
import logging
from logging.handlers import SysLogHandler
import logging.config
import json
import datetime
import sys
import time
import uuid
from socket import gethostname
from collections import OrderedDict
from functools import wraps
from logstash_formatter import LogstashFormatterV1
import six
from six.moves.urllib.parse import urlsplit
from flask import g, request
from drift.core.extensions.jwt import current_user
from drift.utils import get_tier_name
def get_stream_handler():
"""returns a stream handler with standard formatting for use in local development"""
stream_handler = logging.StreamHandler()
stream_formatter = logging.Formatter(
fmt="%(asctime)s %(levelname)-8s %(name)-15s %(message)s"
)
stream_handler.setFormatter(stream_formatter)
return stream_handler
def get_caller():
"""returns a nice string representing caller for logs
Note: This is heavy"""
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
caller = "{} ({}#{})".format(calframe[2][3], calframe[2][1], calframe[2][2])
return caller
def get_clean_path_from_url(url):
    """Extract the endpoint path from the passed-in URL and remove
    service information and any IDs so that the endpoint path can
    easily be used for grouping.
    """
clean_path = None
try:
lst = urlsplit(url)
path = lst.path
lst = path.split("/")
for i, l in enumerate(lst):
try:
int(l)
except ValueError:
pass
else:
lst[i] = "<int>"
# assume that the service name is the first part so we skip it
clean_path = "/" + "/".join(lst[2:])
except Exception:
# Todo: should report these errors
pass
return clean_path
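# Example (derived from the logic above): for
# "http://host/drift/users/123/items" the path splits into
# ['', 'drift', 'users', '123', 'items']; '123' becomes '<int>', the
# leading empty part and the service name are skipped, and the result is
# '/users/<int>/items'.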
def get_log_details():
details = OrderedDict()
tenant_name = None
tier_name = get_tier_name()
remote_addr = None
try:
remote_addr = request.remote_addr
except Exception:
pass
try:
if hasattr(g, "conf"):
tenant_name = (
g.conf.tenant_name["tenant_name"] if g.conf.tenant_name else "(none)"
)
except RuntimeError as e:
if "Working outside of application context" in repr(e):
pass
else:
raise
log_context = {}
log_context["created"] = datetime.datetime.utcnow().isoformat() + "Z"
log_context["tenant"] = tenant_name
log_context["tier"] = tier_name
log_context["remote_addr"] = remote_addr
details["logger"] = log_context
jwt_context = {}
try:
fields = set(
[
"user_id",
"player_id",
"roles",
"jti",
"user_name",
"player_name",
"client_id",
"identity_id",
]
)
for k, v in current_user.items():
if k in fields:
key = "{}".format(k)
jwt_context[key] = v
if k == "roles" and v:
jwt_context[k] = ",".join(v)
    except Exception:
pass
if jwt_context:
details["user"] = jwt_context
    # add "Drift-Log-Context" request headers to the logs
try:
details["client"] = json.loads(request.headers.get("Drift-Log-Context"))
except Exception:
pass
return details
# Custom log record
_logRecordFactory = logging.getLogRecordFactory()
def drift_log_record_factory(*args, **kw):
global _logRecordFactory
logrec = _logRecordFactory(*args, **kw)
log_details = get_log_details()
for k, v in log_details.items():
setattr(logrec, k, v)
logger_fields = (
"levelname",
"levelno",
"process",
"thread",
"name",
"filename",
"module",
"funcName",
"lineno",
)
for f in logger_fields:
log_details["logger"][f] = getattr(logrec, f, None)
try:
correlation_id = request.correlation_id
except Exception:
correlation_id = None
log_details["logger"]["correlation_id"] = correlation_id
log_details["logger"]["created"] = datetime.datetime.utcnow().isoformat() + "Z"
for k, v in log_details.items():
setattr(logrec, k, v)
return logrec
class JSONFormatter(logging.Formatter):
"""
Format log message as JSON.
"""
source_host = gethostname()
log_tag = None
def __init__(self):
super(JSONFormatter, self).__init__()
    def formatTime(self, record, datefmt=None):
        # Use UTC so the trailing "Z" is accurate, matching utcnow() elsewhere.
        dt = datetime.datetime.utcfromtimestamp(record.created)
        return dt.isoformat() + "Z"
def get_formatted_data(self, record):
data = OrderedDict()
# put the timestamp first for splunk timestamp indexing
data["timestamp"] = self.formatTime(record)
if hasattr(record, "logger") and "tier" in record.logger:
data["tenant"] = "{}.{}".format(
record.logger.get("tier", None), record.logger.get("tenant", None)
)
field_names = "logger", "client", "user"
data.update(
{key: getattr(record, key) for key in field_names if hasattr(record, key)}
)
return data
def format(self, record):
data = self.get_formatted_data(record)
json_text = json.dumps(data, default=self._json_default)
return json_text
def json_format(self, data):
json_text = json.dumps(data, default=self._json_default)
return "drift.%s: @cee: %s" % (self.log_tag, json_text)
@staticmethod
def _json_default(obj):
"""
Coerce everything to strings.
All objects representing time get output as ISO8601.
"""
if (
isinstance(obj, datetime.datetime)
or isinstance(obj, datetime.date)
or isinstance(obj, datetime.time)
):
return obj.isoformat()
else:
return str(obj)
class ServerLogFormatter(JSONFormatter):
log_tag = "server"
def format(self, record):
data = self.get_formatted_data(record)
data["message"] = super(JSONFormatter, self).format(record)
data["level"] = record.levelname
try:
data["request"] = "{} {}".format(request.method, request.url)
except Exception:
pass
return self.json_format(data)
class EventLogFormatter(JSONFormatter):
log_tag = "events"
def format(self, record):
data = self.get_formatted_data(record)
data["event_name"] = super(JSONFormatter, self).format(record)
data.update(getattr(record, "extra", {}))
return self.json_format(data)
class ClientLogFormatter(JSONFormatter):
log_tag = "client"
def format(self, record):
data = self.get_formatted_data(record)
data.update(getattr(record, "extra", {}))
return self.json_format(data)
def trim_logger(data):
# remove unnecessary logger fields
for k, v in data["logger"].copy().items():
if k not in ["name", "tier", "tenant", "correlation_id"]:
del data["logger"][k]
def format_request_body(key, value):
if key == "password":
return "*"
else:
# constrain the body to 64 characters per key and convert to string
return str(value)[:64]
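# Example (derived from the helper above): format_request_body('password',
# 'hunter2') returns '*', while any other key has its value converted to a
# string and truncated to 64 characters.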
class RequestLogFormatter(JSONFormatter):
log_tag = "request"
def format(self, record):
data = self.get_formatted_data(record)
trim_logger(data)
try:
data["method"] = request.method
data["url"] = request.url
data["remote_addr"] = request.remote_addr
except Exception:
pass
data["endpoint"] = get_clean_path_from_url(request.url)
request_body = None
try:
if request.json:
request_body = {
key: format_request_body(key, value)
for key, value in request.json.items()
}
else:
request_body = request.data
except Exception:
pass
if request_body:
data["request_body"] = request_body
try:
data.update(getattr(record, "extra", {}))
except Exception:
pass
if data.get("log_level") == 1:
data = {
"timestamp": data["timestamp"],
"tenant": data["tenant"],
"method": data["method"],
"endpoint": data["endpoint"],
}
return self.json_format(data)
# Calling 'logsetup' more than once may result in multiple handlers emitting
# multiple log events for a single log call. Flagging it is a simple fix.
_setup_done = False
class StreamFormatter(logging.Formatter):
"""
The stream formatter automatically grab the record's extra field
and append its content to the log message
"""
def format(self, record):
message = super(StreamFormatter, self).format(record)
if hasattr(record, "extra"):
message += " | {}".format(record.extra)
return message
def logsetup(app):
global _setup_done
if _setup_done:
return
_setup_done = True
app.log_formatter = None
output_format = app.config.get("LOG_FORMAT", "json").lower()
log_level = app.config.get("LOG_LEVEL", "INFO").upper()
if output_format == "json":
logger = logging.getLogger()
logger.setLevel(log_level)
formatter = LogstashFormatterV1()
app.log_formatter = formatter
# make sure this is our only stream handler
logger.handlers = []
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
else:
logging.basicConfig(
level=log_level, format='%(asctime)s - %(name)-14s %(levelname)-5s: %(message)s'
)
# if output_format == 'text':
# logging.basicConfig(level=log_level)
# else:
# handler = logging.StreamHandler()
# formatter = LogstashFormatterV1()
# handler.setFormatter(formatter)
# logging.basicConfig(handlers=[handler], level=log_level)
# if 'logging' in app.config:
# logging.config.dictConfig(app.config['logging'])
@app.before_request
def _setup_logging():
return setup_logging(app)
def setup_logging(app):
"""Inject a tracking identifier into the request and set up context-info
for all debug logs
"""
g.log_defaults = None
request_id = request.headers.get("Request-ID", None)
if not request_id:
default_request_id = str(uuid.uuid4())
request_id = request.headers.get("X-Request-ID", default_request_id)
request.request_id = request_id
g.log_defaults = get_log_defaults()
if app.log_formatter:
app.log_formatter.defaults = g.log_defaults
def get_log_defaults():
defaults = {}
tenant_name = None
tier_name = get_tier_name()
remote_addr = None
try:
remote_addr = request.remote_addr
except Exception:
pass
try:
if hasattr(g, 'conf'):
tenant_name = g.conf.tenant_name['tenant_name'] if g.conf.tenant_name else '(none)'
except RuntimeError as e:
if "Working outside of application context" in repr(e):
pass
else:
raise
defaults["tenant"] = tenant_name
defaults["tier"] = tier_name
defaults["remote_addr"] = remote_addr
jwt_context = get_user_context()
if jwt_context:
defaults["user"] = jwt_context
    # add "Client-Log-Context" request headers to the logs
client = None
try:
client = request.headers.get("Client-Log-Context", None)
defaults["client"] = json.loads(client)
except Exception:
defaults["client"] = client
defaults["request"] = {
"request_id": request.request_id,
"url": request.url,
"method": request.method,
"remote_addr": request.remote_addr,
"path": request.path,
"user_agent": request.headers.get('User-Agent'),
"endpoint": get_clean_path_from_url(request.url)
}
defaults["request"].update(request.view_args or {})
return defaults
def get_user_context():
jwt_context = {}
try:
fields = set(["user_id", "player_id", "roles", "jti", "user_name",
"player_name", "client_id", "identity_id"])
for k, v in current_user.items():
if k in fields:
key = "{}".format(k)
jwt_context[key] = v
if k == "roles" and v:
jwt_context[k] = ",".join(v)
except Exception:
pass
return jwt_context
def drift_init_extension(app, **kwargs):
logsetup(app)
def request_log_level(level):
def wrapper(fn):
@wraps(fn)
def decorated(*args, **kwargs):
g.request_log_level = int(level)
return fn(*args, **kwargs)
return decorated
return wrapper
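# Usage sketch (hypothetical Flask view; assumes the request log machinery
# reads the level from the request context when building the record):
# @app.route('/ping')
# @request_log_level(1)
# def ping():
#     return 'pong'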
# -*- coding: utf-8 -*-
'''
Created on 2017/09/14
@author: yuyang
'''
import os
import urllib
import uuid
from docx.shared import Pt
from docx.shared import RGBColor
from docx.shared import Inches
JPEG_EXTENSION = '.jpg'
PNG_EXTENSION = '.png'
GIF_EXTENSION = '.gif'
SPLIT_STRING = '///'
def add_author(document, author):
para = document.add_paragraph()
run = para.add_run(author)
font = run.font
#font.name = 'Microsoft YaHei'
font.size = Pt(12)
font.color.rgb = RGBColor(0x43, 0x6E, 0xEE)
def add_content(document, content, para = None, font_size = 16):
if not para:
para = document.add_paragraph()
run = para.add_run(content)
font = run.font
font.bold = False
font.size = Pt(font_size)
font.color.rgb = RGBColor(0x08, 0x08, 0x08)
def add_picture(document, story):
    filenames = analyze_pic(story)
    for filename in filenames:
        try:
            document.add_picture(filename, width=Inches(5))
        except Exception:
            print 'Error inserting picture: ' + filename
def add_time(document, time):
para = document.add_paragraph()
run = para.add_run(time)
font = run.font
font.italic = True
#font.name = 'Microsoft YaHei'
font.size = Pt(10)
font.color.rgb = RGBColor(0x7A, 0x7A, 0x7A)
def download_pic(url, extension):
    filename = None
    try:
        if not os.path.exists('.//pics'):
            os.mkdir('.//pics')
        filename = '.\\pics\\' + str(uuid.uuid4()) + extension
        urllib.urlretrieve(url, filename)
    except Exception:
        print 'Error downloading picture: ' + url
    return filename
def analyze_pic(story):
    filenames = []
    picBox = None
    imgGroup = None
    try:
        picBox = story.find_element_by_class_name('picBox')
    except Exception:
        pass
    try:
        imgGroup = story.find_element_by_class_name('tl_imgGroup')
    except Exception:
        pass
    if picBox:  # one picture
        img_url = picBox.find_element_by_tag_name('a').get_attribute('href')
        print 'Picture:', img_url
        filename = download_pic(img_url, JPEG_EXTENSION)
        filenames.append(filename)
    elif imgGroup:  # multiple pictures
        a_tags = imgGroup.find_elements_by_tag_name('a')
        for a_tag in a_tags:
            img_url = a_tag.get_attribute('href')
            print 'Picture:', img_url
            filename = download_pic(img_url, JPEG_EXTENSION)
            filenames.append(filename)
    return filenames
"""
Views for the app
"""
from __future__ import absolute_import
from __future__ import division
import os
import uuid
from auth import constants
from auth.forms import \
CategoryForm, \
CountryForm, \
CurrencyForm, \
GatewayForm, \
LoginVoucherForm, \
MyUserForm, \
NetworkForm, \
NewVoucherForm, \
ProductForm, \
UserForm
from auth.models import Auth, Category, Country, Currency, Gateway, Network, Product, User, Voucher, db
from auth.payu import get_transaction, set_transaction, capture
from auth.resources import logos
from auth.services import \
environment_dump, \
healthcheck as healthcheck_service
from auth.utils import is_logged_in, has_role
from flask import \
Blueprint, \
abort, \
current_app, \
flash, \
redirect, \
request, \
render_template, \
send_from_directory, \
session, \
url_for
from flask_menu import register_menu
from flask_potion.exceptions import ItemNotFound
from flask_security import \
auth_token_required, \
current_user, \
login_required, \
roles_accepted
from PIL import Image
bp = Blueprint('auth', __name__)
RESOURCE_MODELS = {
'categories': Category,
'countries': Country,
'currencies': Currency,
'gateways': Gateway,
'networks': Network,
'products': Product,
'users': User,
'vouchers': Voucher,
}
def generate_token():
"""Generate token for the voucher session"""
return uuid.uuid4().hex
def resource_query(resource):
"""Generate a filtered query for a resource"""
model = RESOURCE_MODELS[resource]
query = model.query
if current_user.has_role('network-admin') or current_user.has_role('gateway-admin'):
if model == Network:
query = query.filter_by(id=current_user.network_id)
elif model in [ Gateway, User ]:
query = query.filter_by(network_id=current_user.network_id)
if current_user.has_role('network-admin'):
if model == Voucher:
query = query.join(Voucher.gateway).join(Gateway.network).filter(Network.id == current_user.network_id)
if current_user.has_role('gateway-admin'):
if model == Gateway:
query = query.filter_by(id=current_user.gateway_id)
elif model in [ User, Voucher ]:
query = query.filter_by(gateway_id=current_user.gateway_id)
return query
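# Example (a sketch of the scoping above): for a network-admin,
# resource_query('vouchers') joins Voucher -> Gateway -> Network and keeps
# only vouchers whose gateway belongs to current_user.network_id; a
# gateway-admin is narrowed further to vouchers of their own gateway.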
def resource_instance(resource, id):
"""Return instances"""
model = RESOURCE_MODELS[resource]
return resource_query(resource).filter(model.id == id).first_or_404()
def resource_instances(resource):
"""Return instances"""
query = resource_query(resource)
if resource == 'vouchers':
return (query.filter(Voucher.status != 'archived')
.order_by(Voucher.status, Voucher.created_at.desc())
.all())
else:
return query.all()
def resource_index(resource, form=None):
"""Handle a resource index request"""
instances = resource_instances(resource)
return render_template('%s/index.html' % resource,
form=form,
instances=instances)
def resource_new(resource, form):
"""Handle a new resource request"""
if form.validate_on_submit():
instance = RESOURCE_MODELS[resource]()
form.populate_obj(instance)
db.session.add(instance)
db.session.commit()
flash('Create %s successful' % instance)
return redirect(url_for('.%s_index' % resource))
return render_template('%s/new.html' % resource, form=form)
def resource_edit(resource, id, form_class):
instance = resource_instance(resource, id)
form = form_class(obj=instance)
if form.validate_on_submit():
form.populate_obj(instance)
db.session.commit()
flash('Update %s successful' % instance)
return redirect(url_for('.%s_index' % resource))
return render_template('%s/edit.html' % resource,
form=form,
instance=instance)
def resource_delete(resource, id):
instance = resource_instance(resource, id)
if request.method == 'POST':
db.session.delete(instance)
db.session.commit()
flash('Delete %s successful' % instance)
return redirect(url_for('.%s_index' % resource))
return render_template('shared/delete.html',
instance=instance,
resource=resource)
def resource_action(resource, id, action):
instance = resource_instance(resource, id)
if request.method == 'POST':
if action in constants.ACTIONS[resource]:
getattr(instance, action)()
db.session.commit()
flash('%s %s successful' % (instance, action))
return redirect(url_for('.%s_index' % resource))
else:
abort(404)
return render_template('shared/action.html',
instance=instance,
action=action,
resource=resource)
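# Example (sketch; the action name is hypothetical): a POST to
# /vouchers/<id>/expire resolves to resource_action('vouchers', id,
# 'expire'), which is only executed if 'expire' appears in
# constants.ACTIONS['vouchers'].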
@bp.route('/network', methods=['GET', 'POST'])
@login_required
@roles_accepted('network-admin')
@register_menu(
bp,
'.network',
'My Network',
visible_when=has_role('network-admin'),
order=997
)
def my_network():
form = NetworkForm(obj=current_user.network)
if form.validate_on_submit():
form.populate_obj(current_user.network)
db.session.commit()
flash('Update successful')
return redirect('/')
return render_template('networks/current.html',
form=form,
instance=current_user.network)
@bp.route('/gateway', methods=['GET', 'POST'])
@login_required
@roles_accepted('gateway-admin')
@register_menu(
bp,
'.gateway',
'My Gateway',
visible_when=has_role('gateway-admin'),
order=998
)
def my_gateway():
gateway = current_user.gateway
return _gateways_edit(
gateway,
'My Gateway',
url_for('.my_gateway'),
url_for('.home')
)
@bp.route('/user', methods=['GET', 'POST'])
@login_required
@register_menu(
bp,
'.account',
'My Account',
visible_when=is_logged_in,
order=999
)
def my_account():
form = MyUserForm(obj=current_user)
if form.validate_on_submit():
if form.password.data == '':
del form.password
form.populate_obj(current_user)
db.session.commit()
flash('Update successful')
return redirect('/')
return render_template('users/current.html',
form=form,
instance=current_user)
@bp.route('/networks')
@login_required
@roles_accepted('super-admin')
@register_menu(
bp,
'.networks',
'Networks',
visible_when=has_role('super-admin'),
order=10
)
def networks_index():
return resource_index('networks')
@bp.route('/networks/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def networks_new():
form = NetworkForm()
return resource_new('networks', form)
@bp.route('/networks/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def networks_edit(id):
return resource_edit('networks', id, NetworkForm)
@bp.route('/networks/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def networks_delete(id):
return resource_delete('networks', id)
@bp.route('/gateways')
@login_required
@roles_accepted('super-admin', 'network-admin')
@register_menu(
bp,
'.gateways',
'Gateways',
visible_when=has_role('super-admin', 'network-admin'),
order=20)
def gateways_index():
return resource_index('gateways')
def handle_logo(form):
if request.files['logo']:
filename = form.logo.data = logos.save(request.files['logo'], name='%s.' % form.id.data)
im = Image.open(logos.path(filename))
im.thumbnail((300, 300), Image.ANTIALIAS)
im.save(logos.path(filename))
else:
del form.logo
@bp.route('/gateways/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin')
def gateways_new():
form = GatewayForm()
if form.validate_on_submit():
handle_logo(form)
gateway = Gateway()
form.populate_obj(gateway)
db.session.add(gateway)
db.session.commit()
flash('Create %s successful' % gateway)
return redirect(url_for('.gateways_index'))
return render_template('gateways/new.html', form=form)
def _gateways_edit(gateway, page_title, action_url, redirect_url):
form = GatewayForm(obj=gateway)
if form.validate_on_submit():
handle_logo(form)
form.populate_obj(gateway)
db.session.commit()
flash('Update %s successful' % gateway)
return redirect(redirect_url)
return render_template('gateways/edit.html',
action_url=action_url,
form=form,
instance=gateway,
logos=logos,
page_title=page_title)
@bp.route('/gateways/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin')
def gateways_edit(id):
gateway = Gateway.query.filter_by(id=id).first_or_404()
return _gateways_edit(
gateway,
'Edit Gateway',
url_for('.gateways_edit', id=id),
url_for('.gateways_index')
)
@bp.route('/gateways/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin')
def gateways_delete(id):
return resource_delete('gateways', id)
@bp.route('/users')
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.users',
'Users',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=40
)
def users_index():
form = UserForm()
return resource_index('users', form=form)
@bp.route('/users/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def users_new():
form = UserForm()
if current_user.has_role('gateway-admin'):
del form.roles
return resource_new('users', form)
@bp.route('/users/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def users_edit(id):
instance = resource_instance('users', id)
if (current_user.has_role('network-admin')
and instance.network != current_user.network):
abort(403)
if (current_user.has_role('gateway-admin')
and (instance.network != current_user.network
or instance.gateway != current_user.gateway)):
abort(403)
form = UserForm(obj=instance)
if current_user.has_role('network-admin'):
del form.gateway
if current_user == instance:
del form.active
del form.roles
if form.validate_on_submit():
if form.password.data == '':
del form.password
form.populate_obj(instance)
db.session.commit()
flash('Update %s successful' % instance)
return redirect(url_for('.users_index'))
return render_template('users/edit.html', form=form, instance=instance)
@bp.route('/users/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def users_delete(id):
return resource_delete('users', id)
@bp.route('/vouchers')
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.vouchers',
'Vouchers',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=5
)
def vouchers_index():
return resource_index('vouchers')
@bp.route('/vouchers/<id>/<action>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def vouchers_action(id, action):
return resource_action('vouchers', id, action)
@bp.route('/categories')
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.categories',
'Categories',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=99
)
def categories_index():
return resource_index('categories')
@bp.route('/categories/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def categories_new():
form = CategoryForm()
return resource_new('categories', form)
@bp.route('/categories/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def categories_delete(id):
return resource_delete('categories', id)
@bp.route('/categories/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def categories_edit(id):
return resource_edit('categories', id, CategoryForm)
@bp.route('/products')
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.products',
'Products',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=99
)
def products_index():
return resource_index('products')
@bp.route('/products/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def products_new():
form = ProductForm()
return resource_new('products', form)
@bp.route('/products/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def products_delete(id):
return resource_delete('products', id)
@bp.route('/products/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def products_edit(id):
return resource_edit('products', id, ProductForm)
@bp.route('/countries')
@login_required
@roles_accepted('super-admin')
@register_menu(
bp,
'.countries',
'Countries',
visible_when=has_role('super-admin'),
order=99
)
def countries_index():
return resource_index('countries')
@bp.route('/countries/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def countries_new():
form = CountryForm()
return resource_new('countries', form)
@bp.route('/countries/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def countries_delete(id):
return resource_delete('countries', id)
@bp.route('/countries/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def countries_edit(id):
return resource_edit('countries', id, CountryForm)
@bp.route('/currencies')
@login_required
@roles_accepted('super-admin')
@register_menu(
bp,
'.currencies',
'Currencies',
visible_when=has_role('super-admin'),
order=99
)
def currencies_index():
return resource_index('currencies')
@bp.route('/currencies/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def currencies_new():
form = CurrencyForm()
return resource_new('currencies', form)
@bp.route('/currencies/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def currencies_delete(id):
return resource_delete('currencies', id)
@bp.route('/currencies/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def currencies_edit(id):
return resource_edit('currencies', id, CurrencyForm)
@bp.route('/new-voucher', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.new-voucher',
'New Voucher',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=0
)
def vouchers_new():
form = NewVoucherForm()
choices = []
defaults = {}
if current_user.has_role('gateway-admin'):
choices = [
[
current_user.gateway_id,
'%s - %s' % (current_user.gateway.network.title,
current_user.gateway.title)
]
]
defaults[current_user.gateway_id] = {
'minutes': current_user.gateway.default_minutes,
'megabytes': current_user.gateway.default_megabytes,
}
else:
if current_user.has_role('network-admin'):
networks = [current_user.network]
else:
networks = Network.query.all()
for network in networks:
for gateway in network.gateways:
choices.append([
gateway.id,
'%s - %s' % (network.title,
gateway.title)
])
defaults[gateway.id] = {
'minutes': gateway.default_minutes,
'megabytes': gateway.default_megabytes,
}
    if not choices:
flash('Define a network and gateway first.')
return redirect(request.referrer)
form.gateway_id.choices = choices
item = defaults[choices[0][0]]
if request.method == 'GET':
form.minutes.data = item['minutes']
form.megabytes.data = item['megabytes']
if form.validate_on_submit():
voucher = Voucher()
form.populate_obj(voucher)
db.session.add(voucher)
db.session.commit()
return redirect(url_for('.vouchers_new', code=voucher.code))
return render_template('vouchers/new.html', form=form, defaults=defaults)
@bp.route('/wifidog/login/', methods=['GET', 'POST'])
def wifidog_login():
form = LoginVoucherForm(request.form)
if form.validate_on_submit():
voucher_code = form.voucher_code.data.upper()
voucher = Voucher.query.filter_by(code=voucher_code, status='new').first()
if voucher is None:
flash(
'Voucher not found, did you type the code correctly?',
'error'
)
return redirect(request.referrer)
form.populate_obj(voucher)
voucher.token = generate_token()
db.session.commit()
session['voucher_token'] = voucher.token
url = ('http://%s:%s/wifidog/auth?token=%s' %
(voucher.gw_address,
voucher.gw_port,
voucher.token))
return redirect(url)
if request.method == 'GET':
gateway_id = request.args.get('gw_id')
else:
gateway_id = form.gateway_id.data
if gateway_id is None:
abort(404)
gateway = Gateway.query.filter_by(id=gateway_id).first_or_404()
return render_template('wifidog/login.html', form=form, gateway=gateway)
@bp.route('/wifidog/ping/')
def wifidog_ping():
return ('Pong', 200)
@bp.route('/wifidog/auth/')
def wifidog_auth():
auth = Auth(
user_agent=request.user_agent.string,
stage=request.args.get('stage'),
ip=request.args.get('ip'),
mac=request.args.get('mac'),
token=request.args.get('token'),
incoming=int(request.args.get('incoming')),
outgoing=int(request.args.get('outgoing')),
gateway_id=request.args.get('gw_id')
)
(auth.status, auth.messages) = auth.process_request()
db.session.add(auth)
db.session.commit()
def generate_point(measurement):
return {
"measurement": 'auth_%s' % measurement,
"tags": {
"source": "auth",
"network_id": auth.gateway.network_id,
"gateway_id": auth.gateway_id,
"user_agent": auth.user_agent,
"stage": auth.stage,
"ip": auth.ip,
"mac": auth.mac,
"token": auth.token,
},
"time": auth.created_at,
"fields": {
"value": getattr(auth, measurement),
}
}
# points = [generate_point(m) for m in [ 'incoming', 'outgoing' ]]
# influx_db.connection.write_points(points)
return ("Auth: %s\nMessages: %s\n" % (auth.status, auth.messages), 200)
@bp.route('/wifidog/portal/')
def wifidog_portal():
voucher_token = session.get('voucher_token')
if voucher_token:
voucher = Voucher.query.filter_by(token=voucher_token).first()
else:
voucher = None
gateway_id = request.args.get('gw_id')
if gateway_id is None:
abort(404)
gateway = Gateway.query.filter_by(id=gateway_id).first_or_404()
logo_url = None
if gateway.logo:
logo_url = logos.url(gateway.logo)
return render_template('wifidog/portal.html',
gateway=gateway,
logo_url=logo_url,
voucher=voucher)
@bp.route('/pay')
def pay():
return_url = url_for('.pay_return', _external=True)
cancel_url = url_for('.pay_cancel', _external=True)
response = set_transaction('ZAR',
1000,
'Something',
return_url,
cancel_url)
return redirect('%s?PayUReference=%s' % (capture, response.payUReference))
@bp.route('/pay/return')
def pay_return():
response = get_transaction(request.args.get('PayUReference'))
basketAmount = '{:.2f}'.format(int(response.basket.amountInCents) / 100)
category = 'success' if response.successful else 'error'
flash(response.displayMessage, category)
return render_template('payu/transaction.html',
response=response,
basketAmount=basketAmount)
@bp.route('/pay/cancel')
def pay_cancel():
response = get_transaction(request.args.get('payUReference'))
basketAmount = '{:.2f}'.format(int(response.basket.amountInCents) / 100)
flash(response.displayMessage, 'warning')
return render_template('payu/transaction.html',
response=response,
basketAmount=basketAmount)
@bp.route('/favicon.ico')
def favicon():
directory = os.path.join(current_app.root_path, 'static')
return send_from_directory(directory,
'favicon.ico',
mimetype='image/vnd.microsoft.icon')
@bp.route('/auth-token')
@login_required
def auth_token():
return current_user.get_auth_token()
@bp.route('/healthcheck')
@auth_token_required
def healthcheck():
return healthcheck_service.check()
@bp.route('/environment')
@auth_token_required
def environment():
return environment_dump.dump_environment()
@bp.route('/')
def home():
return redirect(url_for('security.login'))
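# A minimal smoke-test sketch for the wifidog endpoints above. The base URL,
# gateway id and voucher code are hypothetical, and a live app with CSRF
# enabled may reject the bare POST; this only illustrates the request shapes.
import requests

BASE = 'http://localhost:5000'

def smoke_test():
    # ping must always answer Pong for the gateway's heartbeat check
    assert requests.get(BASE + '/wifidog/ping/').text == 'Pong'
    # the login page requires a gw_id query parameter
    r = requests.get(BASE + '/wifidog/login/', params={'gw_id': '1'})
    assert r.status_code == 200
    # posting a voucher code should redirect to the gateway's auth URL
    r = requests.post(BASE + '/wifidog/login/',
                      data={'gateway_id': '1', 'voucher_code': 'ABC123'},
                      allow_redirects=False)
    print(r.status_code, r.headers.get('Location'))

if __name__ == '__main__':
    smoke_test()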
|
nilq/baby-python
|
python
|
# Copyright (c) Naas Development Team.
# Distributed under the terms of the Modified BSD License.
import os
c = get_config()
c.NotebookApp.ResourceUseDisplay.track_cpu_percent = True
c.NotebookApp.ResourceUseDisplay.mem_warning_threshold = 0.1
c.NotebookApp.ResourceUseDisplay.cpu_warning_threshold = 0.1
# We rely on environment variables to configure JupyterHub so that we
# avoid having to rebuild the JupyterHub container every time we change a
# configuration parameter.
# Spawn single-user servers as Kubernetes pods (KubeSpawner)
c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
c.JupyterHub.logo_file = "/srv/jupyterhub/naas_logo.svg"
c.JupyterHub.service_tokens = {
'secret-token': os.environ.get('ADMIN_API_TOKEN', 'SHOULD_BE_CHANGED'),
}
c.KubeSpawner.image = os.environ['DOCKER_NOTEBOOK_IMAGE']
c.KubeSpawner.image_pull_policy = 'Always'
# JupyterHub requires a single-user instance of the Notebook server, so we
# default to using the `start-singleuser.sh` script included in the
# jupyter/docker-stacks *-notebook images as the Docker run command when
# spawning containers. Optionally, you can override the Docker run command
# using the DOCKER_SPAWN_CMD environment variable.
c.KubeSpawner.environment = {
'JUPYTERHUB_URL': os.environ.get('JUPYTERHUB_URL', ''),
'PUBLIC_DK_API': os.environ.get('PUBLIC_DK_API', ''),
'TC_API_SCREENSHOT': os.environ.get('TC_API_SCREENSHOT', ''),
'ALLOWED_IFRAME': os.environ.get('ALLOWED_IFRAME', ''),
'TZ': os.environ.get('TZ', 'Europe/Paris')
}
c.KubeSpawner.cpu_guarantee = float(os.environ.get('KUBE_CPU_GUAR', 0.3))
c.KubeSpawner.cpu_limit = float(os.environ.get('KUBE_CPU_LIMIT', 1.0))
c.KubeSpawner.mem_limit = os.environ.get('KUBE_MEM_LIMIT', '4G')
c.KubeSpawner.mem_guarantee = os.environ.get('KUBE_MEM_GUAR', '500M')
# Explicitly set notebook directory because we'll be mounting a host volume to
# it. Most jupyter/docker-stacks *-notebook images run the Notebook server as
# user `jovyan`, and set the notebook directory to `/home/jovyan/work`.
# We follow the same convention.
notebook_dir = os.environ.get('DOCKER_NOTEBOOK_DIR') or '/home/ftp'
c.KubeSpawner.notebook_dir = notebook_dir
# Mount the shared NFS volume into the notebook user's home directory
c.KubeSpawner.volumes = [
{
'name': 'nfs-root',
'nfs': {
'server': os.environ.get('VOLUME_SERVER', 'fs-b87bd009.efs.eu-west-3.amazonaws.com'),
'path': '/'
}
}
]
c.KubeSpawner.volume_mounts = [
{
'name': 'nfs-root',
        'mountPath': notebook_dir,
'subPath': os.environ.get('KUBE_NAMESPACE', 'prod') + '/ftpusers/{username}'
}
]
# This is used to set proper rights on NFS mount point.
c.KubeSpawner.lifecycle_hooks = {
"postStart": {
"exec": {
"command": ["/bin/sh", "-c", f"chown -R 21:21 {os.environ.get('DOCKER_NOTEBOOK_DIR')}"]
}
}
}
c.KubeSpawner.extra_pod_config = {
"subdomain": "jupyter-single-user",
"hostname": "jupyter-{username}",
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "jupyterNodeGroup",
"operator": "In",
"values": [
"true"
]
}
]
}
]
}
}
},
"tolerations": [
{
"key": "jupyter",
"operator": "Equal",
"value": "true",
"effect": "NoSchedule"
}
]
}
c.KubeSpawner.extra_labels = {
"name": "jupyter-single-user"
}
# For debugging arguments passed to spawned containers
c.KubeSpawner.debug = True
c.KubeSpawner.start_timeout = 120
# User containers will access hub by container name on the Docker network
c.JupyterHub.hub_ip = os.environ.get('HOST', '0.0.0.0')
c.JupyterHub.hub_port = int(os.environ.get('PORT', 8081))
c.KubeSpawner.hub_connect_ip = 'hub'
# Authenticate users with the local Naas authenticator
c.JupyterHub.authenticator_class = 'naasauthenticator.NaasAuthenticator'
c.Authenticator.check_common_password = True
c.Authenticator.minimum_password_length = 10
c.Authenticator.allowed_failed_logins = 10
# Persist hub data on volume mounted inside container
data_dir = os.environ.get('DATA_VOLUME_CONTAINER', '/data')
c.JupyterHub.cookie_secret_file = os.path.join(data_dir,
'jupyterhub_cookie_secret')
c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format(
host=os.environ['POSTGRES_HOST'],
password=os.environ['POSTGRES_PASSWORD'],
db=os.environ['POSTGRES_DB'],
)
c.JupyterHub.tornado_settings = {
'headers': {
        'Content-Security-Policy': "frame-ancestors 'self' " + os.environ.get('ALLOWED_IFRAME', '')
}
}
# Whitelist users and admins
c.Authenticator.whitelist = whitelist = set()
c.Authenticator.admin_users = admin = set()
c.JupyterHub.admin_access = True
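# Pre-flight sketch (separate from the config itself): the settings above
# raise KeyError at startup when these variables are unset, so a deploy
# script could verify them first. The variable names come from this file;
# the check itself is illustrative.
import sys

REQUIRED_ENV = ['DOCKER_NOTEBOOK_IMAGE', 'POSTGRES_HOST',
                'POSTGRES_PASSWORD', 'POSTGRES_DB']
missing = [name for name in REQUIRED_ENV if not os.environ.get(name)]
if missing:
    sys.exit('Missing required environment variables: ' + ', '.join(missing))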
|
nilq/baby-python
|
python
|
# FUNCTIONS
# EXAMPLE WITHOUT USING A FUNCTION :( (a refactored version follows the loop below)
rappers_choice = ["L7NNON", "KB", "Trip Lee", "Travis Scott", ["Lecrae", "Projota", "Tupac"], "Don Omar"]
rappers_country = {"BR":["Hungria", "Kamau", "Projota", "Mano Brown", "Luo", "L7NNON"],
"US":["Tupac", "Drake", "Eminem", "KB", "Kanye West", "Lecrae", "Travis Scott", "Trip Lee"]}
for rp in rappers_choice:
if isinstance(rp, list):
for rp_one in rp:
if rp_one in rappers_country["BR"]:
print(f"Rapper BR: {rp_one}")
elif rp_one in rappers_country["US"]:
print(f"Rapper US: {rp_one}")
else:
print(f"Rapper not found in lists: {rp_one}")
else:
if rp in rappers_country["BR"]:
print(f"Rapper BR: {rp}")
elif rp in rappers_country["US"]:
print(f"Rapper US: {rp}")
else:
print(f"Rapper not found in lists: {rp}")
|
nilq/baby-python
|
python
|
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
def buildFeatureFrame(filename,timepoint):
temp = np.asarray(np.load(filename,allow_pickle=True)).item()
imfilename = temp['filename']
img = io.imread(imfilename)
masks = clear_border(temp['masks'])
image_props = measure.regionprops_table(masks,
intensity_image=img,
properties=('label','area','filled_area', 'centroid',
'eccentricity','mean_intensity'))
im_df = pd.DataFrame(image_props)
im_df['time'] = timepoint
return(im_df)
def generateCandidates(image1, image2, im1_select, dist_multiplier=2):
delX = np.sqrt((image1['centroid-0'][im1_select]-image2['centroid-0'])**2+
(image1['centroid-1'][im1_select]-image2['centroid-1'])**2)
max_dist = dist_multiplier*min(delX)
candidates = np.array(delX[delX < max_dist].index)
return(candidates)
def generateLinks(filename_t0, filename_t1,timepoint, nnDist = 10,costMax=35, mN_Int = 10, mN_Ecc=4, mN_Area=25, mN_Disp=1):
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
arr = pd.DataFrame()
for i in np.array(ip0.index):
candidates = generateCandidates(ip0, ip1, i, dist_multiplier=nnDist)
canFRAME = pd.DataFrame(candidates)
canFRAME["1"] = i
arr = arr.append(canFRAME)
arr = arr.rename(columns={0: "t1", "1": "t0"})
arr = arr.reset_index(drop=True)
properties = pd.DataFrame()
mInt_0 = float(np.median(ip0.loc[:,['mean_intensity']]))
mInt_1 = float(np.median(ip1.loc[:,['mean_intensity']]))
for link in np.array(arr.index):
tmp_props_0 = (ip0.loc[arr.loc[link,["t0"]],:])
tmp_props_1 = (ip1.loc[arr.loc[link,["t1"]],:])
deltaInt = (np.abs((int(tmp_props_0["mean_intensity"])/mInt_0)-(int(tmp_props_1["mean_intensity"])/mInt_1))/
np.mean([(int(tmp_props_0["mean_intensity"])/mInt_0),(int(tmp_props_1["mean_intensity"])/mInt_1)]))
deltaArea = (np.abs(int(tmp_props_0['area']) - int(tmp_props_1['area']))/
np.mean([int(tmp_props_0["area"]),int(tmp_props_1["area"])]))
deltaEcc = np.absolute(float(tmp_props_0['eccentricity']) - float(tmp_props_1['eccentricity']))
deltaX = np.sqrt((int(tmp_props_0['centroid-0'])-int(tmp_props_1['centroid-0']))**2+
(int(tmp_props_0['centroid-1'])-int(tmp_props_1['centroid-1']))**2)
properties = properties.append(pd.DataFrame([int(tmp_props_0['label']),int(tmp_props_1['label']),
deltaInt ,deltaArea,deltaEcc,deltaX]).T)
properties = properties.rename(columns={0: "label_t0", 1: "label_t1", 2: "deltaInt",
3: "deltaArea", 4: "deltaEcc", 5: "deltaX"})
properties = properties.reset_index(drop=True)
properties["Cost"]=(properties.loc[:,"deltaInt"]*mN_Int)+(properties.loc[:,"deltaEcc"]*mN_Ecc)+(properties.loc[:,"deltaArea"]*mN_Area)+(properties.loc[:,"deltaX"]*mN_Disp)
properties["TransitionCapacity"]=1
properties = properties.loc[properties["Cost"]<costMax]
properties = properties.reset_index(drop=True)
return(properties)
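# Worked example of the link cost above (illustrative numbers): with the
# default weights mN_Int=10, mN_Ecc=4, mN_Area=25, mN_Disp=1, a candidate
# pair with deltaInt=0.1, deltaEcc=0.05, deltaArea=0.2, deltaX=8 scores
#   Cost = 0.1*10 + 0.05*4 + 0.2*25 + 8*1 = 1.0 + 0.2 + 5.0 + 8.0 = 14.2
# which is kept, since it falls below the default costMax=35 cutoff.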
def DivSimScore(daughterCell_1, daughterCell_2, FrameNext):
daughterStats_1 = FrameNext[(FrameNext['label'] == daughterCell_1)]
daughterStats_2 = FrameNext[(FrameNext['label'] == daughterCell_2)]
deltaInt = (np.abs((int(daughterStats_1["mean_intensity"]))-(int(daughterStats_2["mean_intensity"])))/
np.mean([(int(daughterStats_1["mean_intensity"])),(int(daughterStats_2["mean_intensity"]))]))
deltaArea = (np.abs(int(daughterStats_1['area']) - int(daughterStats_2['area']))/
np.mean([int(daughterStats_1["area"]),int(daughterStats_2["area"])]))
deltaEcc = np.absolute(float(daughterStats_1['eccentricity']) - float(daughterStats_2['eccentricity']))
deltaX = np.sqrt((int(daughterStats_1['centroid-0'])-int(daughterStats_2['centroid-0']))**2+
(int(daughterStats_1['centroid-1'])-int(daughterStats_2['centroid-1']))**2)
sims = pd.DataFrame([int(daughterCell_1),int(daughterCell_2),
deltaInt ,deltaArea,deltaEcc,deltaX]).T
sims = sims.rename(columns={0: "label_D1", 1: "label_D2", 2: "D2deltaInt",
3: "D2deltaArea", 4: "D2deltaEcc", 5: "D2deltaX"})
return(sims)
def DivSetupScore(motherCell, daughterCell_1, daughterCell_2, FrameCurr, FrameNext):
#determine similarities between mother and daughters
simDF = DivSimScore(daughterCell_1, daughterCell_2, FrameNext)
#determine relative area of mother compared to daughters
MotherArea = int(FrameCurr[(FrameCurr['label'] == motherCell)]['area'])
daughterArea_1 = int(FrameNext[(FrameNext['label'] == daughterCell_1)]['area'])
daughterArea_2 = int(FrameNext[(FrameNext['label'] == daughterCell_2)]['area'])
areaChange = MotherArea/(daughterArea_1 + daughterArea_2)
simDF["MDDeltaArea"] = areaChange
return(simDF)
def DivisionCandidates(propMtx, filename_t0,filename_t1,timepoint,mS_Area = 10, mS_Ecc = 2, mS_Int = 2, mS_Disp = 1, MDAR_thresh = 0.75, SDis_thresh = 20.0):
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
Mothers = np.unique(propMtx.loc[:,['label_t0']])
DivCandidacy = pd.DataFrame()
for cell in Mothers:
DaughtersPossible = (propMtx[(propMtx['label_t0'] == cell)].loc[:,'label_t1'])
DaughtersPairs = np.array(np.meshgrid(DaughtersPossible, DaughtersPossible)).T.reshape(-1,2)
Sisters = np.unique(np.sort(DaughtersPairs),axis=0)
for pair in range(Sisters.shape[0]):
if (Sisters[pair,0] != Sisters[pair,1]):
tmpScoreSetup = (DivSetupScore(cell,Sisters[pair,0], Sisters[pair,1], ip0,ip1))
LogicMDAR = (tmpScoreSetup["MDDeltaArea"]>MDAR_thresh)
ScoreSDis = (mS_Int*tmpScoreSetup["D2deltaInt"]) + (mS_Area*tmpScoreSetup["D2deltaArea"]) + (mS_Ecc*tmpScoreSetup["D2deltaEcc"]) + (mS_Disp*tmpScoreSetup["D2deltaX"])
LogicSDis = (ScoreSDis<SDis_thresh)
tmpCandidacy = pd.DataFrame([cell,Sisters[pair,0],Sisters[pair,1],(LogicSDis&LogicMDAR).bool()]).T
DivCandidacy = DivCandidacy.append(tmpCandidacy)
DivCandidacy = DivCandidacy.rename(columns={0: "Mother", 1: "Daughter1", 2: "Daughter2",3: "Div"})
DivCandidacy = DivCandidacy.reset_index(drop=True)
# select true values
DivSelect = DivCandidacy[(DivCandidacy['Div'] == True)]
DivConnects_1 = DivSelect[['Mother','Daughter1','Div']]
DivConnects_2 = DivSelect[['Mother','Daughter2','Div']]
DivConnects_1 = DivConnects_1.rename(columns={'Mother': "label_t0", 'Daughter1': "label_t1"})
DivConnects_2 = DivConnects_2.rename(columns={'Mother': "label_t0", 'Daughter2': "label_t1"})
DivConnects = pd.concat([DivConnects_1,DivConnects_2])
DivConnects = DivConnects.reset_index(drop=True)
return(DivConnects)
def UpdateConnectionsDiv(propMtx,DivCandidatesMtx):
propMtx.loc[propMtx['label_t0'].isin(np.unique(DivCandidatesMtx['label_t0'])),['TransitionCapacity']] = 2
for div in range(DivCandidatesMtx.shape[0]):
tmp_prop = propMtx.loc[(DivCandidatesMtx.loc[div,'label_t0'] ==propMtx['label_t0'])&(DivCandidatesMtx.loc[div,'label_t1'] ==propMtx['label_t1']),]
old_score = float(tmp_prop.loc[:,'Cost'])
new_score = (old_score/2)
propMtx.loc[(DivCandidatesMtx.loc[div,'label_t0'] ==propMtx['label_t0'])&(DivCandidatesMtx.loc[div,'label_t1'] ==propMtx['label_t1']),'Cost'] = new_score
return(propMtx)
def SolveMinCostTable(filename_t0, filename_t1, DivisionTable,timepoint, OpeningCost = 30, ClosingCost = 30):
#rename
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip0 = ip0.rename(columns={"label" : "label_t0"})
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
ip1 = ip1.rename(columns={"label" : "label_t1"})
ip0["slabel_t0"] = np.array(range(ip0.label_t0.shape[0]))+1
i0max = np.max(np.asarray(ip0["slabel_t0"]))
ip1["slabel_t1"] = np.array(range(i0max,i0max+ip1.label_t1.shape[0]))+1
i1max = np.max(np.asarray(ip1["slabel_t1"]))
i0_translation = ip0[["label_t0","slabel_t0"]]
i1_translation = ip1[["label_t1","slabel_t1"]]
result_tmp = pd.merge(DivisionTable, i0_translation, on=['label_t0'])
result = pd.merge(result_tmp, i1_translation, on=['label_t1'])
result_shorthand = result[['slabel_t0','slabel_t1','Cost','TransitionCapacity']]
transNodes0 = np.array(result_shorthand['slabel_t0'])
transNodes1 = np.array(result_shorthand['slabel_t1'])
transCosts = np.array(result_shorthand['Cost'])
transCaps = np.repeat(1,transNodes0.size)
sourceNodes0 = np.repeat([0],i1max)
sourceNodes1 = np.array(range(i1max))+1
sourceCosts = np.concatenate((np.repeat(1,ip0.shape[0]),np.repeat(OpeningCost,ip1.shape[0])), axis=None)
#Source capacities are dictated by which nodes could be splitting. Source capacity = 2 if there was a division candidate
tmpUnique0 = result_shorthand[["slabel_t0","TransitionCapacity"]].drop_duplicates()
HighCaps = tmpUnique0.loc[tmpUnique0["TransitionCapacity"]==2,]
LowCaps = pd.DataFrame(i0_translation).copy(deep=True)
LowCaps['Cap'] = 1
LowCaps.loc[LowCaps['slabel_t0'].isin(np.array(HighCaps['slabel_t0'])),'Cap'] = 2
sourceCaps = np.concatenate((np.array(LowCaps['Cap']),np.repeat(1,ip1.shape[0])), axis=None)
sinkNodes0 = np.array(range(i1max))+1
sinkNodes1 = np.repeat([i1max+1],i1max)
sinkCosts = np.concatenate((np.repeat(ClosingCost,ip0.shape[0]),np.repeat(1,ip1.shape[0])), axis=None)
sinkCaps = np.repeat(1,i1max)
# Define the directed graph for the flow.
min_cost_flow = pywrapgraph.SimpleMinCostFlow()
start_nodes = np.concatenate((sourceNodes0, transNodes0, sinkNodes0)).tolist()
end_nodes = np.concatenate((sourceNodes1, transNodes1, sinkNodes1)).tolist()
capacities = np.concatenate((sourceCaps, transCaps, sinkCaps)).tolist()
costs = np.concatenate((sourceCosts, transCosts, sinkCosts)).tolist()
source = 0
sink = i1max+1
supply_amount = np.max([i0max,i1max-i0max])
supplies = np.concatenate(([supply_amount],np.repeat(0,i1max),[-1*supply_amount])).tolist()
min_cost_flow = pywrapgraph.SimpleMinCostFlow()
# Add each arc.
for i in range(len(start_nodes)):
min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i],capacities[i], int(costs[i]))
# Add node supplies.
for i in range(len(supplies)):
min_cost_flow.SetNodeSupply(i, supplies[i])
ArcFrame = pd.DataFrame()
# Find the minimum cost flow between node 0 and node 4.
if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:
print('Minimum cost:', min_cost_flow.OptimalCost())
for i in range(min_cost_flow.NumArcs()):
cost = min_cost_flow.Flow(i) * min_cost_flow.UnitCost(i)
ArcFrame = ArcFrame.append(pd.DataFrame([min_cost_flow.Tail(i),
min_cost_flow.Head(i),
min_cost_flow.Flow(i),
min_cost_flow.Capacity(i),
cost]).T)
else:
print('There was an issue with the min cost flow input.')
ArcFrame = ArcFrame.rename(columns={0:'start',1:'end',2:"Flow",3:"Capacity",4:"Cost"})
#ArcFrame = ArcFrame.reset_index(drop=True)
FinalFrame = ArcFrame.loc[ArcFrame["Flow"]!=0,]
FinalFrame = FinalFrame.reset_index(drop=True)
return(FinalFrame)
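# A minimal standalone illustration of the SimpleMinCostFlow setup used in
# SolveMinCostTable: one source (0), two cells at t0 (1, 2), two cells at t1
# (3, 4) and a sink (5). Node ids, costs and capacities are made up for the
# demo; `_min_cost_flow_demo` is not part of the original pipeline.
def _min_cost_flow_demo():
    demo = pywrapgraph.SimpleMinCostFlow()
    arcs = [
        (0, 1, 1, 1), (0, 2, 1, 1),    # source -> t0 cells
        (1, 3, 1, 5), (1, 4, 1, 20),   # candidate transitions with link costs
        (2, 3, 1, 20), (2, 4, 1, 5),
        (3, 5, 1, 1), (4, 5, 1, 1),    # t1 cells -> sink
    ]
    for tail, head, cap, cost in arcs:
        demo.AddArcWithCapacityAndUnitCost(tail, head, cap, cost)
    demo.SetNodeSupply(0, 2)
    demo.SetNodeSupply(5, -2)
    if demo.Solve() == demo.OPTIMAL:
        # the cheap pairing (1->3, 2->4) is chosen: total cost 2*(1+5+1) = 14
        print('demo optimal cost:', demo.OptimalCost())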
def ReviewCostTable(minCostFlowtable, timepoint, OpeningCost=30,ClosingCost=30):
sink = max(minCostFlowtable["end"])
Transitions = minCostFlowtable.loc[(minCostFlowtable["start"]!=0)&(minCostFlowtable["end"]!=sink),]
trans_start_nodes = np.unique(Transitions["start"])
trans_end_nodes = np.unique(Transitions["end"])
#find nodes that either appear (no start) or disappear (no end)
appearing = minCostFlowtable[(~minCostFlowtable.start.isin(trans_start_nodes))&
(~minCostFlowtable.end.isin(trans_start_nodes))&
(~minCostFlowtable.start.isin(trans_end_nodes))&
(~minCostFlowtable.end.isin(trans_end_nodes))]
appearing = appearing.loc[(appearing["Cost"] == OpeningCost)|(appearing["Cost"] == ClosingCost)]
appearing = appearing.reset_index(drop=True)
appearFrame = pd.DataFrame()
for i in range(appearing.shape[0]):
if(appearing.loc[i,"start"] == 0):
    appearFrame = appearFrame.append(pd.DataFrame([-1,appearing.loc[i,"end"]]).T)
elif(appearing.loc[i,"end"] == sink):
    # a disappearing node keeps its start label; the sink id is not a cell label
    appearFrame = appearFrame.append(pd.DataFrame([appearing.loc[i,"start"],-1]).T)
appearFrame = appearFrame.rename(columns={0:"slabel_t0",1:"slabel_t1"})
appearFrame = appearFrame.reset_index(drop=True)
#Assemble
transFrame = Transitions.loc[:,["start","end"]]
transFrame = transFrame.rename(columns={"start":"slabel_t0","end":"slabel_t1"})
totalFrame = pd.concat([appearFrame,transFrame])
totalFrame = totalFrame.reset_index(drop=True)
totalFrame["timepoint"] = timepoint
return(totalFrame)
def TranslationTable(filename_t0, filename_t1, DivisionTable,timepoint):
#rename
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip0 = ip0.rename(columns={"label" : "label_t0"})
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
ip1 = ip1.rename(columns={"label" : "label_t1"})
ip0["slabel_t0"] = np.array(range(ip0.label_t0.shape[0]))+1
i0max = np.max(np.asarray(ip0["slabel_t0"]))
ip1["slabel_t1"] = np.array(range(i0max,i0max+ip1.label_t1.shape[0]))+1
i1max = np.max(np.asarray(ip1["slabel_t1"]))
i0_translation = ip0[["label_t0","slabel_t0"]]
i1_translation = ip1[["label_t1","slabel_t1"]]
dvtabDF = DivisionTable
result_tmp = pd.merge(dvtabDF, i0_translation, on=['label_t0'])
translation_table = pd.merge(result_tmp, i1_translation, on=['label_t1'])
#result_shorthand = result[['slabel_t0','slabel_t1','Cost','TransitionCapacity']]
startLabels = translation_table.loc[:,["label_t0","slabel_t0"]]
startLabels["timepoint"] = timepoint
startLabels["frame"] = timepoint+1
endLabels = translation_table.loc[:,["label_t1","slabel_t1"]]
endLabels["timepoint"] = timepoint+1
endLabels["frame"] = timepoint+2
startLabels = startLabels.rename(columns={"label_t0":"label","slabel_t0":"slabel"})
endLabels = endLabels.rename(columns={"label_t1":"label","slabel_t1":"slabel"})
allLabels = pd.concat([startLabels,endLabels])
allLabels = allLabels.reset_index(drop=True)
allLabels = allLabels.astype( 'int64')
allLabels["Master_ID"] = allLabels["timepoint"].astype('str')+"_"+allLabels["label"].astype('str')
allLabels = allLabels.astype({"Master_ID":'str'})
allLabels["RajTLG_ID"] = allLabels["frame"]*int(10**(np.ceil(np.log10(max(allLabels['slabel'])))+2))+allLabels["label"]
allLabels = allLabels.drop_duplicates()
allLabels = allLabels.reset_index(drop=True)
return(allLabels)
def TranslateConnections(ConnectionTable, TranslationTable, timepoint, preference = "Master_ID"):
subTranslationTable_0 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_0['slabel_t0'] = subTranslationTable_0['slabel']
subTranslationTable_1 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_1['slabel_t1'] = subTranslationTable_1['slabel']
merge_0 = pd.merge(ConnectionTable, subTranslationTable_0, on="slabel_t0")
merge = pd.merge(merge_0, subTranslationTable_1, on="slabel_t1")
pref = str(preference)
result = merge.loc[:,[pref+"_x",pref+"_y"]]
result = result.drop_duplicates()
result = result.dropna(thresh=1)
result = result.reset_index(drop=True)
result = result.rename(columns = {(pref+"_x") : (pref+"_"+str(timepoint)), (pref+"_y") : (pref+"_"+str(timepoint+1))})
return(result)
def RajTLG_wrap(filename_t0, filename_t1,timepoint,ConnectionTable,TranslationTable):
frame0 = buildFeatureFrame(filename_t0,timepoint)
frame1 = buildFeatureFrame(filename_t1,timepoint+1)
frames = pd.concat([frame0,frame1])
frames["timepoint"] = frames["time"]
InfoDF = pd.merge(frames,TranslationTable, on=['label','timepoint'])
RajTLG_translation = TranslateConnections(ConnectionTable=ConnectionTable, TranslationTable=TranslationTable, timepoint=timepoint, preference="RajTLG_ID")
RajTLGFrame = pd.DataFrame()
if (timepoint == 0):
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-0"])
tmpParent = "NaN"
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-0"])
tmpParent = int(RajTLG_translation.loc[RajTLG_translation["RajTLG_ID"+"_"+str(timepoint+1)] == tmpID,
"RajTLG_ID"+"_"+str(timepoint)])
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
RajTLGFrame = RajTLGFrame.reset_index(drop=True)
RajTLGFrame = RajTLGFrame.rename(columns={0:"pointID", 1:"frameNumber",
2:"xCoord",3:"yCoord",4:"parentID"})
RajTLGFrame["annotation"] = "none"
#RajTLGFrame.to_csv(outfilename,index=False)
return(RajTLGFrame)
def HCR_connect(sampleName, TLlast_mask, HCR_mask, timepoint, nnDist=3, costMax=35, mN_Int=10, mN_Ecc=4, mN_Area=25, mN_Disp=1, mS_Area = 10, mS_Ecc = 2, mS_Int = 2, mS_Disp = 1, MDAR_thresh = 0.75, SDis_thresh = 20.0, openingCost = 30, closingCost = 30):
propies = generateLinks(filename_t0 = TLlast_mask, filename_t1 = HCR_mask,
timepoint = timepoint, nnDist = nnDist,
costMax = costMax, mN_Int = mN_Int,
mN_Ecc = mN_Ecc, mN_Area = mN_Area,
mN_Disp = mN_Disp)
tmpdivs = DivisionCandidates(propMtx = propies,
filename_t0 = TLlast_mask, filename_t1 = HCR_mask,
MDAR_thresh = MDAR_thresh, SDis_thresh = SDis_thresh,
mS_Disp = mS_Disp, mS_Area = mS_Area,
mS_Ecc = mS_Ecc, mS_Int = mS_Int,
timepoint = timepoint)
finaldivs = UpdateConnectionsDiv(propies, tmpdivs)
minCost_table = SolveMinCostTable(TLlast_mask, HCR_mask,
DivisionTable=finaldivs,
timepoint=timepoint,
OpeningCost = openingCost,
ClosingCost = closingCost)
finTable = ReviewCostTable(minCostFlowtable = minCost_table, timepoint=timepoint)
translation_table = TranslationTable(TLlast_mask, HCR_mask, DivisionTable=finaldivs,
timepoint=timepoint)
masterConnects_Raj = TranslateConnections(finTable, translation_table, timepoint=timepoint, preference="RajTLG_ID")
masterConnects_Master = TranslateConnections(finTable, translation_table, timepoint=timepoint, preference="Master_ID")
col_df = finTable[(finTable['slabel_t0']!=-1)&(finTable['slabel_t1']!=-1)]
col_df.to_csv('results/'+sampleName+'/HCR/'+sampleName+'_HCR_connect.csv', index=False)
translation_table.to_csv('results/'+sampleName+'/HCR/'+sampleName+'_HCR_translation.csv', index=False)
masterConnects_Raj.to_csv('results/'+sampleName+'/HCR/'+sampleName+'_HCR_connections_RajLab.csv', index=False)
masterConnects_Master.to_csv('results/'+sampleName+'/HCR/'+sampleName+'_HCR_connections_MasterID.csv', index=False)
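# Hypothetical driver for the pipeline above: the sample name, .npy mask
# paths and timepoint are placeholders, and results/<sampleName>/HCR/ must
# exist before HCR_connect writes its CSV outputs.
if __name__ == '__main__':
    HCR_connect(sampleName='sample01',
                TLlast_mask='masks/sample01_TL_last.npy',
                HCR_mask='masks/sample01_HCR.npy',
                timepoint=10)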
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""This script adds Type B uncertainties to those given in the .apu file.
"""
from sys import exit
from glob import glob
from numpy import matrix
from math import radians, sin, cos, sqrt, atan2, degrees
def dd2dms(dd):
minutes, seconds = divmod(abs(dd) * 3600, 60)
degrees, minutes = divmod(minutes, 60)
dms = degrees + (minutes / 100) + (seconds / 10000)
return dms if dd >= 0 else -dms
def dms2dd(dms):
degmin, seconds = divmod(abs(dms) * 1000, 10)
degrees, minutes = divmod(degmin, 100)
dd = degrees + (minutes / 60) + (seconds / 360)
return dd if dms >= 0 else -dd
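# Example of the HP (degrees.MMSSsss) notation these helpers convert:
# dd2dms(12.58246917) -> 12.3456889, i.e. 12 deg 34 min 56.889 sec,
# and dms2dd(12.3456889) recovers ~12.58246917 decimal degrees.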
def rotation_matrix(lat, lon):
"""Returns the 3x3 rotation matrix for a given latitude and longitude
(given in decimal degrees)
See Section 4.2.3 of the DynaNet User's Guide v3.3
"""
(rlat, rlon) = (radians(lat), radians(lon))
rot_matrix = matrix(
[[-sin(rlon), -sin(rlat)*cos(rlon), cos(rlat)*cos(rlon)],
[cos(rlon), -sin(rlat)*sin(rlon), cos(rlat)*sin(rlon)],
[0.0, cos(rlat), sin(rlat)]]
)
return rot_matrix
def vcv_cart2local(vcv_cart, lat, lon):
"""Transforms a 3x3 VCV from the Cartesian to the local reference frame
See Section 4.4.1 of the DynaNet User's Guide v3.3
"""
rot_matrix = rotation_matrix(lat, lon)
vcv_local = rot_matrix.transpose() * vcv_cart * rot_matrix
return vcv_local
def error_ellipse(vcv):
"""Calculate the semi-major axis, semi-minor axis, and the orientation of
the error ellipse calculated from a 3x3 VCV
See Section 7.3.3.1 of the DynaNet User's Guide v3.3
"""
z = sqrt((vcv[0, 0] - vcv[1, 1])**2 + 4 * vcv[0, 1]**2)
a = sqrt(0.5 * (vcv[0, 0] + vcv[1, 1] + z))
b = sqrt(0.5 * (vcv[0, 0] + vcv[1, 1] - z))
orientation = 90 - degrees(0.5 * atan2((2 * vcv[0, 1]),
(vcv[0, 0] - vcv[1, 1])))
return a, b, orientation
def circ_hz_pu(a, b):
"""Calculate the circularised horizontal PU(95%) from the semi-major and
semi-minor axes
"""
q0 = 1.960790
q1 = 0.004071
q2 = 0.114276
q3 = 0.371625
c = b / a
k = q0 + q1 * c + q2 * c**2 + q3 * c**3
r = a * k
return r
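# Worked example: for a = 0.005 m and b = 0.003 m, c = 0.6 and
# k = 1.960790 + 0.004071*0.6 + 0.114276*0.36 + 0.371625*0.216 ~= 2.0846,
# giving a circularised horizontal PU of r = 0.005 * 2.0846 ~= 0.0104 m.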
# Determine the files to use
apuFiles = glob('*.apu')
if (len(apuFiles) == 1):
apuFile = apuFiles[0]
elif (len(apuFiles) == 0):
exit('\nThere is no apu file to work on\n')
else:
print('\nThere are multiple apu files:')
i = 0
for apuFile in apuFiles:
i += 1
print('\t' + str(i) + '\t' + apuFile)
fileNum = input('Type the number of the file you want to check: ')
if int(fileNum) < 1 or int(fileNum) > len(apuFiles):
exit('Invalid response. Select a number between 1 and ' +
str(len(apuFiles)))
apuFile = apuFiles[int(fileNum) - 1]
# Set the Type B uncertainties
rvsE = 0.003
rvsN = 0.003
rvsU = 0.006
nonRvsE = 0.006
nonRvsN = 0.006
nonRvsU = 0.012
# Create a list of RVS stations
rvsStations = ['ALBY', 'ALIC_2011201', 'ANDA', 'ARMC', 'ARUB', 'BALA', 'BBOO',
'BDLE', 'BDVL', 'BEEC', 'BING', 'BKNL', 'BNDY', 'BRO1', 'BROC', 'BULA',
'BUR2', 'BURA', 'CEDU', 'CNBN', 'COEN', 'COOB', 'COOL', 'DARW_2003094',
'DODA', 'EDSV', 'ESPA_2016055', 'EXMT', 'FLND', 'FROY', 'GABO', 'GASC',
'HERN', 'HIL1_2006222', 'HNIS', 'HOB2_2004358', 'HUGH', 'HYDN', 'IHOE',
'JAB2_2016065', 'JERV', 'JLCK', 'KALG', 'KARR_2013254', 'KAT1', 'KELN',
'KGIS', 'KILK', 'KMAN', 'LAMB', 'LARR_2011062', 'LIAW', 'LKYA', 'LONA',
'LORD_2014185', 'LURA', 'MAIN', 'MEDO', 'MOBS_2004358', 'MRO1', 'MTCV',
'MTDN', 'MTEM', 'MTMA', 'MULG', 'NBRK', 'NCLF', 'NEBO', 'NHIL', 'NMTN',
'NNOR_2012276', 'NORF', 'NORS', 'NSTA', 'NTJN', 'PARK', 'PERT_2012297',
'PTHL', 'PTKL', 'PTLD_2012123', 'RAVN', 'RKLD', 'RNSP_2015349', 'RSBY',
'SA45', 'SPBY_2011326', 'STNY', 'STR1_2003311', 'SYDN', 'TBOB', 'THEV',
'TID1_2004348', 'TMBO', 'TOMP', 'TOOW', 'TOW2_2011266', 'TURO', 'UCLA',
'WAGN', 'WALH', 'WARA', 'WILU', 'WLAL', 'WMGA', 'WWLG', 'XMIS_2014177',
'YAR2_2013171', 'YEEL', 'YELO_2016082']
# Open output file
fout = open(apuFile + '.typeB', 'w')
# Read in the apu file
apuLines = []
i = 0
with open(apuFile) as f:
for line in f:
if line.startswith('Station'):  # header row; match the prefix rather than a fixed-width slice
j = i + 2
apuLines.append(line.rstrip())
i += 1
# Print out the header info
for line in apuLines[:j]:
fout.write(line + '\n')
# Loop over the .apu file and read in the uncertainty info
stations = []
hpLat = {}
hpLon = {}
lat = {}
lon = {}
hPU = {}
vPU = {}
semiMajor = {}
semiMinor = {}
orient = {}
xLine = {}
xVar = {}
xyCoVar = {}
xzCoVar = {}
yLine = {}
yVar = {}
yzCoVar = {}
zLine = {}
zVar = {}
for line in apuLines[j:]:
cols = line.split()
numCols = len(cols)
if numCols == 2:
yLine[station] = line
yVar[station] = float(line[131:150].strip())
yzCoVar[station] = float(line[150:].strip())
elif numCols == 1:
zLine[station] = line
zVar[station] = float(line[150:].strip())
else:
station = line[:20].rstrip()
stations.append(station)
hpLat[station] = float(line[23:36])
hpLon[station] = float(line[38:51])
lat[station] = dms2dd(hpLat[station])
lon[station] = dms2dd(hpLon[station])
hPU[station] = float(line[51:62].strip())
vPU[station] = float(line[62:73].strip())
semiMajor[station] = float(line[73:86].strip())
semiMinor[station] = float(line[86:99].strip())
orient[station] = float(line[99:112].strip())
xLine[station] = line[112:]
xVar[station] = float(line[112:131].strip())
xyCoVar[station] = float(line[131:150].strip())
xzCoVar[station] = float(line[150:].strip())
# Create the full Cartesian VCV from the upper triangular
vcv_cart = {}
for stat in stations:
vcv_cart[stat] = matrix([[xVar[stat], xyCoVar[stat], xzCoVar[stat]],
[xyCoVar[stat], yVar[stat], yzCoVar[stat]],
[xzCoVar[stat], yzCoVar[stat], zVar[stat]]
])
# Loop over all the stations
for stat in stations:
# Transform the XYZ VCV to ENU
vcv_local = vcv_cart2local(vcv_cart[stat], lat[stat], lon[stat])
# Add the Type B uncertainty
if stat in rvsStations:
vcv_local[0, 0] += rvsE**2
vcv_local[1, 1] += rvsN**2
vcv_local[2, 2] += rvsU**2
else:
vcv_local[0, 0] += nonRvsE**2
vcv_local[1, 1] += nonRvsN**2
vcv_local[2, 2] += nonRvsU**2
# Calculate the semi-major axis, semi-minor axis and orientation, and
# convert the orientation from decimal degrees to HP notation
a, b, orientation = error_ellipse(vcv_local)
orientation = dd2dms(orientation)
# Calculate the PUs
hz_pu = circ_hz_pu(a, b)
vt_pu = 1.96 * sqrt(vcv_local[2, 2])
# Output the uncertainties
line = '{:20}{:>16.9f}{:>15.9f}{:11.4f}{:11.4f}{:13.4f}{:13.4f}{:13.4f}'. \
format(stat, hpLat[stat], hpLon[stat], hz_pu, vt_pu, a, b,
orientation)
line += xLine[stat]
fout.write(line + '\n')
fout.write(yLine[stat] + '\n')
fout.write(zLine[stat] + '\n')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018 ZhicongYan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import os
import sys
sys.path.append('./')
sys.path.append('../')
from cfgs.networkconfig import get_config
import tensorflow as tf
from tensorflow import layers as tl
import tensorflow.contrib.layers as tcl
import numpy as np
from tensorflow.python import pywrap_tensorflow
from netutils.weightsinit import get_weightsinit
from netutils.activation import get_activation
from netutils.normalization import get_normalization
class BaseNetwork(object):
def __init__(self, config, is_training):
assert('name' in config)
self.name = config['name']
self.is_training = is_training
self.moving_variables_collection = 'BATCH_NORM_MOVING_VARS'
self.using_tcl_library = config.get('use tcl library', False)
self.norm_params = {
'is_training' : self.is_training,
'moving_vars_collection' : self.moving_variables_collection
}
self.config = config
# when first applying the network to input tensor, the reuse is false
self.reuse = False
self.end_points = {}
act_fn = self.config.get('activation', 'relu')
output_act_fn = self.config.get('output_activation', 'none')
has_bias = self.config.get('has bias', True)
conv_has_bias = self.config.get('conv has bias', has_bias)
fc_has_bias = self.config.get('fc has bias', has_bias)
out_has_bias = self.config.get('out has bias', has_bias)
norm_fn = self.config.get('normalization', 'batch_norm')
norm_params = self.norm_params.copy()
norm_params.update(self.config.get('normalization params', {}))
winit_fn = self.config.get('weightsinit', 'xavier')
binit_fn = self.config.get('biasesinit', 'zeros')
padding = self.config.get('padding', 'SAME')
self.conv_args = {
'norm_fn':norm_fn,
'norm_params':norm_params,
'act_fn':act_fn,
'winit_fn':winit_fn,
'binit_fn':binit_fn,
'padding':padding,
'has_bias':conv_has_bias,
}
self.fc_args = {
'norm_fn':norm_fn,
'norm_params':norm_params,
'act_fn':act_fn,
'winit_fn':winit_fn,
'binit_fn':binit_fn,
'has_bias':fc_has_bias,
}
self.deconv_args = {
'norm_fn':norm_fn,
'norm_params':norm_params,
'act_fn':act_fn,
'winit_fn':winit_fn,
'binit_fn':binit_fn,
'padding':padding,
'has_bias' : conv_has_bias,
}
self.out_conv_args = {
'act_fn':output_act_fn,
'winit_fn':winit_fn,
'binit_fn':binit_fn,
'padding':padding,
'has_bias' : out_has_bias,
}
self.out_fc_args = {
'act_fn':output_act_fn,
'winit_fn':winit_fn,
'binit_fn':binit_fn,
'has_bias': out_has_bias,
}
def uniform_initializer(self, stdev):
return tf.random_uniform_initializer(-stdev*np.sqrt(3), stdev*np.sqrt(3))
def conv2d(self, name, x, nb_filters, ksize, stride=1, *,
norm_fn='none', norm_params=None, act_fn='none', winit_fn='xavier', binit_fn='zeros', padding='SAME', has_bias=True,
disp=True, collect_end_points=True):
if callable(act_fn):
act_fn_str = 'func'
act_fn = act_fn
else:
act_fn_str = self.config.get(name + ' activation', act_fn)
act_fn = get_activation(act_fn_str)
if callable(norm_fn):
norm_fn_str = 'func'
norm_fn = norm_fn
else:
norm_fn_str = self.config.get(name + ' normalization', norm_fn)
norm_fn = get_normalization(norm_fn_str)
winit_fn_str = self.config.get(name + ' weightsinit', winit_fn)
if 'special' in winit_fn_str:
split = winit_fn_str.split()
winit_name = split[0]
if winit_name == 'he_uniform':
input_nb_filters = int(x.get_shape()[-1])
fan_in = input_nb_filters * (ksize**2)
fan_out = nb_filters * (ksize**2) / (stride**2)
filters_stdev = np.sqrt(4.0/(fan_in + fan_out))
winit_fn = self.uniform_initializer(filters_stdev)
else:
raise Exception('Error weights initializer function name : ' + winit_fn_str)
else:
winit_fn = get_weightsinit(winit_fn_str)
binit_fn_str = self.config.get(name + ' biasesinit', binit_fn)
binit_fn = get_weightsinit(binit_fn_str)
_padding = self.config.get(name + ' padding', padding)
if self.using_tcl_library:
x = tcl.conv2d(x, nb_filters, ksize, stride=stride,
activation_fn=act_fn,
normalizer_fn=norm_fn,
normalizer_params=norm_params,
weights_initializer=winit_fn,
padding=_padding,
scope=name)
else:
x = tl.conv2d(x, nb_filters, ksize, strides=stride,
padding=_padding,
use_bias=has_bias,
kernel_initializer=winit_fn,
bias_initializer=binit_fn,
trainable=True,
name=name)
with tf.variable_scope(name):
if norm_fn is not None:
norm_params = norm_params or {}
x = norm_fn(x, **norm_params)
if act_fn is not None:
x = act_fn(x)
if disp:
print('\t\tConv2D(' + str(name) + ') --> ', x.get_shape(), ' ', (act_fn_str, norm_fn_str, winit_fn_str, _padding))
if collect_end_points:
self.end_points[name] = x
return x
def deconv2d(self, name, x, nb_filters, ksize, stride, *,
norm_fn='none', norm_params=None, act_fn='relu', winit_fn='xavier', binit_fn='zeros', padding='SAME', has_bias=True,
disp=True, collect_end_points=True):
if callable(act_fn):
act_fn_str = 'func'
act_fn = act_fn
else:
act_fn_str = self.config.get(name + ' activation', act_fn)
act_fn = get_activation(act_fn_str)
if callable(norm_fn):
norm_fn_str = 'func'
norm_fn = norm_fn
else:
norm_fn_str = self.config.get(name + ' normalization', norm_fn)
norm_fn = get_normalization(norm_fn_str)
winit_fn_str = self.config.get(name + ' weightsinit', winit_fn)
if 'special' in winit_fn_str:
split = winit_fn_str.split()
winit_name = split[0]
if winit_name == 'he_uniform':
input_nb_filters = int(x.get_shape()[-1])
fan_in = input_nb_filters * (ksize**2) / (stride**2)
fan_out = nb_filters * (ksize**2)
filters_stdev = np.sqrt(4.0/(fan_in + fan_out))
winit_fn = self.uniform_initializer(filters_stdev)
else:
raise Exception('Error weights initializer function name : ' + winit_fn_str)
else:
winit_fn = get_weightsinit(winit_fn_str)
binit_fn_str = self.config.get(name + ' biasesinit', binit_fn)
binit_fn = get_weightsinit(binit_fn_str)
_padding = self.config.get(name + ' padding', padding)
if self.using_tcl_library:
x = tcl.conv2d_transpose(x, nb_filters, ksize, stride=stride,
use_bias=True,
activation_fn=act_fn,
normalizer_fn=norm_fn,
normalizer_params=norm_params,
weights_initializer=winit_fn,
padding=_padding,
scope=name)
else:
x = tl.conv2d_transpose(x, nb_filters, ksize, strides=stride,
padding=_padding,
use_bias=has_bias,
kernel_initializer=winit_fn,
bias_initializer=binit_fn,
trainable=True, name=name)
with tf.variable_scope(name):
if norm_fn is not None:
norm_params = norm_params or {}
x = norm_fn(x, **norm_params)
if act_fn is not None:
x = act_fn(x)
if disp:
print('\t\tDeconv2D(' + str(name) + ') --> ', x.get_shape(), ' ', (act_fn_str, norm_fn_str, winit_fn_str, _padding))
if collect_end_points:
self.end_points[name] = x
return x
def fc(self, name, x, nb_nodes, *,
norm_fn='none', norm_params=None, act_fn='none', winit_fn='xavier', binit_fn='zeros', has_bias=True,
disp=True, collect_end_points=True):
if callable(act_fn):
act_fn_str = 'func'
act_fn = act_fn
else:
act_fn_str = self.config.get(name + ' activation', act_fn)
act_fn = get_activation(act_fn_str)
if callable(norm_fn):
norm_fn_str = 'func'
norm_fn = norm_fn
else:
norm_fn_str = self.config.get(name + ' normalization', norm_fn)
norm_fn = get_normalization(norm_fn_str)
winit_fn_str = self.config.get(name + ' weightsinit', winit_fn)
if 'special' in winit_fn_str:
split = winit_fn_str.split()
winit_name = split[0]
if winit_name == 'glorot_uniform':
input_nb_nodes = int(x.get_shape()[-1])
filters_stdev = np.sqrt(2.0/(input_nb_nodes + nb_nodes))
winit_fn = self.uniform_initializer(filters_stdev)
else:
raise Exception('Error weights initializer function name : ' + winit_fn_str)
else:
winit_fn = get_weightsinit(winit_fn_str)
binit_fn_str = self.config.get(name + ' biasesinit', binit_fn)
binit_fn = get_weightsinit(binit_fn_str)
if self.using_tcl_library:
x = tcl.fully_connected(x, nb_nodes,
activation_fn=act_fn, normalizer_fn=norm_fn, normalizer_params=norm_params,
weights_initializer=winit_fn, scope=name)
else:
x = tl.dense(x, nb_nodes, use_bias=has_bias, kernel_initializer=winit_fn,
bias_initializer=binit_fn,
trainable=True, name=name)
with tf.variable_scope(name):
if norm_fn is not None:
norm_params = norm_params or {}
x = norm_fn(x, **norm_params)
if act_fn is not None:
x = act_fn(x)
if disp:
print('\t\tFC(' + str(name) + ') --> ', x.get_shape(), ' ', (act_fn_str, norm_fn_str, winit_fn_str))
if collect_end_points:
self.end_points[name] = x
return x
def concat(self, name, x_list, disp=True, collect_end_points=True):
x = tf.concat(x_list, axis=3)
if disp:
print('\t\tConcat(' + str(name) + ') --> ', x.get_shape())
if collect_end_points:
self.end_points[name] = x
return x
def maxpool2d(self, name, x, size, stride, padding='SAME', disp=True, collect_end_points=True):
_padding = self.config.get(name + ' padding', padding)
x = tcl.max_pool2d(x, size, stride=stride, padding=_padding, scope=name)
if disp:
print('\t\tMaxPool(' + str(name) + ') --> ', x.get_shape())
if collect_end_points:
self.end_points[name] = x
return x
def upsample2d(self, name, x, size):
    # nearest-neighbour upsampling; `size` is assumed to be an integer scale
    # factor for an NHWC tensor (the original left this method as a stub)
    h = int(x.get_shape()[1])
    w = int(x.get_shape()[2])
    return tf.image.resize_images(x, [h * size, w * size],
                                  method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
def activation(self, x, act_fn='relu'):
if not callable(act_fn):
act_fn = get_activation(act_fn)
return act_fn(x)
def zero_padding2d(self, x, padding):
    if isinstance(padding, int):
        padding = ((padding, padding), (padding, padding))
    elif isinstance(padding, (list, tuple)) and isinstance(padding[0], int) and isinstance(padding[1], int):
        # parenthesised so the isinstance checks are not short-circuited by `or`
        padding = ((padding[0], padding[0]), (padding[1], padding[1]))
    else:
        raise ValueError('BaseNetwork : padding error')
    # pad only the spatial dimensions of an NHWC tensor
    paddings = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
    return tf.pad(x, paddings)
@property
def vars(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
@property
def trainable_vars(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
@property
def conv_vars(self):
return [var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name) if self.name+'/conv' in var.name]
@property
def top_vars(self):
return [var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name) if self.name+'/fc' in var.name]
@property
def store_vars(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name) + tf.get_collection(self.moving_variables_collection, scope=self.name)
@property
def moving_vars(self):
return tf.get_collection(self.moving_variables_collection, scope=self.name)
@property
def all_vars(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
@property
def histogram_summary_list(self):
return [tf.summary.histogram(var.name, var) for var in self.store_vars]
def find_pretrained_weights_path(self, weights_filename, throw_not_found_error=False):
    # search a fixed list of candidate model directories for the weights file
    search_dirs = ['C:\\Models', 'E:\\Models', 'F:\\Models',
                   '/mnt/data01/models/', '/mnt/data02/models/',
                   '/mnt/data03/models/', '/mnt/data04/models/']
    for search_dir in search_dirs:
        model_path = os.path.join(search_dir, weights_filename)
        if os.path.exists(model_path):
            return model_path
    if throw_not_found_error:
        raise ValueError('Base Network : the pretrained weights file ' + weights_filename + ' is not found')
    return None
def load_pretrained_weights(self, sess):
print('base network load pretrained weights')
return False
def load_pretrained_model_weights(self, sess, cfg, network_name, only_bottom=True):
config_file = get_config(cfg)
asset_filepath = config_file['assets dir']
ckpt_path = os.path.join(asset_filepath, config_file["trainer params"].get("checkpoint dir", "checkpoint"))
ckpt_name = ''
with open(os.path.join(ckpt_path, 'checkpoint'), 'r') as infile:
for line in infile:
if line.startswith('model_checkpoint_path'):
ckpt_name = line[len("model_checkpoint_path: \""):-2]
checkpoint_path = os.path.join(ckpt_path, ckpt_name)
print("Load checkpoint : ", checkpoint_path)
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
assign_list = []
var_list = self.all_vars
var_dict = {var.name.split(':')[0] : var for var in var_list}
for key in var_to_shape_map:
if key.startswith(network_name):
if only_bottom and 'fc' in key:
continue
var_name = self.name + '/' + key[len(network_name)+1:]
assign_list.append(tf.assign(var_dict[var_name], reader.get_tensor(key)))
assign_op = tf.group(assign_list)
sess.run(assign_op)
return True
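# A minimal usage sketch for BaseNetwork (TF1-style graph code). The config
# keys follow the ones read in __init__, but the concrete values and layer
# sizes are illustrative, and it assumes get_activation/get_normalization
# accept the string names used here.
if __name__ == '__main__':
    config = {
        'name': 'toy_net',
        'activation': 'relu',
        'normalization': 'none',
        'weightsinit': 'xavier',
    }
    net = BaseNetwork(config, is_training=True)
    x = tf.placeholder(tf.float32, [None, 32, 32, 3])
    with tf.variable_scope(net.name):
        h = net.conv2d('conv1', x, 16, 3, stride=2, **net.conv_args)
        h = net.maxpool2d('pool1', h, 2, 2)
        h = tcl.flatten(h)
        out = net.fc('fc_out', h, 10, **net.out_fc_args)
    print('output shape:', out.get_shape())
    print('trainable vars:', len(net.trainable_vars))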
|
nilq/baby-python
|
python
|
from ctypes import *
import threading
import json
import os
tls_var = threading.local()
import csv
csv.field_size_limit(500000)
from G2Exception import TranslateG2ModuleException, G2ModuleNotInitialized, G2ModuleGenericException
def resize_return_buffer(buf_, size_):
""" callback function that resizs return buffer when it is too small
Args:
size_: size the return buffer needs to be
"""
try:
if (sizeof(tls_var.buf) < size_) :
tls_var.buf = create_string_buffer(size_)
except AttributeError:
tls_var.buf = create_string_buffer(size_)
return addressof(tls_var.buf)
class G2Module(object):
"""G2 module access library
Attributes:
_lib_handle: handle to the loaded G2 native library
_resize_func_def: resize function definition
_resize_func: resize function pointer
_module_name: CME module name
_ini_file_name: name and location of .ini file
"""
# flags for exporting entity data
G2_EXPORT_INCLUDE_ALL_ENTITIES = ( 1 << 0 )
G2_EXPORT_CSV_INCLUDE_FULL_DETAILS = ( 1 << 1 )
G2_EXPORT_INCLUDE_RESOLVED = ( 1 << 2 )
G2_EXPORT_INCLUDE_POSSIBLY_SAME = ( 1 << 3 )
G2_EXPORT_INCLUDE_POSSIBLY_RELATED = ( 1 << 4 )
G2_EXPORT_INCLUDE_NAME_ONLY = ( 1 << 5 )
G2_EXPORT_INCLUDE_DISCLOSED = ( 1 << 6 )
# flags for outputting entity feature data
G2_ENTITY_INCLUDE_ALL_FEATURES = ( 1 << 7 )
G2_ENTITY_INCLUDE_REPRESENTATIVE_FEATURES = ( 1 << 8 )
G2_ENTITY_INCLUDE_SINGLE_FEATURES = ( 1 << 9 )
G2_ENTITY_INCLUDE_NO_FEATURES = ( 1 << 10 )
# flags for finding entity path data
G2_FIND_PATH_PREFER_EXCLUDE = ( 1 << 11 )
# flags for outputting entity relation data
G2_ENTITY_INCLUDE_ALL_RELATIONS = ( 1 << 12 )
G2_ENTITY_INCLUDE_POSSIBLY_SAME_RELATIONS = ( 1 << 13 )
G2_ENTITY_INCLUDE_POSSIBLY_RELATED_RELATIONS = ( 1 << 14 )
G2_ENTITY_INCLUDE_NAME_ONLY_RELATIONS = ( 1 << 15 )
G2_ENTITY_INCLUDE_DISCLOSED_RELATIONS = ( 1 << 16 )
G2_ENTITY_INCLUDE_NO_RELATIONS = ( 1 << 17 )
# flag for getting a minimal entity
G2_ENTITY_MINIMAL_FORMAT = ( 1 << 18 )
# flag for excluding feature scores from search results
G2_SEARCH_NO_FEATURE_SCORES = ( 1 << 19 )
# recommended settings
G2_EXPORT_DEFAULT_FLAGS = G2_EXPORT_INCLUDE_ALL_ENTITIES
G2_ENTITY_DEFAULT_FLAGS = G2_ENTITY_INCLUDE_REPRESENTATIVE_FEATURES | G2_ENTITY_INCLUDE_ALL_RELATIONS
G2_FIND_PATH_DEFAULT_FLAGS = G2_ENTITY_INCLUDE_REPRESENTATIVE_FEATURES | G2_ENTITY_INCLUDE_ALL_RELATIONS
G2_SEARCH_BY_ATTRIBUTES_DEFAULT_FLAGS = G2_ENTITY_INCLUDE_REPRESENTATIVE_FEATURES
G2_SEARCH_BY_ATTRIBUTES_MINIMAL_STRONG = G2_ENTITY_MINIMAL_FORMAT | G2_SEARCH_NO_FEATURE_SCORES | G2_ENTITY_INCLUDE_NO_RELATIONS | G2_EXPORT_INCLUDE_RESOLVED | G2_EXPORT_INCLUDE_POSSIBLY_SAME
G2_SEARCH_BY_ATTRIBUTES_MINIMAL_ALL = G2_ENTITY_MINIMAL_FORMAT | G2_SEARCH_NO_FEATURE_SCORES | G2_ENTITY_INCLUDE_NO_RELATIONS
# backwards compatibility flags
G2_EXPORT_DEFAULT_REPORT_FLAGS = G2_EXPORT_INCLUDE_ALL_ENTITIES
def init(self):
""" Initializes the G2 engine
This should only be called once per process. Currently re-initializing the G2 engine
after a destroy requires unloading the class loader used to load this class.
Returns:
int: 0 on success
"""
if self._debug:
print("Initializing G2 module")
resize_return_buffer(None, 65535)
p_module_name = self.prepareStringArgument(self._module_name)
p_ini_file_name = self.prepareStringArgument(self._ini_file_name)
self._lib_handle.G2_init.argtypes = [c_char_p, c_char_p, c_int]
retval = self._lib_handle.G2_init(p_module_name,
p_ini_file_name,
self._debug)
if self._debug:
print("Initialization Status: " + str(retval))
if retval == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif retval == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif retval < 0:
raise G2ModuleGenericException("Failed to initialize G2 Module")
return retval
def __init__(self, module_name_, ini_file_name_, debug_=False):
# type: (str, str, bool) -> None
""" G2Module class initialization
Args:
module_name_: A short name given to this instance of the engine
ini_file_name_: A fully qualified path to the G2 engine INI file (often /opt/senzing/g2/python/G2Module.ini)
debug_: Enable diagnostic logging which will print a massive amount of information to stdout
"""
try:
if os.name == 'nt':
self._lib_handle = cdll.LoadLibrary("G2.dll")
else:
self._lib_handle = cdll.LoadLibrary("libG2.so")
except OSError as ex:
print("ERROR: Unable to load G2. Did you remember to setup your environment by sourcing the setupEnv file?")
print("ERROR: For more information see https://senzing.zendesk.com/hc/en-us/articles/115002408867-Introduction-G2-Quickstart")
print("ERROR: If you are running Ubuntu or Debian please also review the ssl and crypto information at https://senzing.zendesk.com/hc/en-us/articles/115010259947-System-Requirements")
raise G2ModuleGenericException("Failed to load the G2 library")
self._resize_func_def = CFUNCTYPE(c_char_p, c_char_p, c_size_t)
self._resize_func = self._resize_func_def(resize_return_buffer)
self._module_name = module_name_
self._ini_file_name = ini_file_name_
self._debug = debug_
def primeEngine(self):
""" Primes the G2 engine
Return:
None
"""
resize_return_buffer(None, 65535)
self._lib_handle.G2_primeEngine.restype = c_int
self._lib_handle.G2_primeEngine.argtypes = []
ret_code = self._lib_handle.G2_primeEngine()
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
def clearLastException(self):
""" Clears the last exception
Return:
None
"""
resize_return_buffer(None, 65535)
self._lib_handle.G2_clearLastException.restype = None
self._lib_handle.G2_clearLastException.argtypes = []
self._lib_handle.G2_clearLastException()
def getLastException(self):
""" Gets the last exception
"""
resize_return_buffer(None, 65535)
self._lib_handle.G2_getLastException.restype = c_int
self._lib_handle.G2_getLastException.argtypes = [c_char_p, c_size_t]
self._lib_handle.G2_getLastException(tls_var.buf,sizeof(tls_var.buf))
resultString = tls_var.buf.value.decode('utf-8')
return resultString
def getLastExceptionCode(self):
""" Gets the last exception code
"""
resize_return_buffer(None, 65535)
self._lib_handle.G2_getLastExceptionCode.restype = c_int
self._lib_handle.G2_getLastExceptionCode.argtypes = []
exception_code = self._lib_handle.G2_getLastExceptionCode()
return exception_code
def process(self, input_umf_):
# type: (str) -> None
""" Generic process function without return
This method will send a record for processing in g2.
Args:
record: An input record to be processed. Contains the data and control info.
Return:
None
"""
if type(input_umf_) == str:
input_umf_string = input_umf_.encode('utf-8')
elif type(input_umf_) == bytearray:
input_umf_string = bytes(input_umf_)  # str() would produce the repr; ctypes needs raw bytes
else:
input_umf_string = input_umf_
resize_return_buffer(None, 65535)
self._lib_handle.G2_process.argtypes = [c_char_p]
self._lib_handle.G2_process.restype = c_int
ret_code = self._lib_handle.G2_process(input_umf_string)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif ret_code < 0:
raise G2ModuleGenericException("ERROR_CODE: " + str(ret_code))
def processWithResponse(self, input_umf_):
""" Generic process function that returns results
This method will send a record for processing in g2. It is a synchronous
call, i.e. it will wait until g2 actually processes the record, and then
optionally return any response message.
Args:
record: An input record to be processed. Contains the data and control info.
response: If there is a response to the message it will be returned here.
Note there are performance benefits of calling the process method
that doesn't need a response message.
Return:
str: The response in G2 JSON format.
"""
# type: (str) -> str
""" resolves an entity synchronously
Args:
input_umf_: G2 style JSON
"""
if type(input_umf_) == str:
input_umf_string = input_umf_.encode('utf-8')
elif type(input_umf_) == bytearray:
input_umf_string = bytes(input_umf_)  # str() would produce the repr; ctypes needs raw bytes
else:
input_umf_string = input_umf_
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_processWithResponseResize.argtypes = [c_char_p, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_processWithResponseResize(input_umf_string,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def checkRecord(self, input_umf_, recordQueryList):
# type: (str, str) -> str
""" Scores the input record against the specified records
Args:
    input_umf_: A JSON document containing the attribute information
        for the observation.
    recordQueryList: A JSON document listing the data source codes and
        record IDs of the records to score against.
Return:
str: The response in G2 JSON format.
"""
if type(input_umf_) == str:
input_umf_string = input_umf_.encode('utf-8')
elif type(input_umf_) == bytearray:
input_umf_string = bytes(input_umf_)  # str() would produce the repr; ctypes needs raw bytes
else:
input_umf_string = input_umf_
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_checkRecord.argtypes = [c_char_p, c_char_p, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_checkRecord(input_umf_string,
recordQueryList,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def getExportFlagsForMaxMatchLevel(self, max_match_level, includeSingletons, includeExtraCols):
""" Converts a maximum match level into an appropriate export flag bitmask value.
Args:
max_match_level: The maximum match level to use in an export.
includeSingletons: Also include singletons.
includeExtraCols: Also include extra export output.
Return:
int: A bitmask flag representing the match-levels to include.
"""
g2ExportFlags = 0
if max_match_level == 1:
# Include resolved entities
g2ExportFlags = self.G2_EXPORT_INCLUDE_RESOLVED
elif max_match_level == 2:
# Include possibly same relationships in addition to resolved entities
g2ExportFlags = self.G2_EXPORT_INCLUDE_RESOLVED | self.G2_EXPORT_INCLUDE_POSSIBLY_SAME
elif max_match_level == 3:
# Include possibly related relationships in addition to resolved entities & possibly same
g2ExportFlags = self.G2_EXPORT_INCLUDE_RESOLVED | self.G2_EXPORT_INCLUDE_POSSIBLY_SAME | self.G2_EXPORT_INCLUDE_POSSIBLY_RELATED
elif max_match_level == 4:
# Include name-only relationships in addition to resolved entities & possibly same & possibly related
g2ExportFlags = self.G2_EXPORT_INCLUDE_RESOLVED | self.G2_EXPORT_INCLUDE_POSSIBLY_SAME | self.G2_EXPORT_INCLUDE_POSSIBLY_RELATED | self.G2_EXPORT_INCLUDE_NAME_ONLY
elif max_match_level == 5:
# Include disclosed relationships in addition to resolved entities & possibly same & possibly related & name-only
g2ExportFlags = self.G2_EXPORT_INCLUDE_RESOLVED | self.G2_EXPORT_INCLUDE_POSSIBLY_SAME | self.G2_EXPORT_INCLUDE_POSSIBLY_RELATED | self.G2_EXPORT_INCLUDE_NAME_ONLY | self.G2_EXPORT_INCLUDE_DISCLOSED
else:
g2ExportFlags = self.G2_EXPORT_INCLUDE_ALL_ENTITIES
# Include singletons if requested
if includeSingletons:
g2ExportFlags = g2ExportFlags | self.G2_EXPORT_INCLUDE_ALL_ENTITIES
# Include extra header columns if requested
if includeExtraCols:
g2ExportFlags = g2ExportFlags | self.G2_EXPORT_CSV_INCLUDE_FULL_DETAILS
return g2ExportFlags
def getExportHandleFromFlags(self, exportType, g2ExportFlags, colNames=None):
if exportType == 'CSV':
if colNames:
# accept either a list of column names or a pre-joined string
if isinstance(colNames, list):
colNames = ",".join(colNames)
self._lib_handle.G2_exportCSVEntityReport_V2.restype = c_void_p
self._lib_handle.G2_exportCSVEntityReport_V2.argtypes = [c_char_p, c_int]
exportHandle = self._lib_handle.G2_exportCSVEntityReport_V2(colNames.encode(), g2ExportFlags)
else:
self._lib_handle.G2_exportCSVEntityReport.restype = c_void_p
self._lib_handle.G2_exportCSVEntityReport.argtypes = [c_int]
exportHandle = self._lib_handle.G2_exportCSVEntityReport(g2ExportFlags)
else:
self._lib_handle.G2_exportJSONEntityReport.restype = c_void_p
self._lib_handle.G2_exportJSONEntityReport.argtypes = [c_int]
exportHandle = self._lib_handle.G2_exportJSONEntityReport(g2ExportFlags)
return exportHandle
def getExportHandle(self, exportType, max_match_level):
# type: (str, int) -> c_void_p
""" Generate a CSV or JSON export
This is used to export entity data from known entities. This function
returns an export-handle that can be read from to get the export data
in the requested format. The export-handle should be read using the "G2_fetchNext"
function, and closed when work is complete. If CSV, the first output row returned
by the export-handle contains the CSV column headers as a string. Each
following row contains the exported entity data.
Args:
exportType: CSV or JSON
max_match_level: The match-level specifying what kinds of entity
resolutions and relations to include.
1 -- "resolved" relationships
2 -- "possibly same" relationships
3 -- "possibly related" relationships
4 -- "name only" relationships *** Internal only
5 -- "disclosed" relationships
Return:
c_void_p: handle for the export
"""
g2ExportFlags = self.getExportFlagsForMaxMatchLevel(max_match_level, True, True)
if exportType == 'CSV':
self._lib_handle.G2_exportCSVEntityReport.restype = c_void_p
exportHandle = self._lib_handle.G2_exportCSVEntityReport(g2ExportFlags)
else:
self._lib_handle.G2_exportJSONEntityReport.restype = c_void_p
exportHandle = self._lib_handle.G2_exportJSONEntityReport(g2ExportFlags)
return exportHandle
def fetchExportRecord(self, exportHandle):
# type: (c_void_p) -> str
""" Fetch a record from an export
Args:
exportHandle: handle from generated export
Returns:
str: Record fetched, empty if there is no more data
"""
resultString = ""
resize_return_buffer(None,65535)
self._lib_handle.G2_fetchNext.argtypes = [c_void_p, c_char_p, c_size_t]
rowData = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
while rowData:
resultString += tls_var.buf.value.decode('utf-8')
if resultString.endswith('\n'):
resultString = resultString[:-1]
break
else:
rowData = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
return resultString
def fetchCsvExportRecord(self, exportHandle, csvHeaders = None):
# type: (c_void_p, str) -> str
""" Fetch a CSV record from an export
Args:
exportHandle: handle from generated export
csvHeaders: CSV header record
Returns:
dict: Record fetched using the csvHeaders as the keys.
None if no more data is available.
"""
resultString = self.fetchExportRecord(exportHandle)
if resultString:
csvRecord = next(csv.DictReader([resultString], fieldnames=csvHeaders))
else:
csvRecord = None
return csvRecord
def exportCSVEntityReport(self, max_match_level, g2ExportFlags, includeSingletons, includeExtraCols):
# type: (int, int, bool, bool) -> tuple
""" Generate a CSV Entity Report
This is used to export entity data from known entities. This function
runs the full CSV export internally: it reads the export-handle with
"G2_fetchNext" and closes it when complete. Each output row contains the
exported entity data for a single resolved entity.
Args:
max_match_level: The match-level specifying what kinds of entity
resolutions and relations to include.
1 -- "resolved" relationships
2 -- "possibly same" relationships
3 -- "possibly related" relationships
4 -- "name only" relationships *** Internal only
5 -- "disclosed" relationships
g2ExportFlags: A bit mask specifying other control flags, such as
"G2_EXPORT_INCLUDE_SINGLETONS". The default and recommended
value is "G2_EXPORT_DEFAULT_FLAGS".
includeSingletons: Also include singletons
includeExtraCols: Also include extra export output
Return:
tuple: The export document as a string, and the number of rows fetched.
"""
resultString = b""
fullG2ExportFlags_ = self.getExportFlagsForMaxMatchLevel(max_match_level, includeSingletons, includeExtraCols)
fullG2ExportFlags_ = fullG2ExportFlags_ | g2ExportFlags
self._lib_handle.G2_exportCSVEntityReport.restype = c_void_p
exportHandle = self._lib_handle.G2_exportCSVEntityReport(fullG2ExportFlags_)
rowCount = 0
resize_return_buffer(None,65535)
self._lib_handle.G2_fetchNext.argtypes = [c_void_p, c_char_p, c_size_t]
rowData = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
while rowData:
rowCount += 1
stringData = tls_var.buf
resultString += stringData.value
rowData = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
self._lib_handle.G2_closeExport(c_void_p(exportHandle))
return (resultString.decode('utf-8'), rowCount)
def exportCSVEntityReportV2(self, csvColumnList, max_match_level, g2ExportFlags, includeSingletons, includeExtraCols):
# type: (str, int, int, bool, bool) -> tuple
""" Generate a CSV Entity Report with a caller-specified column list
This is used to export entity data from known entities.
Return:
tuple: The export document as a string, and the number of rows fetched.
"""
resultString = b""
fullG2ExportFlags_ = self.getExportFlagsForMaxMatchLevel(max_match_level, includeSingletons, includeExtraCols)
fullG2ExportFlags_ = fullG2ExportFlags_ | g2ExportFlags
_csvColumnList = self.prepareStringArgument(csvColumnList)
self._lib_handle.G2_exportCSVEntityReport_V2.restype = c_void_p
self._lib_handle.G2_exportCSVEntityReport_V2.argtypes = [c_char_p, c_int]
exportHandle = self._lib_handle.G2_exportCSVEntityReport_V2(_csvColumnList,fullG2ExportFlags_)
rowCount = 0
resize_return_buffer(None,65535)
self._lib_handle.G2_fetchNext.argtypes = [c_void_p, c_char_p, c_size_t]
rowData = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
while rowData:
rowCount += 1
stringData = tls_var.buf
resultString += stringData.value
rowData = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
self._lib_handle.G2_closeExport(c_void_p(exportHandle))
return (resultString.decode('utf-8'), rowCount)
def exportJSONEntityReport(self, max_match_level, g2ExportFlags, includeSingletons, includeExtraCols):
# type: (int, int, bool, bool) -> tuple
""" Generate a JSON Entity Report
This is used to export entity data from known entities. This function
runs the full JSON export internally: it reads the export-handle with
"G2_fetchNext" and closes it when complete. Each output row contains the
exported entity data for a single resolved entity.
Args:
max_match_level: The match-level specifying what kinds of entity
resolutions and relations to include.
1 -- "resolved" relationships
2 -- "possibly same" relationships
3 -- "possibly related" relationships
4 -- "name only" relationships
5 -- "disclosed" relationships
g2ExportFlags: A bit mask specifying other control flags, such as
"G2_EXPORT_INCLUDE_SINGLETONS". The default and recommended
value is "G2_EXPORT_DEFAULT_FLAGS".
includeSingletons: Also include singletons
includeExtraCols: Also include extra export output
Return:
tuple: The export document as a string, and the number of rows fetched.
"""
resultString = b""
fullG2ExportFlags_ = self.getExportFlagsForMaxMatchLevel(max_match_level, includeSingletons, includeExtraCols)
fullG2ExportFlags_ = fullG2ExportFlags_ | g2ExportFlags
self._lib_handle.G2_exportJSONEntityReport.restype = c_void_p
exportHandle = self._lib_handle.G2_exportJSONEntityReport(fullG2ExportFlags_)
rowCount = 0
resize_return_buffer(None,65535)
self._lib_handle.G2_fetchNext.argtypes = [c_void_p, c_char_p, c_size_t]
rowData = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
while rowData:
rowCount += 1
stringData = tls_var.buf
resultString += stringData.value
rowData = self._lib_handle.G2_fetchNext(c_void_p(exportHandle),tls_var.buf,sizeof(tls_var.buf))
self._lib_handle.G2_closeExport(c_void_p(exportHandle))
return (resultString.decode('utf-8'), rowCount)
def prepareStringArgument(self, stringToPrepare):
# type: (str) -> str
""" Internal processing function """
if stringToPrepare is None:
return None
# if the string is unicode, transcode to utf-8 bytes
if type(stringToPrepare) == str:
return stringToPrepare.encode('utf-8')
# if the input is a bytearray, assume utf-8 and convert to bytes
elif type(stringToPrepare) == bytearray:
return bytes(stringToPrepare)
# input is already bytes
return stringToPrepare
def addRecord(self,dataSourceCode,recordId,jsonData,loadId=None):
# type: (str,str,str,str) -> int
""" Loads the JSON record
Args:
dataSourceCode: The data source for the observation.
recordID: The ID for the record
jsonData: A JSON document containing the attribute information
for the observation.
loadID: The observation load ID for the record, can be null and will default to dataSourceCode
Return:
int: 0 on success
"""
_dataSourceCode = self.prepareStringArgument(dataSourceCode)
_loadId = self.prepareStringArgument(loadId)
_recordId = self.prepareStringArgument(recordId)
_jsonData = self.prepareStringArgument(jsonData)
resize_return_buffer(None, 65535)
ret_code = self._lib_handle.G2_addRecord(_dataSourceCode,_recordId,_jsonData,_loadId)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif ret_code < 0:
raise G2ModuleGenericException("ERROR_CODE: " + str(ret_code))
return ret_code
def replaceRecord(self,dataSourceCode,recordId,jsonData,loadId=None):
# type: (str,str,str,str) -> int
""" Replace the JSON record, loads if doesn't exist
Args:
dataSourceCode: The data source for the observation.
recordID: The ID for the record
jsonData: A JSON document containing the attribute information
for the observation.
loadID: The load ID for the record, can be null and will default to dataSourceCode
Return:
int: 0 on success
"""
_dataSourceCode = self.prepareStringArgument(dataSourceCode)
_loadId = self.prepareStringArgument(loadId)
_recordId = self.prepareStringArgument(recordId)
_jsonData = self.prepareStringArgument(jsonData)
resize_return_buffer(None, 65535)
ret_code = self._lib_handle.G2_replaceRecord(_dataSourceCode,_recordId,_jsonData,_loadId)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif ret_code < 0:
raise G2ModuleGenericException("ERROR_CODE: " + str(ret_code))
return ret_code
def deleteRecord(self,dataSourceCode,recordId,loadId=None):
# type: (str,str,str) -> int
""" Delete the record
Args:
dataSourceCode: The data source for the observation.
recordID: The ID for the record
loadID: The load ID for the record, can be null and will default to dataSourceCode
Return:
int: 0 on success
"""
_dataSourceCode = self.prepareStringArgument(dataSourceCode)
_loadId = self.prepareStringArgument(loadId)
_recordId = self.prepareStringArgument(recordId)
resize_return_buffer(None, 65535)
ret_code = self._lib_handle.G2_deleteRecord(_dataSourceCode,_recordId,_loadId)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif ret_code < 0:
raise G2ModuleGenericException("ERROR_CODE: " + str(ret_code))
return ret_code
def reevaluateRecord(self,dataSourceCode,recordId,flags):
# type: (str,str,int) -> int
""" Reevaluate the JSON record
Args:
dataSourceCode: The data source for the observation.
recordID: The ID for the record
flags: Bitwise control flags
Return:
int: 0 on success
"""
_dataSourceCode = self.prepareStringArgument(dataSourceCode)
_recordId = self.prepareStringArgument(recordId)
resize_return_buffer(None, 65535)
ret_code = self._lib_handle.G2_reevaluateRecord(_dataSourceCode,_recordId,flags)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif ret_code < 0:
raise G2ModuleGenericException("ERROR_CODE: " + str(ret_code))
return ret_code
def reevaluateEntity(self,entityID,flags):
# type: (int,int) -> int
""" Reevaluate the JSON record
Args:
entityID: The entity ID to reevaluate.
flags: Bitwise control flags
Return:
int: 0 on success
"""
resize_return_buffer(None, 65535)
ret_code = self._lib_handle.G2_reevaluateEntity(entityID,flags)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif ret_code < 0:
raise G2ModuleGenericException("ERROR_CODE: " + str(ret_code))
return ret_code
def searchByAttributes(self,jsonData):
# type: (str) -> str
""" Find records matching the provided attributes
Args:
jsonData: A JSON document containing the attribute information to search.
Return:
str: JSON document with results
"""
_jsonData = self.prepareStringArgument(jsonData)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_searchByAttributes.argtypes = [c_char_p, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_searchByAttributes(_jsonData,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def searchByAttributesV2(self,jsonData,flags):
# type: (str, int) -> str
""" Find records matching the provided attributes
Args:
jsonData: A JSON document containing the attribute information to search.
flags: control flags.
Return:
str: JSON document with results
"""
_jsonData = self.prepareStringArgument(jsonData)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_searchByAttributes_V2.restype = c_int
self._lib_handle.G2_searchByAttributes_V2.argtypes = [c_char_p, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_searchByAttributes_V2(_jsonData,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def findPathByEntityID(self,startEntityID,endEntityID,maxDegree):
# type: (int, int, int) -> str
""" Find a path between two entities in the system.
Args:
startEntityID: The entity ID you want to find the path from
endEntityID: The entity ID you want to find the path to
maxDegree: The maximum path length to search for
Return:
str: JSON document with results
"""
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findPathByEntityID.restype = c_int
self._lib_handle.G2_findPathByEntityID.argtypes = [c_longlong, c_longlong, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findPathByEntityID(startEntityID,endEntityID,maxDegree,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def findPathByEntityIDV2(self,startEntityID,endEntityID,maxDegree,flags):
# type: (int, int, int, int) -> str
""" Find a path between two entities in the system.
Args:
startEntityID: The entity ID you want to find the path from
endEntityID: The entity ID you want to find the path to
maxDegree: The maximum path length to search for
flags: control flags.
Return:
str: JSON document with results
"""
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findPathByEntityID_V2.restype = c_int
self._lib_handle.G2_findPathByEntityID_V2.argtypes = [c_longlong, c_longlong, c_int, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findPathByEntityID_V2(startEntityID,endEntityID,maxDegree,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def findNetworkByEntityID(self,entityList,maxDegree,buildOutDegree,maxEntities):
# type: (str, int, int, int) -> str
""" Find a network between entities in the system.
Args:
entityList: The entities to search for the network of
maxDegree: The maximum path length to search for between entities
buildOutDegree: The number of degrees to build out the surrounding network
maxEntities: The maximum number of entities to include in the result
Return:
str: JSON document with results
"""
_entityList = self.prepareStringArgument(entityList)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findNetworkByEntityID.restype = c_int
self._lib_handle.G2_findNetworkByEntityID.argtypes = [c_char_p, c_int, c_int, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findNetworkByEntityID(_entityList,maxDegree,buildOutDegree,maxEntities,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def findNetworkByEntityIDV2(self,entityList,maxDegree,buildOutDegree,maxEntities,flags):
# type: (str, int, int, int, int) -> str
""" Find a network between entities in the system.
Args:
entityList: The entities to search for the network of
maxDegree: The maximum path length to search for between entities
buildOutDegree: The number of degrees to build out the surrounding network
maxEntities: The maximum number of entities to include in the result
flags: control flags.
Return:
str: JSON document with results
"""
_entityList = self.prepareStringArgument(entityList)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findNetworkByEntityID_V2.restype = c_int
self._lib_handle.G2_findNetworkByEntityID_V2.argtypes = [c_char_p, c_int, c_int, c_int, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findNetworkByEntityID_V2(_entityList,maxDegree,buildOutDegree,maxEntities,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def findPathByRecordID(self,startDsrcCode,startRecordId,endDsrcCode,endRecordId,maxDegree):
# type: (str, str, str, str, int) -> str
""" Find a path between two records in the system.
Args:
startDataSourceCode: The data source for the record you want to find the path from
startRecordID: The ID for the record you want to find the path from
endDataSourceCode: The data source for the record you want to find the path to
endRecordID: The ID for the record you want to find the path to
maxDegree: The maximum path length to search for
Return:
str: JSON document with results
"""
_startDsrcCode = self.prepareStringArgument(startDsrcCode)
_startRecordId = self.prepareStringArgument(startRecordId)
_endDsrcCode = self.prepareStringArgument(endDsrcCode)
_endRecordId = self.prepareStringArgument(endRecordId)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findPathByRecordID.restype = c_int
self._lib_handle.G2_findPathByRecordID.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findPathByRecordID(_startDsrcCode,_startRecordId,_endDsrcCode,_endRecordId,maxDegree,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def findPathByRecordIDV2(self,startDsrcCode,startRecordId,endDsrcCode,endRecordId,maxDegree,flags):
# type: (str, str, str, str, int, int) -> str
""" Find a path between two records in the system.
Args:
startDataSourceCode: The data source for the record you want to find the path from
startRecordID: The ID for the record you want to find the path from
endDataSourceCode: The data source for the record you want to find the path to
endRecordID: The ID for the record you want to find the path to
maxDegree: The maximum path length to search for
flags: control flags.
Return:
str: JSON document with results
"""
_startDsrcCode = self.prepareStringArgument(startDsrcCode)
_startRecordId = self.prepareStringArgument(startRecordId)
_endDsrcCode = self.prepareStringArgument(endDsrcCode)
_endRecordId = self.prepareStringArgument(endRecordId)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findPathByRecordID_V2.restype = c_int
self._lib_handle.G2_findPathByRecordID_V2.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p, c_int, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findPathByRecordID_V2(_startDsrcCode,_startRecordId,_endDsrcCode,_endRecordId,maxDegree,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def findNetworkByRecordID(self,recordList,maxDegree,buildOutDegree,maxEntities):
# type: (str, int, int, int) -> str
""" Find a network between entities in the system.
Args:
recordList: The records to search for the network of
maxDegree: The maximum path length to search for between entities
buildOutDegree: The number of degrees to build out the surrounding network
maxEntities: The maximum number of entities to include in the result
Return:
str: JSON document with results
"""
_recordList = self.prepareStringArgument(recordList)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findNetworkByRecordID.restype = c_int
self._lib_handle.G2_findNetworkByRecordID.argtypes = [c_char_p, c_int, c_int, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findNetworkByRecordID(_recordList,maxDegree,buildOutDegree,maxEntities,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def findNetworkByRecordIDV2(self,recordList,maxDegree,buildOutDegree,maxEntities,flags):
# type: (str, int, int, int, int) -> str
""" Find a network between entities in the system.
Args:
recordList: The records to search for the network of
maxDegree: The maximum path length to search for between entities
buildOutDegree: The number of degrees to build out the surrounding network
maxEntities: The maximum number of entities to include in the result
flags: control flags.
Return:
str: JSON document with results
"""
_recordList = self.prepareStringArgument(recordList)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findNetworkByRecordID_V2.restype = c_int
self._lib_handle.G2_findNetworkByRecordID_V2.argtypes = [c_char_p, c_int, c_int, c_int, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findNetworkByRecordID_V2(_recordList,maxDegree,buildOutDegree,maxEntities,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def findPathExcludingByEntityID(self,startEntityID,endEntityID,maxDegree,excludedEntities,flags):
# type: (int, int, int, str, int) -> str
""" Find a path between two entities in the system.
Args:
startEntityID: The entity ID you want to find the path from
endEntityID: The entity ID you want to find the path to
maxDegree: The maximum path length to search for
excludedEntities: JSON document containing entities to exclude
flags: control flags
Return:
str: JSON document with results
"""
_excludedEntities = self.prepareStringArgument(excludedEntities)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findPathExcludingByEntityID.restype = c_int
self._lib_handle.G2_findPathExcludingByEntityID.argtypes = [c_longlong, c_longlong, c_int, c_char_p, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findPathExcludingByEntityID(startEntityID,endEntityID,maxDegree,_excludedEntities,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def findPathIncludingSourceByEntityID(self,startEntityID,endEntityID,maxDegree,excludedEntities,requiredDsrcs,flags):
# type: (int, int, int, str, str, int) -> str
""" Find a path between two entities in the system.
Args:
startEntityID: The entity ID you want to find the path from
endEntityID: The entity ID you want to find the path to
maxDegree: The maximum path length to search for
excludedEntities: JSON document containing entities to exclude
requiredDsrcs: JSON document containing data sources to require
flags: control flags
Return:
str: JSON document with results
"""
_excludedEntities = self.prepareStringArgument(excludedEntities)
_requiredDsrcs = self.prepareStringArgument(requiredDsrcs)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findPathIncludingSourceByEntityID.restype = c_int
self._lib_handle.G2_findPathIncludingSourceByEntityID.argtypes = [c_longlong, c_longlong, c_int, c_char_p, c_char_p, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findPathIncludingSourceByEntityID(startEntityID,endEntityID,maxDegree,_excludedEntities,_requiredDsrcs,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def findPathExcludingByRecordID(self,startDsrcCode,startRecordId,endDsrcCode,endRecordId,maxDegree,excludedEntities,flags):
# type: (str, str, str, str, int, str, int) -> str
""" Find a path between two records in the system.
Args:
startDataSourceCode: The data source for the record you want to find the path from
startRecordID: The ID for the record you want to find the path from
endDataSourceCode: The data source for the record you want to find the path to
endRecordID: The ID for the record you want to find the path to
maxDegree: The maximum path length to search for
excludedEntities: JSON document containing entities to exclude
flags: control flags
Return:
str: JSON document with results
"""
_startDsrcCode = self.prepareStringArgument(startDsrcCode)
_startRecordId = self.prepareStringArgument(startRecordId)
_endDsrcCode = self.prepareStringArgument(endDsrcCode)
_endRecordId = self.prepareStringArgument(endRecordId)
_excludedEntities = self.prepareStringArgument(excludedEntities)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findPathExcludingByRecordID.restype = c_int
self._lib_handle.G2_findPathExcludingByRecordID.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p, c_int, c_char_p, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findPathExcludingByRecordID(_startDsrcCode,_startRecordId,_endDsrcCode,_endRecordId,maxDegree,
_excludedEntities,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def findPathIncludingSourceByRecordID(self,startDsrcCode,startRecordId,endDsrcCode,endRecordId,maxDegree,excludedEntities,requiredDsrcs,flags):
# type: (str, str, str, str, int, str, str, int) -> str
""" Find a path between two records in the system.
Args:
startDataSourceCode: The data source for the record you want to find the path from
startRecordID: The ID for the record you want to find the path from
endDataSourceCode: The data source for the record you want to find the path to
endRecordID: The ID for the record you want to find the path to
maxDegree: The maximum path length to search for
excludedEntities: JSON document containing entities to exclude
requiredDsrcs: JSON document containing data sources to require
flags: control flags
Return:
str: JSON document with results
"""
_startDsrcCode = self.prepareStringArgument(startDsrcCode)
_startRecordId = self.prepareStringArgument(startRecordId)
_endDsrcCode = self.prepareStringArgument(endDsrcCode)
_endRecordId = self.prepareStringArgument(endRecordId)
_excludedEntities = self.prepareStringArgument(excludedEntities)
_requiredDsrcs = self.prepareStringArgument(requiredDsrcs)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_findPathIncludingSourceByRecordID.restype = c_int
self._lib_handle.G2_findPathIncludingSourceByRecordID.argtypes = [c_char_p, c_char_p, c_char_p, c_char_p, c_int, c_char_p, c_char_p, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_findPathIncludingSourceByRecordID(_startDsrcCode,_startRecordId,_endDsrcCode,_endRecordId,maxDegree,
_excludedEntities,_requiredDsrcs,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def getEntityByEntityID(self,entityID):
# type: (int) -> str
""" Find the entity with the given ID
Args:
entityID: The entity ID you want returned. Typically referred to as
ENTITY_ID in JSON results.
Return:
str: JSON document with results
"""
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_getEntityByEntityID.argtypes = [c_longlong, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_getEntityByEntityID(entityID,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def getEntityByEntityIDV2(self,entityID,flags):
# type: (int, int) -> str
""" Find the entity with the given ID
Args:
entityID: The entity ID you want returned. Typically referred to as
ENTITY_ID in JSON results.
flags: control flags.
Return:
str: JSON document with results
"""
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_getEntityByEntityID_V2.restype = c_int
self._lib_handle.G2_getEntityByEntityID_V2.argtypes = [c_longlong, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_getEntityByEntityID_V2(entityID,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return tls_var.buf.value.decode('utf-8')
def getEntityByRecordID(self,dsrcCode,recordId):
# type: (str,str) -> str
""" Get the entity containing the specified record
Args:
dataSourceCode: The data source for the observation.
recordID: The ID for the record
Return:
str: JSON document with results
"""
_dsrcCode = self.prepareStringArgument(dsrcCode)
_recordId = self.prepareStringArgument(recordId)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_getEntityByRecordID.argtypes = [c_char_p, c_char_p, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_getEntityByRecordID(_dsrcCode,_recordId,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def getEntityByRecordIDV2(self,dsrcCode,recordId,flags):
# type: (str, str, int) -> str
""" Get the entity containing the specified record
Args:
dataSourceCode: The data source for the observation.
recordID: The ID for the record
flags: control flags.
Return:
str: JSON document with results
"""
_dsrcCode = self.prepareStringArgument(dsrcCode)
_recordId = self.prepareStringArgument(recordId)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_getEntityByRecordID_V2.restype = c_int
self._lib_handle.G2_getEntityByRecordID_V2.argtypes = [c_char_p, c_char_p, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_getEntityByRecordID_V2(_dsrcCode,_recordId,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def getRecord(self,dsrcCode,recordId):
# type: (str,str) -> str
""" Get the specified record
Args:
dataSourceCode: The data source for the observation.
recordID: The ID for the record
Return:
str: JSON document with results
"""
_dsrcCode = self.prepareStringArgument(dsrcCode)
_recordId = self.prepareStringArgument(recordId)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_getRecord.argtypes = [c_char_p, c_char_p, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_getRecord(_dsrcCode,_recordId,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def getRecordV2(self,dsrcCode,recordId,flags):
# type: (str, str, int) -> str
""" Get the specified record
Args:
dataSourceCode: The data source for the observation.
recordID: The ID for the record
flags: control flags.
Return:
str: JSON document with results
"""
_dsrcCode = self.prepareStringArgument(dsrcCode)
_recordId = self.prepareStringArgument(recordId)
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_getRecord_V2.restype = c_int
self._lib_handle.G2_getRecord_V2.argtypes = [c_char_p, c_char_p, c_int, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_getRecord_V2(_dsrcCode,_recordId,flags,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def stats(self):
# type: () -> object
""" Retrieve the workload statistics for the current process.
Resets them after retrieved.
Args:
Return:
object: JSON document with statistics
"""
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_stats.argtypes = [POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_stats(pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
return responseBuf.value.decode('utf-8')
def exportConfig(self):
# type: () -> object
""" Retrieve the G2 engine configuration
Args:
Return:
object: JSON document with G2 engine configuration
"""
resize_return_buffer(None, 65535)
responseBuf = c_char_p(None)
responseSize = c_size_t(0)
self._lib_handle.G2_exportConfig.argtypes = [POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2_exportConfig(pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif ret_code < 0:
raise G2ModuleGenericException("ERROR_CODE: " + str(ret_code))
return responseBuf.value.decode('utf-8')
def getActiveConfigID(self):
# type: () -> object
""" Retrieve the active config ID for the G2 engine
Args:
Return:
object: The numeric active config ID
"""
configID = c_longlong(0)
self._lib_handle.G2_getActiveConfigID.argtypes = [POINTER(c_longlong)]
ret_code = self._lib_handle.G2_getActiveConfigID(configID)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif ret_code < 0:
raise G2ModuleGenericException("ERROR_CODE: " + str(ret_code))
return configID.value
def getRepositoryLastModifiedTime(self):
# type: () -> object
""" Retrieve the last modified time stamp of the entity store repository
Args:
Return:
object: The last modified time stamp, as a numeric integer
"""
lastModifiedTimeStamp = c_longlong(0)
self._lib_handle.G2_getRepositoryLastModifiedTime.argtypes = [POINTER(c_longlong)]
ret_code = self._lib_handle.G2_getRepositoryLastModifiedTime(lastModifiedTimeStamp)
if ret_code == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif ret_code == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
elif ret_code < 0:
raise G2ModuleGenericException("ERROR_CODE: " + str(ret_code))
return lastModifiedTimeStamp.value
def purgeRepository(self, reset_resolver_=True):
# type: (bool) -> None
""" Purges the G2 repository
Args:
reset_resolver_: Re-initializes the engine. Should be left True.
Return:
None
"""
resize_return_buffer(None, 65535)
retval = self._lib_handle.G2_purgeRepository()
if retval == -2:
self._lib_handle.G2_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
elif retval == -1:
raise G2ModuleNotInitialized('G2Module has not been successfully initialized')
if reset_resolver_:
self.restart()
def restart(self):
""" Internal function """
self.destroy()
self.init()
def destroy(self):
""" Uninitializes the engine
This should be done once per process after init(...) is called.
After it is called the engine will no longer function.
Args:
Return:
None
"""
self._lib_handle.G2_destroy()
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
# https://github.com/rensutheart/PyTorch-Deep-Learning-Tutorials/blob/master/part3_MNIST.py
def __init__(self, n_classes):
super(CNN, self).__init__()
# define all the components that will be used in the NN (these can be reused)
self.conv1 = nn.Conv2d(1, 10, kernel_size=5, padding=2) # 1 input channel, 10 output channels
self.conv2 = nn.Conv2d(10, 20, kernel_size=5, padding=2) # 10 input channels, 20 output channels
self.mp = nn.MaxPool2d(2)
self.drop2D = nn.Dropout2d(p=0.25)
self.fc1 = nn.Linear(980, 90) # 20 channels * 7 * 7 after two 2x2 max-pools on a 28x28 input
self.fc2 = nn.Linear(90, n_classes)
def forward(self, x):
# define the actual network
in_size = x.size(0) # this is the batch size
# you can chain function together to form the layers
x = F.relu(self.mp(self.conv1(x)))
x = F.relu(self.mp(self.conv2(x)))
# x = self.drop2D(x)
x = x.view(in_size, -1) # flatten the data; -1 is inferred from the other dimensions (980 here)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
# Copyright 2014 - Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from barbicanclient import client as barbicanclient
from barbicanclient.common import auth as barbicanauth
from oslo.config import cfg
from solum.openstack.common import importutils
class BarbicanClient(object):
"""Barbican client wrapper so we can encapsulate logic in one place."""
def __init__(self, insecure=False):
self.insecure = insecure
self._admin_client = None
@property
def admin_client(self):
if not self._admin_client:
# Create connection to API
self._admin_client = self._barbican_admin_init()
return self._admin_client
def _barbican_admin_init(self):
# Import auth_token to have keystone_authtoken settings setup.
importutils.import_module('keystoneclient.middleware.auth_token')
keystone = barbicanauth.KeystoneAuthV2(
auth_url=cfg.CONF.keystone_authtoken.auth_uri,
username=cfg.CONF.keystone_authtoken.admin_user,
password=cfg.CONF.keystone_authtoken.admin_password,
tenant_name=cfg.CONF.keystone_authtoken.admin_tenant_name)
return barbicanclient.Client(auth_plugin=keystone,
insecure=self.insecure)
from .R2 import R2
import os
def handle_headers(frame, request, response):
# Send a 103 response.
resource_url = request.GET.first(b"resource-url").decode()
link_header_value = "<{}>; rel=preload; as=script".format(resource_url)
early_hints = [
(b":status", b"103"),
(b"link", link_header_value),
]
early_hints_policy = request.GET.first(b"early-hints-policy").decode()
# In this test handler "allowed" or "absent" are only valid policies because
# csp-document-disallow.html always sets CSP to disallow the preload.
# "disallowed" makes no observable changes in the test. Note that
# csp-basic.html covers disallowing preloads in Early Hints.
assert early_hints_policy == "allowed" or early_hints_policy == "absent"
if early_hints_policy == "allowed":
resource_origin = request.GET.first(b"resource-origin").decode()
csp_value = "script-src 'self' 'unsafe-inline' {}".format(resource_origin)
early_hints.append((b"content-security-policy", csp_value))
response.writer.write_raw_header_frame(headers=early_hints,
end_headers=True)
# Send the final response header.
response.status = 200
response.headers["content-type"] = "text/html"
response.write_status_headers()
def main(request, response):
current_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(current_dir, "csp-document-disallow.html")
with open(file_path, "r") as f:
test_content = f.read()
response.writer.write_data(item=test_content, last=True)
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An optimization pass that combines adjacent single-qubit rotations."""
from typing import Iterable, List, Tuple, cast, Optional
import numpy as np
from cirq import ops, extension
from cirq.circuits.circuit import Circuit
from cirq.circuits.optimization_pass import (
PointOptimizationSummary,
PointOptimizer,
)
class MergeSingleQubitGates(PointOptimizer):
"""Combines adjacent constant single-qubit rotations into
SingleQubitMatrixGates.
"""
def __init__(self,
extensions: extension.Extensions = None) -> None:
super().__init__()
self.extensions = extensions or extension.Extensions()
def optimization_at(self,
circuit: Circuit,
index: int,
op: ops.Operation
) -> Optional[PointOptimizationSummary]:
if len(op.qubits) != 1:
return None
indices, gates = self._scan_single_qubit_ops(circuit, index,
op.qubits[0])
if not gates:
return None
# Replace the gates with a single merged matrix gate.
operations = self._merge_rotations(op.qubits[0], gates)
return PointOptimizationSummary(
clear_span=max(indices) + 1 - index,
clear_qubits=op.qubits,
new_operations=operations)
def _scan_single_qubit_ops(self,
circuit: Circuit,
index: Optional[int],
qubit: ops.QubitId
) -> Tuple[List[int], List[ops.KnownMatrix]]:
operations = [] # type: List[ops.KnownMatrix]
indices = [] # type: List[int]
while index is not None:
op = cast(ops.Operation, circuit.operation_at(qubit, index))
if len(op.qubits) != 1:
break
operation = self.extensions.try_cast(ops.KnownMatrix, op)
if operation is None:
break
indices.append(index)
operations.append(operation)
index = circuit.next_moment_operating_on([qubit], index + 1)
return indices, operations
def _merge_rotations(self,
qubit: ops.QubitId,
operations: Iterable[ops.KnownMatrix]
) -> List[ops.Operation]:
matrix = np.eye(2, dtype=np.complex128)
for op in operations:
matrix = np.dot(op.matrix(), matrix)
return [ops.SingleQubitMatrixGate(matrix)(qubit)]
from rest_framework import serializers
from .models import Product
class ProductSerializer(serializers.ModelSerializer):
# Get the image url by serializing `ImageField`
image = serializers.ImageField(max_length=None, allow_empty_file=False, allow_null=True, required=False)
class Meta:
# Model to be serialized
model = Product
# Fields to be serialized
fields = ("id", "name", "description", "price", "stock", "image", "category")
import os
import platform
import time
import sys
import importlib
import glob
import subprocess
import selectors
import multiprocess
import paramiko
from comm.platform import linux_win, run_cmd_list, run_cmd
from compute import Config_ini
from compute.log import Log
def get_local_path():
"""
:return: The current working directory of the calling process
"""
_path = os.getcwd()
return _path
def get_transfer_local_path():
"""
:return: The absolute path of the package root (two levels above this file)
"""
_path = os.path.dirname(os.path.dirname(__file__))
return _path
def get_algo_name():
"""
:return:Get the name of the currently running algorithm
"""
alg_name = Config_ini.alg_name
return alg_name
def get_gen_number():
"""
:return: The number of generations (max_gen) for the NAS iterations
"""
max_gen = Config_ini.max_gen
return int(max_gen)
def get_pop_siz():
"""
:return:Get population size
"""
pop_size = Config_ini.pop_size
return int(pop_size)
def get_exe_path():
exe_path = Config_ini.exe_path
return exe_path
def get_algo_local_dir():
"""
:return: The runtime directory of the currently running algorithm
"""
top_dir = get_local_path()
alg_name = Config_ini.alg_name
local_dir = os.path.join(top_dir, 'runtime', alg_name)
if not os.path.exists(os.path.dirname(local_dir)):
os.mkdir(os.path.dirname(local_dir))
return local_dir
def get_population_dir():
"""
:return: The populations directory under the algorithm's runtime folder, created if absent
"""
pop_dir = os.path.join(get_algo_local_dir(), 'populations')
if not os.path.exists(pop_dir):
os.makedirs(pop_dir)
return pop_dir
def get_top_dest_dir():
"""
:return: The algorithm's directory under the server root path
"""
alg_name = Config_ini.alg_name
tdd = os.path.join('~', alg_name)
return tdd
def get_train_ini_path():
"""
:return:Get the absolute path of train.ini
"""
return os.path.join(get_local_path(), 'train', 'train.ini')
def get_global_ini_path():
"""
:return:Get the absolute path of global.ini
"""
return os.path.join(get_local_path(), 'global.ini')
def exec_cmd_remote(_cmd, need_response=True):
p = subprocess.Popen(_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_str = None
stderr_str = None
if need_response:
sel = selectors.DefaultSelector()
sel.register(p.stdout, selectors.EVENT_READ)
sel.register(p.stderr, selectors.EVENT_READ)
stdout_ = None
stderr_ = None
for key, _ in sel.select():
data = key.fileobj.readlines()
if key.fileobj is p.stdout:
stdout_ = data
else:
stderr_ = data
if stdout_ is not None and len(stdout_) > 0:
stdout_str = ''.join([_.decode('utf-8') for _ in stdout_])
if stderr_ is not None and len(stderr_) > 0:
stderr_str = ''.join([_.decode('utf-8') for _ in stderr_])
return stdout_str, stderr_str
def detect_file_exit(ssh_name, ssh_pwd, ip, port, file_name):
transport = paramiko.Transport((ip, port))
transport.connect(username=ssh_name, password=ssh_pwd)
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.chdir('.')
try:
sftp.stat(file_name)
return True
except IOError:
return False
def init_work_dir(ssh_name, ssh_password, ip, port):
Log.debug('Start to init the work directory in each worker')
alg_name = get_algo_name()
cmd_ = list()
if detect_file_exit(ssh_name, ssh_password, ip, port, alg_name):
system_ver = linux_win(ssh_name, ssh_password, ip, port)
time_str = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
if system_ver == 'linux':
_bak_cmd = 'mv \'%s\' \'%s_bak_%s\'' % (alg_name, alg_name, time_str)
cmd_.append(_bak_cmd)
elif system_ver == 'windows':
_bak_cmd = 'ren %s %s_bak_%s' % (alg_name, alg_name, time_str)
cmd_.append(_bak_cmd)
else:
Log.debug('Current system %s is neither windows nor linux!' % ip)
_mk_cmd = 'mkdir %s' % alg_name
cmd_.append(_mk_cmd)
for cmd in cmd_:
Log.info('Execute the cmd: %s' % cmd)
stderr_ = run_cmd_list(ssh_name, ssh_password, ip, port, cmd_)
if stderr_:
Log.debug('Stderr: %s' % stderr_)
def init_work_dir_on_all_workers():
Log.info('Init the work directories on each worker')
gpu_info = Config_ini.gpu_info
ls_dataset = ['MNIST', 'CIFAR10', 'CIFAR100']
for sec in gpu_info.keys():
worker_name = gpu_info[sec]['worker_name']
worker_ip = gpu_info[sec]['worker_ip']
ssh_name = gpu_info[sec]['ssh_name']
ssh_password = gpu_info[sec]['ssh_password']
port = gpu_info[sec]['port']
init_work_dir(ssh_name, ssh_password, worker_ip, port)
transfer_training_files(ssh_name, ssh_password, worker_ip, port)
if Config_ini.dataset not in ls_dataset:
transfer_dataset_image(ssh_name, ssh_password, worker_ip, port, Config_ini.data_dir)
def makedirs(sftp, dir_path):
Log.info('Execute the operation: mkdir %s' % dir_path)
try:
sftp.stat(dir_path)  # already exists
except IOError:
sftp.mkdir(dir_path)
def exec_python(ssh_name, ssh_pwd, ip, port, py_file, args, python_exec):
top_dir = get_top_dest_dir()
py_file = os.path.join(top_dir, py_file).replace('~', '.').replace('\\', '/')
# write to compute.log
Log.info('Execute the remote python file [(%s)%s]' % (ip, py_file))
_exec_cmd = '%s %s %s' % (python_exec, py_file,
' '.join([' '.join([k, v]) for k, v in
args.items()]))
Log.info('Execute the cmd: %s' % _exec_cmd)
p = multiprocess.Process(target=run_cmd, args=(ssh_name, ssh_pwd, ip, port, _exec_cmd))
p.start()
def transfer_file_relative(ssh_name, ssh_pwd, ip, port, source, dest):
"""Use relative path to transfer file, both source and dest are relative path
"""
top_dir = get_top_dest_dir()
full_path_dest = os.path.join(top_dir, dest).replace('~', '.')
full_path_dest = full_path_dest.replace('\\', '/')
full_path_source = os.path.join(get_local_path(), source).replace('\\', '/')
transport = paramiko.Transport((ip, port))
transport.connect(username=ssh_name, password=ssh_pwd)
sftp = paramiko.SFTPClient.from_transport(transport)
# full_path_source = full_path_source.replace(' ','\\\\ ')
makedirs(sftp, os.path.dirname(full_path_dest))
try:
Log.info('Execute the operation: put %s to %s' % (full_path_source, full_path_dest))
sftp.put(full_path_source, full_path_dest)
Log.info('Transfer file successfully...')
except Exception as e:
Log.info('Transfer file failed....')
Log.debug(e)
sftp.close()
def sftp_makedirs(sftp_sess, dir_path):
cwd_bak = sftp_sess.getcwd()
dir_split = [dir_path]
while os.path.dirname(dir_path) != '' and os.path.dirname(dir_path) != '/':
dir_split = [os.path.dirname(dir_path)] + dir_split
dir_path = dir_split[0]
for dir_ in dir_split:
        try:
            # exists
            sftp_sess.stat(dir_)
        except IOError:
            # absent
            sftp_sess.mkdir(dir_)
sftp_sess.chdir(cwd_bak)
def sftp_transfer(sftp_sess, src_path, dst_path):
sftp_makedirs(sftp_sess, os.path.dirname(dst_path))
sftp_sess.put(src_path, dst_path)
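# Illustrative sketch (not part of the original; host and credentials are
# hypothetical): sftp_transfer creates the remote parent directories, then
# uploads the file.
def _demo_sftp_transfer():
    transport = paramiko.Transport(('192.0.2.10', 22))
    transport.connect(username='user', password='secret')
    try:
        sftp = paramiko.SFTPClient.from_transport(transport)
        sftp_transfer(sftp, 'local/model.py', 'work/algs/model.py')
    finally:
        transport.close()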
def transfer_training_files(ssh_name, ssh_password, worker_ip, port):
training_file_dep = [(v, v) for _, v in get_training_file_dependences().items()]
transport = paramiko.Transport((worker_ip, port))
transport.connect(username=ssh_name, password=ssh_password)
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.chdir('.')
root_dir = sftp.getcwd()
sub_file = os.path.dirname(os.path.dirname(__file__))
sub_file = os.path.join(sub_file, 'runtime/README.MD').replace('\\', '/')
training_file_dep = training_file_dep + [(sub_file, 'runtime/README.MD')]
top_dir = get_top_dest_dir()
for src, dst in training_file_dep:
full_path_source = os.path.join(get_transfer_local_path(), src)
full_path_dest = os.path.join(top_dir, dst).replace('~', root_dir).replace('\\', '/')
if full_path_dest.endswith('training.py'):
full_path_dest = os.path.join(os.path.dirname(os.path.dirname(full_path_dest)), 'training.py').replace('\\',
'/')
Log.debug('Start to sftp: `%s` ==> `%s`' % (full_path_source, full_path_dest))
sftp_transfer(sftp, full_path_source, full_path_dest)
transport.close()
def transfer_dataset_image(ssh_name, ssh_password, worker_ip, port, source):
transport = paramiko.Transport((worker_ip, port))
transport.connect(username=ssh_name, password=ssh_password)
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.chdir('.')
root_dir = sftp.getcwd()
source = source.replace('\\', '/')
dset_name = source.split('/')[-1]
    try:
        sftp.stat(dset_name)
    except IOError:
        sftp.mkdir(dset_name)
    for root, subdirs, files in os.walk(source):
        for subdir in subdirs:
            r_d = source.split("/")
            local_subdir = os.path.join(root, subdir).replace('\\', '/')
            l_d = local_subdir.split("/")
            r_m = l_d[len(r_d):]
            r_m = "/".join(r_m)
            remote_subdir = os.path.join(dset_name, r_m).replace('\\', '/')
            try:
                sftp.stat(remote_subdir)
            except IOError:
                sftp.mkdir(remote_subdir)
for file in files:
local_dir_path = os.path.join(root, file).replace('\\', '/')
l_d_p = local_dir_path.split("/")
r_d_p = l_d_p[len(r_d):]
r_d_p = "/".join(r_d_p)
remote_dir_path = os.path.join(dset_name, r_d_p).replace('\\', '/')
Log.info('Start to sftp dataset: `%s` ==> `%s`' %
(local_dir_path,
os.path.join(root_dir, remote_dir_path).replace('\\', '/')))
            try:
                sftp.stat(remote_dir_path)
            except IOError:
                sftp.put(local_dir_path, remote_dir_path)
transport.close()
def get_dependences_by_module_name(module_name):
import multiprocessing
with multiprocessing.Pool(1) as p:
res = p.map(__help_func, (module_name,))[0]
return res
def get_training_file_dependences():
f_list = list(filter(lambda x: not x.startswith(os.path.join(get_transfer_local_path(), 'runtime')) and
not x.startswith(os.path.join(get_transfer_local_path(), 'venv')) and
not x.startswith(os.path.join(get_transfer_local_path(), '__pycache__')),
glob.iglob(os.path.join(get_transfer_local_path(), '**/*.py'),
recursive=True))) + \
list(filter(lambda x: not x.startswith(os.path.join(get_transfer_local_path(), 'runtime')) and
not x.startswith(os.path.join(get_transfer_local_path(), '__pycache__')),
glob.iglob(os.path.join(get_transfer_local_path(), '**/*.ini'),
recursive=True)))
if platform.system() == 'Windows':
res = {
_.replace(get_transfer_local_path() + '\\', ''):
_.replace(get_transfer_local_path() + '\\', '')
for _ in f_list}
else:
res = {
_.replace(get_transfer_local_path() + '/', ''):
_.replace(get_transfer_local_path() + '/', '')
for _ in f_list}
return res
def get_all_edl_modules():
    """Get name and relative path of the modules in edl project
    """
    res = {}
    project_dir = get_local_path()
    for k, v in sys.modules.items():
        module_file = getattr(v, '__file__', None)
        if not module_file or 'site-packages' in module_file:
            continue
        if module_file.startswith(project_dir):
            res[k] = module_file.replace(project_dir + '/', '')
    return res
def __help_func(module_name):
    importlib.import_module(module_name)
    res = get_all_edl_modules()
    return res
if __name__ == '__main__':
print(get_training_file_dependences())
|
nilq/baby-python
|
python
|
# coding: utf-8
from django.core.management.base import BaseCommand, CommandError
from ...models import Account, Tweet
class Command(BaseCommand):
"""Generates the HTML version of all the Tweets.
Does this by re-saving every Tweet, one-by-one.
For one account:
./manage.py generate_tweet_html --account=philgyford
For all accounts:
./manage.py generate_tweet_html
"""
help = "Generates the HTML version of all the Tweets."
def add_arguments(self, parser):
parser.add_argument(
"--account",
action="store",
default=False,
help="Only generate for one Twitter account.",
)
def handle(self, *args, **options):
tweets = Tweet.objects.all()
# If a screen name is provided, only get the Tweets for that:
if options["account"]:
screen_name = options["account"]
try:
Account.objects.get(user__screen_name=screen_name)
except Account.DoesNotExist:
raise CommandError(
"There's no Account with a screen name of '%s'" % screen_name
)
tweets = tweets.filter(user__screen_name=screen_name)
for tweet in tweets:
tweet.save()
if options.get("verbosity", 1) > 0:
self.stdout.write("Generated HTML for %d Tweets" % tweets.count())
|
nilq/baby-python
|
python
|
import pytest
@pytest.fixture
def supply_AA_BB_CC():
    aa = 25
    bb = 35
    cc = 45
    return [aa, bb, cc]
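# Illustrative test (not part of the original file): a test receives the
# fixture's list simply by naming the fixture as an argument.
def test_sum_of_supplied_values(supply_AA_BB_CC):
    aa, bb, cc = supply_AA_BB_CC
    assert aa + bb + cc == 105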
|
nilq/baby-python
|
python
|
#!/usr/pkg/bin/python2.7
from __future__ import print_function
# grep: search for string patterns in files
import sys
import os
import argparse
import re
def _fg(file, pattern, ops):
    with open(file, 'r') as f:
        text = f.readlines()
    z = len(text)
    for i in range(z):
        line = text[i]
        result = pattern.search(line.strip())
        if not result:
            # fall back to a plain substring match
            if ops.i:
                result = ops.pattern.lower() in line.lower()
            else:
                result = ops.pattern in line.strip()
        if result:
            if ops.B:
                # -B prints the leading (before) context, clamped at the file start
                j = i if i < ops.B_num else ops.B_num
                print(''.join(text[i-j:i]), end='')
            print(line, end='')
            if ops.A:
                # -A prints the trailing (after) context, clamped at the file end
                j = z - i - 1 if i + ops.A_num >= z else ops.A_num
                print(''.join(text[i+1:i+j+1]), end='')
def _grep(args):
    # lowercasing the pattern does not make matching case-insensitive;
    # use the re.IGNORECASE flag instead
    flags = re.IGNORECASE if args.i else 0
    pattern = re.compile(args.pattern, flags)
    for file in args.files:
        _fg(file, pattern, args)
def main(argv):
# Initialize parser #
parser = argparse.ArgumentParser()
# Add options #
    parser.add_argument('-A', dest='A_num', action='store', type=int,
                        help='Prints trailing lines for each match')
parser.add_argument('-B', dest='B_num', action='store', type=int,
help='Prints leading lines for each match')
parser.add_argument('-i', action='store_true',
help='Makes pattern case insensitive')
parser.add_argument('files', nargs=argparse.REMAINDER)
    args = parser.parse_args(argv[1:])
    args.A = False
    args.B = False
    if args.A_num:
        args.A = True
    if args.B_num:
        args.B = True
    if len(args.files) < 2:
        parser.print_help()
        return
    args.pattern = args.files[0]
    args.files = args.files[1:]
    _grep(args=args)
if __name__ == '__main__':
main(sys.argv)
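# Example invocation (file name is hypothetical):
#   python grep.py -i -B 1 -A 2 error server.log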
|
nilq/baby-python
|
python
|
from collections import defaultdict
import dill
import numpy as np
import time
import torch.multiprocessing as mp
mp.set_sharing_strategy('file_system')
from starter_code.infrastructure.log import renderfn
from starter_code.sampler.hierarchy_utils import flatten_rewards, build_interval_tree, set_transformation_ids, get_subreturns_matrix, redistribute_rewards_recursive, visualize_episode_data, visualize_hrl_finish_episode
from starter_code.interfaces.interfaces import StepOutput, PolicyTransformParams
from starter_code.organism.domain_specific import preprocess_state_before_forward
def collect_train_samples_serial(epoch, max_steps, objects, pid=0, queue=None):
"""
Purpose: collect rollouts for max_steps steps
Return: stats_collector
"""
env = objects['env']
stats_collector = objects['stats_collector_builder']()
sampler = objects['sampler']
max_episode_length = objects['max_episode_length']
seed = int(1e6)*objects['seed'] + pid
env.seed(seed)
start = time.time()
num_steps = 0
while num_steps < max_steps:
max_steps_this_episode = min(max_steps - num_steps, max_episode_length)
episode_data = sampler.sample_episode(env=env, max_steps_this_episode=max_steps_this_episode)
stats_collector.append(episode_data)
num_steps += len(episode_data) # this is actually the number of high level timesteps
end = time.time()
objects['printer']('PID: {} Time to collect samples: {}'.format(pid, end-start))
if queue is not None:
queue.put([pid, stats_collector.data])
else:
return stats_collector
def collect_train_samples_parallel(epoch, max_steps, objects, num_workers=10):
"""
Purpose: collect rollouts for max_steps steps using num_workers workers
Return: stats_collector
"""
num_steps_per_worker = max_steps // num_workers
num_residual_steps = max_steps - num_steps_per_worker * num_workers
queue = mp.Manager().Queue()
workers = []
for i in range(num_workers):
worker_steps = num_steps_per_worker + num_residual_steps if i == 0 else num_steps_per_worker
worker_kwargs = dict(
epoch=epoch,
max_steps=worker_steps,
objects=objects,
pid=i+1,
queue=queue)
workers.append(mp.Process(target=collect_train_samples_serial, kwargs=worker_kwargs))
for j, worker in enumerate(workers):
worker.start()
start = time.time()
master_stats_collector = objects['stats_collector_builder']()
for j, worker in enumerate(workers):
worker_pid, worker_stats_data = queue.get()
master_stats_collector.extend(worker_stats_data)
end = time.time()
objects['printer']('Time to extend master_stats_collector: {}'.format(end-start))
for j, worker in enumerate(workers):
worker.join()
assert master_stats_collector.get_total_steps() == max_steps
return master_stats_collector
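# Illustrative sketch (the helper name and default values here are assumptions,
# not part of the original code) of the `objects` dict both collectors expect;
# every key read above must be present.
def make_collector_objects(env, stats_collector_builder, sampler, seed=0):
    return dict(
        env=env,
        stats_collector_builder=stats_collector_builder,
        sampler=sampler,
        max_episode_length=1000,  # assumed per-episode cap
        seed=seed,
        printer=print,
    )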
def step_agent(env, organism, state, step_info_builder, transform_params):
render = transform_params.render
if render: frame = renderfn(env=env, scale=1)
processed_state = preprocess_state_before_forward(state)
organism_output = organism.forward(processed_state, deterministic=transform_params.deterministic)
transform_params = transform_params if organism_output.action.is_subpolicy else None
transform_output = organism_output.action.transform(
state=state,
env=env,
transform_params=transform_params)
step_info = step_info_builder(
state=state,
organism_output=organism_output,
next_state=transform_output.next_state,
info=transform_output.transform_node
)
if render:
step_info.frame = frame
step_info.mask = 0 if transform_output.done else 1
step_output = StepOutput(
done=transform_output.done,
step_info=step_info,
option_length=transform_output.transform_node.get_length())
return transform_output.next_state, step_output
class Sampler():
def __init__(self, organism, step_info, deterministic):
self.organism = organism
self.deterministic = deterministic
self.step_info_builder = step_info
def begin_episode(self, env):
state = env.reset()
return state
def finish_episode(self, state, episode_data, env):
# 1. flatten reward
reward_chain = flatten_rewards(episode_data)
# 2. identify the index of the start and end of its chain
interval_tree = build_interval_tree(episode_data)
# 3. Set the index of the agents for t and t+1
set_transformation_ids(interval_tree)
# 4. get subreturns matrix
subreturns_matrix = get_subreturns_matrix(reward_chain, self.organism.args.gamma)
if self.organism.args.hrl_verbose:
visualize_hrl_finish_episode(episode_data, interval_tree, reward_chain, subreturns_matrix)
# 5. re-distribute rewards
redistribute_rewards_recursive(episode_data, subreturns_matrix)
return episode_data
def trim_step_infos(self, episode_data):
for step in episode_data:
if not step.hierarchy_info.leaf:
setattr(step.hierarchy_info, 'organism', step.hierarchy_info.organism.id_num)
self.trim_step_infos(step.hierarchy_info.path_data)
return episode_data
def get_bids_for_episode(self, episode_data):
        episode_bids = defaultdict(list)
for step in episode_data:
probs = step['action_dist']
for index, prob in enumerate(probs):
episode_bids[index].append(prob)
return episode_bids
def step_through_episode(self, state, env, max_steps_this_episode, render):
episode_data = []
global_clock = 0
while global_clock < max_steps_this_episode:
max_steps_this_option = max_steps_this_episode - global_clock
state, step_output = step_agent(
env=env,
organism=self.organism,
state=state,
step_info_builder=self.step_info_builder,
transform_params=PolicyTransformParams(
max_steps_this_option=max_steps_this_option,
deterministic=self.deterministic,
render=render)
)
episode_data.append(step_output.step_info)
if step_output.done:
break
global_clock += step_output.option_length
        if render:
            step_output.step_info.next_frame = renderfn(env=env, scale=1)  # render the last frame
        if not step_output.done:
            assert global_clock == max_steps_this_episode
return state, episode_data
def sample_episode(self, env, max_steps_this_episode, render=False):
state = self.begin_episode(env)
state, episode_data = self.step_through_episode(
state, env, max_steps_this_episode, render)
episode_data = self.finish_episode(state, episode_data, env)
episode_data = self.trim_step_infos(episode_data)
if self.organism.args.hrl_verbose:
visualize_episode_data(episode_data)
return episode_data
|
nilq/baby-python
|
python
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import six
from collections import defaultdict
from paddle.fluid import core
from paddle.fluid import framework
from paddle import _C_ops
final_state_name_mapping = {
"matmul_v2": {
"final_op_name": "final_state_matmul",
"transpose_x": "trans_x",
"transpose_y": "trans_y",
"x": "X",
"y": "Y",
"out": "Out",
},
# "elementwise_add": {
# "final_op_name": "final_state_add",
# "x": "X",
# "y": "Y",
# },
"trunc": {
"final_op_name": "final_state_trunc",
"x": "X",
"out": "Out",
},
"pool2d": {
"final_op_name": "final_state_pool2d",
"x": "X",
"kernel_size": "ksize",
"out": "Out",
},
"abs": {
"final_op_name": "final_state_abs",
"x": "X",
"out": "Out",
},
"digamma": {
"final_op_name": "final_state_digamma",
"x": "X",
"out": "Out",
},
"diagonal": {
"final_op_name": "final_state_diagonal",
"x": "Input",
"offset": "offset",
"axis1": "axis1",
"axis2": "axis2",
"out": "Out",
},
"one_hot": {
"final_op_name": "final_state_one_hot",
"x": "X",
"num_class": "depth",
"out": "Out",
}
}
class Tracer(core.Tracer):
"""
:api_attr: imperative
Tracer is used to execute and record the operators executed, to construct the
computation graph in dygraph model. Tracer has two mode, :code:`train_mode`
and :code:`eval_mode`. In :code:`train_mode`, Tracer would add backward network
automatically and perform AutoGrad by method :code:`loss.backward()`.
In :code:`eval_mode`, Tracer would not add backward network.
This is a low level API, users don't need to use it directly.
"""
def __init__(self):
super(Tracer, self).__init__()
self._train_mode = True
def eager_trace_op(self,
type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
function_ptr = _C_ops.__dict__[type]
core_ops_args_info = _C_ops.get_core_ops_args_info()
core_ops_args_type_info = _C_ops.get_core_ops_args_type_info()
core_ops_returns_info = _C_ops.get_core_ops_returns_info()
op_args = core_ops_args_info[type]
op_args_type = core_ops_args_type_info[type]
op_returns = core_ops_returns_info[type]
arg_list = []
for i in range(len(op_args)):
arg_name = op_args[i]
arg_type = op_args_type[i]
if arg_name in inputs.keys():
arg_to_append = inputs[arg_name]
elif arg_name in outputs.keys():
arg_to_append = outputs[arg_name]
else:
if "Num" in arg_name:
# Remove "Num" suffix to get out_name
out_name = arg_name[:-3]
assert out_name in outputs.keys()
num_outs = len(outputs[out_name])
arg_to_append = num_outs
else:
arg_to_append = None
if arg_to_append is None:
arg_list.append(arg_to_append)
elif arg_type == "tensor":
if isinstance(arg_to_append, list):
arg_list.append(arg_to_append[0])
else:
arg_list.append(arg_to_append)
elif arg_type == "list":
assert isinstance(arg_to_append, list)
arg_list.append(arg_to_append)
else:
assert arg_type == "int"
assert isinstance(arg_to_append, int)
arg_list.append(arg_to_append)
attrs_list = []
for k, v in attrs.items():
attrs_list.append(k)
attrs_list.append(v)
returns = function_ptr(*arg_list, *attrs_list)
if isinstance(returns, tuple):
for i in range(len(op_returns)):
retname = op_returns[i]
if retname in outputs.keys():
# Replaced outputs by function returns
if isinstance(returns[i], list):
for j in range(len(returns[i])):
outputs[retname][j].reconstruct_from_(returns[i][j],
False)
else:
if isinstance(outputs[retname], list):
outputs[retname][0].reconstruct_from_(returns[i],
False)
else:
outputs[retname].reconstruct_from_(returns[i],
False)
elif isinstance(returns, list):
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
for j in range(len(returns)):
outputs[key][j].reconstruct_from_(returns[j], False)
else:
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
if isinstance(outputs[key], list):
outputs[key][0].reconstruct_from_(returns, False)
else:
outputs[key].reconstruct_from_(returns, False)
def eager_final_state_trace_op(self,
type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
assert type in final_state_name_mapping.keys()
final_state_type = final_state_name_mapping[type]["final_op_name"]
function_ptr = _C_ops.__dict__[final_state_type]
core_ops_args_info = _C_ops.get_final_state_core_ops_args_info()
core_ops_args_type_info = _C_ops.get_final_state_core_ops_args_type_info(
)
core_ops_returns_info = _C_ops.get_final_state_core_ops_returns_info()
op_args = core_ops_args_info[final_state_type]
op_args_type = core_ops_args_type_info[final_state_type]
op_returns = core_ops_returns_info[final_state_type]
arg_list = []
for i in range(len(op_args)):
eager_arg_name = op_args[i]
arg_type = op_args_type[i]
assert eager_arg_name in final_state_name_mapping[type].keys()
arg_name = final_state_name_mapping[type][eager_arg_name]
if arg_name in inputs.keys():
arg_to_append = inputs[arg_name]
elif arg_name in outputs.keys():
arg_to_append = outputs[arg_name]
elif arg_name in attrs.keys() and arg_type == "":
arg_to_append = attrs[arg_name]
else:
# dispensable
arg_to_append = None
if arg_type == "":
# attribute
arg_list.append(arg_to_append)
elif arg_type == "tensor":
if isinstance(arg_to_append, list):
arg_list.append(arg_to_append[0])
else:
arg_list.append(arg_to_append)
elif arg_type == "list":
assert isinstance(arg_to_append, list)
arg_list.append(arg_to_append)
else:
assert arg_to_append is None
arg_list.append(arg_to_append)
returns = function_ptr(*arg_list)
if isinstance(returns, tuple):
for i in range(len(op_returns)):
eager_retname = op_returns[i]
assert eager_retname in final_state_name_mapping[type].keys()
retname = final_state_name_mapping[type][eager_retname]
if retname in outputs.keys():
# Replaced outputs by function returns
if isinstance(returns[i], list):
for j in range(len(returns[i])):
outputs[retname][j].reconstruct_from_(returns[i][j],
False)
else:
outputs[retname][0].reconstruct_from_(returns[i], False)
elif isinstance(returns, list):
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
for j in range(len(returns)):
outputs[key][j].reconstruct_from_(returns[j], False)
else:
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
if isinstance(outputs[key], list):
outputs[key][0].reconstruct_from_(returns, False)
else:
outputs[key].reconstruct_from_(returns, False)
def trace_op(self,
type,
inputs,
outputs,
attrs,
stop_gradient=False,
inplace_map=None):
if not framework._in_legacy_dygraph():
# inputs : {"sum": [tensor], ...}
# outputs : {"sum": [tensor], ...}
if type in final_state_name_mapping.keys():
final_state_type = final_state_name_mapping[type][
"final_op_name"]
assert final_state_type in _C_ops.__dict__
self.eager_final_state_trace_op(type, inputs, outputs, attrs,
stop_gradient, inplace_map)
else:
self.eager_trace_op(type, inputs, outputs, attrs, stop_gradient,
inplace_map)
else:
self.trace(type, inputs, outputs, attrs,
framework._current_expected_place(), self._has_grad and
not stop_gradient, inplace_map if inplace_map else {})
def train_mode(self):
self._train_mode = True
def eval_mode(self):
self._train_mode = False
|
nilq/baby-python
|
python
|
#-*- coding:utf-8 -*-
# 2.2 Variables
message = "Hello Python world!"
print(message)
message = "The current time is: 2021-06-11 21:07:18"
print(message)
# 2.3 Strings
name = "ada love lace"
print(name.title())
print(name.upper())
print(name.lower())
first_name = "ada"
last_name = "love lace"
full_name = first_name + " " + last_name
print(full_name)
print("Python:\nC:\nObject\n")
print(len(' python '))
print(len(' python '.rstrip()))
print(len(' python '.lstrip()))
print(len(' python '.strip()))
my_name = "my name's liuhanyu"
the_name = 'my "name" is liuhanyu'
# 2.4 Numbers
print(2 + 3)
print(2 - 3)
print(2 * 3)
print(2 / 3)
print(2 % 3)
# Exponentiation
print(2 ** 3)
# Order of operations
print(2 + 3 ** 2 / 2 - 1)
print(0.1 + 0.2)  # the number of decimal places can be surprising; don't worry about it
age = 23
print("Happy " + str(age) + 'rd Birthday!')
print(3 / 2)
print(3 / 2.0)
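# A modern alternative (not part of the original lesson): an f-string avoids
# the explicit str() conversion used above.
print(f"Happy {age}rd Birthday!")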
|
nilq/baby-python
|
python
|
from .model_108_basicDdSt import BasicDdSt
|
nilq/baby-python
|
python
|
import numpy as np
from py_wake.site._site import UniformWeibullSite
from py_wake.wind_turbines import OneTypeWindTurbines
wt_x = [134205, 134509, 134813, 135118, 135423]
wt_y = [538122, 538095, 538067, 538037, 538012]
power_curve = np.array([[3.0, 0.0],
[4.0, 15.0],
[5.0, 121.0],
[6.0, 251.0],
[7.0, 433.0],
[8.0, 667.0],
[9.0, 974.0],
[10.0, 1319.0],
[11.0, 1675.0],
[12.0, 2004.0],
[13.0, 2281.0],
[14.0, 2463.0],
[15.0, 2500.0],
[16.0, 2500.0],
[17.0, 2500.0],
[18.0, 2500.0],
[19.0, 2500.0],
[20.0, 2500.0],
[21.0, 2500.0],
[22.0, 2500.0],
[23.0, 2500.0],
[24.0, 2500.0],
[25.0, 2500.0]])
# Calculated ct curve using PHATAS (BEM code from ECN)
ct_curve = np.array([[3.0, 0.0],
[4.0, 0.85199],
[5.0, 0.85199],
[6.0, 0.80717],
[7.0, 0.78455],
[8.0, 0.76444],
[9.0, 0.72347],
[10.0, 0.66721],
[11.0, 0.62187],
[12.0, 0.57274],
[13.0, 0.50807],
[14.0, 0.42737],
[15.0, 0.33182],
[16.0, 0.26268],
[17.0, 0.21476],
[18.0, 0.18003],
[19.0, 0.15264],
[20.0, 0.13089],
[21.0, 0.11374],
[22.0, 0.09945],
[23.0, 0.08766],
[24.0, 0.07796],
[25.0, 0.06971]])
class N80(OneTypeWindTurbines):
def __init__(self):
OneTypeWindTurbines.__init__(self, 'N80', diameter=80.0, hub_height=80.0,
ct_func=self._ct, power_func=self._power, power_unit='kW')
def _ct(self, u):
return np.interp(u, ct_curve[:, 0], ct_curve[:, 1])
def _power(self, u):
return np.interp(u, power_curve[:, 0], power_curve[:, 1])
def main():
if __name__ == '__main__':
wt = N80()
print('Diameter', wt.diameter())
print('Hub height', wt.hub_height())
ws = np.arange(3, 25)
import matplotlib.pyplot as plt
plt.plot(ws, wt.power(ws), '.-')
plt.show()
main()
|
nilq/baby-python
|
python
|
import unittest
class Solution:
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
if not citations:
return 0
citations.sort(reverse=True)
h = 0
for i in citations:
if i > h:
h += 1
else:
break
return h
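# Alternative sketch (not part of the original solution): an O(n) variant that
# replaces sorting with a bounded count array; h can never exceed len(citations).
def h_index_counting(citations):
    n = len(citations)
    counts = [0] * (n + 1)
    for c in citations:
        counts[min(c, n)] += 1  # citations above n cannot raise h beyond n
    total = 0
    for h in range(n, -1, -1):
        total += counts[h]  # papers with at least h citations
        if total >= h:
            return h
    return 0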
class Test(unittest.TestCase):
def test(self):
self._test([3, 0, 6, 1, 5], 3)
self._test([2, 1], 1)
def _test(self, citations, expected):
actual = Solution().hIndex(citations)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Console script for rps."""
import click
from .log import get_log
import asyncio
from .server import Site
from .api import start
def validate_url(ctx, param, value):
    try:
        scheme, rest = value.split('://', 1)
        host, port = rest.rsplit(':', 1)
        if scheme != 'tcp' or not host or not port.isdigit():
            raise ValueError
        return value
    except (ValueError, AttributeError):
        raise click.BadParameter('url need to be format: tcp://ipv4:port')
@click.command()
@click.option('--unit_id', default=1,
envvar='UNIT_ID',
help='the NM’s Unit ID, ENV: UNIT_ID, default: 1')
@click.option('--device_type', default='plc_430',
envvar='DEVICE_TYPE',
help='NM_DeviceType, also ENV: DEVICE_TYPE')
@click.option('--port', default=80,
envvar='SVC_PORT',
help='Api port, default=80, ENV: SVC_PORT')
@click.option('--debug', is_flag=True)
def main(unit_id, device_type, port, debug):
click.echo("See more documentation at http://www.mingvale.com")
info = {
'unit_id': unit_id,
'device_type': device_type,
'api_port': port,
}
log = get_log(debug)
log.info('Basic Information: {}'.format(info))
    loop = asyncio.get_event_loop()
    loop.set_debug(False)
    api_task = None  # guard against NameError if startup fails early
    try:
site = Site(unit_id, device_type, loop)
site.start()
api_task = loop.create_task(start(port, site))
loop.run_forever()
except OSError as e:
log.error(e)
except KeyboardInterrupt:
if api_task:
api_task.cancel()
loop.run_until_complete(api_task)
finally:
loop.stop()
loop.close()
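# Example invocation (command name and values are hypothetical; the options can
# also be supplied via the UNIT_ID, DEVICE_TYPE and SVC_PORT environment variables):
#   UNIT_ID=3 DEVICE_TYPE=plc_430 SVC_PORT=8080 rps --debug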
|
nilq/baby-python
|
python
|
from .controllers.product import ProductController
from .models import commerce
from .models import inventory
from django import forms
from django.db.models import Q
class ApplyCreditNoteForm(forms.Form):
required_css_class = 'label-required'
def __init__(self, user, *a, **k):
''' User: The user whose invoices should be made available as
choices. '''
self.user = user
super(ApplyCreditNoteForm, self).__init__(*a, **k)
self.fields["invoice"].choices = self._unpaid_invoices
def _unpaid_invoices(self):
invoices = commerce.Invoice.objects.filter(
status=commerce.Invoice.STATUS_UNPAID,
).select_related("user")
invoices_annotated = [invoice.__dict__ for invoice in invoices]
users = dict((inv.user.id, inv.user) for inv in invoices)
for invoice in invoices_annotated:
invoice.update({
"user_id": users[invoice["user_id"]].id,
"user_email": users[invoice["user_id"]].email,
})
key = lambda inv: (0 - (inv["user_id"] == self.user.id), inv["id"]) # noqa
invoices_annotated.sort(key=key)
template = (
'Invoice %(id)d - user: %(user_email)s (%(user_id)d) '
'- $%(value)d'
)
return [
(invoice["id"], template % invoice)
for invoice in invoices_annotated
]
invoice = forms.ChoiceField(
required=True,
)
verify = forms.BooleanField(
required=True,
help_text="Have you verified that this is the correct invoice?",
)
class CancellationFeeForm(forms.Form):
required_css_class = 'label-required'
percentage = forms.DecimalField(
required=True,
min_value=0,
max_value=100,
)
class ManualCreditNoteRefundForm(forms.ModelForm):
required_css_class = 'label-required'
class Meta:
model = commerce.ManualCreditNoteRefund
fields = ["reference"]
class ManualPaymentForm(forms.ModelForm):
required_css_class = 'label-required'
class Meta:
model = commerce.ManualPayment
fields = ["reference", "amount"]
# Products forms -- none of these have any fields: they are to be subclassed
# and the fields added as needs be. ProductsForm (the function) is responsible
# for the subclassing.
def ProductsForm(category, products):
''' Produces an appropriate _ProductsForm subclass for the given render
type. '''
# Each Category.RENDER_TYPE value has a subclass here.
cat = inventory.Category
RENDER_TYPES = {
cat.RENDER_TYPE_QUANTITY: _QuantityBoxProductsForm,
cat.RENDER_TYPE_RADIO: _RadioButtonProductsForm,
cat.RENDER_TYPE_ITEM_QUANTITY: _ItemQuantityProductsForm,
cat.RENDER_TYPE_CHECKBOX: _CheckboxProductsForm,
}
# Produce a subclass of _ProductsForm which we can alter the base_fields on
class ProductsForm(RENDER_TYPES[category.render_type]):
pass
products = list(products)
products.sort(key=lambda prod: prod.order)
ProductsForm.set_fields(category, products)
if category.render_type == inventory.Category.RENDER_TYPE_ITEM_QUANTITY:
ProductsForm = forms.formset_factory(
ProductsForm,
formset=_ItemQuantityProductsFormSet,
)
return ProductsForm
class _HasProductsFields(object):
    ''' Base class for product entry forms. '''
    PRODUCT_PREFIX = "product_"
def __init__(self, *a, **k):
if "product_quantities" in k:
initial = self.initial_data(k["product_quantities"])
k["initial"] = initial
del k["product_quantities"]
super(_HasProductsFields, self).__init__(*a, **k)
@classmethod
def field_name(cls, product):
return cls.PRODUCT_PREFIX + ("%d" % product.id)
@classmethod
def set_fields(cls, category, products):
''' Sets the base_fields on this _ProductsForm to allow selecting
from the provided products. '''
pass
    @classmethod
    def initial_data(cls, product_quantities):
        ''' Prepares initial data for an instance of this form.
        product_quantities is a sequence of (product,quantity) tuples '''
        return {}
def product_quantities(self):
''' Yields a sequence of (product, quantity) tuples from the
cleaned form data. '''
return iter([])
    def add_product_error(self, product, error):
        ''' Adds an error to the given product's field '''
        # if product in field_names:
        #     field = field_names[product]
        # elif isinstance(product, inventory.Product):
        #     return
        # else:
        #     field = None
        self.add_error(self.field_name(product), error)
class _ProductsForm(_HasProductsFields, forms.Form):
required_css_class = 'label-required'
pass
class _QuantityBoxProductsForm(_ProductsForm):
''' Products entry form that allows users to enter quantities
of desired products. '''
@classmethod
def set_fields(cls, category, products):
for product in products:
if product.description:
help_text = "$%d each -- %s" % (
product.price,
product.description,
)
else:
help_text = "$%d each" % product.price
field = forms.IntegerField(
label=product.name,
help_text=help_text,
min_value=0,
max_value=500, # Issue #19. We should figure out real limit.
)
cls.base_fields[cls.field_name(product)] = field
@classmethod
def initial_data(cls, product_quantities):
initial = {}
for product, quantity in product_quantities:
initial[cls.field_name(product)] = quantity
return initial
def product_quantities(self):
for name, value in self.cleaned_data.items():
if name.startswith(self.PRODUCT_PREFIX):
product_id = int(name[len(self.PRODUCT_PREFIX):])
yield (product_id, value)
class _RadioButtonProductsForm(_ProductsForm):
''' Products entry form that allows users to enter quantities
of desired products. '''
FIELD = "chosen_product"
@classmethod
def set_fields(cls, category, products):
choices = []
for product in products:
choice_text = "%s -- $%d" % (product.name, product.price)
choices.append((product.id, choice_text))
if not category.required:
choices.append((0, "No selection"))
cls.base_fields[cls.FIELD] = forms.TypedChoiceField(
label=category.name,
widget=forms.RadioSelect,
choices=choices,
empty_value=0,
coerce=int,
)
@classmethod
def initial_data(cls, product_quantities):
initial = {}
for product, quantity in product_quantities:
if quantity > 0:
initial[cls.FIELD] = product.id
break
return initial
def product_quantities(self):
ours = self.cleaned_data[self.FIELD]
choices = self.fields[self.FIELD].choices
for choice_value, choice_display in choices:
if choice_value == 0:
continue
yield (
choice_value,
1 if ours == choice_value else 0,
)
def add_product_error(self, product, error):
self.add_error(self.FIELD, error)
class _CheckboxProductsForm(_ProductsForm):
''' Products entry form that allows users to say yes or no
to desired products. Basically, it's a quantity form, but the quantity
is either zero or one.'''
@classmethod
def set_fields(cls, category, products):
for product in products:
field = forms.BooleanField(
                label='%s -- $%d' % (product.name, product.price),
required=False,
)
cls.base_fields[cls.field_name(product)] = field
@classmethod
def initial_data(cls, product_quantities):
initial = {}
for product, quantity in product_quantities:
initial[cls.field_name(product)] = bool(quantity)
return initial
def product_quantities(self):
for name, value in self.cleaned_data.items():
if name.startswith(self.PRODUCT_PREFIX):
product_id = int(name[len(self.PRODUCT_PREFIX):])
yield (product_id, int(value))
class _ItemQuantityProductsForm(_ProductsForm):
''' Products entry form that allows users to select a product type, and
enter a quantity of that product. This version _only_ allows a single
product type to be purchased. This form is usually used in concert with
the _ItemQuantityProductsFormSet to allow selection of multiple
products.'''
CHOICE_FIELD = "choice"
QUANTITY_FIELD = "quantity"
@classmethod
def set_fields(cls, category, products):
choices = []
if not category.required:
choices.append((0, "---"))
for product in products:
choice_text = "%s -- $%d each" % (product.name, product.price)
choices.append((product.id, choice_text))
cls.base_fields[cls.CHOICE_FIELD] = forms.TypedChoiceField(
label=category.name,
widget=forms.Select,
choices=choices,
initial=0,
empty_value=0,
coerce=int,
)
cls.base_fields[cls.QUANTITY_FIELD] = forms.IntegerField(
label="Quantity", # TODO: internationalise
min_value=0,
max_value=500, # Issue #19. We should figure out real limit.
)
@classmethod
def initial_data(cls, product_quantities):
initial = {}
for product, quantity in product_quantities:
if quantity > 0:
initial[cls.CHOICE_FIELD] = product.id
initial[cls.QUANTITY_FIELD] = quantity
break
return initial
def product_quantities(self):
our_choice = self.cleaned_data[self.CHOICE_FIELD]
our_quantity = self.cleaned_data[self.QUANTITY_FIELD]
choices = self.fields[self.CHOICE_FIELD].choices
for choice_value, choice_display in choices:
if choice_value == 0:
continue
yield (
choice_value,
our_quantity if our_choice == choice_value else 0,
)
def add_product_error(self, product, error):
if self.CHOICE_FIELD not in self.cleaned_data:
return
if product.id == self.cleaned_data[self.CHOICE_FIELD]:
self.add_error(self.CHOICE_FIELD, error)
self.add_error(self.QUANTITY_FIELD, error)
class _ItemQuantityProductsFormSet(_HasProductsFields, forms.BaseFormSet):
required_css_class = 'label-required'
@classmethod
def set_fields(cls, category, products):
raise ValueError("set_fields must be called on the underlying Form")
@classmethod
def initial_data(cls, product_quantities):
''' Prepares initial data for an instance of this form.
product_quantities is a sequence of (product,quantity) tuples '''
f = [
{
_ItemQuantityProductsForm.CHOICE_FIELD: product.id,
_ItemQuantityProductsForm.QUANTITY_FIELD: quantity,
}
for product, quantity in product_quantities
if quantity > 0
]
return f
def product_quantities(self):
''' Yields a sequence of (product, quantity) tuples from the
cleaned form data. '''
products = set()
# Track everything so that we can yield some zeroes
all_products = set()
for form in self:
if form.empty_permitted and not form.cleaned_data:
# This is the magical empty form at the end of the list.
continue
for product, quantity in form.product_quantities():
all_products.add(product)
if quantity == 0:
continue
if product in products:
form.add_error(
_ItemQuantityProductsForm.CHOICE_FIELD,
"You may only choose each product type once.",
)
form.add_error(
_ItemQuantityProductsForm.QUANTITY_FIELD,
"You may only choose each product type once.",
)
products.add(product)
yield product, quantity
for product in (all_products - products):
yield product, 0
def add_product_error(self, product, error):
for form in self.forms:
form.add_product_error(product, error)
    @property
    def errors(self):
        _errors = super(_ItemQuantityProductsFormSet, self).errors
        if all(not form.errors for form in self.forms):
            return []
        else:
            return _errors
class VoucherForm(forms.Form):
required_css_class = 'label-required'
voucher = forms.CharField(
label="Voucher code",
help_text="If you have a voucher code, enter it here",
required=False,
)
def staff_products_form_factory(user):
''' Creates a StaffProductsForm that restricts the available products to
those that are available to a user. '''
products = inventory.Product.objects.all()
products = ProductController.available_products(user, products=products)
product_ids = [product.id for product in products]
product_set = inventory.Product.objects.filter(id__in=product_ids)
class StaffProductsForm(forms.Form):
''' Form for allowing staff to add an item to a user's cart. '''
product = forms.ModelChoiceField(
widget=forms.Select,
queryset=product_set,
)
quantity = forms.IntegerField(
min_value=0,
)
return StaffProductsForm
def staff_products_formset_factory(user):
''' Creates a formset of StaffProductsForm for the given user. '''
form_type = staff_products_form_factory(user)
return forms.formset_factory(form_type)
class InvoicesWithProductAndStatusForm(forms.Form):
required_css_class = 'label-required'
invoice = forms.ModelMultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
queryset=commerce.Invoice.objects.all(),
)
def __init__(self, *a, **k):
category = k.pop('category', None) or []
product = k.pop('product', None) or []
status = int(k.pop('status', None) or 0)
category = [int(i) for i in category]
product = [int(i) for i in product]
super(InvoicesWithProductAndStatusForm, self).__init__(*a, **k)
qs = commerce.Invoice.objects.filter(
status=status or commerce.Invoice.STATUS_UNPAID,
).filter(
Q(lineitem__product__category__in=category) |
Q(lineitem__product__in=product)
)
# Uniqify
qs = commerce.Invoice.objects.filter(
id__in=qs,
)
qs = qs.select_related("user__attendee__attendeeprofilebase")
qs = qs.order_by("id")
self.fields['invoice'].queryset = qs
# self.fields['invoice'].initial = [i.id for i in qs] # UNDO THIS LATER
class InvoiceEmailForm(InvoicesWithProductAndStatusForm):
ACTION_PREVIEW = 1
ACTION_SEND = 2
ACTION_CHOICES = (
(ACTION_PREVIEW, "Preview"),
(ACTION_SEND, "Send emails"),
)
from_email = forms.CharField()
subject = forms.CharField()
body = forms.CharField(
widget=forms.Textarea,
)
action = forms.TypedChoiceField(
widget=forms.RadioSelect,
coerce=int,
choices=ACTION_CHOICES,
initial=ACTION_PREVIEW,
)
|
nilq/baby-python
|
python
|
from typing import Tuple
from hypothesis import given
from gon.base import (Compound,
Geometry)
from gon.hints import Scalar
from tests.utils import (equivalence,
robust_invert)
from . import strategies
@given(strategies.geometries_with_coordinates_pairs)
def test_basic(geometry_with_factors: Tuple[Geometry, Scalar, Scalar]
) -> None:
geometry, factor_x, factor_y = geometry_with_factors
result = geometry.scale(factor_x, factor_y)
assert isinstance(result, Geometry)
assert equivalence(isinstance(result, Compound),
isinstance(geometry, Compound))
@given(strategies.geometries_with_non_zero_coordinates_pairs)
def test_round_trip(geometry_with_non_zero_factors
: Tuple[Geometry, Scalar, Scalar]) -> None:
geometry, factor_x, factor_y = geometry_with_non_zero_factors
result = geometry.scale(factor_x, factor_y)
assert (result.scale(robust_invert(factor_x), robust_invert(factor_y))
== geometry)
@given(strategies.geometries)
def test_neutral_factor(geometry: Geometry) -> None:
result = geometry.scale(1)
assert result == geometry
@given(strategies.empty_compounds_with_coordinates_pairs)
def test_empty(geometry_with_factors: Tuple[Geometry, Scalar, Scalar]) -> None:
geometry, factor_x, factor_y = geometry_with_factors
result = geometry.scale(factor_x, factor_y)
assert result == geometry
|
nilq/baby-python
|
python
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from math import fabs
import compas
import numpy as np
import scipy as sp
import scipy.linalg
import scipy.sparse
import scipy.sparse.linalg
def compute_displacement_x(mesh, gate_points, x_size, z_size, coeff_diffusion=1):
"""calculate an x displacement for every non-gate non-boundary point,
given the pre-assigned displacements of the gate points. The left and
right edges of the wall are treated as fixed boundaries (Dirichlet) and
top and bottom as zero-gradient (Neumann)
"""
n = z_size
m = x_size
nm= n*m
rhs = np.zeros(nm) #equation right hand side
dia1 = np.ones(nm)*-4 #main diagonal of K matrix
dia2 = np.ones(nm-1) #second diagonal (up) corresponding to right neighbors
dia2[np.arange(0,nm-1,n)]+=1 #Neumann BC using ghost-points
dia2[np.arange(n-1,nm-1,n)]-=1 #Neumann BC using ghost-points
dia3 = np.ones(nm-1) #diagonal corresponding to left neighbors
dia3[np.arange(n-2,nm-1,n)]+=1 #Neumann
dia3[np.arange(n-1,nm-1,n)]-=1 #Neumann
    dia4 = np.ones(nm-n) #diagonal corresponding to up neighbors
dia5 = np.ones(nm-n) #diagonal corresponding to bottom neighbors
#sparse coef. (stiffness) matrix built out of 5 diagonals
K = sp.sparse.diags([dia1, dia2, dia3, dia4, dia5], [0, 1, -1, n, -n], format='csc')
#BCdofs is the vector of degrees of freedom with dirichlet (displacement) boundary condition
#at first left and right sides are added to BCdofs
BCdofs=np.concatenate((np.arange(0,n),np.arange(nm-n,nm)))
#assigning zero displacement to left and right
BCvals=np.zeros(BCdofs.size)
    #for applying the dirichlet, the rhs of dofs with displacement BC is set to the according displacement
rhs[BCdofs]=BCvals
#loop over gate points as additional displacement BCs (non-zero, in contrast to left & right edges)
for vertex in (gate_points):
glob_id = mesh.vertex_attribute(vertex, "glob_id")
x_disp = mesh.vertex_attribute(vertex, "x_disp")
rhs[glob_id]=x_disp
BCdofs = np.append(BCdofs,glob_id)
    #constructing an identity matrix (named Iinter) of size (nm x nm) with zero value on diagonals
# of displacement BC dofs. If applied on (multiplied by) K, only non-BC (internal) dofs will remain
diaInter=np.ones(nm)
diaInter[BCdofs]=0.0
Iinter = sp.sparse.diags([diaInter], [0], format='csc')
    #constructing an identity matrix (named Ibc) of size (nm x nm) with zero value on diagonals
# of internal (non-BC) dofs. If applied on (multiplied by) K, only BC (internal) dofs will remain
diaBC=np.zeros(nm)
diaBC[BCdofs]=1.0
Ibc = sp.sparse.diags([diaBC], [0], format='csc')
#applying dirichlet on K, by zeroing out rows and columns of BC dofs and setting BC-diagonals to 1
K_BC= Iinter * K * Iinter + Ibc
#modifying the rhs for non-BC dofs to account for the eliminated dofs
    # the operation below assigns -K_internal*x_BC to rhs_internal (and doesn't change rhs_BC)
rhs = rhs - Iinter * (K-(Ibc * K ))* rhs
#solving the system
sol = scipy.sparse.linalg.spsolve(K_BC,rhs)
return sol
def compute_displacement_z(mesh, gate_points, x_size, z_size, coeff_diffusion=1):
"""calculate an x displacement for every non-gate non-boundary point,
given the pre-assigned displacements of the gate points. The left and
right edges of the wall are treated as fixed boundaries (Dirichlet) and
top and bottom as zero-gradient (Neumann)
"""
n = z_size
m = x_size
nm= n*m
rhs = np.zeros(nm) #equation right hand side
dia1 = np.ones(nm)*-4 #main diagonal of K matrix
dia2 = np.ones(nm-1) #second diagonal (up) corresponding to right neighbors
dia3 = np.ones(nm-1) #diagonal corresponding to left neighbors
    dia4 = np.ones(nm-n) #diagonal corresponding to up neighbors
dia4[:n]+=1
dia5 = np.ones(nm-n) #diagonal corresponding to bottom neighbors
dia5[-n:]+=1
#sparse coef. (stiffness) matrix built out of 5 diagonals
K = sp.sparse.diags([dia1, dia2, dia3, dia4, dia5], [0, 1, -1, n, -n], format='csc')
#BCdofs is the vector of degrees of freedom with dirichlet (displacement) boundary condition
BCdofs=np.concatenate((np.arange(0,nm,n),np.arange(n-1,nm,n)))
BCvals=np.zeros(BCdofs.size)
    #for applying the dirichlet, the rhs of dofs with displacement BC is set to the according displacement
rhs[BCdofs]=BCvals
#loop over gate points as additional displacement BCs (non-zero, in contrast to left & right edges)
for vertex in (gate_points):
glob_id = mesh.vertex_attribute(vertex, "glob_id")
        z_disp = mesh.vertex_attribute(vertex, "z_disp")
        rhs[glob_id]=z_disp
BCdofs = np.append(BCdofs,glob_id)
    #constructing an identity matrix (named Iinter) of size (nm x nm) with zero value on diagonals
# of displacement BC dofs. If applied on (multiplied by) K, only non-BC (internal) dofs will remain
diaInter=np.ones(nm)
diaInter[BCdofs]=0.0
Iinter = sp.sparse.diags([diaInter], [0], format='csc')
    #constructing an identity matrix (named Ibc) of size (nm x nm) with zero value on diagonals
# of internal (non-BC) dofs. If applied on (multiplied by) K, only BC (internal) dofs will remain
diaBC=np.zeros(nm)
diaBC[BCdofs]=1.0
Ibc = sp.sparse.diags([diaBC], [0], format='csc')
#applying dirichlet on K, by zeroing out rows and columns of BC dofs and setting BC-diagonals to 1
K_BC= Iinter * K * Iinter + Ibc
#modifying the rhs for non-BC dofs to account for the eliminated dofs
    # the operation below assigns -K_internal*x_BC to rhs_internal (and doesn't change rhs_BC)
rhs = rhs - Iinter * (K-(Ibc * K ))* rhs
#solving the system
sol = scipy.sparse.linalg.spsolve(K_BC,rhs)
return sol
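# Minimal sketch (not part of the original) of the Dirichlet-elimination trick used
# above, on a 3-dof toy system: dof 0 is prescribed to u=2, dofs 1 and 2 are free.
def _demo_dirichlet_masking():
    K = sp.sparse.diags([[-4.0, -4.0, -4.0], [1.0, 1.0], [1.0, 1.0]],
                        [0, 1, -1], format='csc')
    rhs = np.zeros(3)
    rhs[0] = 2.0  # prescribed displacement on the BC dof
    dia_inter = np.array([0.0, 1.0, 1.0])  # 1 on free dofs, 0 on BC dofs
    Iinter = sp.sparse.diags([dia_inter], [0], format='csc')
    Ibc = sp.sparse.diags([1.0 - dia_inter], [0], format='csc')
    K_BC = Iinter * K * Iinter + Ibc  # BC rows/cols replaced by the identity
    rhs = rhs - Iinter * (K - Ibc * K) * rhs  # move known values to the rhs
    sol = scipy.sparse.linalg.spsolve(K_BC, rhs)
    return sol  # sol[0] == 2.0 by construction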
|
nilq/baby-python
|
python
|
"""
A class to hold polytopes in H-representation.
Francesc Font-Clos
Oct 2018
"""
import numpy as np
class Polytope(object):
"""A polytope in H-representation."""
def __init__(self, A=None, b=None):
"""
Create a polytope in H-representation.
The polytope is defined as the set of
points x in Rn such that
A x <= b
"""
# dimensionality verifications
assert A is not None and b is not None
assert len(b.shape) == 1
assert len(A.shape) == 2
assert A.shape[0] == len(b)
# store data
self.A = A
self.b = b
self.dim = A.shape[1]
self.nplanes = A.shape[0]
self._find_auxiliar_points_in_planes()
def check_inside(self, point):
"""Check if a point is inside the polytope."""
checks = self.A@point <= self.b
check = np.all(checks)
return check
def _find_auxiliar_points_in_planes(self):
"""Find an auxiliar point for each plane."""
aux_points = [self._find_auxiliar_point(self.A[i],
self.b[i])
for i in range(self.nplanes)]
self.auxiliar_points = aux_points
def _find_auxiliar_point(self, Ai, bi):
"""Find an auxiliar point for one plane."""
p = np.zeros(self.dim)
j = np.argmax(Ai != 0)
p[j] = bi / Ai[j]
return p
|
nilq/baby-python
|
python
|
import udfalcon
def test_outputs_return_results():
assert isinstance(udfalcon.fly({'output': 'return', 'mode': 'test'}), dict)
|
nilq/baby-python
|
python
|
import os
import re
def main():
eventRegex = r'\s*\n*-{50,}\s*\n*'
placeRegex = r'(?i)(?:place|yer|location|mekan)\s*:\s+(.*?)\s*?[\n\r]'
dateRegex = r'(?i)(?:date|tarih|deadline)\s*:\s+(.*?)\s*?[\n\r]'
timeRegex = r'(?i)(?:time|zaman)\s*:\s+(.*?)\s*?[\n\r]'
testData = getTestData()
    for i in range(len(testData)):
        events = re.split(eventRegex, testData[i])
        os.makedirs("results/" + str(i), exist_ok=True)
        for j, event in enumerate(events):
            eventPlace = re.findall(placeRegex, event)
            eventDate = re.findall(dateRegex, event)
            eventTime = re.findall(timeRegex, event)
            with open("results/" + str(i) + "/" + str(j) + ".txt", "a") as file:
                file.write(event)
            # print("The event place is: {}, \nThe event date is: {}, \nThe event time is: {}, \nThe event is:\n {}".format(eventPlace, eventDate, eventTime, event))
def getTestData():
    """Loop through the test data files and return their contents as a list of strings."""
    testData = []
    for i in range(12):
        with open("testData/test" + str(i) + ".txt", "r") as file:
            text = file.readlines()
        testData.append("".join(text))
    return testData
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|