| content (string, lengths 0–1.05M) | origin (string, 2 classes) | type (string, 2 classes) |
|---|---|---|
from typing import Any, AsyncGenerator
from dependency_injector.wiring import Provide, inject
from graphql import GraphQLResolveInfo
from containers import SDContainer
from models import OnPathway
from .subscription_type import subscription
@subscription.source("onPathwayUpdated")
@inject
async def on_pathway_updated_generator(
_: Any = None,
info: GraphQLResolveInfo = None,
pathwayId: int = None,
includeDischarged: bool = False,
pub=Provide[SDContainer.pubsub_service]
) -> AsyncGenerator:
topic = pub.subscribe("on-pathway-updated")
async with topic as subscriber:
async for on_pathway in subscriber:
if int(on_pathway.pathway_id) == int(pathwayId):
# include discharged pathways only when explicitly requested
if includeDischarged or not on_pathway.is_discharged:
    yield on_pathway
@subscription.field("onPathwayUpdated")
async def on_pathway_updated_field(
obj: OnPathway = None,
info: GraphQLResolveInfo = None,
pathwayId: int = None,
includeDischarged: bool = None,
):
return obj
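# Note: the decorators above match Ariadne's SubscriptionType API, so
# subscription_type.py presumably contains something like the following
# (an assumption, shown only for context):
#
#   from ariadne import SubscriptionType
#   subscription = SubscriptionType()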
| nilq/baby-python | python |
import sys
import tableauserverclient as TSC
rob = TSC.RequestOptions.Builder()
ro = rob.file("==", "MyFile").project("==", "Default")._sort("foo", "desc")._pagesize(50)._build()
print(ro.filters)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# Detect tissue regions in a whole slide image.
#############################################################################
# Copyright Vlad Popovici <popovici@bioxlab.org>
#
# Licensed under the MIT License. See LICENSE file in root folder.
#############################################################################
from datetime import datetime
import hashlib
_time = datetime.now()
__author__ = "Vlad Popovici <popovici@bioxlab.org>"
__version__ = "1.0"
__description__ = {
'name': 'detect_tissue',
'unique_id' : hashlib.md5(str.encode('detect_tissue' + __version__)).hexdigest(),
'version': __version__,
'timestamp': _time.isoformat(),
'input': [None],
'output': [None],
'params': dict()
}
from tinydb import TinyDB, Query
import simplejson as json
import geojson as gjson
import configargparse as opt
import numpy as np
from pathlib import Path
from shapely.affinity import translate
from qpath.base import WSIInfo, MRI
from qpath.annot import Annotation
from qpath.mask import mask_to_external_contours
from qpath.tissue import detect_foreground
from qpath.utils import NumpyJSONEncoder
# minimum object sizes (areas, in px^2) for different magnifications to be considered as "interesting"
min_obj_size = {'0.3125': 1500, '1.25': 50000, '2.5': 100000, '5.0': 500000}
WORK_MAG_1 = 0.3125
WORK_MAG_2 = 2.5
def main():
p = opt.ArgumentParser(description="Detect tissue regions in a whole slide image.")
p.add_argument("--mri_path", action="store", help="root folder for the multiresolution image (ZARR format)",
required=True)
p.add_argument("--out", action="store",
help="JSON file for storing the resulting annotation (will be saved to ../annot/ relative to ZARR path)",
required=True)
p.add_argument("--annotation_name", action="store", help="name of the resulting annotation",
default="tissue", required=False)
p.add_argument("--min_area", action="store", type=int, default=None,
help="minimum area of a tissue region", required=False)
p.add_argument("--he", action="store_true", help="use H&E-specific method for detecting the objects")
p.add_argument("--track_processing", action="store_true",
help="should this action be stored in the <-RUN-detect_tissue.json> file for the slide?")
args = p.parse_args()
if args.min_area is None:
args.min_area = min_obj_size[str(WORK_MAG_2)]
else:
min_obj_size[str(WORK_MAG_2)] = args.min_area
in_path = Path(args.mri_path).expanduser().absolute()
out_path = (in_path.parent.parent / 'annot').expanduser().absolute()
__description__['params'] = vars(args)
__description__['input'] = [str(in_path)]
__description__['output'] = [str(out_path / args.out)]
if args.track_processing:
(out_path.parent / '.run').mkdir(exist_ok=True)
with open(out_path.parent / '.run' / 'run-detect_tissue.json', 'w') as f:
json.dump(__description__, f, indent=2)
# print(__description__)
wsi = WSIInfo(in_path)
img_src = MRI(in_path)
# use a two pass strategy: first detect a bounding box, then zoom-in and
# detect the final mask
level = wsi.get_level_for_magnification(WORK_MAG_1)
img = img_src.get_plane(level=level)
mask, _ = detect_foreground(img, method='fesi', min_area=min_obj_size[str(WORK_MAG_1)])
contours = mask_to_external_contours(mask, approx_factor=0.0001)
# find the bounding box of the contours:
ymin, xmin = img.shape[:2]  # shape is (rows, cols) = (height, width)
xmax, ymax = 0, 0
for c in contours:
minx, miny, maxx, maxy = c.geom.bounds
xmin = min(xmin, minx)
ymin = min(ymin, miny)
xmax = max(xmax, maxx)
ymax = max(ymax, maxy)
# some free space around the ROI and rescale to new magnification level:
f = WORK_MAG_2 / WORK_MAG_1
xmin = int(f * max(0, xmin - 5))
ymin = int(f * max(0, ymin - 5))
xmax = int(f * min(img.shape[1] - 1, xmax + 5))
ymax = int(f * min(img.shape[0] - 1, ymax + 5))
# print("ROI @{}x: {},{} -> {},{}".format(WORK_MAG_2, xmin, ymin, xmax, ymax))
level = wsi.get_level_for_magnification(WORK_MAG_2)
img = img_src.get_region_px(xmin, ymin,
width=xmax - xmin, height=ymax - ymin,
level=level, as_type=np.uint8)
# print("Image size 2: {}x{}".format(img.shape[0], img.shape[1]))
if args.he:
mask, _ = detect_foreground(img, method='simple-he', min_area=min_obj_size[str(WORK_MAG_2)])
else:
mask, _ = detect_foreground(img, method='fesi',
laplace_ker=15, gauss_ker=17, gauss_sigma=25.0,
morph_open_ker=5, morph_open_iter=7, morph_blur=17,
min_area=min_obj_size[str(WORK_MAG_2)])
contours = mask_to_external_contours(mask,
approx_factor=0.00005,
min_area=min_obj_size[str(WORK_MAG_2)])
# don't forget to shift detections by (xmin, ymin) to obtain coords in original space for
# this magnification level...
for c in contours:
c.geom = translate(c.geom, xoff=xmin, yoff=ymin)
c._name = "tissue"
# ...and get image extent at working magnification
img_shape = img_src.extent(level)
annot = Annotation(name=args.annotation_name,
image_shape={'height': int(img_shape[1]), 'width': int(img_shape[0])},
magnification=WORK_MAG_2)
annot.add_annotations(contours)
# get back to native magnification...
annot.set_magnification(wsi.get_native_magnification())
# ...and correct the image extent (due to rounding it may be off by a few pixels), since
# we actually know it:
img_shape = img_src.extent(0)
annot._image_shape = dict(width=img_shape[0], height=img_shape[1])
with open(out_path / args.out , 'w') as f:
gjson.dump(annot.asGeoJSON(), f, cls=NumpyJSONEncoder)
annot_idx = out_path.parent / '.annot_idx.json'
with TinyDB(annot_idx) as db:
q = Query()
r = db.search(q.unique_id == __description__['unique_id'])
if len(r) == 0:
# empty DB or no such record
db.insert({'unique_id' : __description__['unique_id'],
'annotator': __description__['name'], 'parameters': __description__['params']})
else:
db.update({'annotator': __description__['name'], 'parameters': __description__['params']},
q.unique_id == __description__['unique_id'])
return
##
if __name__ == '__main__':
main()
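# Example invocation, assuming this script is saved as detect_tissue.py
# (the paths below are illustrative):
#
#   python detect_tissue.py --mri_path ~/slides/case01.zarr \
#       --out case01_tissue.geojson --he --track_processing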
| nilq/baby-python | python |
numbers = []
posbetter = []
poslower = []
for _ in range(5):
    numbers.append(float(input('Type a number: ')))
highest, lowest = max(numbers), min(numbers)  # compute once instead of on every iteration
for pos, value in enumerate(numbers):
    if value == highest:
        posbetter.append(pos)
    if value == lowest:
        poslower.append(pos)
print(f'The highest number typed was {highest} at position(s) {posbetter}')
print(f'The lowest number typed was {lowest} at position(s) {poslower}')
| nilq/baby-python | python |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import statsmodels.formula.api as smf
import statsmodels.api as sm
import pickle
#df = pd.read_pickle('sav.txt')
X = pd.read_pickle('Xpart.csv')  # despite the .csv name, the file holds a pickled DataFrame
Y = pd.read_pickle('Ypart.csv')
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
## Prediction
model = LinearRegression()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
## RMSE
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print('RMSE: ', rmse)
r2 = round(model.score(x_test, y_test) * 100, 2)
print('R^2 score: ', r2, '%')  # LinearRegression.score returns R^2, not classification accuracy
## Calculate the new OLS
x_train = sm.add_constant(x_train)
fit_new = sm.OLS(y_train, x_train).fit()
ols_print = fit_new.summary()
print('new OLS: ', ols_print)
## save
with open('summary', 'wb') as f:
pickle.dump([rmse, fit_new],f)
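# Quick round-trip check: reload the two objects just saved (same order as the dump).
with open('summary', 'rb') as f:
    rmse_loaded, fit_loaded = pickle.load(f)
print('reloaded RMSE: ', rmse_loaded)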
| nilq/baby-python | python |
from numbers import Number
import numpy as np
import sympy
from collections.abc import Mapping
from sympy import Symbol
from sympy.core.relational import Relational
from toy.unit import DIMENSIONLESS, parse_unit_msg
from toy.utils import as_dict, is_numeric
from toy.core.value import Value
base_model = None
class ModelMeta(type):
"""
Metaclass for all Model subclasses.
"""
@classmethod
def __prepare__(mcs, name, bases):
if base_model is None:
return {}
return Environment()
def __new__(mcs, name, bases, ns):
global base_model
if base_model is None:
base_model = type.__new__(mcs, name, bases, ns)
return base_model
else:
return type.__new__(mcs, name, bases, ns.finalize())
class Environment(Mapping):
"""
A mapping that interprets a Model class creation.
"""
def __init__(self, ns=None, values=None, equations=None, invariants=None):
self.namespace = as_dict(ns)
self.values = as_dict(values)
self.equations = as_dict(equations)
self.invariants = as_dict(invariants)
self.symbols = {'t'}
self.namespace['t'] = Symbol('t')
self.namespace['values'] = self.values
self.namespace['equations'] = self.equations
self.namespace['invariants'] = self.invariants
self.lower = {}
self.upper = {}
def __iter__(self):
yield from self.namespace
def __getitem__(self, key):
return self.namespace[key]
def __len__(self):
return len(self.namespace)
def __setitem__(self, k, v):
if k.startswith('_'):
self.namespace[k] = v
elif k.startswith('D_'):
self.declare_derivative(k[2:], v)
elif k == 'bounds':
self.declare_bounds(v)
else:
self.declare_value(k, v)
def _add_symbol(self, name):
self.symbols.add(name)
self.namespace[name] = Symbol(name, real=True)
def declare_derivative(self, name, spec):
"""
A derivative declaration automatically promotes the value to a dynamic
variable. Spec can be a constant, a function, or an expression.
"""
if name not in self.values:
raise TypeError(f'derivative of unknown variable: D_{name}')
self.equations[name] = spec
self._add_symbol('D_' + name)
def declare_value(self, name, value):
"""
Value declarations can have many different forms
``name = value``:
...
``name = value, '[unit] description'``:
...
"""
unit = DIMENSIONLESS
msg = ''
if isinstance(value, tuple):
value, spec = value
unit, msg = parse_unit_msg(spec)
if not isinstance(value, Value):
value = Value(name, value, unit=unit, description=msg)
self.values[name] = value
self._add_symbol(name)
def declare_bounds(self, bounds):
"""
Declare variable bounds.
"""
# Lists are spliced
if isinstance(bounds, (list, tuple, set)):
for boundary in bounds:
self.declare_bounds(boundary)
return
# Bound is expressed as an inequality such as x > y
if isinstance(bounds, Relational):
self._declare_relational_bound(bounds)
else:
raise NotImplementedError(bounds)
def _declare_relational_bound(self, bound):
lhs, rhs = bound.lhs, bound.rhs
# Auxiliary functions
gt = lambda x: isinstance(x, (sympy.StrictGreaterThan, sympy.GreaterThan))
lt = lambda x: isinstance(x, (sympy.StrictLessThan, sympy.LessThan))
# Normalize "number {op} expr" or "other {op} symbol"
if is_numeric(lhs) or isinstance(rhs, Symbol):
rhs, lhs = lhs, rhs
# symbol {op} number
if isinstance(lhs, Symbol) and is_numeric(rhs):
if gt(bound):
self.lower[str(lhs)] = rhs
elif lt(bound):
self.upper[str(lhs)] = rhs
else:
raise TypeError(f'invalid bound expression: {bound}')
# Other bound
else:
raise NotImplementedError(bound)
def finalize(self):
"""
Return a namespace dictionary used to create the Model subclass.
"""
symbols = self.symbols
return {k: v for k, v in self.namespace.items() if k not in symbols}
| nilq/baby-python | python |
import cv2
import time
import csv
import os
import picamera
import picamera.array
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(36, GPIO.OUT)
GPIO.setup(33, GPIO.OUT)
GPIO.setup(10, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
def gpio_fun():
val = ""
if GPIO.input(36) == 1:
## print("Left")
val+="1"
if GPIO.input(33) == 1:
## print("Right")
val += "2"
if GPIO.input(10) == 1:
## print("Back")
val += "3"
if GPIO.input(11) == 1:
## print("Forward")
val += "4"
if 1 != (GPIO.input(36) or GPIO.input(33) or GPIO.input(10) or GPIO.input(11)):
## print("Open")
val += "0"
print(val)
return val
img_dir = './training_data'
filename = 'training_data.csv'
if not os.path.exists(img_dir):
os.makedirs(img_dir)
training_start_time = time.time()
start_time= int(time.time())
try:
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as stream:
camera.resolution = (320, 240)
while True:
camera.capture(stream, 'bgr', use_video_port=True)
cv2.imshow("video_frames", stream.array)
stop_time = int(time.time())
print(stop_time - start_time)
if int(stop_time - start_time) > 0:
start_time = stop_time
print("TIME PER SEC ",int(time.time()))
print("part2",stop_time - start_time)
# Writing the frame
cv2.imwrite(img_dir + '/img-{}.jpg'.format(int(time.time())), stream.array)
print("image write done")
# Writing the csv row
row = []
row.append('img-{}'.format(int(time.time())))
row.append(gpio_fun())
with open(filename, 'a', newline='') as csvFile:
    writer = csv.writer(csvFile)
    writer.writerow(row)  # the with block closes the file automatically
# reset the stream before the next capture
stream.seek(0)
stream.truncate()
# Press the ESC key to exit the loop
k = cv2.waitKey(30) & 0xff
if k ==27:
break
except Exception as e:
print("type err ", e)
cv2.destroyAllWindows()
print("TOTAL TRAINING TIME CAPTURED ",time.time()-training_start_time)
GPIO.cleanup()
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""Module with utility functions to call the GPT translation API"""
import json
import requests
# ==============================================================================
# CONSTANT DEFINITION
# ==============================================================================
API_EXCEPTIONS = (requests.HTTPError,)
# ==============================================================================
# CLASS AND FUNCTION DEFINITION
# ==============================================================================
class GPTClient:
def __init__(self, api_key) -> None:
self.api_key = api_key
self.url = "https://gpt-text-generation.p.rapidapi.com/complete"
self.host = "gpt-text-generation.p.rapidapi.com"
def format_prompt(self, text, task, input_desc, output_desc, example_in=None, example_out=None):
"""
Returns prompt of form:
Correct grammar mistakes.
Original: Where do you went?
Standard American English: Where did you go?
Original: Where is you?
Standard American English:
Args:
    text: The input text to be inserted into the prompt
    task: The task for GPT, e.g. Correct grammar mistakes.
input_desc: Description of input column
output_desc: Description of output column
example_in: Example of an input text
example_out: Example of desired output text
Returns:
prompt: Formatted prompt
"""
### Preprocess
text = text.replace("\n", "")
### Put all together
task_prompt = f"{task}\n\n"
if example_in:
example_prompt = f"{input_desc}: {example_in}\n{output_desc}: {example_out}\n\n"
else:
example_prompt = ""
final_prompt = f"{input_desc}: {text}\n{output_desc}:"
full_prompt = task_prompt + example_prompt + final_prompt
return full_prompt
def generate(
self,
text,
task,
input_desc,
output_desc,
example_in=None,
example_out=None,
temperature=0.8,
):
"""
Generates Text.
"""
prompt = self.format_prompt(text, task, input_desc, output_desc, example_in, example_out)
print("Sending prompt:")
print(prompt)
response = requests.post(
url=self.url,
data=json.dumps({"prompt": prompt, "temperature": temperature}),
headers={
"content-type": "application/json",
"x-rapidapi-key": self.api_key,
"x-rapidapi-host": self.host,
},
)
if "generation" in response.text:
# Returns text from the response object which is a json string, so no need to dump it into json anymore
return response.text
else:
# Extract & send error related information
user_message = (
"Encountered the following error while sending an API request:"
+ f" Error Code: {response.status_code}"
+ f" Error message: {response.text}"
)
raise requests.HTTPError(user_message)
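# Sketch of typical usage (the API key is a placeholder):
#
#   client = GPTClient(api_key="YOUR_RAPIDAPI_KEY")
#   corrected = client.generate(
#       text="Where is you?",
#       task="Correct grammar mistakes.",
#       input_desc="Original",
#       output_desc="Standard American English",
#       example_in="Where do you went?",
#       example_out="Where did you go?",
#   )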
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 14:33:34 2019
"""
import os
def walklevel(some_dir, level=1):
    """Like os.walk, but descends at most `level` directory levels below some_dir."""
    some_dir = some_dir.rstrip(os.path.sep)
    assert os.path.isdir(some_dir)
    num_sep = some_dir.count(os.path.sep)
    for root, dirs, files in os.walk(some_dir):
        yield root, dirs, files
        num_sep_this = root.count(os.path.sep)
        if num_sep + level <= num_sep_this:
            del dirs[:]  # prune: stop os.walk from descending any deeper
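# Example: walk only one level below the current directory
# (os.walk alone would recurse all the way down):
#
#   for root, dirs, files in walklevel('.', level=1):
#       print(root, dirs)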
# -*- coding: utf-8 -*-
from os import listdir
from numpy import sqrt
from numpy import log
from numpy import sum
from numpy import dot
from numpy import loadtxt
from numpy import zeros
from numpy import eye
from numpy import maximum
from pandas import DataFrame
from pandas import concat
MY_EPSILON = 1e-20
def buildLabeledDatabaseFromDirectory(directory_location):
"""
@brief : build a pandas database to perform learning on subtrees.
@param directory_location : path to folder containing all patients to load in the database
For each patient, load all descriptors with legends and all probabilistic labels with legend.
The directory_location folder is expected to contain one folder per patient.
Each patient folder is expected to contain one folder per acquisition (case).
Each case is expected to contain the following:
- subtree_descriptors.txt : containing descriptors for every subtree
- subtree_label_probabilities.txt : containing all label probabilities for every subtree
- computed_descriptors_meaning.txt : containing all descriptors meaning (legend)
- label_names.txt : containing all labels meaning (legend)
"""
differentPatients = listdir(directory_location)
PandasDB = DataFrame()
all_features = set({})
all_labels = set({})
for patDir in differentPatients:
subtreeDir = listdir(directory_location + "/" + patDir)
for caseDir in subtreeDir:
currLoc2read = directory_location + "/" + patDir + "/" + caseDir
curr_case_X = loadtxt(currLoc2read + "/subtree_descriptors.txt")
curr_case_Y = loadtxt(currLoc2read + "/subtree_label_probabilities.txt")
with open(currLoc2read + "/computed_descriptors_meaning.txt", "r") as descriptorsLegendFile:
currDescriptorsLegend = descriptorsLegendFile.read().split()
for leg in currDescriptorsLegend:
all_features.add(leg)
curr_dataframe_X = DataFrame(curr_case_X, columns=currDescriptorsLegend)
curr_dataframe_X["patient"] = patDir
curr_dataframe_X["case"] = caseDir
(curr_case_label_indexes, curr_case_label_names) = loadLabelLegend(currLoc2read + "/label_names.txt")
# add read labels to the set of all labels
for lab in curr_case_label_names:
all_labels.add(lab)
curr_dataframe_Y = DataFrame(curr_case_Y, columns=curr_case_label_names)
curr_dataframe = concat([curr_dataframe_X,curr_dataframe_Y], axis=1)
PandasDB = concat([PandasDB, curr_dataframe])  # DataFrame.append was removed in pandas 2.0
all_features = list(all_features)
all_labels = list(all_labels)
return (PandasDB, all_features, all_labels)
def loadLabelLegend(label_legend_location):
res_label_legend = []
res_label_index = []
with open(label_legend_location, "r") as labelLegendFile:
for line in labelLegendFile.read().splitlines():
splitRes = line.split(" : ")
res_label_index.append(int(splitRes[0]))
res_label_legend.append(splitRes[1].replace(" ", "_"))
return (res_label_index, res_label_legend)
def loadSubtreeDescriptors(subtree_descriptors_location, subtree_descriptors_meaning_location):
descriptors = loadtxt(subtree_descriptors_location)
with open(subtree_descriptors_meaning_location, "r") as descriptorsLegendFile:
currDescriptorsLegend = descriptorsLegendFile.read().split()
descriptorsDataframe = DataFrame(descriptors, columns=currDescriptorsLegend)
return descriptorsDataframe
def computePatientLengthConfusionMatrix(patient_prediction_location, patient_ground_truth_location, labels_names_file):
"""
@brief: Compute the patient length confusion matrix given the location of its prediction and ground truth.
@param patient_prediction_location : folder containing the prediction data
@param patient_ground_truth_location : folder containing the ground truth data
@param labels_names_file : file containing the name of the labels (stored as integer)
We define the length confusion matrix as the confusion matrix where branches contribute in proportion to their length.
Length is computed based on the branches stored in patient_ground_truth_location.
The matrix is defined with the following convention:
- each line corresponds to a given prediction class
- each column corresponds to a given ground truth class
Both folders are assumed to have a particular hierarchy:
- The folder patient_ground_truth_location:
* all branches named "branch????.txt"
* a "branch_labels.txt" file
- The folder patient_prediction_location:
* all branches named "branch????.txt"
* a file "recomputed_labels.txt"
N.B. It is assumed that the number of branches in both folders is identical and that the files storing labels have the same number of lines.
"""
# Loading:
ground_truth_couple_branchID_labelNb = loadtxt(patient_ground_truth_location + "/branch_labels.txt")
prediction_couple_branchID_labelNb = loadtxt(patient_prediction_location + "/recomputed_labels.txt")
(label_index, label_legend) = loadLabelLegend(labels_names_file)
# Assert that all sizes are correct
assert (len(prediction_couple_branchID_labelNb) == len(ground_truth_couple_branchID_labelNb))
# Compute length for all branches
branch_length = []
for branch_index in prediction_couple_branchID_labelNb[:,0]:
curr_branch = loadtxt(patient_ground_truth_location + "/branch" + format(int(branch_index), '04d') + ".txt")
if len(curr_branch) == 0:
# print("Ignoring empty branch in computePatientLengthConfusionMatrix")
curr_length = 0.0
else:
curr_XYZ = curr_branch[:,0:-1] # Ignore the radius when computing the length
diff_XYZ = curr_XYZ[1:] - curr_XYZ[0:-1]
elementary_distances = sqrt(sum(diff_XYZ * diff_XYZ, axis=1))
curr_length = sum(elementary_distances)
branch_length.append(curr_length)
# Add the unknown label if not present:
if label_index.count(-1) == 0:
label_index.append(-1)
label_legend.append("Unknown")
# compute confusion matrix
nb_labels = len(label_legend)
nb_branches = ground_truth_couple_branchID_labelNb.shape[0]
resulting_confusion_matrix = zeros((nb_labels, nb_labels))
for branchID in range(nb_branches):
int_label_GT = int(ground_truth_couple_branchID_labelNb[branchID,1])
int_label_pred = int(prediction_couple_branchID_labelNb[branchID,1])
index_GT = label_index.index(int_label_GT)
index_pred = label_index.index(int_label_pred)
resulting_confusion_matrix[index_pred, index_GT] += branch_length[branchID]
# return the confusion matrix with legend
return (resulting_confusion_matrix, label_legend)
def computePatientConfusionMatrix(patient_prediction_location, patient_ground_truth_location, labels_names_file):
"""
@brief: Compute the patient confusion matrix given the location of its prediction and ground truth.
@param patient_prediction_location : folder containing the prediction data
@param patient_ground_truth_location : folder containing the ground truth data
@param labels_names_file : file containing the name of the labels (stored as integer)
We define the confusion matrix as the length confusion matrix with column normalization.
It represents the repartition (ratio) of predicted labels for a given GT label.
As for the length confusion matrix, it is defined with the following convention:
- each line corresponds to a given prediction class
- each column corresponds to a given ground truth class
Both folders are assumed to have a particular hierarchy:
- The folder patient_ground_truth_location:
* all branches named "branch????.txt"
* a "branch_labels.txt" file
- The folder patient_prediction_location:
* all branches named "branch????.txt"
* a file "recomputed_labels.txt"
N.B. It is assumed that the number of branches in both folders is identical and that the files storing labels have the same number of lines.
"""
# compute the patient length confusion matrix:
(resulting_confusion_matrix, label_legend) = computePatientLengthConfusionMatrix(patient_prediction_location, patient_ground_truth_location, labels_names_file)
# normalize each column:
totalColumnLength = sum(resulting_confusion_matrix, axis=0)
totalColumnLength = maximum(totalColumnLength, MY_EPSILON) # prevent 0-division
resulting_confusion_matrix /= totalColumnLength
# return the confusion matrix with legend
return (resulting_confusion_matrix, label_legend)
def PatientClassificationMetric(confusion_matrix):
"""
@brief : compute the classification metric on a patient from its confusion matrix
The metric is defined as the ratio between the length of *badly* annotated branches over the total length of branches.
@param confusion_matrix : length confusion matrix (as computed in utils.computePatientLengthConfusionMatrix
"""
(nb_line, nb_col) = confusion_matrix.shape
assert nb_line == nb_col
total_length = sum(confusion_matrix)
confusion_without_diagonal = confusion_matrix * (1 - eye(nb_line))
bad_annotation_length = sum(confusion_without_diagonal)
return (bad_annotation_length / total_length)
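# Tiny worked example with illustrative numbers: for the 2x2 length confusion
# matrix [[3, 1], [0, 2]], the total length is 6 and the off-diagonal (badly
# annotated) length is 1, so the metric is 1/6 ~ 0.167:
#
#   from numpy import array
#   PatientClassificationMetric(array([[3.0, 1.0], [0.0, 2.0]]))  # -> 0.1666...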
def plotConfusionMatrix(confusion_matrix, label_legend):
""" Plot a length confusion matrix with legend """
from matplotlib.pyplot import imshow, xticks, yticks, show, figure, xlabel, ylabel, colorbar
figure()
imshow(confusion_matrix)
xticks(range(len(label_legend)), label_legend, rotation='vertical')
xlabel("GT label")
ylabel("Predicted label")
yticks(range(len(label_legend)), label_legend)
colorbar()
show()
def mergeLabelsInDatabase(PandasDB, first_label_to_merge, second_label_to_merge, merged_label_name):
assert isinstance(PandasDB, DataFrame)
merged_column = DataFrame(PandasDB[first_label_to_merge] + PandasDB[second_label_to_merge], columns=[merged_label_name])
PandasDB = PandasDB.drop(columns=[first_label_to_merge, second_label_to_merge])
PandasDB = concat([PandasDB, merged_column], axis=1)
return PandasDB
def manualFeatureGaussianization(PandasDB):
"""
Heuristically defined transformation of features used to "Gaussianize" their distributions.
In practice, sqrt, log and identity are used.
"""
assert isinstance(PandasDB, DataFrame)
identity = lambda x : x
logCompatibleWithZero = lambda x : log(1.0 + x)
transformation2apply = {}
transformation2apply["length"] = sqrt
transformation2apply["end_points_length"] = sqrt
transformation2apply["average_radius"] = identity
transformation2apply["stdev_radius"] = identity
transformation2apply["average_curvature"] = identity
transformation2apply["stdev_curvature"] = identity
transformation2apply["invariant_moment_1"] = logCompatibleWithZero
transformation2apply["invariant_moment_2"] = logCompatibleWithZero
transformation2apply["invariant_moment_3"] = logCompatibleWithZero
transformation2apply["lambda_1"] = identity
transformation2apply["lambda_2"] = identity
transformation2apply["lambda_3"] = identity
transformation2apply["principal_direction_x"] = identity
transformation2apply["principal_direction_y"] = identity
transformation2apply["principal_direction_z"] = identity
transformation2apply["average_endpoint_direction_x"] = identity
transformation2apply["average_endpoint_direction_y"] = identity
transformation2apply["average_endpoint_direction_z"] = identity
for feature in PandasDB.columns:
    if feature in transformation2apply:
        PandasDB[feature] = PandasDB[feature].apply(transformation2apply[feature])
def extractFeatures(PandasDB, invariant="rotation_translation"):
"""
Extract a set of features used to perform classification based on their invariance to some transformations.
Admissible invariants are:
- rotation_translation
- translation
- none
By default, rotation and translation invariant are used
"""
assert isinstance(PandasDB, DataFrame)
if invariant == "rotation_translation":
features = ["length" , "end_points_length" , "average_radius" , "stdev_radius" , "average_curvature" , "stdev_curvature" , "invariant_moment_1" , "invariant_moment_2" , "invariant_moment_3" , "lambda_1" , "lambda_2" , "lambda_3" ]
elif invariant == "translation":
features = ["length" , "end_points_length" , "average_radius" , "stdev_radius" , "average_curvature" , "stdev_curvature" , "invariant_moment_1" , "invariant_moment_2" , "invariant_moment_3" , "lambda_1" , "lambda_2" , "lambda_3" , "principal_direction_x" , "principal_direction_y" , "principal_direction_z" , "average_endpoint_direction_x" , "average_endpoint_direction_y" , "average_endpoint_direction_z"]
else :
features = ["length" , "end_points_length" , "average_radius" , "stdev_radius" , "average_curvature" , "stdev_curvature" , "invariant_moment_1" , "invariant_moment_2" , "invariant_moment_3" , "lambda_1" , "lambda_2" , "lambda_3" , "principal_direction_x" , "principal_direction_y" , "principal_direction_z" , "average_endpoint_direction_x" , "average_endpoint_direction_y" , "average_endpoint_direction_z", 'baricenter_x', 'baricenter_y', 'baricenter_z', 'weighted_baricenter_x', 'weighted_baricenter_y', 'weighted_baricenter_z']
return DataFrame(PandasDB, columns=features)
def compute_balanced_sample_weight(Y):
"""
Compute weights to apply to each example Y to have balanced training.
This function extends the function sklearn.utils.class_weight.compute_class_weight
to Y that are not 1-hot encoded but with probabilities.
"""
sum_of_proba = sum(Y, axis=0)
total = sum(sum_of_proba)
nb_classes = Y.shape[1]
class_weight = total / (float(nb_classes) * sum_of_proba + MY_EPSILON)
sample_weight = dot(Y,class_weight)
return(sample_weight)
def sample_weighted_mean_squared_error(Y_true, Y_pred):
weights = compute_balanced_sample_weight(Y_true)
sumSquared = ((Y_pred - Y_true)**2).mean(axis=1)
weightedSum = sum(sumSquared * weights, axis=0)
res = weightedSum / sum(weights)
return res
def keras_sample_weighted_mean_squared_error(Y_true, Y_pred):
import keras.backend as K
sum_of_proba = K.sum(Y_true, axis=0)
total = K.sum(sum_of_proba)
nb_cases = K.shape(Y_true)[0]
nb_classes = K.shape(Y_true)[1]
class_weight = total / ( K.cast(nb_classes, dtype='float32') * sum_of_proba + MY_EPSILON)
weights = K.dot(Y_true, K.reshape(class_weight, (nb_classes, -1)))
sumSquared = K.mean((Y_pred - Y_true)**2, axis=1)
weightedSum = K.sum(K.reshape(sumSquared, (nb_cases, -1)) * weights)
res = weightedSum / K.sum(weights)
return res
| nilq/baby-python | python |
import urequests
class InfluxDBClient():
def __init__(self,url, token, org, bucket):
self.url=url
self.token=token
self.org=org
self.params={"bucket":bucket ,"org":org}
self.headers={"Authorization":"Token {}".format(token)}
self.conveq=lambda x: ["{}={}".format(k,v) for k,v in x.items()]
def write(self, point, measurement, tag=None):
    # Build an InfluxDB line-protocol string:
    #   <point>[,<tag_key>=<tag_value>...] <field_key>=<field_value>[,...]
    # (here `point` is the measurement name and `measurement` holds the fields)
    if not tag: tag = {}
    data = [str(point)] + self.conveq(tag)
    data = ",".join(data)
    data = data + " " + ",".join(self.conveq(measurement))
    params = "&".join(self.conveq(self.params))
    response = urequests.post(
        self.url + '/api/v2/write?' + params, data=data,
        headers=self.headers)
    return response
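# Sketch of typical usage on a MicroPython board (URL, token, org and bucket
# are placeholders):
#
#   client = InfluxDBClient("http://influx.local:8086", "MY_TOKEN", "my-org", "sensors")
#   client.write("room", {"temperature": 21.5}, tag={"location": "kitchen"})
#   # posts the line protocol: room,location=kitchen temperature=21.5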
| nilq/baby-python | python |
'''
Calculate the astrometric motion of a star over a given time frame using JPL Horizons
ephemerides and SIMBAD proper motion and parallax.
'''
from __future__ import print_function
import matplotlib
matplotlib.use('agg')
import csv
import jplephem
import de421
from jplephem.spk import SPK
kernel = SPK.open('de430.bsp')
import numpy as np
import matplotlib.pyplot as pl
import astropy.units as u
from astropy.constants import G
from astropy.time import Time
from astropy.coordinates import SkyCoord
from datetime import datetime
import matplotlib.ticker as ticker
import warnings
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
warnings.filterwarnings('ignore', category=UserWarning)
startTime = datetime.now()
def convert_coords(array):
RADEC = SkyCoord(array[0],array[1],frame='icrs')
RA = RADEC.ra.rad
DEC = RADEC.dec.rad
return RA, DEC
def proj_RA_DEC(RA, DEC, pm_ra,pm_dec,prlx,epoch,JD):
"""
Takes the star's ra, dec, parallax, proper motion and a given epoch.
Returns the parallax and proper motion shift in star's position, in milliarcseconds (sexagesimal seconds for raoff; decimal seconds for decoff)
INPUTS:
RA -- RA position of star, in radians
DEC -- DEC position of star, in radians
parallax -- parallax of star, in MILLIarcseconds (1000.0*1/distance_pc)
epoch -- epoch (decimal years) to compute parallax offset (scalar or monotonically increasing vector)
OUTPUTS:
prop_DEC -- Declination of each point along a time baseline
prop_RA -- Right Ascension of each point along a time baseline
prop_par_DEC -- parallax shift in star's position, in milliarcseconds
prop_par_RA -- parallax shift in star's position, in milliarcseconds
"""
x,y,z = earthPos(JD) # get earth geocenter ephemerides using jplephem package
prop_DEC = DEC + pm_dec * epoch  # propagate the DEC argument with proper motion
if np.size(prop_DEC)>1:
prop_RA = RA + pm_ra * epoch/np.cos(prop_DEC[0]) # rads
else:
prop_RA = RA + pm_ra * epoch/np.cos(prop_DEC) # rads
prop_par_RA = prop_RA + prlx/np.cos(prop_DEC)*(x*np.sin(prop_RA) - y*np.cos(prop_RA))
prop_par_DEC = prop_DEC + prlx*(x*np.cos(prop_RA)*np.sin(prop_DEC) + y*np.sin(prop_RA)*np.sin(prop_DEC) - z* np.cos(prop_RA))
return prop_DEC,prop_RA,prop_par_RA,prop_par_DEC
def earthPos(JD):
eph = jplephem.Ephemeris(de421)
barycenter = eph.position('earthmoon', JD)/ 1.49597870700e+8
moonvector = eph.position('moon', JD) / 1.49597870700e+8
earthPos = (barycenter - moonvector * eph.earth_share)
x = earthPos[0,:]
y = earthPos[1,:]
z = earthPos[2,:]
return x,y,z
def mas2rad(mas):
return mas*2*np.pi/1000/3600/360 # rads
def rad2mas(rad):
return rad*360/2/np.pi*3600*1000
def Sec(theta):
return 1/np.cos(theta)
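# Quick sanity check of the unit helpers: 1 mas is about 4.8481e-9 rad,
# and the two conversions invert each other exactly:
#   rad2mas(mas2rad(1.0))  # -> 1.0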
# Set these values for each system
target = 'HAT-P-7'
params = 'TRENDS_ast_params.txt'
data_name = target+ '_ast.txt'
npoints = 25  # number of points that makes up the "tornado path"
## Read in Data
labels = np.loadtxt(params, delimiter=',', dtype=str, usecols=[0])
ICRS = np.loadtxt(params, delimiter=',', dtype=str, usecols=[1, 2])
values = np.loadtxt(params, delimiter=',', dtype=float, usecols=[3, 4, 5, 6, 7, 8])
data = np.loadtxt(data_name, delimiter=',', dtype=float)
a = np.where(np.char.find(labels, target) > -1) # string comparison
ind = a[0][0] # save index and strip extra array things. not the cleanest code
# Convert input params into radians
RA_J2000, DEC_J2000 = convert_coords(ICRS[ind,:]) # convert coordinates to radians
values = mas2rad(values) # convert everything else to radians now
# Assign SIMBAD parameters individual names
prlx = values[0][0] # mas
dprlx = values[0][1] # mas
pm_ra = values[0][2] # mas/yr
dpm_ra = values[0][3] # mas/yr
pm_dec = values[0][4] # mas/yr
dpm_dec = values[0][5] # mas/yr
# Assign measured astrometry parameters individual names
D0_NS = data[0][3] # NS initial positions - mas
D0_EW = data[0][1] # EW initial positions - mas
JD = data[:,0] # full JD
NS = data[:,3] # NS data
EW = data[:,1] # EW data
dEW = data[:,2] # EW error
dNS = data[:,4] # NS error
# Convert JD into decimal year
t = Time(JD,format = 'jd').decimalyear
# create time baseline for tornado path
# dt = np.max(t)-np.min(t)
# tl = np.linspace(np.min(t),np.max(t),npoints)
tl = np.linspace(np.floor(np.min(t)), np.ceil(np.max(t)), npoints)
JDL = Time(tl,format = 'decimalyear').jd
# Print out system parameters... don't really need this anymore
print("""Astrometric parameters in radians:
Name : {0}
RA_J2000 = {1}
DEC_J2000 = {2}
Prlx = {3} +/- {4}
pm_ra = {5} +/- {6}
pm_dec = {7} +/- {8}
D_NS = {9}
D_EW = {10}
""".format(labels[ind],RA_J2000,DEC_J2000,prlx,dprlx,pm_ra,dpm_ra,pm_dec,dpm_dec,D0_NS,D0_EW))
# position = kernel[0,3].compute(JD)/ 1.49597870700e+8
# print(position)
# Use jplephem with de421 to determine position of Earth geocenter wrt SS Barycenter
# earthPos = np.loadtxt('HAT-P-7.txt', delimiter='\t', dtype=np.float)
# print(earthPos)
# print(earthPos)
# Combine proper motion and parallax with RA and DEC to project star path in time
# Calculate initial positions
x0,y0,z0=earthPos(JD[0])
_,_,RA_start,DEC_start = proj_RA_DEC(RA_J2000, DEC_J2000, pm_ra,pm_dec,prlx,t[0],JD[0])#,x0,y0,z0)
# Calculate full curves
x,y,z = earthPos(JDL)
prop_DEC,prop_RA,prop_par_RA,prop_par_DEC = proj_RA_DEC(RA_J2000, DEC_J2000, pm_ra,pm_dec,prlx,tl,JDL)#,x,y,z)
# pl.figure()
# pl.plot(prop_DEC,tl)
# pl.show()
# Error Propagation
dDEC = mas2rad(dNS[0])
dRA = mas2rad(dEW[0])
# RA Error
sigma_ra_2 = Sec(DEC_J2000 + pm_dec*tl)**2 *(dprlx**2 * (y*np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)) - x*np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2 +\
dpm_ra**2*prlx**2*tl**2*Sec(DEC_J2000 + pm_dec*tl)**2 * (x*np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)) + y*np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2 +\
dpm_dec**2*prlx**2*tl**2*(np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl))*(-y + pm_ra*tl*x*Sec(DEC_J2000 + pm_dec*tl)) + (x + pm_ra*tl*y*Sec(DEC_J2000 + pm_dec*tl))*\
np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2*np.tan(DEC_J2000 + pm_dec*tl)**2)+\
prlx**2*Sec(DEC_J2000 + pm_dec*tl)**2 *( dRA**2*(x*np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)) + y*np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2 + dDEC**2*\
(np.cos(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl))*(-y + pm_ra*tl*x*Sec(DEC_J2000 + pm_dec*tl)) + (x + pm_ra*tl*y*Sec(DEC_J2000 + pm_dec*tl))*\
np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)))**2*np.tan(DEC_J2000 + pm_dec*tl)**2)
sigma_ra = np.sqrt(rad2mas(np.sqrt(sigma_ra_2))**2+np.min(dEW)**2)
# DEC Error
sigma_dec_2=dprlx**2 *(np.cos(RA_J2000 + pm_ra* tl* Sec(DEC_J2000 + pm_dec* tl))* (-z + x* np.sin(DEC_J2000 + pm_dec* tl)) + y *np.sin(DEC_J2000 + pm_dec* tl)\
*np.sin(RA_J2000 + pm_ra *tl *Sec(DEC_J2000 + pm_dec* tl)))**2 + dRA**2 *prlx**2 *(y *np.cos(RA_J2000 + pm_ra* tl *Sec(DEC_J2000 + pm_dec* tl))\
*np.sin(DEC_J2000 + pm_dec* tl) + (z - x *np.sin(DEC_J2000 + pm_dec* tl))*np.sin(RA_J2000 + pm_ra *tl *Sec(DEC_J2000 + pm_dec* tl)))**2\
+dpm_ra**2 *prlx**2* tl**2* (z *Sec(DEC_J2000 + pm_dec* tl) *np.sin(RA_J2000 +pm_ra*tl *Sec(DEC_J2000 + pm_dec*tl)) + \
(y* np.cos(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)) - x* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)))*np.tan(DEC_J2000 + pm_dec*tl))**2\
+dDEC**2*prlx**2*(np.cos(DEC_J2000 +pm_dec*tl)* (x *np.cos(RA_J2000 + pm_ra*tl *Sec(DEC_J2000 + pm_dec*tl)) +y* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)))\
+pm_ra*tl* np.tan(DEC_J2000 +pm_dec*tl)* (z* Sec(DEC_J2000 + pm_dec*tl)*np.sin(RA_J2000 + pm_ra*tl*Sec(DEC_J2000 + pm_dec*tl)) + \
(y* np.cos(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)) - x* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl))) *np.tan(DEC_J2000 + pm_dec*tl)))**2 +\
dpm_dec**2* prlx**2* tl**2* (np.cos(DEC_J2000 +pm_dec*tl)* (x* np.cos(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)) +y *np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)))\
+pm_ra*tl *np.tan(DEC_J2000 + pm_dec*tl)* (z* Sec(DEC_J2000 + pm_dec*tl)* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl)) + (y* np.cos(RA_J2000 + pm_ra*tl\
*Sec(DEC_J2000 + pm_dec*tl)) - x* np.sin(RA_J2000 + pm_ra*tl* Sec(DEC_J2000 + pm_dec*tl))) *np.tan(DEC_J2000 + pm_dec*tl)))**2
sigma_dec = np.sqrt(rad2mas(np.sqrt(sigma_dec_2))**2+np.min(dNS)**2)
# Total change in RA and DEC in mas
dprop_par_RA_mas = rad2mas(prop_par_RA - RA_start)
dprop_par_DEC_mas = rad2mas(prop_par_DEC - DEC_start)
# dprop_par_RA_mas = ((prop_par_RA-prop_par_RA[0])*u.rad).to(u.arcsec).value *1000
# dprop_par_DEC_mas = ((prop_par_DEC-prop_par_DEC[0])*u.rad).to(u.arcsec).value *1000
# Convert RA and DEC to NS,EW
prop_par_EW_mas=-dprop_par_RA_mas*np.cos(prop_DEC) # EW motion of a background object relative to star (mas)
prop_par_NS_mas=-dprop_par_DEC_mas # NS motion of a background object relative to star (mas)
# Start from initial NS,EW offset (start from the first data point)
NS_vector = D0_NS + prop_par_NS_mas
EW_vector = D0_EW + prop_par_EW_mas
hfont = {'fontname':'Helvetica'}
# plt.title('title',**csfont)
# plt.xlabel('xlabel', **hfont)
majorLocator = MultipleLocator(1)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(0.2)
'''
trying to fix the fill between plots.
'''
fig, axarr=pl.subplots(2,sharex=True)
axarr[1].xaxis.set_major_locator(majorLocator)
axarr[1].xaxis.set_major_formatter(majorFormatter)
axarr[1].xaxis.set_minor_locator(minorLocator)
axarr[0].plot(tl, NS_vector, color="black", lw=2, alpha=1)
# Plot North Offset 1 and 2 sigma errors
axarr[0].fill_between(tl, NS_vector, NS_vector+sigma_dec,alpha=0.3,color='black',linewidth=0)
axarr[0].fill_between(tl, NS_vector, NS_vector-sigma_dec,alpha=0.3,color='black',linewidth=0)
axarr[0].fill_between(tl, NS_vector, NS_vector+2*sigma_dec,alpha=0.2,color='black',linewidth=0)
axarr[0].fill_between(tl, NS_vector, NS_vector-2*sigma_dec,alpha=0.2,color='black',linewidth=0)
axarr[0].errorbar(t,NS, yerr=dNS, fmt=".k")
axarr[0].text(0.8, 0.8,target , fontweight='bold', fontsize = 25, horizontalalignment='center', verticalalignment='center', transform=axarr[0].transAxes)
# pl.errorbar(t, data, yerr=rverr, fmt=".k")
axarr[0].set_ylabel("North offset (mas)",fontweight='bold',fontsize=16)
axarr[0].yaxis.set_major_locator(MaxNLocator(prune='both'))
axarr[0].locator_params(axis = 'y', nbins = 6)
axarr[1].plot(tl, EW_vector, color="black", lw=2, alpha=1)
# Plot East Offset 1 and 2 sigma errors
axarr[1].fill_between(tl, EW_vector, EW_vector+sigma_ra,alpha=0.3,color='black',linewidth=0)
axarr[1].fill_between(tl, EW_vector, EW_vector-sigma_ra,alpha=0.3,color='black',linewidth=0)
axarr[1].fill_between(tl, EW_vector, EW_vector+2*sigma_ra,alpha=0.2,color='black',linewidth=0)
axarr[1].fill_between(tl, EW_vector, EW_vector-2*sigma_ra,alpha=0.2,color='black',linewidth=0)
axarr[1].errorbar(t,EW, yerr=dEW, fmt=".k")
# axarr[1].plot(tl, EW_vector-sigma_ra, color="black", lw=1, alpha=0.9)
axarr[1].set_ylabel("East offset (mas)",fontweight='bold',fontsize=16)
axarr[1].set_xlabel("Epoch (yrs)",fontweight='bold',fontsize=16)
axarr[1].yaxis.set_major_locator(MaxNLocator(prune='both'))
# axarr[1].get_xaxis().get_major_formatter().set_useOffset(False)
axarr[1].locator_params(axis = 'y', nbins = 6) #(or axis = 'y')
pl.xlim([np.min(tl),np.max(tl)])
fig.tight_layout()
fig.subplots_adjust(hspace=0.001) # no vertical gap between the stacked subplots
pl.savefig(target + '_plot_NSEW.png')
# print(NS_vector,EW_vector)
# Plot PA and SEP now
# print(NS_vector**2,EW_vector**2)
PA =np.abs((180./np.pi)*np.arctan2(NS_vector,EW_vector))+90
SEP = np.sqrt(NS_vector**2+EW_vector**2)
# print(PA,SEP)
fig, axarr=pl.subplots(2,sharex=True)
axarr[1].xaxis.set_major_locator(majorLocator)
axarr[1].xaxis.set_major_formatter(majorFormatter)
axarr[1].xaxis.set_minor_locator(minorLocator)
axarr[0].plot(tl, PA, color="black", lw=2, alpha=1)
# Plot North Offset 1 and 2 sigma errors
# axarr[0].fill_between(tl, NS_vector, NS_vector+sigma_dec,alpha=0.3,color='black',linewidth=0)
# axarr[0].fill_between(tl, NS_vector, NS_vector-sigma_dec,alpha=0.3,color='black',linewidth=0)
# axarr[0].fill_between(tl, NS_vector, NS_vector+2*sigma_dec,alpha=0.2,color='black',linewidth=0)
# axarr[0].fill_between(tl, NS_vector, NS_vector-2*sigma_dec,alpha=0.2,color='black',linewidth=0)
# axarr[0].errorbar(t,NS, yerr=dNS, fmt=".k")
axarr[0].text(0.8, 0.8,target , fontweight='bold', fontsize = 25, horizontalalignment='center', verticalalignment='center', transform=axarr[0].transAxes)
# pl.errorbar(t, data, yerr=rverr, fmt=".k")
axarr[0].set_ylabel("PA (rad)",fontweight='bold',fontsize=16)
axarr[0].yaxis.set_major_locator(MaxNLocator(prune='both'))
axarr[0].locator_params(axis = 'y', nbins = 6)
axarr[1].plot(tl, SEP, color="black", lw=2, alpha=1)
# Plot East Offset 1 and 2 sigma errors
# axarr[1].fill_between(tl, EW_vector, EW_vector+sigma_ra,alpha=0.3,color='black',linewidth=0)
# axarr[1].fill_between(tl, EW_vector, EW_vector-sigma_ra,alpha=0.3,color='black',linewidth=0)
# axarr[1].fill_between(tl, EW_vector, EW_vector+2*sigma_ra,alpha=0.2,color='black',linewidth=0)
# axarr[1].fill_between(tl, EW_vector, EW_vector-2*sigma_ra,alpha=0.2,color='black',linewidth=0)
# axarr[1].errorbar(t,EW, yerr=dEW, fmt=".k")
# axarr[1].plot(tl, EW_vector-sigma_ra, color="black", lw=1, alpha=0.9)
axarr[1].set_ylabel("Sep (mas)",fontweight='bold',fontsize=16)
axarr[1].set_xlabel("Epoch (yrs)",fontweight='bold',fontsize=16)
axarr[1].yaxis.set_major_locator(MaxNLocator(prune='both'))
# axarr[1].get_xaxis().get_major_formatter().set_useOffset(False)
axarr[1].locator_params(axis = 'y', nbins = 6) #(or axis = 'y')
pl.xlim([np.min(tl),np.max(tl)])
fig.tight_layout()
fig.subplots_adjust(hspace=0.001) # no vertical gap between the stacked subplots
pl.savefig(target + '_plot_PASEP.png')
pl.figure()
pl.plot(EW_vector,NS_vector,color='black')
pl.errorbar(EW[0],NS[0],yerr=dEW[0],xerr=dNS[0],fmt='ok',markersize=12)
pl.errorbar(EW[1],NS[1],yerr=dEW[1],xerr=dNS[1],fmt='dk',markersize=12)
# pl.errorbar(EW[2],NS[2],yerr=dEW[2],xerr=dNS[2],fmt='sk',markersize=12)
pl.xlabel('East Offset (mas)')
pl.ylabel('North Offset (mas)')
pl.tight_layout()
pl.savefig('test.png')
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
@Project : activationFunction
@Author : Xu-Shan Zhao
@Filename: activationFunction202003121138.py
@IDE : PyCharm
@Time1 : 2020-03-12 11:38:55
@Time2 : 2020/3/12 11:38
@Month1 : March
@Month2 : March
"""
import torch
import matplotlib.pyplot as plt
x_data = torch.arange(-6, 6, 0.01)
y_tanh = torch.tanh(x_data)
y_sigmoid = torch.sigmoid(x_data)
y_relu = torch.relu(x_data)
y_leakyrelu = torch.nn.functional.leaky_relu(x_data, negative_slope=0.05)
y_prelu = torch.prelu(x_data, weight=torch.tensor(0.25))
y_rrelu = torch.rrelu(x_data, lower=0., upper=1)
plt.ion()
plt.cla()
plt.plot(x_data.data.numpy(), y_tanh.data.numpy(), c='red', label='tanh')
plt.legend()
plt.xlabel('x')
plt.ylabel('tanh(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_sigmoid.data.numpy(), c='red', label='sigmoid')
plt.legend()
plt.xlabel('x')
plt.ylabel('Sigmoid(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_relu.data.numpy(), c='red', label='ReLU')
plt.legend()
plt.xlabel('x')
plt.ylabel('ReLU(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_leakyrelu.data.numpy(), c='red', label='Leaky ReLU')
plt.legend()
plt.xlabel('x')
plt.ylabel('Leaky ReLU(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_prelu.data.numpy(), c='red', label='PReLU')
plt.legend()
plt.xlabel('x')
plt.ylabel('PReLU(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_rrelu.data.numpy(), c='red', label='RReLU')
plt.legend()
plt.xlabel('x')
plt.ylabel('RReLU(x)')
plt.pause(1)
plt.cla()
plt.plot(x_data.data.numpy(), y_tanh.data.numpy(), c='red', linestyle='-', label='tanh')
plt.plot(x_data.data.numpy(), y_sigmoid.data.numpy(), c='blue', linestyle='-.', label='sigmoid')
plt.plot(x_data.data.numpy(), y_relu.data.numpy(), c='green', linestyle='--', label='ReLU')
plt.plot(x_data.data.numpy(), y_leakyrelu.data.numpy(), c='black', linestyle=':', label='Leaky ReLU')
plt.plot(x_data.data.numpy(), y_prelu.data.numpy(), c='orange', label='PReLU')
plt.plot(x_data.data.numpy(), y_rrelu.data.numpy(), c='gold', label='RReLU')
plt.legend()
plt.ylim(-1.05, 1.5)
plt.xlabel('x')
plt.pause(1)
plt.ioff()
plt.show()
| nilq/baby-python | python |
with open('day1_input.txt') as file:
input = file.read().strip()  # strip the trailing newline so every character is a digit
# Part 1
sum = 0
for i in range(len(input) - 1):
if input[i] == input[i + 1]:
sum += int(input[i])
if input[len(input) - 1] == input[0]:
sum += int(input[0])
print(sum)
# Part 2
sum = 0
forward = len(input) // 2  # integer division: range() needs an int
for i in range(forward):
if input[i] == input[i + forward]:
sum += int(input[i])
for i in range(forward, len(input)):
if input[i] == input[(i + forward) % len(input)]:
sum += int(input[i])
print(sum)
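# A sketch of an equivalent, more compact Part 2 using modular indexing
# (a different accumulator name is used because `sum` is shadowed above):
half = len(input) // 2
total = 0
for i, d in enumerate(input):
    if d == input[(i + half) % len(input)]:
        total += int(d)
print(total)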
| nilq/baby-python | python |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev.testing import requires_py3
from ..utils import get_check
pytestmark = [
requires_py3,
pytest.mark.openmetrics,
pytest.mark.openmetrics_transformers,
pytest.mark.openmetrics_transformers_metadata,
]
def test_basic(aggregator, datadog_agent, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP kubernetes_build_info A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.
# TYPE kubernetes_build_info gauge
kubernetes_build_info{buildDate="2016-11-18T23:57:26Z",compiler="gc",gitCommit="3872cb93abf9482d770e651b5fe14667a6fca7e0",gitTreeState="dirty",gitVersion="v1.6.0-alpha.0.680+3872cb93abf948-dirty",goVersion="go1.7.3",major="1",minor="6+",platform="linux/amd64"} 1
""" # noqa: E501
)
check = get_check(
{'metrics': [{'kubernetes_build_info': {'name': 'version', 'type': 'metadata', 'label': 'gitVersion'}}]}
)
check.check_id = 'test:instance'
dd_run_check(check)
version_metadata = {
'version.major': '1',
'version.minor': '6',
'version.patch': '0',
'version.release': 'alpha.0.680',
'version.build': '3872cb93abf948-dirty',
'version.raw': 'v1.6.0-alpha.0.680+3872cb93abf948-dirty',
'version.scheme': 'semver',
}
datadog_agent.assert_metadata('test:instance', version_metadata)
datadog_agent.assert_metadata_count(len(version_metadata))
aggregator.assert_all_metrics_covered()
def test_options(aggregator, datadog_agent, dd_run_check, mock_http_response):
mock_http_response(
"""
# HELP kubernetes_build_info A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.
# TYPE kubernetes_build_info gauge
kubernetes_build_info{buildDate="2016-11-18T23:57:26Z",compiler="gc",gitCommit="3872cb93abf9482d770e651b5fe14667a6fca7e0",gitTreeState="dirty",gitVersion="v1.6.0-alpha.0.680+3872cb93abf948-dirty",goVersion="go1.7.3",major="1",minor="6+",platform="linux/amd64"} 1
""" # noqa: E501
)
check = get_check(
{
'metrics': [
{
'kubernetes_build_info': {
'name': 'version',
'type': 'metadata',
'label': 'gitVersion',
'scheme': 'regex',
'final_scheme': 'semver',
'pattern': 'v(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<fix>\\d+)',
}
}
],
}
)
check.check_id = 'test:instance'
dd_run_check(check)
version_metadata = {
'version.major': '1',
'version.minor': '6',
'version.fix': '0',
'version.raw': 'v1.6.0-alpha.0.680+3872cb93abf948-dirty',
'version.scheme': 'semver',
}
datadog_agent.assert_metadata('test:instance', version_metadata)
datadog_agent.assert_metadata_count(len(version_metadata))
aggregator.assert_all_metrics_covered()
| nilq/baby-python | python |
# CSV to JSON
# 1. run the following from the root of project directory: `cd data/achievements; python3 achievements-gen.py`
# 2. commit and push changes
# 3. website will automatically update
import csv, json
with open("./achievements.csv", encoding='utf-8', mode='r') as fin:
with open("./achievements.json", mode="w") as fout:
csv_data = list(csv.reader(fin))
json_data = []
for (_i, row) in enumerate(csv_data[1:]):
i = _i + 2
try:
# Handle Year columns
if row[0].strip() != "":
try:
int_year = int(row[0])
except (ValueError, TypeError):
raise TypeError("Invalid year on Row {}.".format(i))
json_data.append({
"year": int_year,
"competitions": []
})
# Handle Competition columns
if row[1].strip() != "":
if row[2].strip() == "": raise ValueError("Missing Region on Row {}.".format(i))
json_data[-1]["competitions"].append({
"code": row[1],
"region": row[2],
"desc": row[3],
"awards": []
})
# Handle Award columns
json_data[-1]["competitions"][-1]["awards"].append({
"category": row[4],
"title": row[5],
"team": row[6],
"recipients": row[7].split(", ")
})
except Exception as err:
    raise RuntimeError("Error occurred on Row {}. This error may or may not have been due to input data.".format(i)) from err
json_data.reverse()
for year in json_data:
year["competitions"].reverse()
print(json.dumps(json_data, indent=2, sort_keys=True))
fout.write(json.dumps(json_data, indent=2, sort_keys=True))
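# Illustrative row-to-JSON mapping, inferred from the column handling above
# (the values are made up):
#
#   CSV row:  2023,ICPC,World,Finals,Gold,Champion,Team A,"Alice, Bob"
#   JSON:     {"year": 2023, "competitions": [{"code": "ICPC", "region": "World",
#              "desc": "Finals", "awards": [{"category": "Gold", "title": "Champion",
#              "team": "Team A", "recipients": ["Alice", "Bob"]}]}]}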
| nilq/baby-python | python |
"""
CODE ADAPTED FROM: https://github.com/sjblim/rmsn_nips_2018
Treatment Effects with RNNs:
Common routines to use across all training scripts
"""
import logging
import numpy as np
import pandas as pd
import tensorflow as tf
import treatments.RMSN.configs
import treatments.RMSN.libs.net_helpers as helpers
from treatments.RMSN.libs.model_rnn import RnnModel
ROOT_FOLDER = treatments.RMSN.configs.ROOT_FOLDER
MODEL_ROOT = treatments.RMSN.configs.MODEL_ROOT
logger = logging.getLogger()
logger.disabled = True
# --------------------------------------------------------------------------
# Training routine
# --------------------------------------------------------------------------
def train(
net_name,
expt_name,
training_dataset,
validation_dataset,
dropout_rate,
memory_multiplier,
num_epochs,
minibatch_size,
learning_rate,
max_norm,
use_truncated_bptt,
num_features,
num_outputs,
model_folder,
hidden_activation,
output_activation,
tf_config,
additonal_info="",
b_use_state_initialisation=False,
b_use_seq2seq_feedback=False,
b_use_seq2seq_training_mode=False,
adapter_multiplier=0,
b_use_memory_adapter=False,
):
"""
Common training routine to all RNN models - seq2seq + standard
"""
min_epochs = 1
tf.reset_default_graph()
with tf.Graph().as_default(), tf.Session(config=tf_config) as sess:
tf_data_train = convert_to_tf_dataset(training_dataset)
tf_data_valid = convert_to_tf_dataset(validation_dataset)
# Setup default hidden layer size
hidden_layer_size = int(memory_multiplier * num_features)
if b_use_state_initialisation:
full_state_size = int(training_dataset["initial_states"].shape[-1])
adapter_size = adapter_multiplier * full_state_size
else:
adapter_size = 0
# Training simulation
model_parameters = {
"net_name": net_name,
"experiment_name": expt_name,
"training_dataset": tf_data_train,
"validation_dataset": tf_data_valid,
"test_dataset": None,
"dropout_rate": dropout_rate,
"input_size": num_features,
"output_size": num_outputs,
"hidden_layer_size": hidden_layer_size,
"num_epochs": num_epochs,
"minibatch_size": minibatch_size,
"learning_rate": learning_rate,
"max_norm": max_norm,
"model_folder": model_folder,
"hidden_activation": hidden_activation,
"output_activation": output_activation,
"backprop_length": 60, # backprop over 60 timesteps for truncated backpropagation through time
"softmax_size": 0, # not used in this paper, but allows for categorical actions
"performance_metric": "xentropy" if output_activation == "sigmoid" else "mse",
"use_seq2seq_feedback": b_use_seq2seq_feedback,
"use_seq2seq_training_mode": b_use_seq2seq_training_mode,
"use_memory_adapter": b_use_memory_adapter,
"memory_adapter_size": adapter_size,
}
# Get the right model
model = RnnModel(model_parameters)
serialisation_name = model.serialisation_name
if helpers.hyperparameter_result_exists(model_folder, net_name, serialisation_name):
logging.warning("Combination found: skipping {}".format(serialisation_name))
return helpers.load_hyperparameter_results(model_folder, net_name)
training_handles = model.get_training_graph(
use_truncated_bptt=use_truncated_bptt, b_use_state_initialisation=b_use_state_initialisation
)
validation_handles = model.get_prediction_graph(
use_validation_set=True, with_dropout=False, b_use_state_initialisation=b_use_state_initialisation
)
# Start optimising
num_minibatches = int(np.ceil(training_dataset["scaled_inputs"].shape[0] / model_parameters["minibatch_size"]))
i = 1
epoch_count = 1
step_count = 1
min_loss = np.inf
with sess.as_default():
sess.run(tf.global_variables_initializer())
optimisation_summary = pd.Series(dtype=float)  # explicit dtype avoids the empty-Series deprecation warning
while True:
try:
loss, _ = sess.run([training_handles["loss"], training_handles["optimiser"]])
# Log output
logging.info(
"Epoch {} | iteration = {} of {}, loss = {} | net = {} | info = {}".format(
epoch_count, step_count, num_minibatches, loss, model.net_name, additonal_info
)
)
if step_count == num_minibatches:
# Reinit dataset
sess.run(validation_handles["initializer"])
means = []
UBs = []
LBs = []
while True:
try:
mean, upper_bound, lower_bound = sess.run(
[
validation_handles["mean"],
validation_handles["upper_bound"],
validation_handles["lower_bound"],
]
)
means.append(mean)
UBs.append(upper_bound)
LBs.append(lower_bound)
except tf.errors.OutOfRangeError:
break
means = np.concatenate(means, axis=0)
active_entries = validation_dataset["active_entries"]
output = validation_dataset["scaled_outputs"]
if model_parameters["performance_metric"] == "mse":
validation_loss = np.sum((means - output) ** 2 * active_entries) / np.sum(active_entries)
elif model_parameters["performance_metric"] == "xentropy":
_, _, features_size = output.shape
partition_idx = features_size
# Do binary first
validation_loss = np.sum(
(
output[:, :, :partition_idx] * -np.log(means[:, :, :partition_idx] + 1e-5)
+ (1 - output[:, :, :partition_idx])
* -np.log(1 - means[:, :, :partition_idx] + 1e-5)
)
* active_entries[:, :, :partition_idx]
) / (np.sum(active_entries[:, :, :partition_idx]))
optimisation_summary[epoch_count] = validation_loss
                        # Log the validation loss for this epoch
                        logging.info(
                            "Epoch {} Summary| Validation loss = {} | net = {} | info = {}".format(
                                epoch_count, validation_loss, model.net_name, additional_info
                            )
                        )
if np.isnan(validation_loss):
logging.warning("NAN Loss found, terminating routine")
break
# Save model and loss trajectories
if validation_loss < min_loss and epoch_count > min_epochs:
cp_name = serialisation_name + "_optimal"
helpers.save_network(sess, model_folder, cp_name, optimisation_summary)
min_loss = validation_loss
# Update
epoch_count += 1
step_count = 0
step_count += 1
i += 1
except tf.errors.OutOfRangeError:
break
# Save final
cp_name = serialisation_name + "_final"
helpers.save_network(sess, model_folder, cp_name, optimisation_summary)
helpers.add_hyperparameter_results(optimisation_summary, model_folder, net_name, serialisation_name)
hyperparam_df = helpers.load_hyperparameter_results(model_folder, net_name)
logging.info("Terminated at iteration {}".format(i))
sess.close()
return hyperparam_df
# --------------------------------------------------------------------------
# Test routine
# --------------------------------------------------------------------------
def test(
test_dataset,
tf_config,
net_name,
expt_name,
dropout_rate,
num_features,
num_outputs,
memory_multiplier,
num_epochs,
minibatch_size,
learning_rate,
max_norm,
hidden_activation,
output_activation,
model_folder,
b_use_state_initialisation=False,
b_dump_all_states=False,
b_mse_by_time=False,
b_use_seq2seq_feedback=False,
b_use_seq2seq_training_mode=False,
adapter_multiplier=0,
b_use_memory_adapter=False,
):
"""
Common test routine to all RNN models - seq2seq + standard
"""
# Start with graph
tf.reset_default_graph()
with tf.Session(config=tf_config) as sess:
tf_data_test = convert_to_tf_dataset(test_dataset)
# For decoder training with external state inputs
if b_use_state_initialisation:
full_state_size = int(test_dataset["initial_states"].shape[-1])
adapter_size = adapter_multiplier * full_state_size
else:
adapter_size = 0
# Training simulation
model_parameters = {
"net_name": net_name,
"experiment_name": expt_name,
"training_dataset": tf_data_test,
"validation_dataset": tf_data_test,
"test_dataset": tf_data_test,
"dropout_rate": dropout_rate,
"input_size": num_features,
"output_size": num_outputs,
"hidden_layer_size": int(memory_multiplier * num_features),
"num_epochs": num_epochs,
"minibatch_size": minibatch_size,
"learning_rate": learning_rate,
"max_norm": max_norm,
"model_folder": model_folder,
"hidden_activation": hidden_activation,
"output_activation": output_activation,
"backprop_length": 60,
# Length for truncated backpropagation over time, matches max time steps here.
"softmax_size": 0, # not used in this paper, but allows for categorical actions
"performance_metric": "xentropy" if output_activation == "sigmoid" else "mse",
"use_seq2seq_feedback": b_use_seq2seq_feedback,
"use_seq2seq_training_mode": b_use_seq2seq_training_mode,
"use_memory_adapter": b_use_memory_adapter,
"memory_adapter_size": adapter_size,
}
# Start optimising
with sess.as_default():
sess.run(tf.global_variables_initializer())
# Get the right model
model = RnnModel(model_parameters)
handles = model.get_prediction_graph(
use_validation_set=False if "treatment_rnn" not in net_name else None,
with_dropout=False,
b_use_state_initialisation=b_use_state_initialisation,
b_dump_all_states=b_dump_all_states,
)
# Load checkpoint
serialisation_name = model.serialisation_name
cp_name = serialisation_name + "_optimal"
_ = helpers.load_network(sess, model_folder, cp_name)
# Init
sess.run(handles["initializer"])
# Get all the data out in chunks
means = []
UBs = []
LBs = []
states = []
while True:
try:
mean, upper_bound, lower_bound, ave_states = sess.run(
[handles["mean"], handles["upper_bound"], handles["lower_bound"], handles["ave_states"]]
)
means.append(mean)
UBs.append(upper_bound)
LBs.append(lower_bound)
states.append(ave_states)
except tf.errors.OutOfRangeError:
break
means = np.concatenate(means, axis=0)
states = np.concatenate(states, axis=0)
            active_entries = test_dataset["active_entries"]
            output = test_dataset["scaled_outputs"]
# prediction_map[net_name] = means
# output_map[net_name] = output
if b_mse_by_time:
mse = np.sum((means - output) ** 2 * active_entries, axis=0) / np.sum(active_entries, axis=0)
else:
mse = np.sum((means - output) ** 2 * active_entries) / np.sum(active_entries)
# results[net_name] = mse
# print(net_name, mse)
sess.close()
return means, output, mse, states
# --------------------------------------------------------------------------
# Data processing functions
# --------------------------------------------------------------------------
def convert_to_tf_dataset(dataset_map):
key_map = {
"inputs": dataset_map["scaled_inputs"],
"outputs": dataset_map["scaled_outputs"],
"active_entries": dataset_map["active_entries"],
"sequence_lengths": dataset_map["sequence_lengths"],
}
if "propensity_weights" in dataset_map:
key_map["propensity_weights"] = dataset_map["propensity_weights"]
if "initial_states" in dataset_map:
key_map["initial_states"] = dataset_map["initial_states"]
tf_dataset = tf.data.Dataset.from_tensor_slices(key_map)
return tf_dataset
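# Usage sketch (hypothetical, not part of the original module): the returned
# dataset can be batched and iterated with the standard tf.data API, e.g.
#   tf_data = convert_to_tf_dataset(dataset_map).batch(minibatch_size)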
def get_processed_data(dataset, b_predict_actions, b_use_actions_only):
previous_treatments = dataset["previous_treatments"]
current_treatments = dataset["current_treatments"]
covariates = dataset["current_covariates"]
dataset_outputs = dataset["outputs"]
sequence_lengths = dataset["sequence_lengths"]
active_entries = dataset["active_entries"]
# Parcelling INPUTS
if b_predict_actions:
if b_use_actions_only:
inputs = previous_treatments
actions = previous_treatments
else:
# Uses current covariate, to remove confounding effects between action and current value
inputs = np.concatenate([covariates, previous_treatments], axis=2)
actions = previous_treatments
else:
inputs = np.concatenate([covariates, current_treatments], axis=2)
actions = current_treatments
# Parcelling OUTPUTS
if b_predict_actions:
outputs = current_treatments
else:
outputs = dataset_outputs
return {
"scaled_inputs": inputs,
"scaled_outputs": outputs,
"actions": actions,
"sequence_lengths": sequence_lengths,
"active_entries": active_entries,
}
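# ----------------------------------------------------------------------------
# Minimal self-check sketch (an assumption, not part of the original module):
# builds a tiny synthetic dataset map with the keys get_processed_data()
# expects and verifies the input parcelling for the standard (non-action)
# case, where covariates and current treatments are concatenated.
if __name__ == "__main__":
    num_patients, num_timesteps = 4, 10
    demo_dataset = {
        "previous_treatments": np.zeros((num_patients, num_timesteps, 2)),
        "current_treatments": np.ones((num_patients, num_timesteps, 2)),
        "current_covariates": np.random.rand(num_patients, num_timesteps, 3),
        "outputs": np.random.rand(num_patients, num_timesteps, 1),
        "sequence_lengths": np.full(num_patients, num_timesteps),
        "active_entries": np.ones((num_patients, num_timesteps, 1)),
    }
    processed = get_processed_data(demo_dataset, b_predict_actions=False, b_use_actions_only=False)
    # inputs = covariates ++ current treatments along the feature axis
    assert processed["scaled_inputs"].shape == (num_patients, num_timesteps, 3 + 2)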
|
nilq/baby-python
|
python
|
#####################################
# ColumnRelationships.py
#####################################
# Description:
# * Map all relationships between columns.
from enum import Enum
from itertools import combinations
from pandas import DataFrame
from sortedcontainers import SortedDict
class ColumnRelationships(object):
"""
* Immutable object that store relationships between columns.
"""
def __init__(self, data):
"""
Inputs:
* data: Expecting dataframe of columns.
"""
self.__relationships = ColumnRelationships.MapRelationships(data)
###############
# Properties:
###############
@property
def Relationships(self):
return self.__relationships
###############
# Interface Methods:
###############
def ToDataFrame(self, countinfo = True):
"""
* Return full symmetric dataframe matrix as representation of object.
Inputs:
* countinfo: If True, fills cell with "leftcount_rightcount", else fills with
full relationship name, ex "one_one".
"""
return ColumnRelationships.__AsDataFrame(self.__relationships, countinfo)
@classmethod
def MapRelationships(cls, data):
"""
* Map all relationships between columns in passed data.
Inputs:
* data: Expecting dataframe of columns.
Output:
* Returns lower triangular Dataframe of relationships with columns as dimensions.
"""
results = SortedDict({col : {} for col in data.columns})
combs = combinations(data.columns, 2)
for comb in combs:
if comb[0] != comb[1]:
results[comb[0]][comb[1]] = cls.__MapRelationships(data, comb[0], comb[1])
return results
###############
# Private Helpers:
###############
@classmethod
def __MapRelationships(cls, data, col1, col2):
"""
* Determine if one-to-one/one-to-many/many-to-one/many-to-many relationship exists
between columns.
"""
colset = data[[col1, col2]].drop_duplicates([col1, col2])
left_max = colset.groupby(col1).count().max()[0]
right_max = colset.groupby(col2).count().max()[0]
if left_max==1:
if right_max==1:
enum = RelationshipEnum.ONE_TO_ONE
else:
enum = RelationshipEnum.ONE_TO_MANY
else:
if right_max==1:
enum = RelationshipEnum.MANY_TO_ONE
else:
enum = RelationshipEnum.MANY_TO_MANY
return ColumnRelationship(enum, left_max, right_max)
@staticmethod
def __AsDataFrame(data, countinfo):
"""
* Convert stored dictionary into symmetric DataFrame.
"""
sorted_keys = sorted(list(data.keys()))
newdata = SortedDict()
for key in sorted_keys:
newdata[key] = SortedDict()
for subkey in sorted_keys:
if subkey == key:
newdata[key][subkey] = '='
elif subkey not in data[key]:
newdata[key][subkey] = data[subkey][key].Reverse().CountStr if countinfo == True else data[subkey][key].Reverse().TypeStr
else:
newdata[key][subkey] = data[key][subkey].CountStr if countinfo == True else data[key][subkey].TypeStr
return DataFrame(newdata)
class RelationshipEnum(Enum):
ONE_TO_ONE = 0
ONE_TO_MANY = 1
MANY_TO_ONE = 2
MANY_TO_MANY = 3
class ColumnRelationship:
"""
* Immutable class representing relationship
between two columns.
"""
__countstr = '%d_%d'
__typestrs = { RelationshipEnum.ONE_TO_ONE: "one_one",
RelationshipEnum.ONE_TO_MANY : "one_many",
RelationshipEnum.MANY_TO_MANY : "many_many",
RelationshipEnum.MANY_TO_ONE : "many_one" }
def __init__(self, enum, leftcount, rightcount):
self.__type = enum
self.__leftcount = leftcount
self.__rightcount = rightcount
def __eq__(self, val):
return self.__type == val.Type
def __str__(self):
"""
* Return CountStr by default.
"""
return self.CountStr
#############
# Properties:
#############
@property
def CountStr(self):
"""
* Return string of form "<KeyGroupByCount>_<ValueGroupByCount>".
"""
return self.__countstr % (self.__leftcount, self.__rightcount)
@property
def Type(self):
"""
* Return type enumeration.
"""
return self.__type
@property
def TypeStr(self):
"""
* Type enumeration in string form (ex: "one_one").
"""
return ColumnRelationship.__typestrs[self.__type]
##############
# Interface Methods:
##############
def Reverse(self):
"""
* Return new relationship with reversed characteristics.
"""
enum = self.__type
if self.__type == RelationshipEnum.MANY_TO_ONE:
enum = RelationshipEnum.ONE_TO_MANY
elif self.__type == RelationshipEnum.ONE_TO_MANY:
enum = RelationshipEnum.MANY_TO_ONE
return ColumnRelationship(enum, self.__rightcount, self.__leftcount)
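# Minimal usage sketch (an assumption, not part of the original module):
if __name__ == "__main__":
    frame = DataFrame({
        'order_id': [1, 2, 3, 4],
        'customer': ['a', 'a', 'b', 'b'],
        'country': ['x', 'x', 'y', 'y'],
    })
    rels = ColumnRelationships(frame)
    # order_id -> customer is one_many; customer -> country is one_one:
    print(rels.ToDataFrame(countinfo=False))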
|
nilq/baby-python
|
python
|
from collections import OrderedDict
from elasticsearch.exceptions import RequestError
from elasticsearch_dsl import Search
import settings
from core.cursor import decode_cursor, get_next_cursor
from core.exceptions import (APIPaginationError, APIQueryParamsError,
APISearchError)
from core.filter import filter_records
from core.group_by import (get_group_by_results,
get_group_by_results_external_ids,
get_group_by_results_transform, group_by_records,
group_by_records_transform, is_transform)
from core.paginate import Paginate
from core.search import check_is_search_query, full_search
from core.sort import sort_records
from core.utils import (get_field, map_filter_params, map_sort_params,
set_number_param)
from core.validate import validate_params
def shared_view(request, fields_dict, index_name, default_sort):
"""Primary function used to search, filter, and aggregate across all five entities."""
# params
validate_params(request)
cursor = request.args.get("cursor")
filter_params = map_filter_params(request.args.get("filter"))
group_by = request.args.get("group_by") or request.args.get("group-by")
page = set_number_param(request, "page", 1)
per_page = (
set_number_param(request, "per-page", 25)
if not group_by
else set_number_param(request, "per-page", 200)
)
search = request.args.get("search")
sort_params = map_sort_params(request.args.get("sort"))
s = Search(index=index_name)
# pagination
paginate = Paginate(group_by, page, per_page)
paginate.validate()
if group_by:
s = s.extra(size=0)
else:
s = s.extra(size=per_page)
if cursor and page != 1:
raise APIPaginationError("Cannot use page parameter with cursor.")
if cursor and cursor != "*":
decoded_cursor = decode_cursor(cursor)
s = s.extra(search_after=decoded_cursor)
# search
if search and search != '""':
s = full_search(index_name, s, search)
# filter
if filter_params:
s = filter_records(fields_dict, filter_params, s)
# sort
is_search_query = check_is_search_query(filter_params, search)
# do not allow sorting by relevance score without search query
if not is_search_query and sort_params and "relevance_score" in sort_params:
raise APIQueryParamsError(
"Must include a search query (such as ?search=example or /filter=display_name.search:example) in order to sort by relevance_score."
)
if sort_params:
s = sort_records(fields_dict, group_by, sort_params, s)
elif is_search_query and not sort_params and index_name.startswith("works"):
s = s.sort("_score", "publication_date", "id")
elif is_search_query and not sort_params:
s = s.sort("_score", "-works_count", "id")
elif not group_by:
s = s.sort(*default_sort)
# group by
transform = False
if group_by:
field = get_field(fields_dict, group_by)
transform = is_transform(field, index_name, filter_params)
        if (
            type(field).__name__ == "DateField"
            or type(field).__name__ == "RangeField"
        ) and field.param != "publication_year" and field.param != "level":
            raise APIQueryParamsError("Cannot group by date or number fields.")
elif field.param == "referenced_works":
raise APIQueryParamsError(
"Group by referenced_works is not supported at this time."
)
elif field.param == "cited_by" or field.param == "related_to":
raise APIQueryParamsError("Cannot group cited_by or related_to filters.")
if transform:
s = group_by_records_transform(field, index_name, sort_params)
else:
s = group_by_records(field, s, sort_params)
if not group_by:
try:
response = s[paginate.start : paginate.end].execute()
except RequestError as e:
if "search_after has" in str(e) and "but sort has" in str(e):
raise APIPaginationError("Cursor value is invalid.")
else:
raise APISearchError("Something went wrong.")
count = s.count()
else:
response = s.execute()
if group_by in settings.EXTERNAL_ID_FIELDS:
count = 2
elif transform:
count = len(response)
else:
count = len(response.aggregations.groupby.buckets)
result = OrderedDict()
result["meta"] = {
"count": count,
"db_response_time_ms": response.took,
"page": page if not cursor else None,
"per_page": 200 if group_by else per_page,
}
result["results"] = []
if cursor:
result["meta"]["next_cursor"] = get_next_cursor(response)
if group_by:
if group_by in settings.EXTERNAL_ID_FIELDS:
result["group_by"] = get_group_by_results_external_ids(response)
elif transform:
result["group_by"] = get_group_by_results_transform(group_by, response)
else:
result["group_by"] = get_group_by_results(group_by, response)
else:
result["group_by"] = []
result["results"] = response
if settings.DEBUG:
print(s.to_dict())
return result
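# Usage sketch (hypothetical names; actual callers pass their own entity
# fields dict, index name, and default sort):
#   result = shared_view(request, fields_dict=works_fields_dict,
#                        index_name="works-v1", default_sort=["-works_count", "id"])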
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
import sys
import platform
# python version check
python_min_version = (3, 6, 2)
python_min_version_str = '.'.join(map(str, python_min_version))
if sys.version_info < python_min_version:
print(
f"You are using Python {platform.python_version()}. At least Python >={python_min_version_str} is required.")
sys.exit(-1)
setup(
name='gooogloo',
version='0.0.1',
author='sansmoraxz',
    packages=find_packages(),
url='https://github.com/sansmoraxz/py-gooogloo',
license='LICENSE',
description='Easy google search for python'
)
|
nilq/baby-python
|
python
|
import pytest
from spacy.lang.ja import Japanese
def test_ja_morphologizer_factory():
pytest.importorskip("sudachipy")
nlp = Japanese()
morphologizer = nlp.add_pipe("morphologizer")
assert morphologizer.cfg["extend"] is True
|
nilq/baby-python
|
python
|
from base import CodeTyper, SNIPPETS_ROOT, COMMAND
import panel as pn
pn.extension(sizing_mode="stretch_width")
CodeTyper(
title="# Cross Filtering with hvPlot, Holoviews and PANEL",
value=SNIPPETS_ROOT/"holoviews_linked_brushing_app.py",
command="$ pip install panel holoviews hvplot shapely\n" + COMMAND,
accent_base_color="#ff286e"
).servable()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
'''
Transcoder classes to be used in combination with a Coherence MediaServer,
using GStreamer pipelines for the actually work and feeding the output into
a http response.
'''
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
from gi.repository import GObject
Gst.init(None)
import os.path
import urllib.request
import urllib.parse
import urllib.error
from twisted.web import resource, server
from twisted.internet import protocol
from coherence import log
import struct
def get_transcoder_name(transcoder):
return transcoder.name
class InternalTranscoder(object):
'''Just a class to inherit from and which we can look
for upon creating our list of available transcoders.'''
class FakeTransformer(Gst.Element, log.LogAble):
logCategory = 'faker_datasink'
_sinkpadtemplate = Gst.PadTemplate.new(
'sinkpadtemplate',
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any(),
)
_srcpadtemplate = Gst.PadTemplate.new(
'srcpadtemplate',
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any(),
)
def __init__(self, destination=None, request=None):
Gst.Element.__init__(self)
log.LogAble.__init__(self)
self.sinkpad = Gst.Pad.new_from_template(self._sinkpadtemplate, 'sink')
self.srcpad = Gst.Pad.new_from_template(self._srcpadtemplate, 'src')
self.add_pad(self.sinkpad)
self.add_pad(self.srcpad)
self.sinkpad.set_chain_function_full(self.chainfunc)
        self.buffer = b''
self.buffer_size = 0
self.proxy = False
self.got_new_segment = False
self.closed = False
@staticmethod
def get_fake_header():
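        # A minimal 32-byte 'ftyp' box (major brand mp42) that gets prepended
        # so clients accept the proxied stream as a plain MP4 file.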
        return (
            struct.pack('>L4s', 32, b'ftyp')
            + b'mp42\x00\x00\x00\x00mp42mp41isomiso2'
        )
def chainfunc(self, pad, buffer):
if self.proxy:
# we are in proxy mode already
self.srcpad.push(buffer)
return Gst.FlowReturn.OK
self.buffer = self.buffer + buffer.data
if not self.buffer_size:
try:
self.buffer_size, a_type = struct.unpack(
'>L4s', self.buffer[:8]
)
except Exception:
return Gst.FlowReturn.OK
if len(self.buffer) < self.buffer_size:
# we need to buffer more
return Gst.FlowReturn.OK
buffer = self.buffer[self.buffer_size :]
fake_header = self.get_fake_header()
n_buf = Gst.Buffer(fake_header + buffer)
self.proxy = True
self.srcpad.push(n_buf)
return Gst.FlowReturn.OK
GObject.type_register(FakeTransformer)
class DataSink(Gst.Element, log.LogAble):
logCategory = 'transcoder_datasink'
_sinkpadtemplate = Gst.PadTemplate.new(
'sinkpadtemplate',
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any(),
)
def __init__(self, destination=None, request=None):
Gst.Element.__init__(self)
log.LogAble.__init__(self)
self.sinkpad = Gst.Pad.new_from_template(self._sinkpadtemplate, 'sink')
self.add_pad(self.sinkpad)
self.sinkpad.set_chain_function_full(self.chainfunc)
self.sinkpad.set_event_function_full(self.eventfunc)
self.destination = destination
self.request = request
if self.destination is not None:
self.destination = open(self.destination, 'wb')
        self.buffer = b''
self.data_size = 0
self.got_new_segment = False
self.closed = False
    def chainfunc(self, pad, inst, buffer):
        size = buffer.get_size()
        buf_data = buffer.extract_dup(0, size)
        if not isinstance(buf_data, bytes):
            buf_data = buf_data.encode('ascii')
if self.closed:
return Gst.FlowReturn.OK
if self.destination is not None:
self.destination.write(buf_data)
elif self.request is not None:
self.buffer += buf_data
if len(self.buffer) > 200000:
self.request.write(self.buffer)
self.buffer = b''
else:
self.buffer += buffer.data
self.data_size += size
return Gst.FlowReturn.OK
def eventfunc(self, pad, inst, event):
        if event.type == Gst.EventType.STREAM_START:
if not self.got_new_segment:
self.got_new_segment = True
else:
self.closed = True
        elif event.type == Gst.EventType.EOS:
if self.destination is not None:
self.destination.close()
elif self.request is not None:
if len(self.buffer) > 0:
self.request.write(self.buffer)
self.request.finish()
return True
GObject.type_register(DataSink)
class GStreamerPipeline(resource.Resource, log.LogAble):
logCategory = 'gstreamer'
addSlash = True
def __init__(self, pipeline, content_type):
self.pipeline_description = pipeline
self.contentType = content_type
self.requests = []
# if stream has a streamheader (something that has to be prepended
# before any data), then it will be a tuple of GstBuffers
self.streamheader = None
self.parse_pipeline()
resource.Resource.__init__(self)
log.LogAble.__init__(self)
def parse_pipeline(self):
self.pipeline = Gst.parse_launch(self.pipeline_description)
self.appsink = Gst.ElementFactory.make('appsink', 'sink')
self.appsink.set_property('emit-signals', True)
self.pipeline.add(self.appsink)
enc = self.pipeline.get_by_name('enc')
enc.link(self.appsink)
self.appsink.connect('new-preroll', self.new_preroll)
self.appsink.connect('new-buffer', self.new_buffer)
self.appsink.connect('eos', self.eos)
def start(self, request=None):
self.info(
f'GStreamerPipeline start {request} {self.pipeline_description}'
)
self.requests.append(request)
self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished, request)
def new_preroll(self, appsink):
self.debug('new preroll')
buffer = appsink.emit('pull-preroll')
if not self.streamheader:
# check caps for streamheader buffer
caps = buffer.get_caps()
s = caps[0]
if 'streamheader' in s:
self.streamheader = s['streamheader']
self.debug('setting streamheader')
for r in self.requests:
self.debug('writing streamheader')
for h in self.streamheader:
r.write(h.data)
for r in self.requests:
self.debug('writing preroll')
r.write(buffer.data)
def new_buffer(self, appsink):
buffer = appsink.emit('pull-buffer')
if not self.streamheader:
# check caps for streamheader buffers
caps = buffer.get_caps()
s = caps[0]
if 'streamheader' in s:
self.streamheader = s['streamheader']
self.debug('setting streamheader')
for r in self.requests:
self.debug('writing streamheader')
for h in self.streamheader:
r.write(h.data)
for r in self.requests:
r.write(buffer.data)
def eos(self, appsink):
self.info('eos')
for r in self.requests:
r.finish()
self.cleanup()
def getChild(self, name, request):
self.info(f'getChild {name}, {request}')
return self
def render_GET(self, request):
self.info(f'render GET {request}')
request.setResponseCode(200)
if hasattr(self, 'contentType'):
request.setHeader(b'Content-Type', self.contentType)
request.write(b'')
headers = request.getAllHeaders()
if 'connection' in headers and headers['connection'] == 'close':
pass
if self.requests:
if self.streamheader:
self.debug('writing streamheader')
for h in self.streamheader:
request.write(h.data)
self.requests.append(request)
else:
self.parse_pipeline()
self.start(request)
return server.NOT_DONE_YET
def render_HEAD(self, request):
self.info(f'render HEAD {request}')
request.setResponseCode(200)
request.setHeader(b'Content-Type', self.contentType)
request.write(b'')
def requestFinished(self, result, request):
self.info(f'requestFinished {result}')
# TODO: we need to find a way to destroy the pipeline here
# from twisted.internet import reactor
# reactor.callLater(0, self.pipeline.set_state, Gst.State.NULL)
self.requests.remove(request)
if not self.requests:
self.cleanup()
def on_message(self, bus, message):
t = message.type
print('on_message', t)
        if t == Gst.MessageType.ERROR:
# err, debug = message.parse_error()
# print(f'Error: {err}', debug)
self.cleanup()
        elif t == Gst.MessageType.EOS:
self.cleanup()
def cleanup(self):
self.info('pipeline cleanup')
self.pipeline.set_state(Gst.State.NULL)
self.requests = []
self.streamheader = None
class BaseTranscoder(resource.Resource, log.LogAble):
logCategory = 'transcoder'
addSlash = True
def __init__(self, uri, destination=None, content_type=None):
if uri[:7] not in ['file://', 'http://']:
uri = 'file://' + urllib.parse.quote(uri) # FIXME
self.uri = uri
self.destination = destination
        if content_type is not None:
            self.contentType = content_type
self.pipeline = None
resource.Resource.__init__(self)
log.LogAble.__init__(self)
self.info(f'uri {uri} {type(uri)}')
def getChild(self, name, request):
self.info(f'getChild {name}, {request}')
return self
def render_GET(self, request):
self.info(f'render GET {request}')
request.setResponseCode(200)
if self.contentType is not None:
request.setHeader(b'Content-Type', self.contentType)
request.write(b'')
headers = request.getAllHeaders()
if 'connection' in headers and headers['connection'] == 'close':
pass
self.start(request)
return server.NOT_DONE_YET
def render_HEAD(self, request):
self.info(f'render HEAD {request}')
request.setResponseCode(200)
request.setHeader(b'Content-Type', self.contentType)
request.write(b'')
def requestFinished(self, result):
self.info(f'requestFinished {result}')
''' we need to find a way to destroy the pipeline here
'''
# from twisted.internet import reactor
# reactor.callLater(0, self.pipeline.set_state, Gst.State.NULL)
GObject.idle_add(self.cleanup)
def on_message(self, bus, message):
t = message.type
print('on_message', t)
        if t == Gst.MessageType.ERROR:
# err, debug = message.parse_error()
# print(f'Error: {err}', debug)
self.cleanup()
        elif t == Gst.MessageType.EOS:
self.cleanup()
def cleanup(self):
self.pipeline.set_state(Gst.State.NULL)
def start(self, request=None):
'''This method should be sub classed for each
class which inherits from BaseTranscoder'''
pass
class PCMTranscoder(BaseTranscoder, InternalTranscoder):
contentType = 'audio/L16;rate=44100;channels=2'
name = 'lpcm'
def start(self, request=None):
self.info(f'PCMTranscoder start {request} {self.uri}')
self.pipeline = Gst.parse_launch(
f'{self.uri} ! decodebin ! audioconvert name=conv'
)
conv = self.pipeline.get_by_name('conv')
caps = Gst.Caps.from_string(
'audio/x-raw-int,rate=44100,endianness=4321,'
+ 'channels=2,width=16,depth=16,signed=true'
)
        # capsfilter element forcing the raw PCM output format
        caps_filter = Gst.ElementFactory.make('capsfilter', 'filter')
        caps_filter.set_property('caps', caps)
        self.pipeline.add(caps_filter)
        conv.link(caps_filter)
        sink = DataSink(destination=self.destination, request=request)
        self.pipeline.add(sink)
        caps_filter.link(sink)
self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class WAVTranscoder(BaseTranscoder, InternalTranscoder):
contentType = 'audio/x-wav'
name = 'wav'
def start(self, request=None):
self.info(f'start {request}')
self.pipeline = Gst.parse_launch(
f'{self.uri} ! decodebin ! audioconvert ! wavenc name=enc'
)
enc = self.pipeline.get_by_name('enc')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
# bus = self.pipeline.get_bus()
# bus.connect('message', self.on_message)
self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class MP3Transcoder(BaseTranscoder, InternalTranscoder):
contentType = 'audio/mpeg'
name = 'mp3'
def start(self, request=None):
self.info(f'start {request}')
self.pipeline = Gst.parse_launch(
f'{self.uri} ! decodebin ! audioconvert ! lame name=enc'
)
enc = self.pipeline.get_by_name('enc')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class MP4Transcoder(BaseTranscoder, InternalTranscoder):
''' Only works if H264 inside Quicktime/MP4 container is input
Source has to be a valid uri
'''
contentType = 'video/mp4'
name = 'mp4'
def start(self, request=None):
self.info(f'start {request}')
self.pipeline = Gst.parse_launch(
f'{self.uri} ! qtdemux name=d ! queue ! h264parse '
+ f'! mp4mux name=mux d. ! queue ! mux.'
)
mux = self.pipeline.get_by_name('mux')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
mux.link(sink)
self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class MP2TSTranscoder(BaseTranscoder, InternalTranscoder):
contentType = 'video/mpeg'
name = 'mpegts'
def start(self, request=None):
self.info(f'start {request}')
# FIXME - mpeg2enc
self.pipeline = Gst.parse_launch(
f'mpegtsmux name=mux {self.uri} ! decodebin2 name=d ! queue '
+ f'! ffmpegcolorspace ! mpeg2enc ! queue ! mux. d. '
+ f'! queue ! audioconvert ! twolame ! queue ! mux.'
)
enc = self.pipeline.get_by_name('mux')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class ThumbTranscoder(BaseTranscoder, InternalTranscoder):
'''
Should create a valid thumbnail according to the DLNA spec
.. warning:: Neither width nor height must exceed 160px
'''
contentType = 'image/jpeg'
name = 'thumb'
def start(self, request=None):
self.info(f'start {request}')
'''
# what we actually want here is a pipeline that calls
# us when it knows about the size of the original image,
# and allows us now to adjust the caps-filter with the
# calculated values for width and height
new_width = 160
new_height = 160
if original_width > 160:
            new_height = \
int(float(original_height) * (160.0/float(original_width)))
if new_height > 160:
new_width = \
int(float(new_width) * (160.0/float(new_height)))
elif original_height > 160:
new_width = \
int(float(original_width) * (160.0/float(original_height)))
'''
        try:
            request_type = request.args['type'][0]
        except (KeyError, IndexError):
            request_type = 'jpeg'
        if request_type == 'png':
self.pipeline = Gst.parse_launch(
f'{self.uri} ! decodebin2 ! videoscale '
+ f'! video/x-raw-yuv,width=160,height=160 ! pngenc name=enc'
)
self.contentType = 'image/png'
else:
self.pipeline = Gst.parse_launch(
f'{self.uri} ! decodebin2 ! videoscale '
+ f'! video/x-raw-yuv,width=160,height=160 ! jpegenc name=enc'
)
self.contentType = 'image/jpeg'
enc = self.pipeline.get_by_name('enc')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class GStreamerTranscoder(BaseTranscoder):
'''
A generic Transcoder based on GStreamer.
'''
pipeline_description = None
'''
The pipeline which will be parsed upon calling the start method,
has to be set as the attribute :attr:`pipeline_description` to
the instantiated class.
'''
def start(self, request=None):
if self.pipeline_description is None:
raise NotImplementedError(
'Warning: operation cancelled. You must set a value for '
+ 'GStreamerTranscoder.pipeline_description'
)
self.info(f'start {request}')
self.pipeline = Gst.parse_launch(self.pipeline_description % self.uri)
enc = self.pipeline.get_by_name('mux')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
self.pipeline.set_state(Gst.State.PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class ExternalProcessProtocol(protocol.ProcessProtocol):
def __init__(self, caller):
self.caller = caller
def connectionMade(self):
print('pp connection made')
def outReceived(self, data):
# print(f'outReceived with {len(data):d} bytes!')
self.caller.write_data(data)
def errReceived(self, data):
# print(f'errReceived! with {len(data):d} bytes!')
print('pp (err):', data.strip())
def inConnectionLost(self):
# print('inConnectionLost! stdin is closed! (we probably did it)')
pass
def outConnectionLost(self):
# print('outConnectionLost! The child closed their stdout!')
pass
def errConnectionLost(self):
# print('errConnectionLost! The child closed their stderr.')
pass
def processEnded(self, status_object):
print(f'processEnded, status {status_object.value.exitCode:d}')
print('processEnded quitting')
self.caller.ended = True
self.caller.write_data('')
class ExternalProcessProducer(object):
logCategory = 'externalprocess'
def __init__(self, pipeline, request):
self.pipeline = pipeline
self.request = request
self.process = None
self.written = 0
self.data = ''
self.ended = False
request.registerProducer(self, 0)
def write_data(self, data):
if data:
# print(f'write {len(data):d} bytes of data')
self.written += len(data)
# this .write will spin the reactor, calling .doWrite and then
# .resumeProducing again, so be prepared for a re-entrant call
self.request.write(data)
if self.request and self.ended:
print('closing')
self.request.unregisterProducer()
self.request.finish()
self.request = None
def resumeProducing(self):
# print('resumeProducing', self.request)
if not self.request:
return
if self.process is None:
argv = self.pipeline.split()
executable = argv[0]
argv[0] = os.path.basename(argv[0])
from twisted.internet import reactor
self.process = reactor.spawnProcess(
ExternalProcessProtocol(self), executable, argv, {}
)
def pauseProducing(self):
pass
def stopProducing(self):
print('stopProducing', self.request)
self.request.unregisterProducer()
self.process.loseConnection()
self.request.finish()
self.request = None
class ExternalProcessPipeline(resource.Resource, log.LogAble):
logCategory = 'externalprocess'
addSlash = False
pipeline_description = None
contentType = None
def __init__(self, uri):
self.uri = uri
resource.Resource.__init__(self)
log.LogAble.__init__(self)
def getChildWithDefault(self, path, request):
return self
def render(self, request):
print('ExternalProcessPipeline render')
if self.pipeline_description is None:
raise NotImplementedError(
'Warning: operation cancelled. You must set a value for '
+ 'ExternalProcessPipeline.pipeline_description'
)
if self.contentType is not None:
request.setHeader(b'Content-Type', self.contentType)
ExternalProcessProducer(self.pipeline_description % self.uri, request)
return server.NOT_DONE_YET
def transcoder_class_wrapper(klass, content_type, pipeline):
def create_object(uri):
transcoder = klass(uri)
transcoder.contentType = content_type
transcoder.pipeline_description = pipeline
return transcoder
return create_object
class TranscoderManager(log.LogAble):
'''
Singleton class which holds information about all available transcoders.
They are put into a transcoders dict with their id as the key.
We collect all internal transcoders by searching for all subclasses of
InternalTranscoder, the class will be the value.
Transcoders defined in the config are parsed and stored as a dict in the
transcoders dict.
In the config, a transcoder description has to look like this:
*** preliminary, will be extended and
might even change without further notice ***
.. code-block:: xml
<transcoder>
<pipeline>%s ...</pipeline> <!-- we need a %s here to insert the
source uri (or can we have all the
times pipelines we can prepend with
a '%s !') and an element named mux
where we can attach our sink -->
<type>gstreamer</type> <!-- could be gstreamer or process -->
<name>mpegts</name>
<target>video/mpeg</target>
<fourth_field> <!-- value for the 4th field of the
protocolInfo phalanx, default is
'*' -->
</transcoder>
'''
logCategory = 'transcoder_manager'
_instance_ = None # Singleton
def __new__(cls, *args, **kwargs):
'''Creates the singleton.'''
if cls._instance_ is None:
obj = super(TranscoderManager, cls).__new__(cls)
if 'coherence' in kwargs:
obj.coherence = kwargs['coherence']
cls._instance_ = obj
return cls._instance_
def __init__(self, coherence=None):
'''
Initializes the class :class:`TranscoderManager`.
It should be called at least once with the main
:class:`~coherence.base.Coherence` class passed as an argument,
so we have access to the config.
'''
log.LogAble.__init__(self)
self.transcoders = {}
for transcoder in InternalTranscoder.__subclasses__():
self.transcoders[get_transcoder_name(transcoder)] = transcoder
if coherence is not None:
self.coherence = coherence
try:
transcoders_from_config = self.coherence.config['transcoder']
if isinstance(transcoders_from_config, dict):
transcoders_from_config = [transcoders_from_config]
except KeyError:
transcoders_from_config = []
for transcoder in transcoders_from_config:
# FIXME: is anyone checking if all keys are given ?
pipeline = transcoder['pipeline']
if '%s' not in pipeline:
self.warning(
"Can't create transcoder %r:"
+ " missing placehoder '%%s' in 'pipeline'",
transcoder,
)
continue
try:
transcoder_name = transcoder['name'] # .decode('ascii')
except UnicodeEncodeError:
self.warning(
"Can't create transcoder %r:"
+ " the 'name' contains non-ascii letters",
transcoder,
)
continue
transcoder_type = transcoder['type'].lower()
if transcoder_type == 'gstreamer':
wrapped = transcoder_class_wrapper(
GStreamerTranscoder,
transcoder['target'],
transcoder['pipeline'],
)
elif transcoder_type == 'process':
wrapped = transcoder_class_wrapper(
ExternalProcessPipeline,
transcoder['target'],
transcoder['pipeline'],
)
else:
self.warning(f'unknown transcoder type {transcoder_type}')
continue
self.transcoders[transcoder_name] = wrapped
# FIXME reduce that to info later
self.warning(f'available transcoders {self.transcoders}')
def select(self, name, uri, backend=None):
# FIXME:why do we specify the name when trying to get it?
if backend is not None:
''' try to find a transcoder provided by the backend
and return that here,
if there isn't one continue with the ones
provided by the config or the internal ones
'''
pass
transcoder = self.transcoders[name](uri)
return transcoder
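# Usage sketch (an assumption, not part of the original module; requires a
# configured Coherence instance):
#   manager = TranscoderManager(coherence)
#   resource = manager.select('mp3', 'file:///tmp/example.ogg')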
|
nilq/baby-python
|
python
|
import posix_ipc
import utils
params = utils.read_params()
try:
    posix_ipc.unlink_message_queue(params["MESSAGE_QUEUE_NAME"])
    s = "message queue %s removed" % params["MESSAGE_QUEUE_NAME"]
    print(s)
except posix_ipc.ExistentialError:
    print("queue doesn't need cleanup")
print("\nAll clean!")
|
nilq/baby-python
|
python
|
import keywords as kw
import pca_tsne as pt
import math
import re
import numpy as np
from collections import Counter
from sklearn.cluster import KMeans
def conventional_kmeans(data, tfidf, kmeans_size_keywords, k):
matrix = tfidf.fit_transform(data.setting_value)
fit = KMeans(n_clusters=k, random_state=20).fit(matrix)
means_clusters = fit.predict(matrix)
distances = fit.transform(matrix)
sse = 0
i = 0
for cluster in means_clusters:
sse = sse + distances[i][cluster]
i = i + 1
print("\nSSE = {}".format(sse))
ssd = 0
i = 0
for cluster in means_clusters:
ssd = ssd + math.pow(distances[i][cluster] - (sse/2702), 2)
i = i + 1
ssd = math.sqrt(ssd/2702)
print("\nSSD = {}".format(ssd))
sizes = np.bincount(means_clusters)
top_keywords = kw.get_top_keywords(matrix, means_clusters, tfidf.get_feature_names(), 10)
index = 1
for size in sizes:
regex = "{}(.*)".format(index)
cluster_keywords = re.search(regex, top_keywords).group(1)
print("Cluster {}".format(index))
print(cluster_keywords)
kmeans_size_keywords.append([size, cluster_keywords])
index += 1
print("\nClusters Size")
print(sizes)
pt.plot_tsne_pca(matrix, means_clusters)
return means_clusters
def iteractive_kmeans(data, tfidf, clusters_size_keywords, t):
sse = 0
dists_array = []
while (data.size > 0):
print("Applying TFIDF...\n")
matrix = tfidf.fit_transform(data.setting_value)
k = 1
found = False
        while not found:
print("Clustering with k = {}...".format(k))
fit = KMeans(n_clusters=k, random_state=20).fit(matrix)
means_clusters = fit.predict(matrix)
distances = fit.transform(matrix)
cluster_size = np.bincount(means_clusters)
print("Clusters sizes = {}".format(cluster_size))
min_sizes = sorted(i for i in cluster_size if i <= 50)
print("Min cluster sizes = {}\n".format(min_sizes))
if min_sizes:
rows_removal = []
counts = Counter(means_clusters)
print("Counter occurrences = {}\n".format(counts))
for min_size in min_sizes:
min_element = list(counts.keys())[list(counts.values()).index(min_size)]
print("Current min_element = {}".format(min_element))
del counts[min_element]
print("Removed min_element {} from counter {}\n".format(min_element, counts))
print("Removing smallest cluster elements = {} with occurrences = {}".format(min_element, min_size))
print("Getting element indexes...")
min_element_positions = [index for index, value in enumerate(means_clusters) if value == min_element]
rows_removal.extend(min_element_positions)
min_size_position = list(cluster_size).index(min_size) + 1
print("Getting cluster {} size and top keywords...".format(min_size_position))
top_keywords = kw.get_top_keywords(matrix, means_clusters, tfidf.get_feature_names(), 10)
regex = "{}(.*)".format(min_size_position)
min_cluster_keywords = re.search(regex, top_keywords).group(1)
print("Top keywords are {}\n".format(min_cluster_keywords))
clusters_size_keywords.append([min_size, min_cluster_keywords, data.iloc[min_element_positions]])
print("Being removed {} elements...".format(len(rows_removal)))
print("Old data size = {}".format(data.index))
data = data.drop(data.index[rows_removal]).reset_index(drop=True)
print("New data size = {}\n".format(data.index))
for position in rows_removal:
print("Adding sse of element {} of cluster {}".format(position, means_clusters[position]))
sse = sse + distances[position][means_clusters[position]]
dists_array.append(distances[position][means_clusters[position]])
print("Current sse = {}".format(sse))
found = True
else:
print("k = {} failed\n".format(k))
k = k + 2
ssd = 0
for dist in dists_array:
ssd = ssd + math.pow(dist - (sse/2702),2)
ssd = math.sqrt(ssd/2702)
print("Total ssd = {}".format(ssd))
|
nilq/baby-python
|
python
|
from yunionclient.common import base
class CdnDomain(base.ResourceBase):
pass
class CdnDomainManager(base.StandaloneManager):
resource_class = CdnDomain
keyword = 'cdn_domain'
keyword_plural = 'cdn_domains'
_columns = ["ID", "Name", "Status", "Cloudaccount_id", "External_id", "Cname", "Origins", "ServiceType", "Area"]
_admin_columns = []
|
nilq/baby-python
|
python
|
import simplejson
import urllib2
import feedparser
import logging
from datetime import timedelta
from django.http import Http404, HttpResponse
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.shortcuts import render_to_response
from django.core.cache import cache
from molly.utils.views import BaseView
from molly.utils.breadcrumbs import NullBreadcrumb
logger = logging.getLogger(__name__)
class IndexView(BaseView):
def get_metadata(self, request):
return {
'exclude_from_search': True}
breadcrumb = NullBreadcrumb
def initial_context(self, request):
return {
'blog_feed': self._cache(self._get_blog_feed, 'blog',
args=[getattr(self.conf,
'blog_rss_url')], timeout=300),
'blog_url': getattr(self.conf, 'blog_url', None),
'facebook_url': getattr(self.conf, 'facebook_url', None),
'twitter_username': getattr(self.conf, 'twitter_username', None),
'twitter_widget_id': getattr(self.conf, 'twitter_widget_id', None),
}
def handle_GET(self, request, context):
# Can't render fragment
if 'fragment' in self.FORMATS: del self.FORMATS['fragment']
return self.render(request, context, 'desktop/index',
expires=timedelta(days=1))
def _cache(self, f, key, args=None, kwargs=None, timeout=None):
key = '.'.join(['molly', self.conf.local_name, key])
value = cache.get(key)
if value is None:
value = f(*(args or ()), **(kwargs or {}))
cache.set(key, value, timeout)
return value
def _get_blog_feed(self, url):
if not url:
return None
try:
return feedparser.parse(url)
        except Exception:
logger.warn("Failed to fetch blog feed.", exc_info=True)
return None
|
nilq/baby-python
|
python
|
from rpy import r
import os.path
# find out where the temp directory is
tempdir = r.tempdir()
# write its name into a file
f = open('tempdir','w')
f.write(tempdir)
f.close()
# put something there..
r.postscript(os.path.join(tempdir,"foo.ps"))
r.plot(1,1)
r.dev_off()
|
nilq/baby-python
|
python
|
import os
from math import ceil
from fastapi import FastAPI, Form
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
from app.aws_s3 import S3
from app.mongo import MongoDB
API = FastAPI(
title='DocDB DS API',
version="1.0.0",
docs_url='/',
)
API.db = MongoDB()
API.s3 = S3()
API.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*'],
)
@API.get("/version")
async def version():
return API.version
@API.post("/search")
async def search(query: str, page_number: int = 0, results_per_page: int = 100):
start = page_number * results_per_page
stop = start + results_per_page
search_results = API.db.search(query)[start:stop]
count = API.db.count({"$text": {"$search": query}})
n_pages = ceil(count / results_per_page)
return {"Pages": n_pages, "Count": count, "Response": list(search_results)}
@API.get("/lookup/{file_id}")
async def lookup(file_id: str):
""" Returns everything for a single match
Example: https://ds.humanrightsfirstdocdb.dev/lookup/76737668329
{'Response': {'box_id': String,
'name': String,
'summary': String,
'path': String,
'url': String,
'tags': Array of Strings,
'text': String}}
"""
return {"Response": API.db.find_one({"box_id": file_id})}
@API.get("/thumbnail/{file_id}")
async def thumbnail(file_id: str):
""" Returns the jpg thumbnail for a single document.
Returns default image on error.
"""
file_name = f"{file_id}.jpg"
file_path = f"app/thumbnails/{file_name}"
if not os.path.exists(file_path):
API.s3.download("docdb-thumbnails", file_name, file_path)
if os.path.exists(file_path):
return FileResponse(file_path, media_type="image/jpg")
else:
return FileResponse("app/thumbnails/default.jpg", media_type="image/jpg")
@API.get("/raw_text/{file_id}")
async def raw_text(file_id: str):
file = API.db.find_one({"box_id": file_id})
file_name = file["name"].replace(".pdf", ".txt")
file_path = f"app/text-files/{file_name}"
with open(file_path, "w") as f:
f.write(file["text"])
return FileResponse(file_path, media_type="text/plain")
@API.post("/add_tag")
async def add_tag(file_id: str = Form(...), tag: str = Form(...)):
""" Adds a custom tag to a document """
API.db.push_list({"box_id": file_id}, "tags", tag)
return {'Result': 'Success', "file_id": file_id, "tag": tag}
@API.delete("/remove_tag")
async def remove_tag(file_id: str, tag: str):
""" Removes a tag from a document """
API.db.pull_list({"box_id": file_id}, "tags", tag)
return {'Result': 'Success', "file_id": file_id, "tag": tag}
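# Local run sketch (an assumption: the module path and the MongoDB/S3
# credentials expected by app.mongo and app.aws_s3 are configured):
#   uvicorn app.api:API --reload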
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
""" Analyse des efforts statiques dans un treillis
(assemblage de barres et de pivots)
Le problème est traité en 2D (dans un plan)
Pierre Haessig — Mars 2013
"""
from __future__ import division, print_function, unicode_literals
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
### 1) Building the truss ###
# Truss nodes
N_piv = 5
print('truss with {} pin joints (including 2 anchor joints)'.format(N_piv))
# Choice of the anchor points:
iP_A = 0 # first node of the truss
iP_B = 1 # second node
#iP_B = N_piv-1 # last node
# Dimensions of an elementary truss cell
dx = 1.
dy = 2.
h = 0. # amplitude of the height variations
def courbe_y(x, haut):
    '''"shaping" curve of the truss'''
    L = (N_piv-1)*dx # total length of the truss
    y = h*x*(L-x)/(L/2)**2 # parabola
    #y = h*np.sin(2*np.pi/L*x) # sine (1 period)
    #y = h*np.sqrt((L/2)**2 - (x-L/2)**2) # half-circle
    if haut: # upper chord of the truss
        return y + dy
    else: # lower chord
        return y
# end courbe_y()
# Computing the nodes:
iy = 0
ix = 0
# List of the truss nodes:
pivots = []
for i in range(N_piv):
    # Compute the coordinates:
    x = ix*dx
    y = courbe_y(x, iy==1)
    pivots.append((x,y))
    # Increment the counters:
    iy = (iy+1) % 2
    ix = ix + 1
# Conversion to integer positions
#pivots = [(int(x),int(y)) for x,y in pivots]
# Coordinates of the anchor points:
P_A = pivots[iP_A]
P_B = pivots[iP_B]
print('anchor points: {!s} and {!s}'.format(P_A, P_B))
# Building the bars:
barres = []
# Connections inside the truss
for i, Pi in enumerate(pivots[:-2]):
    # Each node connects to the next 2:
    barres.append((Pi,pivots[i+1]))
    barres.append((Pi,pivots[i+2]))
# Bar between the last two nodes:
barres.append((pivots[-2], pivots[-1]))
## Static indeterminacy issues:
# If a bar between P_A and P_B is detected, remove it (it makes the truss statically indeterminate)
barres = [(P1,P2) for (P1,P2) in barres
          if (P1,P2) != (P_A,P_B) and (P1,P2) != (P_B,P_A)]
if len(barres) > (N_piv-2)*2:
    print('There is one bar too many:')
    guess = 1
    ind = raw_input('bar to remove ({} by default) > '.format(guess))
    if ind.strip() == '':
        ind = int(guess)
    else:
        ind = int(ind)
    print('removed bar: {!s}'.format(barres.pop(ind)))
N_bar = len(barres)
print('truss with {} bars'.format(N_bar))
### Computing the direction vector of each bar:
barres_arr = np.array(barres) # shape is (2N, 2, 2)
# "AB = OB - OA":
barre_dir = barres_arr[:,1,:] - barres_arr[:,0,:]
# Bar lengths:
barre_l = np.sqrt((barre_dir**2).sum(axis = 1))
# normalization:
barre_dir = barre_dir/barre_l.reshape(-1,1)
### Incidence matrix:
Inc_mat = np.zeros((N_piv, len(barres)), dtype=int)
for j, bj in enumerate(barres):
    P1, P2 = bj
    # Note the sign convention:
    i1 = pivots.index(P1)
    Inc_mat[i1,j] = -1 # bar bj "leaves" P1
    i2 = pivots.index(P2)
    Inc_mat[i2,j] = +1 # bar bj "arrives at" P2
print('Incidence matrix:')
print(str(Inc_mat).replace('0','.'))
# Remove the rows corresponding to the anchor joints
piv_ind = range(N_piv)
piv_ind.remove(iP_A)
piv_ind.remove(iP_B)
# Reduced incidence matrix:
Inc_mat_red = Inc_mat[piv_ind, :]
### Building the system of equations to invert:
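# Static equilibrium of each free joint i reads:
#     sum_j Inc_mat[i,j] * t_j * u_j = F_ext_i
# where t_j is the (signed) tension in bar j and u_j its unit direction
# vector; this is exactly the linear system A.t = b solved below.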
# 1) Matrix A
Ax = Inc_mat_red*barre_dir[:,0]
Ay = Inc_mat_red*barre_dir[:,1]
# or equivalently: Ax = np.dot(Inc_mat_red,np.diag(barre_dir[:,0]))
A = np.vstack((Ax, Ay))
# Picture of the matrix:
# plt.imshow(A, interpolation='nearest')
# 2) Vector b: external force applied to each joint:
F_ext = np.zeros((N_piv, 2))
# Load on the last joint:
F_ext[-1] = (0., -1) # downward force
## Gravity on all the joints:
#F_ext[:,1] = -1/N_piv
## Load on the keystone:
#F_ext[N_piv//2] = (0,-1)
print('External force (Cartesian components) applied to each joint:')
print(F_ext.T)
# Stacking the x and y components:
b_ext = np.hstack((F_ext[piv_ind,0], F_ext[piv_ind,1]))
# 3) Solving: tension force in each bar
trac_barres = np.linalg.solve(A,b_ext)
print('Tension force in each bar:')
print(trac_barres.round(2))
trac_max = np.max(np.abs(trac_barres))
print(' -> max force (absolute value): {:.1f}'.format(trac_max))
# Forces on the anchor points (from the truss and from outside):
resul_A = -np.inner(Inc_mat[iP_A,:]*trac_barres, barre_dir.T) + F_ext[iP_A]
resul_B = -np.inner(Inc_mat[iP_B,:]*trac_barres, barre_dir.T) + F_ext[iP_B]
print('action of the truss on point A: {!s} ({:.2f})'.format(resul_A, np.linalg.norm(resul_A,2)))
print('action of the truss on point B: {!s} ({:.2f})'.format(resul_B, np.linalg.norm(resul_B,2)))
### Plotting the truss ########################################################
fig = plt.figure('truss forces', figsize=(12,5))
ax = fig.add_subplot(111, title='forces on the truss '
                                '({:d} joints, {:d} bars)'.format(N_piv, N_bar))
# Scale for drawing the forces
F_scale = 0.4*barre_l.mean()/trac_max
# Color of the forces:
F_color = (0,0.8,0) # green
# Colormap to color the bars according to the force they carry: red-blue
col_list = [(0.9,0,0.0), (0.7,0.7,0.7), (0,0,0.9)]
cm_rb = mpl.colors.LinearSegmentedColormap.from_list('red-blue', col_list)
cm = cm_rb
#cm = plt.cm.coolwarm_r
# TODO: try drawing the forces with FancyArrowPatch objects:
# a = mpl.patches.FancyArrowPatch((0,0), (1,1), arrowstyle='->, head_width=5,head_length=10')
# ax.add_patch(a)
# Drawing the bars and the forces
for j, bj in enumerate(barres):
    # Coordinates of the 2 end joints:
    (x1,y1), (x2, y2) = bj
    # direction:
    uj = barre_dir[j]
    # tension force in bar bj:
    trac = trac_barres[j]
    color = cm(trac/(2*trac_max)+0.5)
    plt.plot((x1, x2), (y1, y2), '-', color = color, lw=4, zorder=1)
    # Drawing the forces bar -> joint 1 and joint 2
    plt.arrow(x1, y1, +trac*uj[0]*F_scale, +trac*uj[1]*F_scale,
              zorder=2, head_width=0.05*dx, lw=0, width=0.02*dx, color=F_color)
    plt.arrow(x2, y2, -trac*uj[0]*F_scale, -trac*uj[1]*F_scale,
              zorder=2, head_width=0.05*dx, lw=0, width=0.02*dx, color=F_color)
# end for each bar
# Color of the joints
piv_color = (1.,1.,1.) # white
piv_color_AB = (1.,1.,0.5) # light yellow
piv_alpha = 1 # opaque
# Drawing the joints
for i, Pi in enumerate(pivots):
    # Draw the joint:
    marker = 'D' if Pi in (P_A,P_B) else 'o' # Diamond marker 'D' or disc 'o'
    color = piv_color_AB if Pi in (P_A,P_B) else piv_color
    plt.plot(Pi[0], Pi[1], marker, ms=8, c=color, alpha = piv_alpha, zorder=3)
    # External force applied to the joint
    Fi = F_ext[i]
    if Fi.any():
        plt.arrow(Pi[0], Pi[1], Fi[0]*F_scale, Fi[1]*F_scale,
                  zorder=2, head_width=0.05*dx, lw=0, width=0.02*dx, color=(1,0,0))
    if Pi in (P_A,P_B):
        F_soutien = -resul_A if Pi==P_A else -resul_B
        plt.arrow(Pi[0], Pi[1], F_soutien[0]*F_scale, F_soutien[1]*F_scale,
                  zorder=2, head_width=0.05*dx, lw=0, width=0.02*dx, color=(1,1,0))
# end for each joint
# Plot limits
plt.xlim(min([x for (x,y) in pivots]) - dx*1,
         max([x for (x,y) in pivots]) + dx*1)
plt.ylim(min([y for (x,y) in pivots]) - dy*.3,
         max([y for (x,y) in pivots]) + dy*.3)
# Background color:
ax.patch.set_fc((0.9,)*3)
ax.set_aspect('equal')
plt.grid(False)
fig.tight_layout()
plt.show()
|
nilq/baby-python
|
python
|
import logging
from datetime import datetime
from enum import Enum
import ntplib
from scapy.layers.ntp import NTPHeader
from ntp_raw import RawNTP
_PCK_1_YEAR = 1995
_PCK_2_YEAR = 2000
class CP3Mode(Enum):
"""
In CP3 an NTP package can be marked as a type 1, or 2 package or as nothing. This class represents the different
types.
"""
    NONE = 0
    PCK_1 = 1
    PCK_2 = 2
@staticmethod
def from_year(year: int):
if year == _PCK_1_YEAR:
return CP3Mode.PCK_1
elif year == _PCK_2_YEAR:
return CP3Mode.PCK_2
else:
return CP3Mode.NONE
class CP3Package(RawNTP):
def __init__(self, ntp_pck: NTPHeader = NTPHeader(), log: logging.Logger = logging.getLogger('CP3Package-Logger')):
"""
A child of RawNTP which adds functionality in order to extract and insert CP3 specific data into
and from a NTPRaw package.
"""
super().__init__(ntp_pck)
self.log: logging.Logger = log
def _extract_transmit_year(self) -> int:
year = datetime.fromtimestamp(ntplib.ntp_to_system_time(self.ntp().sent)).year
return year
def get_cp3_mode(self) -> CP3Mode:
transmit_year = self._extract_transmit_year()
if transmit_year is None or transmit_year == 0:
return CP3Mode.NONE
return CP3Mode.from_year(transmit_year)
def extract_payload(self) -> str:
return self.origin_timestamp()[0:32] + self.receive_timestamp()[0:32]
    def add_payload(self, payload_bits):
        self.set_origin_timestamp(payload_bits[0:32] + self.origin_timestamp()[32:64])
        self.set_receive_timestamp(payload_bits[32:64] + self.receive_timestamp()[32:64])
def set_cp3_mode_1(self):
self._set_cp3_mode(_PCK_1_YEAR)
def set_cp3_mode_2(self):
self._set_cp3_mode(_PCK_2_YEAR)
    def _set_cp3_mode(self, year: int):
ntp = self.ntp()
time = ntplib.system_to_ntp_time(datetime.fromtimestamp(ntplib.ntp_to_system_time(ntp.sent))
.replace(year=year).timestamp())
ntp.sent = time
raw = RawNTP(ntp)
self.set_transmit_timestamp(raw.transmit_timestamp())
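# Usage sketch (an assumption, not part of the original module):
#   pck = CP3Package(NTPHeader())
#   pck.set_cp3_mode_1()   # stamp the transmit year as 1995 (type-1 marker)
#   payload = pck.extract_payload()   # 64 covert bits from origin+receive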
|
nilq/baby-python
|
python
|
from helusers.oidc import ApiTokenAuthentication as HelApiTokenAuth
from django.conf import settings
class ApiTokenAuthentication(HelApiTokenAuth):
def __init__(self, *args, **kwargs):
super(ApiTokenAuthentication, self).__init__(*args, **kwargs)
def authenticate(self, request):
jwt_value = self.get_jwt_value(request)
if jwt_value is None:
return None
payload = self.decode_jwt(jwt_value)
user, auth = super(ApiTokenAuthentication, self).authenticate(request)
# amr (Authentication Methods References) should contain the used auth
# provider name e.g. suomifi
if payload.get('amr') in settings.STRONG_AUTH_PROVIDERS:
user.has_strong_auth = True
else:
user.has_strong_auth = False
user.save()
return (user, auth)
|
nilq/baby-python
|
python
|
from inquire_sql_backend.semantics.embeddings.vector_models import VECTOR_EMBEDDERS
def vector_embed_sentence(sent, tokenized=False, model="default"):
embed_func = VECTOR_EMBEDDERS[model]
return embed_func(sent, batch=False, tokenized=tokenized)
def vector_embed_sentence_batch(sent, tokenized=False, model="default"):
embed_func = VECTOR_EMBEDDERS[model]
return embed_func(sent, batch=True, tokenized=tokenized)
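# Illustrative usage sketch (hypothetical, not from the original source): embed a
# single sentence and a batch with the default model. Assumes VECTOR_EMBEDDERS
# contains a "default" entry and that the batch variant accepts a list of sentences.
def _demo_embed():
    vec = vector_embed_sentence("how do I reset my password")
    vecs = vector_embed_sentence_batch(["first sentence", "second sentence"])
    return vec, vecs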
|
nilq/baby-python
|
python
|
import torch.nn as nn
import torch
from .conv2d_repeat import Conv2dRepeat
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.activ = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.activ(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.activ(out)
return out
class ResNetCifar(nn.Module):
def __init__(self, block, layers, width=1, num_classes=1000, args=None):
super(ResNetCifar, self).__init__()
self.inplanes = 16
self.args = args
self.width = width
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.activ = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16 * width, layers[0], stride=1)
self.layer2 = self._make_layer(block, 32 * width, layers[1], stride=2)
self.layer3 = self._make_layer(block, 64 * width, layers[2], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
self.fc = nn.Linear(64 * width, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0.01)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
conv_module = nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False)
bn_module = nn.BatchNorm2d(planes * block.expansion)
downsample = nn.Sequential(conv_module, bn_module)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.activ(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class ResNet(nn.Module):
def __init__(self, block, layers, width=1, num_classes=1000, args=None):
super(ResNet, self).__init__()
self.inplanes = 64
self.args = args
self.width = width
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.activ = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64*self.width, layers[0])
self.layer2 = self._make_layer(block, 128*self.width, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256*self.width, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512*self.width, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
self.fc = nn.Linear(512 * block.expansion * self.width, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0.01)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
conv_module = nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False)
bn_module = nn.BatchNorm2d(planes * block.expansion)
downsample = nn.Sequential(conv_module, bn_module)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.activ(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet(model_name, num_classes=1000, args=None):
k = int(model_name.split('_')[-1])
model = ResNet(BasicBlock, [2, 2, 2, 2], width=k, num_classes=num_classes, args=args)
return model
def resnetcifar(model_name, num_classes=10, args=None):
k = int(model_name.split('_')[-1])
model = ResNetCifar(BasicBlock, [2, 2, 2], width=k, num_classes=num_classes, args=args)
return model
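# Illustrative sketch (not part of the original file): build the width-2 CIFAR
# variant via the "<name>_<width>" convention parsed above and run a dummy
# forward pass.
def _demo_resnetcifar():
    net = resnetcifar('resnetcifar_2', num_classes=10)
    dummy = torch.randn(1, 3, 32, 32)   # CIFAR-sized input
    return net(dummy).shape             # expected: torch.Size([1, 10])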
|
nilq/baby-python
|
python
|
from flask import Blueprint
socket_client = Blueprint('socket_client', __name__)
from . import events
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from puzzle.models import Offer
class OfferSerializer(serializers.ModelSerializer):
author_name = serializers.CharField(
source="author.user.username", read_only=True)
class Meta:
model = Offer
fields = ["id", "author_name", "created",
"updated", "trade", "note"]
def create(self, validated_data):
trade_instance = Offer.objects.create(**validated_data)
return trade_instance
|
nilq/baby-python
|
python
|
"""Collection of PBM-based click simulators."""
from typing import Optional
from typing import Tuple
import torch as _torch
from pytorchltr.utils import mask_padded_values as _mask_padded_values
_SIM_RETURN_TYPE = Tuple[_torch.LongTensor, _torch.FloatTensor]
def simulate_pbm(rankings: _torch.LongTensor, ys: _torch.LongTensor,
n: _torch.LongTensor, relevance_probs: _torch.FloatTensor,
cutoff: Optional[int] = None,
eta: float = 1.0) -> _SIM_RETURN_TYPE:
"""Simulates clicks according to a position-biased user model.
Args:
rankings: A tensor of size (batch_size, list_size) of rankings.
ys: A tensor of size (batch_size, list_size) of relevance labels.
n: A tensor of size (batch_size) indicating the nr docs per query.
        relevance_probs: A tensor of size (max_relevance + 1) where the entry at
index "i" indicates the probability of clicking a document with
relevance label "i" (given that it is observed).
cutoff: The maximum list size to simulate.
eta: The severity of position bias (0.0 = no bias)
Returns:
A tuple of two tensors of size (batch_size, list_size), where the first
indicates the clicks with 0.0 and 1.0 and the second indicates the
propensity of observing each document.
"""
# Cutoff at n for observation probabilities.
if cutoff is not None:
n = _torch.min(_torch.ones_like(n) * cutoff, n)
# Compute position-biased observation probabilities.
ranks = 1.0 + _torch.arange(
rankings.shape[1], device=rankings.device, dtype=_torch.float)
obs_probs = 1.0 / (1.0 + ranks) ** eta
obs_probs = _torch.repeat_interleave(
obs_probs[None, :], rankings.shape[0], dim=0)
obs_probs = _mask_padded_values(obs_probs, n, mask_value=0.0, mutate=True)
# Compute relevance labels at every rank.
ranked_ys = _torch.gather(ys, 1, rankings)
# Compute click probabilities (given observed).
relevance_probs = _torch.repeat_interleave(
relevance_probs[None, :], rankings.shape[0], dim=0)
click_probs = _torch.gather(relevance_probs, 1, ranked_ys)
# Sample clicks from bernoulli distribution with probabilities.
clicks = _torch.bernoulli(click_probs * obs_probs)
# Invert back to regular ranking.
invert_ranking = _torch.argsort(rankings, dim=1)
# Return click realization and propensities.
return (
_torch.gather(clicks, 1, invert_ranking).to(dtype=_torch.long),
_torch.gather(obs_probs, 1, invert_ranking)
)
def simulate_perfect(rankings: _torch.LongTensor, ys: _torch.LongTensor,
                     n: _torch.LongTensor,
                     cutoff: Optional[int] = None) -> _SIM_RETURN_TYPE:
"""Simulates clicks according to a perfect user model.
Args:
rankings: A tensor of size (batch_size, list_size) of rankings.
ys: A tensor of size (batch_size, list_size) of relevance labels.
n: A tensor of size (batch_size) indicating the nr docs per query.
cutoff: The maximum list size to simulate.
Returns:
A tuple of two tensors of size (batch_size, list_size), where the first
indicates the clicks with 0.0 and 1.0 and the second indicates the
propensity of observing each document.
"""
    rel_probs = _torch.tensor(
        [0.0, 0.2, 0.4, 0.8, 1.0], dtype=_torch.float, device=rankings.device)
return simulate_pbm(rankings, ys, n, rel_probs, cutoff, 0.0)
def simulate_position(rankings: _torch.LongTensor, ys: _torch.LongTensor,
n: _torch.LongTensor, cutoff: Optional[int] = None,
eta: float = 1.0) -> _SIM_RETURN_TYPE:
"""Simulates clicks according to a binary position-biased user model.
Args:
rankings: A tensor of size (batch_size, list_size) of rankings.
ys: A tensor of size (batch_size, list_size) of relevance labels.
n: A tensor of size (batch_size) indicating the nr docs per query.
cutoff: The maximum list size to simulate.
eta: The severity of position bias (0.0 = no bias)
Returns:
A tuple of two tensors of size (batch_size, list_size), where the first
indicates the clicks with 0.0 and 1.0 and the second indicates the
propensity of observing each document.
"""
    rel_probs = _torch.tensor(
        [0.1, 0.1, 0.1, 1.0, 1.0], dtype=_torch.float, device=rankings.device)
return simulate_pbm(rankings, ys, n, rel_probs, cutoff, eta)
def simulate_nearrandom(rankings: _torch.LongTensor, ys: _torch.LongTensor,
n: _torch.LongTensor, cutoff: Optional[int] = None,
eta: float = 1.0) -> _SIM_RETURN_TYPE:
"""Simulates clicks according to a near-random user model.
Args:
rankings: A tensor of size (batch_size, list_size) of rankings.
ys: A tensor of size (batch_size, list_size) of relevance labels.
n: A tensor of size (batch_size) indicating the nr docs per query.
cutoff: The maximum list size to simulate.
eta: The severity of position bias (0.0 = no bias)
Returns:
A tuple of two tensors of size (batch_size, list_size), where the first
indicates the clicks with 0.0 and 1.0 and the second indicates the
propensity of observing each document.
"""
    rel_probs = _torch.tensor(
        [0.4, 0.45, 0.5, 0.55, 0.6], dtype=_torch.float, device=rankings.device)
return simulate_pbm(rankings, ys, n, rel_probs, cutoff, eta)
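# Illustrative sketch (not part of the original module): simulate clicks for a
# single query with 4 documents under the perfect user model.
def _demo_clicks():
    rankings = _torch.tensor([[2, 0, 1, 3]])   # order in which docs are shown
    ys = _torch.tensor([[0, 4, 1, 2]])         # relevance labels in [0, 4]
    n = _torch.tensor([4])                     # number of docs for this query
    clicks, propensities = simulate_perfect(rankings, ys, n)
    return clicks, propensities                # both of shape (1, 4)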
|
nilq/baby-python
|
python
|
import ila
import riscv_um
def genVlg():
rm = riscv_um.riscvModel()
rm.loadUnprivNxtFromDir('unpriv_asts')
rm.model.generateVerilog('RISC-V-VLG.v')
if __name__ == '__main__':
genVlg()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db import db
from nailgun.db.sqlalchemy.models import NovaNetworkConfig
from nailgun.network.manager import AllocateVIPs70Mixin
from nailgun.network.manager import AssignIPs61Mixin
from nailgun.network.manager import AssignIPs70Mixin
from nailgun.network.manager import AssignIPsLegacyMixin
from nailgun.network.manager import NetworkManager
class NovaNetworkManager(NetworkManager):
@classmethod
def create_nova_network_config(cls, cluster):
nova_net_config = NovaNetworkConfig(cluster_id=cluster.id)
meta = cluster.release.networks_metadata["nova_network"]["config"]
for key, value in meta.iteritems():
if hasattr(nova_net_config, key):
setattr(nova_net_config, key, value)
db().add(nova_net_config)
db().flush()
return nova_net_config
@classmethod
def generate_vlan_ids_list(cls, data, cluster, ng):
if ng["name"] == "fixed":
netw_params = data.get("networking_parameters", {})
start = netw_params.get("fixed_networks_vlan_start")
amount = netw_params.get("fixed_networks_amount")
if start and amount:
return range(int(start), int(start) + int(amount))
if ng.get("vlan_start") is None:
return []
return [int(ng.get("vlan_start"))]
class NovaNetworkManagerLegacy(AssignIPsLegacyMixin, NovaNetworkManager):
pass
class NovaNetworkManager61(AssignIPs61Mixin, NovaNetworkManager):
pass
class NovaNetworkManager70(
AllocateVIPs70Mixin, AssignIPs70Mixin, NovaNetworkManager
):
@classmethod
def build_role_to_network_group_mapping(cls, *_):
"""Not needed due to always using default net role to network mapping
:return: Empty network role to network map
:rtype: dict
"""
return {}
@classmethod
def get_network_group_for_role(cls, network_role, _):
"""Returns network group to which network role is associated
The default network group from the network role description is
returned.
:param network_role: Network role dict
:type network_role: dict
:return: Network group name
:rtype: str
"""
return network_role['default_mapping']
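# Illustrative sketch (hypothetical data, not from the original source): how
# generate_vlan_ids_list expands the fixed-network VLAN range.
def _demo_vlan_ids():
    data = {"networking_parameters": {"fixed_networks_vlan_start": 103,
                                      "fixed_networks_amount": 3}}
    # cluster is unused for the "fixed" network group
    return NovaNetworkManager.generate_vlan_ids_list(
        data, None, {"name": "fixed"})  # -> [103, 104, 105]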
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#import libraries
import string
import ast
from itertools import islice
import csv
from nltk.tokenize import RegexpTokenizer
def createTsvFile_Search1(listUrl_Movies3):
    #create a tsv file in the 'tsv_correct' directory where we store the preprocessed tsv files (just created in parser.py)
tokenizer = RegexpTokenizer(r'\w+')
name="article_"
extension2=".tsv"
exclude = string.punctuation
    totalMovies = len(listUrl_Movies3)  # assumes one url entry per movie
    for index in range(totalMovies):
print(index)
file="{}{}{}".format(name,index,extension2)
with open("HW3 ADM/tsv/"+file,"r") as tsvfile, open("HW3 ADM/tsv_correct/"+file,"w") as outfile:
tsvreader = csv.reader(tsvfile, delimiter="\t")
tsvwriter = csv.writer(outfile, delimiter="\t")
for row in tsvreader:
for i in range(len(row)):
                    #take every word, deleting punctuation and other symbols
row[i] = tokenizer.tokenize(row[i])
#remove duplicate case-insensitive elements
row[i]= list(set(map(str.lower, row[i])))
#row[i] = row[i].translate({ord(c): None for c in string.punctuation})
tsvwriter.writerow(row)
#create tsv files in the 'tsv_correct2' directory where we store the preprocessed tsv files (just created in parser.py).
#The difference from the tsv files in tsv_correct is that these files keep duplicate words in the texts, which matters for the second search engine
def createTsvFile_Search2(listUrl_Movies3):
tokenizer = RegexpTokenizer(r'\w+')
name="article_"
extension2=".tsv"
exclude = string.punctuation
    totalMovies = len(listUrl_Movies3)  # assumes one url entry per movie
    for index in range(totalMovies):
print(index)
file="{}{}{}".format(name,index,extension2)
with open("HW3 ADM/tsv/"+file,"r") as tsvfile, open("HW3 ADM/tsv_correct2/"+file,"w") as outfile:
tsvreader = csv.reader(tsvfile, delimiter="\t")
tsvwriter = csv.writer(outfile, delimiter="\t")
for row in tsvreader:
for i in range(len(row)):
                    #take every word, deleting punctuation and other symbols
row[i] = tokenizer.tokenize(row[i])
                    #lowercase every element (duplicates are kept for the second search engine)
row[i]= list(map(str.lower, row[i]))
#row[i] = row[i].translate({ord(c): None for c in string.punctuation})
tsvwriter.writerow(row)
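# Illustrative sketch (not part of the original file): the difference between
# the two preprocessing modes on a single cell.
def _demo_preprocess(cell="The movie, the MOVIE!"):
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = tokenizer.tokenize(cell)
    search1 = list(set(map(str.lower, tokens)))  # deduplicated, e.g. ['the', 'movie']
    search2 = list(map(str.lower, tokens))       # duplicates kept: ['the', 'movie', 'the', 'movie']
    return search1, search2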
|
nilq/baby-python
|
python
|
from django import template
from classytags.core import Options
from classytags.helpers import AsTag
from classytags.arguments import Argument
from ..models import CallToActionRepository
class GetCallToAction(AsTag):
name = 'get_call_to_action'
options = Options(
Argument('code', required=True),
'as',
Argument('varname', required=False, resolve=False)
)
def get_value(self, context, code):
try:
return CallToActionRepository.objects.get(code=code).rendered()
except CallToActionRepository.DoesNotExist:
return ''
register = template.Library()
register.tag(GetCallToAction)
|
nilq/baby-python
|
python
|
import json
def get_list():
with open("config.json", "r") as f_obj:
f_json = json.load(f_obj)
return f_json
def get_lang(item):
return get_list()[item]
def set_lang(item, lang):
with open("list_lang.json", "r") as f_obj:
list_lang = json.load(f_obj)
if not (lang in list_lang):
return False
f_json = get_list()
f_json[item] = lang
with open("config.json", "w") as f_obj:
json.dump(f_json, f_obj)
return True
if __name__ == "__main__":
print(get_list())
print(get_lang("My_Language"))
print(get_lang("Obj_Language"))
print(set_lang("My_Language", "zh-CN"))
|
nilq/baby-python
|
python
|
import os
import os.path as osp
import copy
import yaml
import numpy as np
from ast import literal_eval
from utils.collections import AttrDict
__C = AttrDict()
cfg = __C
# ---------------------------------------------------------------------------- #
# MISC options
# ---------------------------------------------------------------------------- #
# Device for training or testing
# E.g., 'cuda' for using GPU, 'cpu' for using CPU
__C.DEVICE = 'cuda'
# Number of GPUs to use (applies to both training and testing)
__C.NUM_GPUS = 1
# Pixel mean values (BGR order) as a list
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# Pixel std values (BGR order) as a list
__C.PIXEL_STDS = np.array([[[1.0, 1.0, 1.0]]])
# Directory for saving checkpoints and loggers
__C.CKPT = 'ckpts/mscoco_humanparts/e2e_hier_rcnn_R-50-FPN_1x/'
# Display the log per iteration
__C.DISPLAY_ITER = 20
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# A very small number that's used many times
__C.EPS = 1e-14
# Convert image to BGR format (for Caffe2 models), in range 0-255
__C.TO_BGR255 = True
# ---------------------------------------------------------------------------- #
# Model options
# ---------------------------------------------------------------------------- #
__C.MODEL = AttrDict()
# The type of model to use
# The string must match a function in the modeling.model_builder module
# (e.g., 'generalized_rcnn', 'retinanet', ...)
__C.MODEL.TYPE = 'generalized_rcnn'
# FPN is enabled if True
__C.MODEL.FPN_ON = False
# Indicates the model makes semantic segmentation predictions (as in Semantic Segmentation)
__C.MODEL.SEMSEG_ON = False
# RPN is enabled if True
# Default is True; RPN_ON = False means only the backbone is trained
__C.MODEL.RPN_ON = True
# The meaning of FASTER_RCNN depends on the context (training vs. inference):
# 1) During training, FASTER_ON = True means that end-to-end training will be
# used to jointly train the RPN subnetwork and the Fast R-CNN subnetwork
# (Faster R-CNN = RPN + Fast R-CNN).
# 2) During inference, FASTER_ON = True means that the model's RPN subnetwork
# will be used to generate proposals rather than relying on precomputed
# proposals. Note that FASTER_ON = True can be used at inference time even
# if the Faster R-CNN model was trained with stagewise training (which
# consists of alternating between RPN and Fast R-CNN training in a way that
# finally leads to a single network).
__C.MODEL.FASTER_ON = False
# Indicates the model uses Cascade R-CNN
__C.MODEL.CASCADE_ON = False
# Indicates the model makes instance mask predictions (as in Mask R-CNN)
__C.MODEL.MASK_ON = False
# Indicates the model makes part bbox predictions (as in Hier R-CNN)
__C.MODEL.HIER_ON = False # TODO
# Type of batch normalization, default: 'freeze'
# E.g., 'normal', 'freeze', 'sync', ...
__C.MODEL.BATCH_NORM = 'freeze'
# Number of classes in the dataset; must be set
# E.g., 81 for COCO (80 foreground + 1 background)
__C.MODEL.NUM_CLASSES = -1
# Swap the model's conv1 weights. For pet/rcnn we use a BGR input channel (cv2); for pet/cls we use RGB;
# caffe/caffe2 models use a BGR channel. Thus set 'True' when using pet pretrained weights, and 'False'
# when using caffe or caffe2 weights.
__C.MODEL.CONV1_RGB2BGR = True
# ---------------------------------------------------------------------------- #
# Solver options
# Note: all solver options are used exactly as specified; the implication is
# that if you switch from training on 1 GPU to N GPUs, you MUST adjust the
# solver configuration accordingly. We suggest using gradual warmup and the
# linear learning rate scaling rule as described in
# "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour" Goyal et al.
# https://arxiv.org/abs/1706.02677
# ---------------------------------------------------------------------------- #
__C.SOLVER = AttrDict()
# Type of the optimizer
# E.g., 'SGD', 'RMSPROP', 'ADAM' ...
__C.SOLVER.OPTIMIZER = 'SGD'
# Base learning rate for the specified schedule
__C.SOLVER.BASE_LR = 0.001
# Maximum number of iterations
__C.SOLVER.MAX_ITER = 90000
# Momentum to use with SGD
__C.SOLVER.MOMENTUM = 0.9
# L2 regularization hyperparameter
__C.SOLVER.WEIGHT_DECAY = 0.0005
# L2 regularization hyperparameter for GroupNorm's parameters
__C.SOLVER.WEIGHT_DECAY_GN = 0.0
# Whether to double the learning rate for bias
__C.SOLVER.BIAS_DOUBLE_LR = True
# Whether to have weight decay on bias as well
__C.SOLVER.BIAS_WEIGHT_DECAY = False
# Multiple learning rate for fine-tuning
# Learning rate of randomly initialized layers is LR_MULTIPLE * BASE_LR
__C.SOLVER.LR_MULTIPLE = 1.0 # TODO
# Warm up to SOLVER.BASE_LR over this number of SGD iterations
__C.SOLVER.WARM_UP_ITERS = 500
# Start the warm up from SOLVER.BASE_LR * SOLVER.WARM_UP_FACTOR
__C.SOLVER.WARM_UP_FACTOR = 1.0 / 10.0
# WARM_UP_METHOD can be either 'CONSTANT' or 'LINEAR' (i.e., gradual)
__C.SOLVER.WARM_UP_METHOD = 'LINEAR'
# Schedule type (see functions in utils.lr_policy for options)
# E.g., 'POLY', 'STEP', 'COSINE', ...
__C.SOLVER.LR_POLICY = 'STEP'
# For 'POLY', the power in poly to drop LR
__C.SOLVER.LR_POW = 0.9
# For 'STEP', Non-uniform step iterations
__C.SOLVER.STEPS = [60000, 80000]
# For 'STEP', the current LR is multiplied by SOLVER.GAMMA at each step
__C.SOLVER.GAMMA = 0.1
# Suppress logging of changes to LR unless the relative change exceeds this
# threshold (prevents linear warm up from spamming the training log)
__C.SOLVER.LOG_LR_CHANGE_THRESHOLD = 1.1
# Snapshot (model checkpoint) period
__C.SOLVER.SNAPSHOT_ITERS = 10000
# -----------------------------------------------------------------------------
# DataLoader options
# -----------------------------------------------------------------------------
__C.DATALOADER = AttrDict()
# Type of training sampler, default: 'DistributedSampler'
# E.g., 'DistributedSampler', 'RepeatFactorTrainingSampler', ...
__C.DATALOADER.SAMPLER_TRAIN = "DistributedSampler"
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
__C.DATALOADER.ASPECT_RATIO_GROUPING = True
# if True, the dataloader will filter out images that have no associated
# annotations at train time.
__C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True # TODO
# ---------------------------------------------------------------------------- #
# RepeatFactorTrainingSampler options
# ---------------------------------------------------------------------------- #
__C.DATALOADER.RFTSAMPLER = AttrDict()
# parameters for RepeatFactorTrainingSampler
# rep_times = max(MIN_REPEAT_TIMES, min(MAX_REPEAT_TIMES, math.pow((REPEAT_THRESHOLD / cat_freq),POW)))
__C.DATALOADER.RFTSAMPLER.REPEAT_THRESHOLD = 0.001
__C.DATALOADER.RFTSAMPLER.POW = 0.5
__C.DATALOADER.RFTSAMPLER.MAX_REPEAT_TIMES = 10000.0
__C.DATALOADER.RFTSAMPLER.MIN_REPEAT_TIMES = 1.0
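# Illustrative sketch (not used by the framework): the repeat factor formula
# above, evaluated for a category that appears in 0.01% of images.
def _example_repeat_factor(cat_freq=0.0001):
    import math
    s = __C.DATALOADER.RFTSAMPLER
    return max(s.MIN_REPEAT_TIMES,
               min(s.MAX_REPEAT_TIMES,
                   math.pow(s.REPEAT_THRESHOLD / cat_freq, s.POW)))  # sqrt(10) ~= 3.16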
# ---------------------------------------------------------------------------- #
# Training options
# ---------------------------------------------------------------------------- #
__C.TRAIN = AttrDict()
# Initialize network with weights from this .pkl file
__C.TRAIN.WEIGHTS = ''
# Type of training data augmentation, default: 'none'
# E.g., 'none', 'random_crop', ...
__C.TRAIN.PREPROCESS_TYPE = 'none'
# Datasets to train on
# Available dataset list: datasets.dataset_catalog.DATASETS.keys()
# If multiple datasets are listed, the model is trained on their union
__C.TRAIN.DATASETS = ()
# Scales to use during training
# Each scale is the pixel size of an image's shortest side
# If multiple scales are listed, then one is selected uniformly at random for
# each training image (i.e., scale jitter data augmentation)
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Number of Python threads to use for the data loader during training
__C.TRAIN.LOADER_THREADS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
__C.TRAIN.SIZE_DIVISIBILITY = 32
# Mini-batch size for training
# This is global, so if we have 8 GPUs and BATCH_SIZE = 16, each GPU will
# see 2 images per batch
__C.TRAIN.BATCH_SIZE = 16
# Freeze the backbone architecture during training if set to True
__C.TRAIN.FREEZE_CONV_BODY = False
# Training will resume from the latest snapshot (model checkpoint) found in the
# output directory
__C.TRAIN.AUTO_RESUME = True
# Image ColorJitter Augmentation
__C.TRAIN.BRIGHTNESS = 0.0
__C.TRAIN.CONTRAST = 0.0
__C.TRAIN.SATURATION = 0.0
__C.TRAIN.HUE = 0.0
# Left right mapping for flipping training
__C.TRAIN.LEFT_RIGHT = ()
# ---------------------------------------------------------------------------- #
# Random Crop options
# ---------------------------------------------------------------------------- #
__C.TRAIN.RANDOM_CROP = AttrDict()
# The image will be resized to min_size * ratio.
# If only one number is set, real_ratio = 1; otherwise real_ratio is chosen randomly from SCALE_RATIOS.
__C.TRAIN.RANDOM_CROP.SCALE_RATIOS = (0.8, 1.2)
# PAD_PIXEL fills the gap when the cropped picture is smaller than the crop scale.
# If len < 3, the real pad_pixel falls back to PIXEL_MEANS, rounded to int.
__C.TRAIN.RANDOM_CROP.PAD_PIXEL = ()
# the scale of random crop, if img_size < scale, padding the gap use PAD_PIXEL.
# shape: [H, W], must be divided by SIZE_DIVISIBILITY, default: ([640, 640], )
__C.TRAIN.RANDOM_CROP.CROP_SCALES = ([640, 640], )
# IOU_TH for crop object.
__C.TRAIN.RANDOM_CROP.IOU_THS = (0.9, 0.7, 0.5, 0.3, 0.1)
# Type of instance box for random crop, default: 'horizontal'
# E.g., "horizontal","oriented"
__C.TRAIN.RANDOM_CROP.BOX_TYPE = "horizontal"
# ---------------------------------------------------------------------------- #
# Inference ('test') options
# ---------------------------------------------------------------------------- #
__C.TEST = AttrDict()
# Initialize network with weights from this .pkl file
__C.TEST.WEIGHTS = ''
# Number of Python threads to use for the data loader during testing
__C.TEST.LOADER_THREADS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
__C.TEST.SIZE_DIVISIBILITY = 32
# Datasets to test on
# Available dataset list: datasets.dataset_catalog.DATASETS.keys()
# If multiple datasets are listed, testing is performed on each one sequentially
__C.TEST.DATASETS = ()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALE = 600
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Number of images in each GPU for testing
__C.TEST.IMS_PER_GPU = 1
# If True, force resize the image to [H, W].
__C.TEST.FORCE_TEST_SCALE = [-1, -1]
# ---------------------------------------------------------------------------- #
# Soft NMS
# ---------------------------------------------------------------------------- #
__C.TEST.SOFT_NMS = AttrDict()
# Use soft NMS instead of standard NMS if set to True
__C.TEST.SOFT_NMS.ENABLED = False
# See soft NMS paper for definition of these options
__C.TEST.SOFT_NMS.METHOD = 'linear'
__C.TEST.SOFT_NMS.SIGMA = 0.5
# For the soft NMS overlap threshold, we simply use TEST.NMS
# ---------------------------------------------------------------------------- #
# Bounding box voting (from the Multi-Region CNN paper)
# ---------------------------------------------------------------------------- #
__C.TEST.BBOX_VOTE = AttrDict()
# Use box voting if set to True
__C.TEST.BBOX_VOTE.ENABLED = False
# We use TEST.NMS threshold for the NMS step. VOTE_TH overlap threshold
# is used to select voting boxes (IoU >= VOTE_TH) for each box that survives NMS
__C.TEST.BBOX_VOTE.VOTE_TH = 0.8
# The method used to combine scores when doing bounding box voting
# Valid options include ('ID', 'AVG', 'IOU_AVG', 'GENERALIZED_AVG', 'QUASI_SUM')
__C.TEST.BBOX_VOTE.SCORING_METHOD = 'ID'
# Hyperparameter used by the scoring method (it has different meanings for
# different methods)
__C.TEST.BBOX_VOTE.SCORING_METHOD_BETA = 1.0
# ---------------------------------------------------------------------------- #
# Test-time augmentations for bounding box detection
# ---------------------------------------------------------------------------- #
__C.TEST.BBOX_AUG = AttrDict()
# Enable test-time augmentation for bounding box detection if True
__C.TEST.BBOX_AUG.ENABLED = False
# Horizontal flip at the original scale (id transform)
__C.TEST.BBOX_AUG.H_FLIP = False
# Each scale is the pixel size of an image's shortest side
__C.TEST.BBOX_AUG.SCALES = ()
# Max pixel size of the longer side
__C.TEST.BBOX_AUG.MAX_SIZE = 4000
# ---------------------------------------------------------------------------- #
# Test-time augmentations for mask detection
# ---------------------------------------------------------------------------- #
__C.TEST.MASK_AUG = AttrDict()
# Enable test-time augmentation for instance mask detection if True
__C.TEST.MASK_AUG.ENABLED = False
# Heuristic used to combine mask predictions
# SOFT prefix indicates that the computation is performed on soft masks
# Valid options: ('SOFT_AVG', 'SOFT_MAX', 'LOGIT_AVG')
__C.TEST.MASK_AUG.HEUR = 'SOFT_AVG'
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
__C.BACKBONE = AttrDict()
# The backbone conv body to use
__C.BACKBONE.CONV_BODY = 'resnet'
# The eps of batch_norm layer
__C.BACKBONE.BN_EPS = 1e-5
# ---------------------------------------------------------------------------- #
# HRNet options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.HRNET = AttrDict()
# Network initial width
__C.BACKBONE.HRNET.WIDTH = 18
# Use a (2 * 2) kernel avg_pooling layer in the downsampling block.
__C.BACKBONE.HRNET.AVG_DOWN = False
# Use a squeeze-and-excitation module in each block
__C.BACKBONE.HRNET.USE_SE = False
# Use a global feature in each stage
__C.BACKBONE.HRNET.USE_GLOBAL = False
# Use group normalization
__C.BACKBONE.HRNET.USE_GN = False
# Use an aligned module in each block
__C.BACKBONE.HRNET.USE_ALIGN = False
# Type of 3x3 convolution layer in each block
# 'deform' for dcnv1, 'deformv2' for dcnv2
__C.BACKBONE.HRNET.STAGE_WITH_CONV = ('normal', 'normal', 'normal', 'normal')
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means nothing is fixed. The first conv and bn are
# fixed by default.
__C.BACKBONE.HRNET.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# MobileNet V1 options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.MV1 = AttrDict()
# The number of layers in each block
__C.BACKBONE.MV1.LAYERS = (2, 2, 6, 2)
# The initial width of each block
__C.BACKBONE.MV1.NUM_CHANNELS = [32, 64, 128, 256, 512, 1024]
# Kernel size of depth-wise separable convolution layers
__C.BACKBONE.MV1.KERNEL = 3
# Network widen factor
__C.BACKBONE.MV1.WIDEN_FACTOR = 1.0
# C5 stage dilation
__C.BACKBONE.MV1.C5_DILATION = 1
# Use a squeeze-and-excitation module in each block
__C.BACKBONE.MV1.USE_SE = False
# Use dropblock in C4 and C5
__C.BACKBONE.MV1.USE_DP = False
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means nothing is fixed. The first conv and bn are
# fixed by default.
__C.BACKBONE.MV1.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# MobileNet V2 options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.MV2 = AttrDict()
# Network widen factor
__C.BACKBONE.MV2.WIDEN_FACTOR = 1.0
# Use a squeeze-and-excitation module in each block
__C.BACKBONE.MV2.USE_SE = False
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means nothing is fixed. The first conv and bn are
# fixed by default.
__C.BACKBONE.MV2.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# MobileNet V3 options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.MV3 = AttrDict()
# Network setting of MobileNet V3
__C.BACKBONE.MV3.SETTING = 'large'
# Network widen factor
__C.BACKBONE.MV3.WIDEN_FACTOR = 1.0
# SE module mid-channel base: if True use innerplanes, if False use inplanes
__C.BACKBONE.MV3.SE_REDUCE_MID = True
# SE module mid-channel divisible. This param is to fit the tf-official implementation
__C.BACKBONE.MV3.SE_DIVISIBLE = False
# Use conv bias in head. This param is to fit tf-official implementation
__C.BACKBONE.MV3.HEAD_USE_BIAS = False
# Force using residual. This param is to fit tf-official implementation
__C.BACKBONE.MV3.FORCE_RESIDUAL = False
# Sync block act to se module. This param is to fit tf-official implementation
__C.BACKBONE.MV3.SYNC_SE_ACT = True
# Use Conv2dSamePadding to replace Conv2d for fitting tf-original implementation
__C.BACKBONE.MV3.SAME_PAD = False
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means nothing is fixed. The first conv and bn are
# fixed by default.
__C.BACKBONE.MV3.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# ResNet options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.RESNET = AttrDict()
# The number of layers in each block
# (2, 2, 2, 2) for resnet18 with basicblock
# (3, 4, 6, 3) for resnet34 with basicblock
# (3, 4, 6, 3) for resnet50
# (3, 4, 23, 3) for resnet101
# (3, 8, 36, 3) for resnet152
__C.BACKBONE.RESNET.LAYERS = (3, 4, 6, 3)
# Network initial width
__C.BACKBONE.RESNET.WIDTH = 64
# Use bottleneck block, False for basicblock
__C.BACKBONE.RESNET.BOTTLENECK = True
# Place the stride 2 conv on the 3x3 filter.
# True for resnet-b
__C.BACKBONE.RESNET.STRIDE_3X3 = False
# Use a head with three (3 * 3) kernels; False for a (7 * 7) kernel head.
# True for resnet-c
__C.BACKBONE.RESNET.USE_3x3x3HEAD = False
# Use a (2 * 2) kernel avg_pooling layer in the downsampling block.
# True for resnet-d
__C.BACKBONE.RESNET.AVG_DOWN = False
# Use group normalization
__C.BACKBONE.RESNET.USE_GN = False
# Use attentive normalization
# when it is True means use an_bn (an with bn)
# when it is True and USE_GN is True means use an_gn (an with gn)
__C.BACKBONE.RESNET.USE_AN = False
# Use weight standardization
__C.BACKBONE.RESNET.USE_WS = False
# Use an aligned module in each block
__C.BACKBONE.RESNET.USE_ALIGN = False
# Type of context module in each block
# 'se' for se, 'gcb' for gcb
__C.BACKBONE.RESNET.STAGE_WITH_CONTEXT = ('none', 'none', 'none', 'none')
# Context module innerplanes ratio
__C.BACKBONE.RESNET.CTX_RATIO = 0.0625
# Type of 3x3 convolution layer in each block
# 'deform' for dcnv1, 'deformv2' for dcnv2
__C.BACKBONE.RESNET.STAGE_WITH_CONV = ('normal', 'normal', 'normal', 'normal')
# Apply dilation in stage "c5"
__C.BACKBONE.RESNET.C5_DILATION = 1
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means nothing is fixed. The first conv and bn are
# fixed by default.
__C.BACKBONE.RESNET.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# ResNeXt options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.RESNEXT = AttrDict()
# The number of layers in each block
# (3, 4, 6, 3) for resnext50
# (3, 4, 23, 3) for resnext101
# (3, 8, 36, 3) for resnext152
__C.BACKBONE.RESNEXT.LAYERS = (3, 4, 6, 3)
# Cardinality (groups) of convolution layers
__C.BACKBONE.RESNEXT.C = 32
# Network initial width of each (conv) group
__C.BACKBONE.RESNEXT.WIDTH = 4
# Use a head with three (3 * 3) kernels; False for a (7 * 7) kernel head.
# True for resnext-c
__C.BACKBONE.RESNEXT.USE_3x3x3HEAD = False
# Use a (2 * 2) kernel avg_pooling layer in the downsampling block.
# True for resnext-d
__C.BACKBONE.RESNEXT.AVG_DOWN = False
# Use group normalization
__C.BACKBONE.RESNEXT.USE_GN = False
# Use weight standardization
__C.BACKBONE.RESNEXT.USE_WS = False
# Use an aligned module in each block
__C.BACKBONE.RESNEXT.USE_ALIGN = False
# Type of context module in each block
# 'se' for se, 'gcb' for gcb
__C.BACKBONE.RESNEXT.STAGE_WITH_CONTEXT = ('none', 'none', 'none', 'none')
# Context module innerplanes ratio
__C.BACKBONE.RESNEXT.CTX_RATIO = 0.0625
# Type of 3x3 convolution layer in each block
# 'deform' for dcnv1, 'deformv2' for dcnv2
__C.BACKBONE.RESNEXT.STAGE_WITH_CONV = ('normal', 'normal', 'normal', 'normal')
# Apply dilation in stage "c5"
__C.BACKBONE.RESNEXT.C5_DILATION = 1
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means nothing is fixed. The first conv and bn are
# fixed by default.
__C.BACKBONE.RESNEXT.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# VoVNet options
# ---------------------------------------------------------------------------- #
__C.BACKBONE.VOV = AttrDict()
# The number of layers in each block
# (1, 1, 1, 1) for vovnet27_slim
# (1, 1, 2, 2) for vovnet39
# (1, 1, 4, 3) for vovnet57
__C.BACKBONE.VOV.LAYERS = (1, 1, 2, 2)
# Network initial width
__C.BACKBONE.VOV.WIDTH = 64
# Number of conv layers in each block
__C.BACKBONE.VOV.NUM_CONV = 5
# Dimension of 3x3 conv for each block
# (64, 80, 96, 112) for vovnet27_slim
# (128, 160, 192, 224) for vovnet39/vovnet57
__C.BACKBONE.VOV.STAGE_DIMS = (128, 160, 192, 224)
# Dimension of 1x1 conv concat for each block
# (128, 256, 384, 512) for vovnet27_slim
# (256, 512, 768, 1024) for vovnet39/vovnet57
__C.BACKBONE.VOV.CONCAT_DIMS = (256, 512, 768, 1024)
# Use group normalization
__C.BACKBONE.VOV.USE_GN = False
# Type of 3x3 convolution layer in each block
# 'deform' for dcnv1, 'deformv2' for dcnv2
__C.BACKBONE.VOV.STAGE_WITH_CONV = ('normal', 'normal', 'normal', 'normal')
# Freeze model weights before and including which block.
# Choices: [0, 2, 3, 4, 5]. 0 means nothing is fixed. The first conv and bn are
# fixed by default.
__C.BACKBONE.VOV.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
__C.FPN = AttrDict()
# The Body of FPN to use
# (e.g., "fpn", "hrfpn")
__C.FPN.BODY = "fpn"
# Use C5 or P5 to generate P6
__C.FPN.USE_C5 = True
# Channel dimension of the FPN feature levels
__C.FPN.DIM = 256
# FPN may be used for just RPN, just object detection, or both
# E.g., "conv2"-like level
__C.FPN.LOWEST_BACKBONE_LVL = 2
# E.g., "conv5"-like level
__C.FPN.HIGHEST_BACKBONE_LVL = 5
# Use FPN for RoI transform for object detection if True
__C.FPN.MULTILEVEL_ROIS = True
# Hyperparameters for the RoI-to-FPN level mapping heuristic
__C.FPN.ROI_CANONICAL_SCALE = 224 # s0 # TODO
__C.FPN.ROI_CANONICAL_LEVEL = 4 # k0: where s0 maps to # TODO
# Coarsest level of the FPN pyramid
__C.FPN.ROI_MAX_LEVEL = 5
# Finest level of the FPN pyramid
__C.FPN.ROI_MIN_LEVEL = 2
# Use FPN for RPN if True
__C.FPN.MULTILEVEL_RPN = True
# Coarsest level of the FPN pyramid
__C.FPN.RPN_MAX_LEVEL = 6
# Finest level of the FPN pyramid
__C.FPN.RPN_MIN_LEVEL = 2
# Use extra FPN levels, as done in the RetinaNet paper
__C.FPN.EXTRA_CONV_LEVELS = False
# Use FPN Lite (dwconv) to replace standard FPN
__C.FPN.USE_LITE = False
# Use BatchNorm in the FPN-specific layers (lateral, etc.)
__C.FPN.USE_BN = False
# Use GroupNorm in the FPN-specific layers (lateral, etc.)
__C.FPN.USE_GN = False
# Use Weight Standardization in the FPN-specific layers (lateral, etc.)
__C.FPN.USE_WS = False
# ---------------------------------------------------------------------------- #
# FPN hrfpn body options
# ---------------------------------------------------------------------------- #
__C.FPN.HRFPN = AttrDict()
# Channel dimension of the HRFPN feature levels
__C.FPN.HRFPN.DIM = 256
# Pooling type in HRFPN for down-sampling
__C.FPN.HRFPN.POOLING_TYPE = 'AVG'
# Number of extra pooling layer in HRFPN for down-sampling
__C.FPN.HRFPN.NUM_EXTRA_POOLING = 1
# Use HRFPN Lite (dwconv) to replace standard HRFPN
__C.FPN.HRFPN.USE_LITE = False
# Use BatchNorm in the HRFPN-specific layers
__C.FPN.HRFPN.USE_BN = False
# Use GroupNorm in the HRFPN-specific layers
__C.FPN.HRFPN.USE_GN = False
# ---------------------------------------------------------------------------- #
# Semantic Segmentation options ("SEMSEG" means Semantic Segmentation)
# ---------------------------------------------------------------------------- #
__C.SEMSEG = AttrDict()
# The head of Semantic R-CNN to use
__C.SEMSEG.ROI_SEMSEG_HEAD = 'fused_head'
# Output module of Semantic R-CNN head
__C.SEMSEG.ROI_SEMSEG_OUTPUT = 'semseg_output'
# Multi-task loss weight for semantic segmentation
__C.SEMSEG.SEMSEG_LOSS_WEIGHT = 0.2
# The ignore label
__C.SEMSEG.SEMSEG_IGNORE_LABEL = 255
# The number of semantic segmentation classes
__C.SEMSEG.SEMSEG_NUM_CLASSES = 183
# ---------------------------------------------------------------------------- #
# Semantic R-CNN semantic head options
# ---------------------------------------------------------------------------- #
__C.SEMSEG.SEMSEG_HEAD = AttrDict()
# (p2, p3, p4, p5), 2 means resize all stages like p3
__C.SEMSEG.SEMSEG_HEAD.FUSION_LEVEL = 2
# Number of Conv layers in the semantic head
__C.SEMSEG.SEMSEG_HEAD.NUM_CONVS = 4
# Number of input feature stage in the semantic head
__C.SEMSEG.SEMSEG_HEAD.NUM_IN_STAGE = 5
# Hidden Conv layer dimension
__C.SEMSEG.SEMSEG_HEAD.CONV_DIM = 256
# Use BatchNorm in the semantic head
__C.SEMSEG.SEMSEG_HEAD.USE_BN = False
# Use GroupNorm in the semantic head
__C.SEMSEG.SEMSEG_HEAD.USE_GN = False
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
__C.RPN = AttrDict()
# Indicates the model's computation terminates with the production of RPN
# proposals (i.e., it outputs proposals ONLY, no actual object detections)
__C.RPN.RPN_ONLY = False
# Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input
__C.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512)
# Stride of the feature map that RPN is attached to.
# For FPN, number of strides should match number of scales
__C.RPN.ANCHOR_STRIDE = (16,)
# RPN anchor aspect ratios
__C.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
__C.RPN.STRADDLE_THRESH = 0
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example)
__C.RPN.FG_IOU_THRESHOLD = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example)
__C.RPN.BG_IOU_THRESHOLD = 0.3
# Total number of RPN examples per image
__C.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
__C.RPN.POSITIVE_FRACTION = 0.5
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
__C.RPN.PRE_NMS_TOP_N_TRAIN = 12000
__C.RPN.PRE_NMS_TOP_N_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
__C.RPN.POST_NMS_TOP_N_TRAIN = 2000
__C.RPN.POST_NMS_TOP_N_TEST = 1000
# NMS threshold used on RPN proposals
__C.RPN.NMS_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (at the scale used during training or inference)
__C.RPN.MIN_SIZE = 0
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
__C.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
__C.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
# Apply the post NMS per batch (default) or per image during training
# (default is True to be consistent with Detectron, see Issue #672)
__C.RPN.FPN_POST_NMS_PER_BATCH = True
# Custom rpn head, empty to use default conv or separable conv
__C.RPN.RPN_HEAD = "SingleConvRPNHead" # TODO
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
__C.RPN.SMOOTH_L1_BETA = 1.0 / 9
# ---------------------------------------------------------------------------- #
# Fast R-CNN options
# ---------------------------------------------------------------------------- #
__C.FAST_RCNN = AttrDict()
# The head of Fast R-CNN to use
# (e.g., "roi_2mlp_head", "roi_convx_head")
__C.FAST_RCNN.ROI_BOX_HEAD = "roi_2mlp_head"
# Output module of Fast R-CNN head
__C.FAST_RCNN.ROI_BOX_OUTPUT = "box_output"
# RoI transformation function (e.g., ROIPool or ROIAlign or ROIAlignV2)
__C.FAST_RCNN.ROI_XFORM_METHOD = 'ROIAlign'
# Number of grid sampling points in ROIAlign (usually use 2)
# Only applies to ROIAlign
__C.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO = 0
# RoI transform output resolution
# Note: some models may have constraints on what they can use, e.g. they use
# pretrained FC layers like in VGG16, and will ignore this option
__C.FAST_RCNN.ROI_XFORM_RESOLUTION = (14, 14)
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
__C.FAST_RCNN.FG_IOU_THRESHOLD = 0.5
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
__C.FAST_RCNN.BG_IOU_THRESHOLD = 0.5
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
__C.FAST_RCNN.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH
# E.g., a common configuration is: 512 * 2 * 8 = 8192
__C.FAST_RCNN.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
__C.FAST_RCNN.POSITIVE_FRACTION = 0.25
# Use a class agnostic bounding box regressor instead of the default per-class
# regressor
__C.FAST_RCNN.CLS_AGNOSTIC_BBOX_REG = False
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
__C.FAST_RCNN.SCORE_THRESH = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.FAST_RCNN.NMS = 0.5
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
__C.FAST_RCNN.DETECTIONS_PER_IMG = 100
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
__C.FAST_RCNN.SMOOTH_L1_BETA = 1
# Classifier branch switch
__C.FAST_RCNN.CLS_ON = True
# Box regression branch switch
__C.FAST_RCNN.REG_ON = True
# ---------------------------------------------------------------------------- #
# Fast R-CNN mlp head options
# ---------------------------------------------------------------------------- #
__C.FAST_RCNN.MLP_HEAD = AttrDict()
# Hidden layer dimension when using an MLP for the RoI box head
__C.FAST_RCNN.MLP_HEAD.MLP_DIM = 1024
# Use BatchNorm in the Fast R-CNN mlp head
__C.FAST_RCNN.MLP_HEAD.USE_BN = False
# Use GroupNorm in the Fast R-CNN mlp head
__C.FAST_RCNN.MLP_HEAD.USE_GN = False
# Use Weight Standardization in the Fast R-CNN mlp head
__C.FAST_RCNN.MLP_HEAD.USE_WS = False
# ---------------------------------------------------------------------------- #
# Fast R-CNN convfc head options
# ---------------------------------------------------------------------------- #
__C.FAST_RCNN.CONVFC_HEAD = AttrDict()
# Dilation
__C.FAST_RCNN.CONVFC_HEAD.DILATION = 1
# Hidden Conv layer dimension when using Convs for the RoI box head
__C.FAST_RCNN.CONVFC_HEAD.CONV_DIM = 256
# Number of stacked Conv layers in the RoI box head
__C.FAST_RCNN.CONVFC_HEAD.NUM_STACKED_CONVS = 4
# Hidden layer dimension when using an MLP for the RoI box head
__C.FAST_RCNN.CONVFC_HEAD.MLP_DIM = 1024
# Use Fast R-CNN Lite (dwconv) to replace standard Fast R-CNN
__C.FAST_RCNN.CONVFC_HEAD.USE_LITE = False
# Use BatchNorm in the Fast R-CNN convfc head
__C.FAST_RCNN.CONVFC_HEAD.USE_BN = False
# Use GroupNorm in the Fast R-CNN convfc head
__C.FAST_RCNN.CONVFC_HEAD.USE_GN = False
# Use Weight Standardization in the Fast R-CNN convfc head
__C.FAST_RCNN.CONVFC_HEAD.USE_WS = False
# ---------------------------------------------------------------------------- #
# Cascade R-CNN options
# ---------------------------------------------------------------------------- #
__C.CASCADE_RCNN = AttrDict()
# The head of Cascade R-CNN to use
# (e.g., "roi_2mlp_head", "roi_convx_head")
__C.CASCADE_RCNN.ROI_BOX_HEAD = "roi_2mlp_head"
# Output module of Cascade R-CNN head
__C.CASCADE_RCNN.ROI_BOX_OUTPUT = "box_output"
# Number of stages of Cascade R-CNN to use
__C.CASCADE_RCNN.NUM_STAGE = 3
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
__C.CASCADE_RCNN.FG_IOU_THRESHOLD = [0.5, 0.6, 0.7]
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
__C.CASCADE_RCNN.BG_IOU_THRESHOLD = [0.5, 0.6, 0.7]
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
__C.CASCADE_RCNN.BBOX_REG_WEIGHTS = ((10., 10., 5., 5.), (20., 20., 10., 10.),
(30., 30., 15., 15.))
# Weights for cascade stages
__C.CASCADE_RCNN.STAGE_WEIGHTS = (1.0, 0.5, 0.25)
# Stage id for testing
__C.CASCADE_RCNN.TEST_STAGE = 3
# Use ensemble results for testing
__C.CASCADE_RCNN.TEST_ENSEMBLE = True
# ---------------------------------------------------------------------------- #
# Mask R-CNN options ("MRCNN" means Mask R-CNN)
# ---------------------------------------------------------------------------- #
__C.MRCNN = AttrDict()
# The head of Mask R-CNN to use
# (e.g., "roi_convx_head")
__C.MRCNN.ROI_MASK_HEAD = "roi_convx_head"
# Output module of Mask R-CNN head
__C.MRCNN.ROI_MASK_OUTPUT = "mask_deconv_output"
# RoI transformation function and associated options
__C.MRCNN.ROI_XFORM_METHOD = 'ROIAlign'
# Mask roi size per image (roi_batch_size = roi_size_per_img * img_per_gpu when using across-sample strategy)
__C.MRCNN.ROI_SIZE_PER_IMG = -1
# Sample the positive box across batch per GPU
__C.MRCNN.ACROSS_SAMPLE = False
# RoI strides for Mask R-CNN head to use
__C.MRCNN.ROI_STRIDES = []
# Number of grid sampling points in ROIAlign (usually use 2)
# Only applies to ROIAlign
__C.MRCNN.ROI_XFORM_SAMPLING_RATIO = 0
# RoI transformation function (e.g., ROIPool or ROIAlign)
__C.MRCNN.ROI_XFORM_RESOLUTION = (14, 14)
# Resolution of mask predictions
__C.MRCNN.RESOLUTION = (28, 28)
# Whether or not to resize and translate masks to the input image.
__C.MRCNN.POSTPROCESS_MASKS = False # TODO
__C.MRCNN.POSTPROCESS_MASKS_THRESHOLD = 0.5 # TODO
# Multi-task loss weight to use for Mask R-CNN head
__C.MRCNN.LOSS_WEIGHT = 1.0
# Use Mask IoU for mask head
__C.MRCNN.MASKIOU_ON = False
# ---------------------------------------------------------------------------- #
# Mask R-CNN convx head options
# ---------------------------------------------------------------------------- #
__C.MRCNN.CONVX_HEAD = AttrDict()
# Hidden Conv layer dimension
__C.MRCNN.CONVX_HEAD.CONV_DIM = 256
# Number of stacked Conv layers in the RoI box head
__C.MRCNN.CONVX_HEAD.NUM_STACKED_CONVS = 4
# Use dilated convolution in the mask head
__C.MRCNN.CONVX_HEAD.DILATION = 1
# Use Mask R-CNN Lite (dwconv) to replace standard Mask R-CNN
__C.MRCNN.CONVX_HEAD.USE_LITE = False
# Use BatchNorm in the Mask R-CNN convx head
__C.MRCNN.CONVX_HEAD.USE_BN = False
# Use GroupNorm in the Mask R-CNN convx head
__C.MRCNN.CONVX_HEAD.USE_GN = False
# Use Weight Standardization in the Mask R-CNN convx head
__C.MRCNN.CONVX_HEAD.USE_WS = False
# ---------------------------------------------------------------------------- #
# Mask IoU options
# ---------------------------------------------------------------------------- #
__C.MRCNN.MASKIOU = AttrDict()
# The head of Mask IoU to use
# (e.g., "convx_head")
__C.MRCNN.MASKIOU.MASKIOU_HEAD = "convx_head"
# Output module of Mask IoU head
__C.MRCNN.MASKIOU.MASKIOU_OUTPUT = "linear_output"
# Hidden Conv layer dimension of Mask IoU head
__C.MRCNN.MASKIOU.CONV_DIM = 256
# Hidden MLP layer dimension of Mask IoU head
__C.MRCNN.MASKIOU.MLP_DIM = 1024
# Loss weight for Mask IoU head
__C.MRCNN.MASKIOU.LOSS_WEIGHT = 1.0
# ---------------------------------------------------------------------------- #
# hier R-CNN options ("HRCNN" = Mask R-CNN with Hier support)
# ---------------------------------------------------------------------------- #
__C.HRCNN = AttrDict()
# The head of hier R-CNN to use
# (e.g., "roi_convx_head")
__C.HRCNN.ROI_HIER_HEAD = "roi_convx_head"
# Output module of hier R-CNN head
__C.HRCNN.ROI_HIER_OUTPUT = "hier_output"
# RoI transformation function and associated options
__C.HRCNN.ROI_XFORM_METHOD = 'ROIAlign'
# Sample the positive box across batch per GPU ### TODO
__C.HRCNN.ACROSS_SAMPLE = False
# Hier roi size per image (roi_batch_size = roi_size_per_img * img_per_gpu when using across-sample strategy)
__C.HRCNN.ROI_SIZE_PER_IMG = -1
# RoI strides for Hier R-CNN head to use
__C.HRCNN.ROI_STRIDES = []
# Number of grid sampling points in RoIAlign (usually use 2)
# Only applies to RoIAlign
__C.HRCNN.ROI_XFORM_SAMPLING_RATIO = 0
# RoI transformation function (e.g., RoIPool or RoIAlign)
__C.HRCNN.ROI_XFORM_RESOLUTION = (14, 14)
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
__C.HRCNN.FG_IOU_THRESHOLD = 0.7
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
__C.HRCNN.BG_IOU_THRESHOLD = 0.7
# Inference cls score threshold, anchors with score > INFERENCE_TH are
# considered for inference
__C.HRCNN.INFERENCE_TH = 0.05
# NMS threshold used in Hier
__C.HRCNN.NMS_TH = 0.6
# During inference, #locs to select based on cls score before NMS is performed
# per FPN level
__C.HRCNN.PRE_NMS_TOP_N = 1000
# Number of detections per image
__C.HRCNN.DETECTIONS_PER_IMG = 100
# Number of hier classes in the dataset
__C.HRCNN.NUM_CLASSES = -1
# Focal loss parameter: alpha
__C.HRCNN.LOSS_ALPHA = 0.25
# Focal loss parameter: gamma
__C.HRCNN.LOSS_GAMMA = 2.0
# Multi-task loss weight to use for hier head
__C.HRCNN.LOSS_WEIGHT = 1.0
# Prior prob for the positives at the beginning of training. This is used to set
# the bias init for the logits layer
__C.HRCNN.PRIOR_PROB = 0.01
# Loc loss type, it can be 'iou', 'liou' and 'giou'
__C.HRCNN.LOC_LOSS_TYPE = 'giou'
# Normalizing the regression targets with FPN strides
__C.HRCNN.NORM_REG_TARGETS = True
# Put the centerness prediction on the regression branch.
__C.HRCNN.CENTERNESS_ON_REG = True
# Use center sample in the hier head
__C.HRCNN.CENTER_SAMPLE = True
# Center sample radius in the hier head
__C.HRCNN.POS_RADIUS = 1.5
# Convolutions to use in the cls and bbox tower
# NOTE: this doesn't include the last conv for logits
__C.HRCNN.OUTPUT_NUM_CONVS = 2
# Hidden Conv layer dimension
__C.HRCNN.OUTPUT_CONV_DIM = 256
# Use hier output Lite (dwconv) to replace standard hier output
__C.HRCNN.OUTPUT_USE_LITE = False
# Use BatchNorm in the hier output
__C.HRCNN.OUTPUT_USE_BN = False
# Use GroupNorm in the hier output
__C.HRCNN.OUTPUT_USE_GN = True
# Use dcn in the last layer of towers
__C.HRCNN.OUTPUT_USE_DCN = False
# Eval hier
__C.HRCNN.EVAL_HIER = True
# Hier score threshold: candidates with score > HIER_TH are considered for hier inference
__C.HRCNN.HIER_TH = 0.2
# Limit hands and feet
__C.HRCNN.LIMIT_TYPE = 'hand_and_foot'
# ---------------------------------------------------------------------------- #
# hier R-CNN convx head options
# ---------------------------------------------------------------------------- #
__C.HRCNN.CONVX_HEAD = AttrDict()
# Hidden Conv layer dimension
__C.HRCNN.CONVX_HEAD.CONV_DIM = 256
# Number of stacked Conv layers in the RoI box head
__C.HRCNN.CONVX_HEAD.NUM_STACKED_CONVS = 4
# Use dilated convolution in the mask head
__C.HRCNN.CONVX_HEAD.DILATION = 1
# Use hier R-CNN Lite (dwconv) to replace standard hier R-CNN
__C.HRCNN.CONVX_HEAD.USE_LITE = False
# Use BatchNorm in the hier R-CNN convx head
__C.HRCNN.CONVX_HEAD.USE_BN = False
# Use GroupNorm in the hier R-CNN convx head
__C.HRCNN.CONVX_HEAD.USE_GN = False
# ---------------------------------------------------------------------------- #
# hier R-CNN gce head options
# ---------------------------------------------------------------------------- #
__C.HRCNN.GCE_HEAD = AttrDict()
# Hidden Conv layer dimension
__C.HRCNN.GCE_HEAD.CONV_DIM = 512
# Dimension for ASPPV3
__C.HRCNN.GCE_HEAD.ASPPV3_DIM = 256
# Dilation for ASPPV3
__C.HRCNN.GCE_HEAD.ASPPV3_DILATION = (6, 12, 18)
# Number of stacked Conv layers in GCE head before ASPPV3
__C.HRCNN.GCE_HEAD.NUM_CONVS_BEFORE_ASPPV3 = 0
# Number of stacked Conv layers in GCE head after ASPPV3
__C.HRCNN.GCE_HEAD.NUM_CONVS_AFTER_ASPPV3 = 0
# Use NonLocal in the hier R-CNN gce head
__C.HRCNN.GCE_HEAD.USE_NL = False
# Reduction ratio of the NonLocal block
__C.HRCNN.GCE_HEAD.NL_RATIO = 1.0
# Use BatchNorm in the hier R-CNN gce head
__C.HRCNN.GCE_HEAD.USE_BN = False
# Use GroupNorm in the hier R-CNN gce head
__C.HRCNN.GCE_HEAD.USE_GN = False
# ---------------------------------------------------------------------------- #
# Visualization options
# ---------------------------------------------------------------------------- #
__C.VIS = AttrDict()
# Dump detection visualizations
__C.VIS.ENABLED = False
# Score threshold for visualization
__C.VIS.VIS_TH = 0.9
# ---------------------------------------------------------------------------- #
# Show box options
# ---------------------------------------------------------------------------- #
__C.VIS.SHOW_BOX = AttrDict()
# Visualizing detection bboxes
__C.VIS.SHOW_BOX.ENABLED = True
# Visualization color scheme
# 'green', 'category' or 'instance'
__C.VIS.SHOW_BOX.COLOR_SCHEME = 'green'
# Color map, 'COCO81', 'VOC21', 'ADE151', 'LIP20', 'MHP59'
__C.VIS.SHOW_BOX.COLORMAP = 'COCO81'
# Border thickness
__C.VIS.SHOW_BOX.BORDER_THICK = 2
# ---------------------------------------------------------------------------- #
# Show class options
# ---------------------------------------------------------------------------- #
__C.VIS.SHOW_CLASS = AttrDict()
# Visualizing detection classes
__C.VIS.SHOW_CLASS.ENABLED = True
# Default: gray
__C.VIS.SHOW_CLASS.COLOR = (218, 227, 218)
# Font scale of class string
__C.VIS.SHOW_CLASS.FONT_SCALE = 0.45
# ---------------------------------------------------------------------------- #
# Show segmentation options
# ---------------------------------------------------------------------------- #
__C.VIS.SHOW_SEGMS = AttrDict()
# Visualizing detection segmentations
__C.VIS.SHOW_SEGMS.ENABLED = True
# Whether show mask
__C.VIS.SHOW_SEGMS.SHOW_MASK = True
# Whether the mask color follows the box color; if False, use white (255, 255, 255)
__C.VIS.SHOW_SEGMS.MASK_COLOR_FOLLOW_BOX = True
# Mask alpha
__C.VIS.SHOW_SEGMS.MASK_ALPHA = 0.4
# Whether show border
__C.VIS.SHOW_SEGMS.SHOW_BORDER = True
# Border color, (255, 255, 255) for white, (0, 0, 0) for black
__C.VIS.SHOW_SEGMS.BORDER_COLOR = (255, 255, 255)
# Border thickness
__C.VIS.SHOW_SEGMS.BORDER_THICK = 2
# ---------------------------------------------------------------------------- #
# Show hier options
# ---------------------------------------------------------------------------- #
__C.VIS.SHOW_HIER = AttrDict()
# Visualizing detection hier results
__C.VIS.SHOW_HIER.ENABLED = True
# Border thickness
__C.VIS.SHOW_HIER.BORDER_THICK = 2
# ---------------------------------------------------------------------------- #
# Deprecated options
# If an option is removed from the code and you don't want to break existing
# yaml configs, you can add the full config key as a string to the set below.
# ---------------------------------------------------------------------------- #
_DEPRECATED_KEYS = set()
# ---------------------------------------------------------------------------- #
# Renamed options
# If you rename a config option, record the mapping from the old name to the new
# name in the dictionary below. Optionally, if the type also changed, you can
# make the value a tuple that specifies first the renamed key and then
# instructions for how to edit the config file.
# ---------------------------------------------------------------------------- #
_RENAMED_KEYS = {
'EXAMPLE.RENAMED.KEY': 'EXAMPLE.KEY', # Dummy example to follow
'PIXEL_MEAN': 'PIXEL_MEANS',
'PIXEL_STD': 'PIXEL_STDS',
}
def assert_and_infer_cfg(make_immutable=True):
"""Call this function in your script after you have finished setting all cfg
values that are necessary (e.g., merging a config from a file, merging
command line config options, etc.). By default, this function will also
mark the global cfg as immutable to prevent changing the global cfg settings
during script execution (which can lead to hard to debug errors or code
that's harder to understand than is necessary).
"""
if make_immutable:
cfg.immutable(True)
def merge_cfg_from_file(cfg_filename):
"""Load a yaml config file and merge it into the global config."""
with open(cfg_filename, 'r') as f:
        yaml_cfg = AttrDict(yaml.safe_load(f))  # safe_load avoids arbitrary object construction
_merge_a_into_b(yaml_cfg, __C)
def merge_cfg_from_list(cfg_list):
"""Merge config keys, values in a list (e.g., from command line) into the
global config. For example, `cfg_list = ['TEST.NMS', 0.5]`.
"""
assert len(cfg_list) % 2 == 0
for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
if _key_is_deprecated(full_key):
continue
if _key_is_renamed(full_key):
_raise_key_rename_error(full_key)
key_list = full_key.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d, 'Non-existent key: {}'.format(full_key)
d = d[subkey]
subkey = key_list[-1]
assert subkey in d, 'Non-existent key: {}'.format(full_key)
value = _decode_cfg_value(v)
value = _check_and_coerce_cfg_value_type(
value, d[subkey], subkey, full_key
)
d[subkey] = value
def _merge_a_into_b(a, b, stack=None):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict'
assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict'
for k, v_ in a.items():
full_key = '.'.join(stack) + '.' + k if stack is not None else k
# a must specify keys that are in b
if k not in b:
raise KeyError('Non-existent config key: {}'.format(full_key))
v = copy.deepcopy(v_)
v = _decode_cfg_value(v)
v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
# Recursively merge dicts
if isinstance(v, AttrDict):
try:
stack_push = [k] if stack is None else stack + [k]
_merge_a_into_b(v, b[k], stack=stack_push)
except BaseException:
raise
else:
b[k] = v
def _decode_cfg_value(v):
"""Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
"""
# Configs parsed from raw yaml will contain dictionary keys that need to be
# converted to AttrDict objects
if isinstance(v, dict):
return AttrDict(v)
# All remaining processing is only applied to strings
if not isinstance(v, str):
return v
# Try to interpret `v` as a:
# string, number, tuple, list, dict, boolean, or None
try:
v = literal_eval(v)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
"""Checks that `value_a`, which is intended to replace `value_b` is of the
right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
# The types must match (with some exceptions)
type_b = type(value_b)
type_a = type(value_a)
if type_a is type_b:
return value_a
# Exceptions: numpy arrays, strings, tuple<->list
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, str):
value_a = str(value_a)
elif isinstance(value_a, tuple) and isinstance(value_b, list):
value_a = list(value_a)
elif isinstance(value_a, list) and isinstance(value_b, tuple):
value_a = tuple(value_a)
else:
raise ValueError(
'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '
'key: {}'.format(type_b, type_a, value_b, value_a, full_key)
)
return value_a
def _key_is_deprecated(full_key):
    return full_key in _DEPRECATED_KEYS
def _key_is_renamed(full_key):
return full_key in _RENAMED_KEYS
def _raise_key_rename_error(full_key):
new_key = _RENAMED_KEYS[full_key]
if isinstance(new_key, tuple):
msg = ' Note: ' + new_key[1]
new_key = new_key[0]
else:
msg = ''
raise KeyError(
'Key {} was renamed to {}; please update your config.{}'.
format(full_key, new_key, msg)
)
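# ---------------------------------------------------------------------------- #
# Usage sketch (illustrative, not part of the original module). The YAML path
# and the override keys below are assumptions picked from the options defined
# above.
# ---------------------------------------------------------------------------- #
#   merge_cfg_from_file('configs/hrcnn_R50.yaml')            # hypothetical file
#   merge_cfg_from_list(['HRCNN.NMS_TH', '0.5', 'VIS.ENABLED', 'True'])
#   assert_and_infer_cfg()   # freeze cfg before training/inference starts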
|
nilq/baby-python
|
python
|
load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
def rules_clojure_dependencies():
jvm_maven_import_external(
name = "org_clojure",
artifact = "org.clojure:clojure:1.10.1",
artifact_sha256 = "d4f6f991fd9ed2a59e7ea4779010b3b069a2b905f3463136c42201106b4ad21a",
server_urls = ["https://repo1.maven.org/maven2/"],
)
jvm_maven_import_external(
name = "org_clojure_spec_alpha",
artifact = "org.clojure:spec.alpha:0.2.176",
artifact_sha256 = "fc4e96ecff34ddd2ab7fd050e74ae1379342ee09daa6028da52024c5de836cc4",
server_urls = ["https://repo1.maven.org/maven2/"],
)
jvm_maven_import_external(
name = "org_clojure_core_specs_alpha",
artifact = "org.clojure:core.specs.alpha:0.2.44",
artifact_sha256 = "3b1ec4d6f0e8e41bf76842709083beb3b56adf3c82f9a4f174c3da74774b381c",
server_urls = ["https://repo1.maven.org/maven2/"],
)
def rules_clojure_toolchains():
native.register_toolchains("@rules_clojure//:clojure_toolchain")
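# Hypothetical WORKSPACE usage sketch (not part of the original file; the
# load label below is an assumption about where these macros live):
#
#   load("@rules_clojure//:repositories.bzl",
#        "rules_clojure_dependencies", "rules_clojure_toolchains")
#   rules_clojure_dependencies()
#   rules_clojure_toolchains()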
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class ApacheKafkaConfig(AppConfig):
name = 'apache_kafka'
|
nilq/baby-python
|
python
|
import click
from neobox.cmd.list import list_
from neobox.cmd.clear_cache import clear_cache
from neobox.cmd.login import login
from neobox.cmd.logout import logout
from neobox.cmd.search import search
from neobox.cmd.play import play
from neobox.cmd.pause import pause
from neobox.cmd.stop import stop
@click.group()
def neobox():
""" neobox 是一个网易云音乐的命令行客户端
"""
pass
neobox.add_command(login)
neobox.add_command(logout)
neobox.add_command(list_)
neobox.add_command(search)
neobox.add_command(play)
neobox.add_command(pause)
neobox.add_command(stop)
neobox.add_command(clear_cache)
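# Entry point added as a usage illustration; this is an assumption, since the
# original file may instead be wired up via a setuptools console_scripts entry.
if __name__ == "__main__":
    neobox()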
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author : yasin
# @time : 2018-12-28 9:28 PM
# @File : config.py
import logging.config
logging.config.fileConfig("logger.conf")
logger = logging.getLogger("novel-update-monitor-account")
bindIp = '0.0.0.0'
bindPort = 12126
token = 'VqmB965wOEBrPLNoMkHCfIOpxF0WWFM6'
getTokenUrl = 'https://gitlab.net.cn/wechat/token?type=access_token&secret=c4ca4238a0b923820dcc509a6f75849b'
baseNotifyUrl = 'https://api.weixin.qq.com/cgi-bin/message/template/send?access_token='
silentMode = True
silentModeStartTime = 23
silentModeEndTime = 7
notificationQueue = []
notice = {
"touser": "oQHU46Djs5O3yhsTmYGvDz_Hi0vo",
"template_id": "oKa0UsZ6xvSlnFChlGGdMMH1O_yq2l91G-sIQPRg2BI",
"url": "",
"topcolor": "#FF0000",
"data": {
"first": {
"value": "您订阅的小说更新啦!",
"color": "#173177"
},
"novelName": {
"value": "",
"color": "#173177"
},
"sectionName": {
"value": "",
"color": "#173177"
},
"updateTime": {
"value": "",
"color": "#173177"
},
"remark": {
"value": "点击详情立刻阅读最新章节↓↓↓",
"color": "#173177"
}
}
}
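# Illustrative helper (not part of the original config): fill the `notice`
# template for one concrete update without mutating the shared template.
import copy

def build_notice(novel_name, section_name, update_time, url=""):
    # Deep-copy so repeated notifications never leak values into the template.
    msg = copy.deepcopy(notice)
    msg["url"] = url
    msg["data"]["novelName"]["value"] = novel_name
    msg["data"]["sectionName"]["value"] = section_name
    msg["data"]["updateTime"]["value"] = update_time
    return msg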
|
nilq/baby-python
|
python
|
"""User profile model"""
# Django
from django.db import models
# Utilities
from mydea.utils.models import MyDeaModel
class Profile(MyDeaModel):
"""Profile model.
A profile holds a user's data"""
# user
user = models.OneToOneField('users.User', on_delete=models.CASCADE)
def __str__(self):
"""Return user's str representation."""
return str(self.user)
|
nilq/baby-python
|
python
|
#----------------------------------------------------------------------
# Deep learning classification for contrast CT;
# Transfer learning using Google Inception V3;
#-------------------------------------------------------------------------------------------
import os
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
import glob
import tensorflow
from tensorflow import keras
from tensorflow.keras import Input
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import img_to_array, load_img, ImageDataGenerator
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications import ResNet152
from tensorflow.keras.applications import ResNet101
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.applications import ResNet101V2
from tensorflow.keras.applications import ResNet152V2
# ----------------------------------------------------------------------------------
# transfer learning CNN model
# ----------------------------------------------------------------------------------
def TLNet(resnet, input_shape, activation):
### determine ResNet base model
if resnet == 'ResNet50V2':
base_model = ResNet50V2(
weights='imagenet',
include_top=False,
input_shape=input_shape,
pooling=None
)
elif resnet == 'ResNet101V2':
base_model = ResNet101V2(
weights='imagenet',
include_top=False,
input_shape=input_shape,
pooling=None
)
    elif resnet == 'ResNet152V2':
        base_model = ResNet152V2(
            weights='imagenet',
            include_top=False,
            input_shape=input_shape,
            pooling=None
        )
    else:
        # Fail fast instead of leaving base_model unbound below.
        raise ValueError(f"Unsupported resnet: {resnet!r}")
    base_model.trainable = False
### create top model
inputs = Input(shape=input_shape)
x = base_model(inputs, training=False)
x = GlobalAveragePooling2D()(x)
x = Dense(1000, activation='relu')(x)
#x = Dense(1024, activation='relu')(x)
#x = Dense(512, activation='relu')(x)
outputs = Dense(1, activation=activation)(x)
model = Model(inputs, outputs)
return model
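# Usage sketch (an assumption about how TLNet is driven elsewhere in the
# project): build the model and compile it for binary classification using
# the optimizer and loss already imported above. Shapes are illustrative.
if __name__ == '__main__':
    model = TLNet(resnet='ResNet50V2', input_shape=(192, 192, 3), activation='sigmoid')
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss=BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.summary()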
|
nilq/baby-python
|
python
|
import os
from time import time
import krgram.tl.protocol
import krgram.tl.protocol.auth
from krgram.client.crypto import TLEncryptor
from krgram.client.errors import SecurityError
from krgram.mtproto.connection import MTProtoAbridgedConnection
from krgram.mtproto.dcs import DataCenters
from krgram.mtproto.errors import UnexpectedResponseError
from krgram.mtproto.message import PlainMsg
from krgram.mtproto.msg_extra import MsgId
from krgram.mtproto.servers_pk import TelegramServersPublicKeys
from krgram.tl.base import *
from krgram.tl.core_types.native_extends import TL_int128, TL_int256
from krgram.tl.stream import TLBytesStream
from krgram.utils.cryptohash import Hash, Crypto
from krgram.utils.math import factorize
class AuthKey:
def __init__(self, data):
self.data = data
self.key_id = None
self._calculate_id()
def get_id(self):
return self.key_id
def _calculate_id(self):
if self.data is not None:
self.key_id = Hash.sha1(self.data)[-8:]
else:
self.key_id = Bytes('\x00'*8)
class Authorizer:
_NULL_AUTH_KEY = AuthKey(None)
def __init__(self, dc, test_mode=False, connection=None ):
self.dc = dc
self.test_mode = test_mode
self._connection = connection
self._server_salt = None
self._server_time_diff = -1
self._auth_key = None
def get_auth_key(self):
return self._auth_key
def get_server_salt(self):
return self._server_salt
def get_server_time_diff(self):
return self._server_time_diff
def run(self):
conn = self._connection
autoclose = False
if conn is None:
conn = self._init_connection(self.dc, self.test_mode)
autoclose = True
raw_nonce = os.urandom(16)
obj = krgram.tl.protocol.auth.req_pq(nonce=raw_nonce)
        nonce = obj.nonce
resp_tl_obj = self._send_plain_req(conn, obj)
if resp_tl_obj.ID != krgram.tl.protocol.auth.resPQ.ID:
raise UnexpectedResponseError("Expected a resPQ object")
if resp_tl_obj.nonce != obj.nonce:
raise SecurityError("nonce != (server)nonce")
server_nonce, pub_srvs_fingerprints = resp_tl_obj.server_nonce, resp_tl_obj.server_public_key_fingerprints
# check fingerprint
curr_key = None
for f in pub_srvs_fingerprints:
cpk = TelegramServersPublicKeys().get_key_by_fingerprint(f)
if cpk is not None:
curr_key = cpk
break
# compute p and q
        pq = resp_tl_obj.pq
p, q = factorize(pq)
p, q = (p, q) if p < q else (q, p)
new_nonce = os.urandom(32)
obj = krgram.tl.protocol.auth.p_q_inner_data(p=p,
q=q,
pq=pq,
server_nonce=server_nonce,
nonce=nonce,
new_nonce=new_nonce)
pq_inner_data_serialized = obj.serialize()
data_with_hash = Hash.sha1(pq_inner_data_serialized) + pq_inner_data_serialized
if len(data_with_hash) < 255:
data_with_hash += Bytes('\0' * (255 - len(data_with_hash)))
# encrypt with rsa and send data
enc_data = Crypto.rsa_encrypt(data_with_hash, curr_key)
obj = krgram.tl.protocol.auth.req_DH_params(nonce=nonce,
p=p,
q=q,
server_nonce=server_nonce,
public_key_fingerprint=curr_key.fingerprint,
encrypted_data=enc_data)
resp_tl_obj = self._send_plain_req(conn, obj)
if resp_tl_obj.nonce != nonce or resp_tl_obj.server_nonce != server_nonce:
raise SecurityError()
server_nonce_raw = TL_int128(server_nonce).serialize()
new_nonce_raw = TL_int256(new_nonce).serialize()
server_salt = new_nonce_raw[:8] ^ server_nonce_raw[:8]
server_dh = TLEncryptor.decrypt_server_dh(new_nonce_raw, server_nonce_raw, resp_tl_obj.encrypted_answer)
# TODO: check hash
# answer_hash = server_dh.data_hash
answer = server_dh.data
id_answer_class = Bytes(answer[:4]).to_int(False, False)
register_class = TLRegister.get_func_type(id_answer_class)
if register_class is None or register_class.ID != krgram.tl.protocol.auth.server_DH_inner_data.ID:
raise UnexpectedResponseError("Unexpected response type from server")
tlstream = TLBytesStream(answer)
resp_tl_obj = krgram.tl.protocol.auth.server_DH_inner_data().deserialize_from(tlstream)
g_a = resp_tl_obj.g_a
server_time_diff = resp_tl_obj.server_time - int(time())
b_raw = os.urandom(256)
b = Bytes(b_raw).to_int()
g = resp_tl_obj.g
dh_prime = resp_tl_obj.dh_prime
g_b = pow(g, b, dh_prime)
retry_id = 0
data = krgram.tl.protocol.auth.client_DH_inner_data(nonce=nonce,
server_nonce=server_nonce,
retry_id=retry_id,
g_b=g_b).serialize()
enc_data = TLEncryptor.encrypt_client_dh(data, server_dh.aes_key_iv)
obj = krgram.tl.protocol.auth.set_client_DH_params(nonce=nonce,
server_nonce=server_nonce,
encrypted_data=enc_data)
resp_tl_obj = self._send_plain_req(conn, obj)
if resp_tl_obj.ID != krgram.tl.protocol.auth.dh_gen_ok.ID:
raise Exception("DH generation not succesfull")
auth_key = pow(g_a, b, dh_prime)
auth_key = Bytes.from_int(auth_key, 256)
if autoclose:
conn.close()
auth_key = AuthKey(auth_key)
self._server_salt = TLBaseSerializer.deserialize_long(server_salt)
self._auth_key = auth_key
self._server_time_diff = server_time_diff
def _init_connection(self, dc, test_mode):
if dc is None:
dc = DataCenters.get_default().get_datacenter(1)
conn = MTProtoAbridgedConnection()
ip = dc.production_ip if not test_mode else dc.test_ip
try:
conn.open( ip, 443 )
return conn
except:
raise Exception("Cannot open a connection on %s:%d" %(ip, 443))
def _send_plain_req(self, conn, req):
if not isinstance(req, TLFunction):
raise TypeError("obj must be an TLFunction instance")
msg_id = MsgId()()
raw_msg = PlainMsg(msg_id, req)
conn.send_message( raw_msg )
resp_msg = PlainMsg(0, None)
conn.read_message_to(resp_msg)
return resp_msg.content
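# Usage sketch (hypothetical; network access and reachable Telegram data
# centers are required, so this is shown as a comment only):
#
#   authorizer = Authorizer(dc=None, test_mode=True)
#   authorizer.run()
#   auth_key = authorizer.get_auth_key()
#   server_salt = authorizer.get_server_salt()
#   time_diff = authorizer.get_server_time_diff()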
|
nilq/baby-python
|
python
|
import sqlalchemy
from pydantic import BaseModel
from .model import Base
class Category(BaseModel):
""""""
name: str
color: str
class CategoryTable(Base):
__tablename__ = "category"
name = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
color = sqlalchemy.Column(sqlalchemy.String)
def __init__(self, category: Category):
self.name = category.name
self.color = category.color
def __repr__(self):
return f"Category<{self.name}, {self.color}>"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import sys
import gzip
import ast
import json
class Dumper:
def __init__(self, path):
self.path = path
self.open_file = {}
def close_all(self):
#close previous file(s)
for name in self.open_file:
self.open_file[name].close()
self.open_file = {}
def dump(self, filename, data):
if filename not in self.open_file:
self.close_all()
f = gzip.open(self.path+filename+'.jsonl.gz', 'wt')
self.open_file[filename] = f
else:
f = self.open_file[filename]
f.write(data+'\n')
def readlineq(self, f):
# Read line and quit if no more data
line = f.readline()
if line == '':
self.close_all()
sys.exit(0)
else:
return line
dp = Dumper(sys.argv[2])
def get_value_tuples(line):
values = line.partition(' VALUES ')[-1].strip().replace('NULL', 'None')
if values[-1] == ';':
values = values[:-1]
return ast.literal_eval(values)
def generate_json_line(columns, data, noiter=False):
jl = {}
if noiter:
jl[columns[0]] = data
else:
for i in range(len(columns)):
jl[columns[i]] = data[i]
return json.dumps(jl, ensure_ascii=False)
with gzip.open(sys.argv[1], 'rt') as f:
# look for the beginning of the table definition
while True:
while True:
line = dp.readlineq(f)
            if line.startswith('CREATE TABLE'): break  # until a table definition is found
table = line.split('`')[1] # name of the table
# get names and types of columns
columns = []
while True:
line = dp.readlineq(f)
if line.startswith(' `'):
                columns.append(line.split('`')[1])
else: break
# look for the beginning of the data
while True:
line = dp.readlineq(f)
if line.startswith('INSERT INTO'): break
while line.startswith('INSERT INTO'):
if line.split('`')[1] == table: # check if the INSERT is for the correct table
data = get_value_tuples(line)
if isinstance(data, str) or isinstance(data, int) or isinstance(data, float):
# Case of a table with a single value
dp.dump(table, generate_json_line(columns, data, noiter=True))
else:
for i in data:
dp.dump(table, generate_json_line(columns, i))
line = dp.readlineq(f)
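# Invocation sketch, inferred from the sys.argv usage above (the script name
# is a hypothetical placeholder):
#
#   python sql_dump_to_jsonl.py dump.sql.gz out/
#
# argv[1] is the gzipped SQL dump; argv[2] is the output path prefix to which
# Dumper appends "<table>.jsonl.gz" for every table it encounters.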
|
nilq/baby-python
|
python
|
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gym
import numpy as np
from smarts.core.controllers import ActionSpaceType
# The space of the adapted action.
gym_space: gym.Space = gym.spaces.Box(
low=np.array([0.0, 0.0, -1.0]),
high=np.array([1.0, 1.0, 1.0]),
dtype=np.float32,
)
# This adapter requires SMARTS to ensure that the agent is provided a "continuous"
# controller, that is, a controller that allows for actions in the form of an array:
# [throttle, brake, steering].
required_interface = {"action": ActionSpaceType.Continuous}
def adapt(action: np.ndarray) -> np.ndarray:
"""Adapts a given action into an action that SMARTS can understand for a continuous
controller. This adapter expects that the action is already a valid continuous
controller action.
Args:
action (numpy.ndarray): The action to adapt. The action should be in the form of
[throttle, brake, steering] where each element is a float. The throttle
element is in the range [0, 1], the brake element is in the range [0, 1] and
the steering element is in the range [-1, 1].
Returns:
np.ndarray: The same action that was passed in.
"""
return action
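# Minimal self-check added as an illustration (SMARTS itself normally supplies
# the action): sample a valid continuous action and pass it through the
# identity adapter defined above.
if __name__ == "__main__":
    sample = gym_space.sample()  # [throttle, brake, steering]
    assert np.array_equal(adapt(sample), sample)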
|
nilq/baby-python
|
python
|
import os
class Config:
title = 'VocView'
# URL root of this web application. This gets set in the before_first_request function.
url_root = None
# Subdirectory of base URL. Example, the '/corveg' part of 'vocabs.tern.org.au/corveg'
SUB_URL = ''
# Path of the application's directory.
APP_DIR = os.path.dirname(os.path.realpath(__file__))
# Vocabulary sources config. file.
VOCAB_SOURCES = 'vocabs.yaml'
# Rule-based reasoner
reasoner = False
# -- Triplestore ---------------------------------------------------------------------------------------------------
#
# Options:
#
# - memory
# - No persistence, load in triples on instance start-up (slow start-up time). Graph is required to be kept in
# memory during application's lifetime. Not recommended due to slow start-up.
# - Difficulty: easy
#
# - pickle
# - Persistent store by saving a binary (pickle) copy of the Python rdflib.Graph object to disk. Graph is
# required to be in memory during application's lifetime. Fast start-up time and fast performance, uses
# significantly more memory than Sleepycat. Exact same as the memory method except it persists between
# application restarts.
# - Difficulty: easy
#
# - sleepycat
# - Persistent store by storing the triples in the now defunct Sleepycat's Berkeley DB store. Requires external
# libraries to be installed on the system before using. Does not require to have the whole triplestore in
# memory. Performance is slightly slower than the pickle method (maybe around 10-20%) but uses much less memory.
# For each request, only the required triples are loaded into the application's memory.
# - Difficulty: intermediate
triplestore_type = 'pickle'
    # The time for which the persistent store is valid before re-harvesting from its sources
store_hours = 0
store_minutes = 10
# Triplestore disk path
_triplestore_name_pickle = 'triplestore.p'
triplestore_path_pickle = os.path.join(APP_DIR, _triplestore_name_pickle)
_triplestore_name_sleepy_cat = 'triplestore'
triplestore_path_sleepy_cat = os.path.join(APP_DIR, _triplestore_name_sleepy_cat)
|
nilq/baby-python
|
python
|
list1 = ['car', 'ara', 'cabc']
c = 0
for i in list1:
    # The original condition was left unfinished ("if("); counting palindromes
    # is an assumption made here just to keep the snippet runnable.
    if i == i[::-1]:
        c = c + 1
print(c)
|
nilq/baby-python
|
python
|
import pyeccodes.accessors as _
def load(h):
h.add(_.Unsigned('n2', 2))
h.add(_.Unsigned('n3', 2))
h.add(_.Unsigned('nd', 3))
h.alias('numberOfDiamonds', 'nd')
h.alias('Nj', 'nd')
h.add(_.Unsigned('Ni', 3))
h.add(_.Codeflag('numberingOrderOfDiamonds', 1, "grib1/grid.192.78.3.9.table"))
h.add(_.Signed('latitudeOfIcosahedronPole', 4))
h.add(_.Unsigned('longitudeOfIcosahedronPole', 4))
h.add(_.Unsigned('longitudeOfFirstDiamondCenterLine', 4))
h.add(_.Unsigned('reservedOctet', 1))
h.add(_.Codeflag('scanningModeForOneDiamond', 1, "grib1/grid.192.78.3.10.table"))
h.add(_.Transient('numberOfPoints', ((_.Get('nd') * (_.Get('Ni') + 1)) * (_.Get('Ni') + 1))))
h.alias('numberOfDataPoints', 'numberOfPoints')
h.add(_.Number_of_values('numberOfValues', _.Get('values'), _.Get('bitsPerValue'), _.Get('numberOfDataPoints'), _.Get('bitmapPresent'), _.Get('bitmap'), _.Get('numberOfCodedValues')))
|
nilq/baby-python
|
python
|
import struct
from io import BytesIO
p8 = lambda x:struct.pack("<B", x)
u8 = lambda x:struct.unpack("<B", x)[0]
p16 = lambda x:struct.pack("<H", x)
u16 = lambda x:struct.unpack("<H", x)[0]
p32 = lambda x:struct.pack("<I", x)
u32 = lambda x:struct.unpack("<I", x)[0]
p64 = lambda x:struct.pack("<Q", x)
u64 = lambda x:struct.unpack("<Q", x)[0]
def align(addr, alignment=0x1000):
mask = ((1<<64)-1) & -alignment
return (addr + (alignment-1)) & mask
def struct2str(s):
return BytesIO(s).read()
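# Small self-check illustrating the helpers above (added as an example, not
# part of the original module).
if __name__ == "__main__":
    assert u32(p32(0xdeadbeef)) == 0xdeadbeef        # pack/unpack round-trip
    assert u64(p64(2 ** 63 - 1)) == 2 ** 63 - 1
    assert align(0x1001) == 0x2000                   # rounds up to next page
    assert align(0x30, alignment=0x10) == 0x30       # already aligned
    print("struct helpers OK")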
|
nilq/baby-python
|
python
|
from sacnn.core.we import get_word_to_vector
word_to_vector, WORD_DIMENSION = get_word_to_vector()
|
nilq/baby-python
|
python
|
from __future__ import annotations
import collections
import copy
import json
import logging
import operator
import os
from typing import Any, Dict, List, Optional, Tuple
import joblib
from poker_ai import utils
from poker_ai.poker.card import Card
from poker_ai.poker.engine import PokerEngine
from poker_ai.games.short_deck.player import ShortDeckPokerPlayer
from poker_ai.poker.pot import Pot
from poker_ai.poker.table import PokerTable
logger = logging.getLogger("poker_ai.games.short_deck.state")
InfoSetLookupTable = Dict[str, Dict[Tuple[int, ...], str]]
def new_game(
n_players: int, card_info_lut: InfoSetLookupTable = {}, **kwargs
) -> ShortDeckPokerState:
"""
Create a new game of short deck poker.
...
Parameters
----------
n_players : int
Number of players.
card_info_lut : InfoSetLookupTable
Card information cluster lookup table.
Returns
-------
state : ShortDeckPokerState
Current state of the game
"""
pot = Pot()
players = [
ShortDeckPokerPlayer(player_i=player_i, initial_chips=10000, pot=pot)
for player_i in range(n_players)
]
if card_info_lut:
# Don't reload massive files, it takes ages.
state = ShortDeckPokerState(
players=players,
load_card_lut=False,
**kwargs
)
state.card_info_lut = card_info_lut
else:
# Load massive files.
state = ShortDeckPokerState(
players=players,
**kwargs
)
return state
class ShortDeckPokerState:
"""The state of a Short Deck Poker game at some given point in time.
    The class is immutable and a new state is instantiated once an
    action is applied via the `ShortDeckPokerState.apply_action` method.
"""
def __init__(
self,
players: List[ShortDeckPokerPlayer],
small_blind: int = 50,
big_blind: int = 100,
lut_path: str = ".",
pickle_dir: bool = False,
load_card_lut: bool = True,
):
"""Initialise state."""
n_players = len(players)
if n_players <= 1:
raise ValueError(
f"At least 2 players must be provided but only {n_players} "
f"were provided."
)
self._pickle_dir = pickle_dir
if load_card_lut:
self.card_info_lut = self.load_card_lut(lut_path, self._pickle_dir)
else:
self.card_info_lut = {}
# Get a reference of the pot from the first player.
self._table = PokerTable(
players=players, pot=players[0].pot, include_ranks=[10, 11, 12, 13, 14]
)
# Get a reference of the initial number of chips for the payout.
self._initial_n_chips = players[0].n_chips
self.small_blind = small_blind
self.big_blind = big_blind
self._poker_engine = PokerEngine(
table=self._table, small_blind=small_blind, big_blind=big_blind
)
# Reset the pot, assign betting order to players (might need to remove
# this), assign blinds to the players.
self._poker_engine.round_setup()
# Deal private cards to players.
self._table.dealer.deal_private_cards(self._table.players)
# Store the actions as they come in here.
self._history: Dict[str, List[str]] = collections.defaultdict(list)
self._betting_stage = "pre_flop"
self._betting_stage_to_round: Dict[str, int] = {
"pre_flop": 0,
"flop": 1,
"turn": 2,
"river": 3,
"show_down": 4,
}
# Rotate the big and small blind to the final positions for the pre
# flop round only.
player_i_order: List[int] = [p_i for p_i in range(n_players)]
self.players[0].is_small_blind = True
self.players[1].is_big_blind = True
self.players[-1].is_dealer = True
self._player_i_lut: Dict[str, List[int]] = {
"pre_flop": player_i_order[2:] + player_i_order[:2],
"flop": player_i_order,
"turn": player_i_order,
"river": player_i_order,
"show_down": player_i_order,
"terminal": player_i_order,
}
self._skip_counter = 0
self._first_move_of_current_round = True
self._reset_betting_round_state()
for player in self.players:
player.is_turn = False
self.current_player.is_turn = True
def __repr__(self):
"""Return a helpful description of object in strings and debugger."""
return f"<ShortDeckPokerState player_i={self.player_i} betting_stage={self._betting_stage}>"
def apply_action(self, action_str: Optional[str]) -> ShortDeckPokerState:
"""Create a new state after applying an action.
Parameters
----------
action_str : str or None
The description of the action the current player is making. Can be
any of {"fold, "call", "raise"}, the latter two only being possible
if the agent hasn't folded already.
Returns
-------
new_state : ShortDeckPokerState
A poker state instance that represents the game in the next
timestep, after the action has been applied.
"""
if action_str not in self.legal_actions:
raise ValueError(
f"Action '{action_str}' not in legal actions: " f"{self.legal_actions}"
)
# Deep copy the parts of state that are needed that must be immutable
# from state to state.
lut = self.card_info_lut
self.card_info_lut = {}
new_state = copy.deepcopy(self)
new_state.card_info_lut = self.card_info_lut = lut
# An action has been made, so alas we are not in the first move of the
# current betting round.
new_state._first_move_of_current_round = False
if action_str is None:
# Assert active player has folded already.
assert (
not new_state.current_player.is_active
), "Active player cannot do nothing!"
elif action_str == "call":
action = new_state.current_player.call(players=new_state.players)
logger.debug("calling")
elif action_str == "fold":
action = new_state.current_player.fold()
elif action_str == "raise":
bet_n_chips = new_state.big_blind
if new_state._betting_stage in {"turn", "river"}:
bet_n_chips *= 2
biggest_bet = max(p.n_bet_chips for p in new_state.players)
n_chips_to_call = biggest_bet - new_state.current_player.n_bet_chips
raise_n_chips = bet_n_chips + n_chips_to_call
logger.debug(f"betting {raise_n_chips} n chips")
action = new_state.current_player.raise_to(n_chips=raise_n_chips)
new_state._n_raises += 1
        else:
            raise ValueError(
                f"Unexpected action string '{action_str}'; expected one of "
                f"None, 'call', 'fold' or 'raise'."
            )
# Update the new state.
skip_actions = ["skip" for _ in range(new_state._skip_counter)]
new_state._history[new_state.betting_stage] += skip_actions
new_state._history[new_state.betting_stage].append(str(action))
new_state._n_actions += 1
new_state._skip_counter = 0
# Player has made move, increment the player that is next.
while True:
new_state._move_to_next_player()
# If we have finished betting, (i.e: All players have put the
# same amount of chips in), then increment the stage of
# betting.
finished_betting = not new_state._poker_engine.more_betting_needed
if finished_betting and new_state.all_players_have_actioned:
                # We have done at least one full round of betting, increment
# stage of the game.
new_state._increment_stage()
new_state._reset_betting_round_state()
new_state._first_move_of_current_round = True
if not new_state.current_player.is_active:
new_state._skip_counter += 1
assert not new_state.current_player.is_active
elif new_state.current_player.is_active:
if new_state._poker_engine.n_players_with_moves == 1:
# No players left.
new_state._betting_stage = "terminal"
if not new_state._table.community_cards:
new_state._poker_engine.table.dealer.deal_flop(new_state._table)
# Now check if the game is terminal.
if new_state._betting_stage in {"terminal", "show_down"}:
# Distribute winnings.
new_state._poker_engine.compute_winners()
break
for player in new_state.players:
player.is_turn = False
new_state.current_player.is_turn = True
return new_state
@staticmethod
def load_card_lut(
lut_path: str = ".",
pickle_dir: bool = False
) -> Dict[str, Dict[Tuple[int, ...], str]]:
"""
Load card information lookup table.
...
Parameters
----------
lut_path : str
            Path to lookup table.
pickle_dir : bool
Whether the lut_path is a path to pickle files or not. Pickle files
are deprecated for the lut.
Returns
-------
        card_info_lut : InfoSetLookupTable
Card information cluster lookup table.
"""
if pickle_dir:
logger.info("Loading card information lut in deprecated way")
file_names = [
"preflop_lossless.pkl",
"flop_lossy_2.pkl",
"turn_lossy_2.pkl",
"river_lossy_2.pkl",
]
betting_stages = ["pre_flop", "flop", "turn", "river"]
card_info_lut: Dict[str, Dict[Tuple[int, ...], str]] = {}
for file_name, betting_stage in zip(file_names, betting_stages):
file_path = os.path.join(lut_path, file_name)
if not os.path.isfile(file_path):
raise ValueError(
f"File path not found {file_path}. Ensure lut_path is "
f"set to directory containing pickle files"
)
with open(file_path, "rb") as fp:
card_info_lut[betting_stage] = joblib.load(fp)
elif lut_path:
logger.info(f"Loading card from single file at path: {lut_path}")
card_info_lut = joblib.load(lut_path + '/card_info_lut.joblib')
else:
card_info_lut = {}
return card_info_lut
def _move_to_next_player(self):
"""Ensure state points to next valid active player."""
self._player_i_index += 1
if self._player_i_index >= len(self.players):
self._player_i_index = 0
def _reset_betting_round_state(self):
"""Reset the state related to counting types of actions."""
self._all_players_have_made_action = False
self._n_actions = 0
self._n_raises = 0
self._player_i_index = 0
self._n_players_started_round = self._poker_engine.n_active_players
while not self.current_player.is_active:
self._skip_counter += 1
self._player_i_index += 1
def _increment_stage(self):
"""Once betting has finished, increment the stage of the poker game."""
# Progress the stage of the game.
if self._betting_stage == "pre_flop":
# Progress from private cards to the flop.
self._betting_stage = "flop"
self._poker_engine.table.dealer.deal_flop(self._table)
elif self._betting_stage == "flop":
# Progress from flop to turn.
self._betting_stage = "turn"
self._poker_engine.table.dealer.deal_turn(self._table)
elif self._betting_stage == "turn":
# Progress from turn to river.
self._betting_stage = "river"
self._poker_engine.table.dealer.deal_river(self._table)
elif self._betting_stage == "river":
# Progress to the showdown.
self._betting_stage = "show_down"
elif self._betting_stage in {"show_down", "terminal"}:
pass
else:
raise ValueError(f"Unknown betting_stage: {self._betting_stage}")
@property
def community_cards(self) -> List[Card]:
"""Return all shared/public cards."""
return self._table.community_cards
@property
def private_hands(self) -> Dict[ShortDeckPokerPlayer, List[Card]]:
"""Return all private hands."""
return {p: p.cards for p in self.players}
@property
def initial_regret(self) -> Dict[str, float]:
"""Returns the default regret for this state."""
return {action: 0 for action in self.legal_actions}
@property
def initial_strategy(self) -> Dict[str, float]:
"""Returns the default strategy for this state."""
return {action: 0 for action in self.legal_actions}
@property
def betting_stage(self) -> str:
"""Return betting stage."""
return self._betting_stage
@property
def all_players_have_actioned(self) -> bool:
"""Return whether all players have made atleast one action."""
return self._n_actions >= self._n_players_started_round
@property
def n_players_started_round(self) -> bool:
"""Return n_players that started the round."""
return self._n_players_started_round
@property
def player_i(self) -> int:
"""Get the index of the players turn it is."""
return self._player_i_lut[self._betting_stage][self._player_i_index]
@player_i.setter
def player_i(self, _: Any):
"""Raise an error if player_i is set."""
raise ValueError(f"The player_i property should not be set.")
@property
def betting_round(self) -> int:
"""Betting stagee in integer form."""
try:
betting_round = self._betting_stage_to_round[self._betting_stage]
except KeyError:
raise ValueError(
f"Attemped to get betting round for stage "
f"{self._betting_stage} but was not supported in the lut with "
f"keys: {list(self._betting_stage_to_round.keys())}"
)
return betting_round
@property
def info_set(self) -> str:
"""Get the information set for the current player."""
cards = sorted(
self.current_player.cards,
key=operator.attrgetter("eval_card"),
reverse=True,
)
cards += sorted(
self._table.community_cards,
key=operator.attrgetter("eval_card"),
reverse=True,
)
if self._pickle_dir:
lookup_cards = tuple([card.eval_card for card in cards])
else:
lookup_cards = tuple(cards)
try:
cards_cluster = self.card_info_lut[self._betting_stage][lookup_cards]
except KeyError:
if self.betting_stage not in {"terminal", "show_down"}:
raise ValueError("You should have these cards in your lut.")
return "default info set, please ensure you load it correctly"
# Convert history from a dict of lists to a list of dicts as I'm
# paranoid about JSON's lack of care with insertion order.
info_set_dict = {
"cards_cluster": cards_cluster,
"history": [
{betting_stage: [str(action) for action in actions]}
for betting_stage, actions in self._history.items()
],
}
return json.dumps(
info_set_dict, separators=(",", ":"), cls=utils.io.NumpyJSONEncoder
)
@property
def payout(self) -> Dict[int, int]:
"""Return player index to payout number of chips dictionary."""
n_chips_delta = dict()
for player_i, player in enumerate(self.players):
n_chips_delta[player_i] = player.n_chips - self._initial_n_chips
return n_chips_delta
@property
def is_terminal(self) -> bool:
"""Returns whether this state is terminal or not.
The state is terminal once all rounds of betting are complete and we
are at the show down stage of the game or if all players have folded.
"""
return self._betting_stage in {"show_down", "terminal"}
@property
def players(self) -> List[ShortDeckPokerPlayer]:
"""Returns players in table."""
return self._table.players
@property
def current_player(self) -> ShortDeckPokerPlayer:
"""Returns a reference to player that makes a move for this state."""
return self._table.players[self.player_i]
@property
def legal_actions(self) -> List[Optional[str]]:
"""Return the actions that are legal for this game state."""
actions: List[Optional[str]] = []
if self.current_player.is_active:
actions += ["fold", "call"]
if self._n_raises < 3:
# In limit hold'em we can only bet/raise if there have been
# less than three raises in this round of betting, or if there
# are two players playing.
actions += ["raise"]
else:
actions += [None]
return actions
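# Usage sketch (hypothetical; it assumes a card-information lookup table is
# available on disk at `lut_path`):
#
#   import random
#   state = new_game(n_players=3, lut_path="./lut_dir")   # path is an assumption
#   while not state.is_terminal:
#       state = state.apply_action(random.choice(state.legal_actions))
#   print(state.payout)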
|
nilq/baby-python
|
python
|
import json
class SwearWords(object):
    def __init__(self):
        with open('data.json') as f:  # close the file handle deterministically
            self.data = json.load(f)
    def filter_words(self, text, symbol="*"):
        text = text.split()
for i in range(len(text)):
if text[i] in self.data['word']:
text[i] = symbol * len(text[i])
return " ".join(text)
|
nilq/baby-python
|
python
|
import RPi.GPIO as GPIO
led_pin = 29
button_pin = 40
buzzer_pin = 31
GPIO.setmode(GPIO.BOARD)
GPIO.setup(button_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(led_pin, GPIO.OUT)
GPIO.setup(buzzer_pin, GPIO.OUT)
try:
    while True:
        if GPIO.input(button_pin) == GPIO.HIGH:
            GPIO.output(led_pin, True)
            GPIO.output(buzzer_pin, True)
        else:
            GPIO.output(led_pin, False)
            GPIO.output(buzzer_pin, False)
except KeyboardInterrupt:
    pass
finally:
    GPIO.cleanup()  # release the GPIO pins on exit
|
nilq/baby-python
|
python
|
"""
MIT License
Copyright (c) 2021 isaa-ctaylor
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import discord
from discord.ext import commands
import asyncio
from typing import Union
class Economy(commands.Cog):
def __init__(self, bot):
self.bot = bot
async def _register_member(self, member_id, starting_value):
async with self.bot.db.pool.acquire() as con:
data = await con.fetch("SELECT eco_enabled FROM userdata WHERE user_id = $1", member_id)
if data:
if data[0]["eco_enabled"]:
raise NameError
else:
async with self.bot.db.pool.acquire() as con:
await con.execute("UPDATE userdata SET eco_enabled = $1, wallet = $2, bank = $2", True, starting_value)
else:
async with self.bot.db.pool.acquire() as con:
await con.execute("INSERT INTO userdata(user_id, wallet, bank, eco_enabled) values($1, $2, $2, $3) ON CONFLICT (user_id) DO UPDATE SET wallet = $2, bank = $1, eco_enabled = $3 WHERE userdata.user_id = $1", member_id, starting_value, True)
async with self.bot.db.pool.acquire() as con:
return (await con.fetch("SELECT wallet, bank, eco_enabled FROM userdata WHERE user_id = $1", member_id))[0]
async def _check_registered(self, member_id):
async with self.bot.db.pool.acquire() as con:
data = await con.fetch("SELECT eco_enabled from userdata WHERE user_id = $1", member_id)
return bool(data and dict(data[0])["eco_enabled"])
@commands.command(name="register")
async def _register(self, ctx):
try:
await self._register_member(ctx.author.id, 100)
embed = discord.Embed(
title="Done!", description="I have set up a bank account for you!", colour=self.bot.good_embed_colour)
return await ctx.reply(embed=embed, mention_author=False)
except NameError:
embed = discord.Embed(
title="Error!", description="You already have an account!")
return await ctx.reply(embed=embed, mention_author=False)
@commands.command(name="unregister")
async def _unregister(self, ctx):
if await self._check_registered(ctx.author.id):
async with self.bot.db.pool.acquire() as con:
await con.execute("UPDATE userdata SET eco_enabled = $1, wallet = $2, bank = $2 WHERE user_id = $3", False, 0, ctx.author.id)
embed = discord.Embed(
title="Done!", description=f"Sad to see you go! If you want to come back, use the `{ctx.prefix}register` command.", colour=self.bot.good_embed_colour)
else:
embed = discord.Embed(
title="Error!", description=f"You don't have an account! Use the `{ctx.prefix}register` command to make an account", colour=self.bot.bad_embed_colour)
await ctx.reply(embed=embed, mention_author=False)
@commands.command(name="balance", aliases=["bal"])
async def _balance(self, ctx, *, member: discord.Member = None):
member = member or ctx.author
async with self.bot.db.pool.acquire() as con:
data = await con.fetch("SELECT wallet, bank, eco_enabled FROM userdata WHERE user_id = $1", member.id)
if data:
data = dict(data[0])
else:
if member.id != ctx.author.id:
embed = discord.Embed(
title="Error!", description="That person doesn't have an account!", colour=self.bot.bad_embed_colour)
return await ctx.reply(embed=embed, mention_author=False)
else:
data = dict(await self._register_member(member.id, 100))
embed = discord.Embed(
description=f"You didnt have an account, so I made one for you", colour=self.bot.good_embed_colour)
await ctx.reply(embed=embed, mention_author=False)
if not data["eco_enabled"]:
if member.id != ctx.author.id:
embed = discord.Embed(
title="Error!", description="That person doesn't have an account!", colour=self.bot.bad_embed_colour)
return await ctx.reply(embed=embed, mention_author=False)
else:
data = dict(await self._register_member(member.id, 100))
embed = discord.Embed(
description=f"You didnt have an account, so I made one for you", colour=self.bot.good_embed_colour)
await ctx.reply(embed=embed, mention_author=False)
desc_string = f"**`Wallet:`** {data['wallet'] or 0}\n**`Bank:`** {data['bank'] or '0'}\n**`Total:`** {(data['wallet'] or 0) + (data['bank'] or 0)}"
embed = discord.Embed(title=f"{member.name}'s balance",
description=desc_string, colour=self.bot.neutral_embed_colour)
embed.set_thumbnail(url=str(member.avatar.url))
await ctx.reply(embed=embed, mention_author=False)
@commands.command(name="withdraw", aliases=["with"])
async def _withdraw(self, ctx, *, amount: Union[int, str]):
if await self._check_registered(ctx.author.id):
if isinstance(amount, str):
if amount.lower() == "all":
async with self.bot.db.pool.acquire() as con:
data = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id)
data = dict(data[0])
newwallet = data["wallet"] + data["bank"]
newbank = 0
await con.execute("UPDATE userdata SET wallet = $1, bank = $2", newwallet, newbank)
await self._balance(ctx)
else:
embed = discord.Embed(title="Error!", description=f"`{amount}` isn't an amount I can withdraw!", colour=self.bot.bad_embed_colour)
await ctx.reply(embed=embed, mention_author=False)
elif isinstance(amount, int):
async with self.bot.db.pool.acquire() as con:
data = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id)
data = dict(data[0])
if data["bank"] < amount:
embed = discord.Embed(title=f"Error!", description=f"You don't have enough coins to do that!", colour=self.bot.bad_embed_colour)
return await ctx.reply(embed=embed, mention_author=False)
else:
newbank = data["bank"] - amount
newwallet = data["wallet"] + amount
await con.execute("UPDATE userdata SET wallet = $1, bank = $2", newwallet, newbank)
await self._balance(ctx)
else:
embed = discord.Embed(title="Error!", description=f"You dont have an account! Use the `{ctx.prefix}register` command to make one!", colour=self.bot.bad_embed_colour)
await ctx.reply(embed=embed, mention_author=False)
@commands.command(name="deposit", aliases=["dep"])
async def _deposit(self, ctx, *, amount: Union[int, str]):
if await self._check_registered(ctx.author.id):
if isinstance(amount, str):
if amount.lower() == "all":
async with self.bot.db.pool.acquire() as con:
data = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id)
data = dict(data[0])
newwallet = 0
newbank = data["wallet"] + data["bank"]
await con.execute("UPDATE userdata SET wallet = $1, bank = $2", newwallet, newbank)
await self._balance(ctx)
else:
embed = discord.Embed(title="Error!", description=f"`{amount}` isn't an amount I can withdraw!", colour=self.bot.bad_embed_colour)
await ctx.reply(embed=embed, mention_author=False)
elif isinstance(amount, int):
async with self.bot.db.pool.acquire() as con:
data = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id)
data = dict(data[0])
if data["wallet"] < amount:
embed = discord.Embed(title=f"Error!", description=f"You don't have enough coins to do that!", colour=self.bot.bad_embed_colour)
return await ctx.reply(embed=embed, mention_author=False)
else:
newbank = data["bank"] + amount
newwallet = data["wallet"] - amount
await con.execute("UPDATE userdata SET wallet = $1, bank = $2", newwallet, newbank)
await self._balance(ctx)
else:
embed = discord.Embed(title="Error!", description=f"You dont have an account! Use the `{ctx.prefix}register` command to make one!", colour=self.bot.bad_embed_colour)
await ctx.reply(embed=embed, mention_author=False)
@commands.command(name="transfer", aliases=["pay"])
async def _transfer(self, ctx, member: discord.Member, amount: int):
if await self._check_registered(ctx.author.id) and await self._check_registered(member.id):
if ctx.author.id == member.id and ctx.author.id != self.bot.owner_id:
return await ctx.error("You can't give yourself money.", reply=True)
if amount < 0:
return await ctx.error("Invalid amount.", reply=True)
async with self.bot.db.pool.acquire() as con:
authordata = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id)
memberdata = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", member.id)
if authordata and memberdata:
authordata = dict(authordata[0])
memberdata = dict(memberdata[0])
if amount > authordata["wallet"] and ctx.author.id != self.bot.owner_id:
embed = discord.Embed(title="Error!", description="You don't have enough coins to do that!", colour=self.bot.bad_embed_colour)
else:
newauthorwallet = authordata["wallet"]
if ctx.author.id != self.bot.owner_id:
newauthorwallet = authordata["wallet"] - amount
newmemberwallet = memberdata["wallet"] + amount
await con.execute("UPDATE userdata SET wallet = $1 WHERE user_id = $2", newauthorwallet, ctx.author.id)
await con.execute("UPDATE userdata SET wallet = $1 WHERE user_id = $2", newmemberwallet, member.id)
embed = discord.Embed(title="Done!", description=f"You successfully payed {member.mention} `{amount}` coins!", colour=self.bot.good_embed_colour)
return await ctx.reply(embed=embed, mention_author=False)
else:
async with self.bot.db.pool.acquire() as con:
authordata = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", ctx.author.id)
memberdata = await con.fetch("SELECT wallet, bank FROM userdata WHERE user_id = $1", member.id)
if not authordata:
embed = discord.Embed(title="Error!", description="You do not have an account!", colour=self.bot.bad_embed_colour)
else:
embed = discord.Embed(title="Error!", description=f"{member.mention} does not have an account!", colour=self.bot.bad_embed_colour)
await ctx.reply(embed=embed, mention_author=False)
@commands.command(name="rob", aliases=["steal"])
async def _rob(self, ctx, member: discord.Member):
pass
def setup(bot):
bot.add_cog(Economy(bot))
|
nilq/baby-python
|
python
|
"""
The Importer feature sets up the ability to work with cuneiform text(s)
one-on-one, whether it is the Code of Hammurabi, a collection of texts such as
ARM01, or whatever your research desires.
This cdli_corpus module is for working with text files having already been read
by file_importer. The file_lines required by CDLICorpus are taken from prior
use of FileImport(text_file).read_file().
e.g.:
# FileImport takes a txt file and reads it; this becomes file_lines.
text_path = os.path.join('texts', 'ARM01_texts.txt')
f_i = FileImport(text_path)
f_i.read_file()
ARM01 = f_i.file_lines
# CDLICorpus takes file_lines and uses it to work:
cdli = CDLICorpus()
cdli.parse_file(ARM01)
cdli.print_catalog()
The output of CDLICorpus can be further utilized by the ATFConverter
feature and its subsequent classes: Tokenizer, ATFConverter, Lemmatizer,
and PPrint.
"""
import re
__author__ = ['Andrew Deloucas <ADeloucas@g.harvard.com>']
__license__ = 'MIT License. See LICENSE.'
class CDLICorpus(object):
"""
Takes file_lines, prepares and organizes data.
"""
def __init__(self):
"""
Empty.
"""
self.chunks = []
self.catalog = {}
def parse_file(self, file_lines):
"""
Parses lines of file into a dictionary of texts.
:param file_lines: file_importer.file_lines
:return: Each text as the form:
Pnum: {'metadata': List of lines of metadata,
'pnum': P-number,
'edition': Bibliographic edition,
'raw_text': Raw lines of ATF text,
'transliteration': lines of transliteration,
'normalization': lines of normalization (if present),
'translation': lines of translation (if present)}
"""
# separate the file into chunks of text
chunks, chunk = [], []
# check to see what format the corpus is in, we assume that the headers are the same for all
# texts in the file... (maybe not safe?)
if re.match('Primary publication:', file_lines[0]):
header = re.compile('Primary publication:')
else:
header = re.compile(r'&?P\d{6}')
for line in file_lines:
if header.match(line):
if len(chunk) > 0: # pylint: disable=len-as-condition
chunks.append(chunk)
chunk = [line]
else:
if len(line) > 0: # pylint: disable=len-as-condition
chunk.append(line)
chunks.append(chunk)
self.chunks = chunks
# create a rich catalog from the chunks
re_translit = re.compile(r'(\d+\'?\.) ?(.*)')
re_normaliz = re.compile(r'(#tr\.ts:) ?(.*)')
re_translat = re.compile(r'(#tr\.en:) ?(.*)')
for chunk in self.chunks:
text = chunk
if chunk[0].startswith('Primary publication:'):
# we've got full metadata, add additional parsing later
metadata = chunk[:25]
text = chunk[26:]
else: # no metadata
metadata = []
pnum = ''.join([c for c in text[0].split('=')[0] if c != '&']).rstrip()
edition = text[0].split('=')[1].lstrip()
text = text[3:]
translit = []
normaliz = []
translat = []
for line in text:
if re.match(r'\d+\'?\.', line):
translit.append(re_translit.match(line).groups()[1])
if line.startswith('#tr.ts:'):
normaliz.append(re_normaliz.match(line).groups()[1])
if line.startswith('#tr.en:'):
translat.append(re_translat.match(line).groups()[1])
self.catalog[pnum] = {'metadata': metadata,
'pnum': pnum,
'edition': edition,
'raw_text': text,
'transliteration': translit,
'normalization': normaliz,
'translation': translat}
def toc(self):
"""
Returns a rich list of texts in the catalog.
"""
return [
f"Pnum: {key}, Edition: {self.catalog[key]['edition']}, "
f"length: {len(self.catalog[key]['transliteration'])} line(s)"
for key in sorted(self.catalog.keys())]
def list_pnums(self):
"""
Lists all Pnums in the catalog.
"""
return sorted([key for key in self.catalog])
def list_editions(self):
"""
Lists all text editions in the catalog.
"""
return sorted([self.catalog[key]['edition'] for key in self.catalog])
    def print_catalog(self, catalog_filter=None):
        """
        Prints out a catalog of all the texts in the corpus. Can be filtered by
        passing a list of keys that must be non-empty for a text to be shown.
        :param catalog_filter: list of keys to filter on; valid keys are pnum,
            edition, metadata, transliteration, normalization, and translation.
        """
        if catalog_filter is None:  # avoid a shared mutable default argument
            catalog_filter = []
keys = sorted(self.catalog.keys())
if len(catalog_filter) > 0: # pylint: disable=len-as-condition
valid = []
for key in keys:
for f in catalog_filter:
if len(self.catalog[key][f]) > 0: # pylint: disable=len-as-condition
valid.append(key)
keys = valid
for key in keys:
print(f"Pnum: {self.catalog[key]['pnum']}")
print(f"Edition: {self.catalog[key]['edition']}")
print(f"Metadata: {len(self.catalog[key]['metadata']) > 0}")
print(f"Transliteration: {len(self.catalog[key]['transliteration']) > 0}")
print(f"Normalization: {len(self.catalog[key]['normalization']) > 0}")
print(f"Translation: {len(self.catalog[key]['translation']) > 0}")
print()
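# Hedged usage sketch (corpus contents are hypothetical; `file_lines` comes
# from FileImport as shown in the module docstring):
#
#   cdli = CDLICorpus()
#   cdli.parse_file(file_lines)
#   cdli.toc()                                           # one-line summary per text
#   cdli.print_catalog(catalog_filter=['translation'])   # only texts with translations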
|
nilq/baby-python
|
python
|
# Copyright (c) 2016-2018, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
import asyncio
import pylru
from aiorpcx import run_in_thread
from electrumx.lib.hash import Base58Error, hash_to_hex_str  # Base58Error is caught in query() below
class ChainState(object):
'''Used as an interface by servers to request information about
blocks, transaction history, UTXOs and the mempool.
'''
def __init__(self, env, daemon, bp, notifications):
self._env = env
self._daemon = daemon
self._bp = bp
self._history_cache = pylru.lrucache(256)
# External interface pass-throughs for session.py
self.force_chain_reorg = self._bp.force_chain_reorg
self.tx_branch_and_root = self._bp.merkle.branch_and_root
self.read_headers = self._bp.read_headers
# Cache maintenance
notifications.add_callback(self._notify)
async def _notify(self, height, touched):
# Invalidate our history cache for touched hashXs
hc = self._history_cache
for hashX in set(hc).intersection(touched):
del hc[hashX]
async def broadcast_transaction(self, raw_tx):
return await self._daemon.sendrawtransaction([raw_tx])
async def daemon_request(self, method, args=()):
return await getattr(self._daemon, method)(*args)
def db_height(self):
return self._bp.db_height
def get_info(self):
'''Chain state info for LocalRPC and logs.'''
return {
'daemon': self._daemon.logged_url(),
'daemon_height': self._daemon.cached_height(),
'db_height': self.db_height(),
}
async def get_history(self, hashX):
'''Get history asynchronously to reduce latency.'''
def job():
# History DoS limit. Each element of history is about 99
# bytes when encoded as JSON. This limits resource usage
# on bloated history requests, and uses a smaller divisor
# so large requests are logged before refusing them.
limit = self._env.max_send // 97
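            # e.g. with a hypothetical max_send of 1,000,000 bytes, this
            # allows roughly 10,300 history entries per request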
return list(self._bp.get_history(hashX, limit=limit))
hc = self._history_cache
if hashX not in hc:
hc[hashX] = await run_in_thread(job)
return hc[hashX]
async def get_utxos(self, hashX):
'''Get UTXOs asynchronously to reduce latency.'''
def job():
return list(self._bp.get_utxos(hashX, limit=None))
return await run_in_thread(job)
def header_branch_and_root(self, length, height):
return self._bp.header_mc.branch_and_root(length, height)
def processing_new_block(self):
'''Return True if we're processing a new block.'''
return self._daemon.cached_height() > self.db_height()
def raw_header(self, height):
'''Return the binary header at the given height.'''
header, n = self._bp.read_headers(height, 1)
if n != 1:
raise IndexError(f'height {height:,d} out of range')
return header
def set_daemon_url(self, daemon_url):
self._daemon.set_urls(self._env.coin.daemon_urls(daemon_url))
return self._daemon.logged_url()
async def query(self, args, limit):
coin = self._env.coin
db = self._bp
lines = []
def arg_to_hashX(arg):
try:
script = bytes.fromhex(arg)
lines.append(f'Script: {arg}')
return coin.hashX_from_script(script)
except ValueError:
pass
try:
hashX = coin.address_to_hashX(arg)
lines.append(f'Address: {arg}')
return hashX
except Base58Error:
                print(f'Ignoring unknown arg: {arg}')
return None
for arg in args:
hashX = arg_to_hashX(arg)
if not hashX:
continue
n = None
for n, (tx_hash, height) in enumerate(
db.get_history(hashX, limit), start=1):
lines.append(f'History #{n:,d}: height {height:,d} '
f'tx_hash {hash_to_hex_str(tx_hash)}')
if n is None:
lines.append('No history found')
n = None
for n, utxo in enumerate(db.get_utxos(hashX, limit), start=1):
lines.append(f'UTXO #{n:,d}: tx_hash '
f'{hash_to_hex_str(utxo.tx_hash)} '
f'tx_pos {utxo.tx_pos:,d} height '
f'{utxo.height:,d} value {utxo.value:,d}')
if n is None:
lines.append('No UTXOs found')
balance = db.get_balance(hashX)
lines.append(f'Balance: {coin.decimal_value(balance):,f} '
f'{coin.SHORTNAME}')
return lines
|
nilq/baby-python
|
python
|
import sys
from secret import FLAG, REGISTER, TAPS
assert FLAG.startswith('flag')
assert len(REGISTER) == 16
assert len(TAPS) == 5
class LFSR:
def __init__(self, register, taps):
self.register = register
self.taps = taps
def next(self):
new = 0
ret = self.register[0]
for i in self.taps:
new ^= self.register[i]
self.register = self.register[1:] + [new]
return ret
def encrypt():
enc_flag = []
for char in FLAG.encode():
enc_char = 0
for binary in '{:08b}'.format(char):
enc_char <<= 1
enc_char += (int(binary) ^ lfsr.next())
enc_flag.append(enc_char)
return bytes(enc_flag)
if __name__ == '__main__':
lfsr = LFSR(REGISTER, TAPS)
while True:
print('> flag')
print('> server.py')
print('> exit')
cmd = input('> Command: ')
if cmd == 'exit':
sys.exit()
elif cmd == 'flag':
print(encrypt().hex())
elif cmd == 'server.py':
print(open('./server.py', 'r').read())
else:
print('Bad hacker')
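# Hedged usage sketch of the LFSR class itself (the register and taps below
# are hypothetical; the real REGISTER and TAPS live in the secret module):
#
#   lfsr = LFSR([0, 1] * 8, [0, 2, 5, 7, 11])
#   keystream = [lfsr.next() for _ in range(8)]   # first 8 output bits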
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# External import
import pytest
from numpy import array, pi
from os.path import join
from multiprocessing import cpu_count
# Pyleecan import
from pyleecan.Classes.ImportGenVectLin import ImportGenVectLin
from pyleecan.Classes.ImportMatrixVal import ImportMatrixVal
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.MagFEMM import MagFEMM
from pyleecan.Classes.Output import Output
from pyleecan.Functions.load import load
from pyleecan.definitions import DATA_DIR
from Tests import save_validation_path as save_path
@pytest.mark.long
@pytest.mark.validation
@pytest.mark.FEMM
@pytest.mark.MeshSol
def test_Magnetic_FEMM_sym():
"""Validation of a polar SIPMSM with surface magnet
Linear lamination material
From publication
    T. Lubin, S. Mezani, and A. Rezzoug,
“2-D Exact Analytical Model for Surface-Mounted Permanent-Magnet Motors with Semi-Closed Slots,”
IEEE Trans. Magn., vol. 47, no. 2, pp. 479–492, 2011.
Test compute the Flux in FEMM, with and without symmetry
and with MANATEE semi-analytical subdomain model
"""
SPMSM_003 = load(join(DATA_DIR, "Machine", "SPMSM_003.json"))
simu = Simu1(name="EM_SPMSM_FL_002", machine=SPMSM_003)
# Definition of the enforced output of the electrical module
N0 = 3000
Is = ImportMatrixVal(
value=array(
[
[6.97244193e-06, 2.25353053e02, -2.25353060e02],
[-2.60215295e02, 1.30107654e02, 1.30107642e02],
[-6.97244208e-06, -2.25353053e02, 2.25353060e02],
[2.60215295e02, -1.30107654e02, -1.30107642e02],
]
)
)
time = ImportGenVectLin(start=0, stop=0.015, num=4, endpoint=True)
Na_tot = 1024
simu.input = InputCurrent(
Is=Is,
Ir=None, # No winding on the rotor
N0=N0,
angle_rotor=None, # Will be computed
time=time,
Na_tot=Na_tot,
angle_rotor_initial=0.5216 + pi,
)
# Definition of the magnetic simulation (no symmetry)
simu.mag = MagFEMM(
type_BH_stator=2,
type_BH_rotor=2,
is_periodicity_a=False,
is_get_mesh=True,
nb_worker=cpu_count(),
)
simu.force = None
simu.struct = None
# Copy the simu and activate the symmetry
assert SPMSM_003.comp_periodicity() == (1, True, 1, True)
simu_sym = Simu1(init_dict=simu.as_dict())
simu_sym.mag.is_periodicity_a = True
out = Output(simu=simu_sym)
out.post.legend_name = "1/2 symmetry"
out.post.line_color = "r--"
simu_sym.run()
out.mag.meshsolution.plot_mesh(
save_path=join(save_path, "EM_SPMSM_FL_002_mesh.png"), is_show_fig=False
)
out.mag.meshsolution.plot_mesh(
group_names="stator core",
save_path=join(save_path, "EM_SPMSM_FL_002_mesh_stator.png"),
is_show_fig=False,
)
out.mag.meshsolution.plot_mesh(
group_names=["stator core", "/", "airgap", "stator winding"],
save_path=join(save_path, "EM_SPMSM_FL_002_mesh_stator_interface.png"),
is_show_fig=False,
)
out.mag.meshsolution.plot_contour(
label="\mu",
save_path=join(save_path, "EM_SPMSM_FL_002_mu.png"),
is_show_fig=False,
)
out.mag.meshsolution.plot_contour(
label="B", save_path=join(save_path, "EM_SPMSM_FL_002_B.png"), is_show_fig=False
)
out.mag.meshsolution.plot_contour(
label="H", save_path=join(save_path, "EM_SPMSM_FL_002_H.png"), is_show_fig=False
)
out.mag.meshsolution.plot_contour(
label="H",
group_names="stator core",
save_path=join(save_path, "EM_SPMSM_FL_002_H_stator.png"),
is_show_fig=False,
)
return out
# To run it without pytest
if __name__ == "__main__":
out = test_Magnetic_FEMM_sym()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""SERVICE YET TO BE IMPLEMENTED. THIS FILE IS JUST A PLACEHOLDER."""
print("Sorry! This service has not yet been implemented\n(will you be the one to take care of it?\n --- RIGHT NOW THIS FILE IS JUST AN HANDY PLACEHOLDER ---")
|
nilq/baby-python
|
python
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for keras_utils.py."""
import collections
import tensorflow as tf
from tensorflow_federated.python.learning.reconstruction import keras_utils
from tensorflow_federated.python.learning.reconstruction import model as model_lib
def _create_input_spec():
return collections.namedtuple('Batch', ['x', 'y'])(
x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32),
y=tf.TensorSpec(shape=[None, 1], dtype=tf.int32))
def _create_keras_model():
model = tf.keras.Sequential([
tf.keras.layers.Reshape(target_shape=[784], input_shape=(28 * 28,)),
tf.keras.layers.Dense(10),
])
return model
class KerasUtilsTest(tf.test.TestCase):
def test_from_keras_model_succeeds(self):
keras_model = _create_keras_model()
input_spec = _create_input_spec()
keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=keras_model.layers,
local_layers=[],
input_spec=input_spec)
def test_from_keras_model_fails_bad_input_spec(self):
keras_model = _create_keras_model()
input_spec = collections.namedtuple('Batch', ['x'])(
x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32))
with self.assertRaisesRegex(ValueError, 'input_spec'):
keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=keras_model.layers,
local_layers=[],
input_spec=input_spec)
def test_from_keras_model_fails_compiled(self):
keras_model = _create_keras_model()
keras_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
optimizer=tf.keras.optimizers.SGD(learning_rate=0.1))
input_spec = _create_input_spec()
with self.assertRaisesRegex(ValueError, 'compiled'):
keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=keras_model.layers,
local_layers=[],
input_spec=input_spec)
def test_from_keras_model_fails_missing_variables(self):
"""Ensures failure if global/local layers are missing variables."""
keras_model = _create_keras_model()
input_spec = _create_input_spec()
with self.assertRaisesRegex(ValueError, 'variables'):
keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=keras_model.layers[:-1],
local_layers=[],
input_spec=input_spec)
def test_from_keras_model_succeeds_from_set(self):
keras_model = _create_keras_model()
input_spec = _create_input_spec()
keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=set(keras_model.layers),
local_layers=set(),
input_spec=input_spec)
def test_from_keras_model_properties(self):
keras_model = _create_keras_model()
input_spec = _create_input_spec()
recon_model = keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=keras_model.layers,
local_layers=[],
input_spec=input_spec)
# Global trainable/non_trainable should include all the variables, and
# local should be empty.
self.assertEqual(recon_model.global_trainable_variables,
keras_model.trainable_variables)
self.assertEqual(recon_model.global_non_trainable_variables,
keras_model.non_trainable_variables)
self.assertEmpty(recon_model.local_trainable_variables)
self.assertEmpty(recon_model.local_non_trainable_variables)
self.assertEqual(input_spec, recon_model.input_spec)
def test_from_keras_model_local_layers_properties(self):
keras_model = _create_keras_model()
input_spec = _create_input_spec()
recon_model = keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=keras_model.layers[:-1], # Last Dense layer is local.
local_layers=keras_model.layers[-1:],
input_spec=input_spec)
# Expect last two variables, the weights and bias for the final Dense layer,
# to be local trainable, and the rest global.
self.assertEqual(recon_model.global_trainable_variables,
keras_model.trainable_variables[:-2])
self.assertEqual(recon_model.global_non_trainable_variables,
keras_model.non_trainable_variables)
self.assertEqual(recon_model.local_trainable_variables,
keras_model.trainable_variables[-2:])
self.assertEmpty(recon_model.local_non_trainable_variables)
self.assertEqual(input_spec, recon_model.input_spec)
def test_from_keras_model_forward_pass(self):
keras_model = _create_keras_model()
input_spec = _create_input_spec()
recon_model = keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=keras_model.layers[:-1],
local_layers=keras_model.layers[-1:],
input_spec=input_spec)
batch_input = collections.namedtuple('Batch', ['x', 'y'])(
x=tf.ones(shape=[10, 784], dtype=tf.float32),
y=tf.zeros(shape=[10, 1], dtype=tf.int32))
batch_output = recon_model.forward_pass(batch_input)
self.assertIsInstance(batch_output, model_lib.BatchOutput)
self.assertEqual(batch_output.num_examples, 10)
self.assertAllEqual(batch_output.labels,
tf.zeros(shape=[10, 1], dtype=tf.int32))
# Change num_examples and labels.
batch_input = collections.namedtuple('Batch', ['x', 'y'])(
x=tf.zeros(shape=[5, 784], dtype=tf.float32),
y=tf.ones(shape=[5, 1], dtype=tf.int32))
batch_output = recon_model.forward_pass(batch_input)
self.assertIsInstance(batch_output, model_lib.BatchOutput)
self.assertEqual(batch_output.num_examples, 5)
self.assertAllEqual(batch_output.labels,
tf.ones(shape=[5, 1], dtype=tf.int32))
def test_from_keras_model_forward_pass_list_input(self):
"""Forward pass still works with a 2-element list batch input."""
keras_model = _create_keras_model()
input_spec = _create_input_spec()
recon_model = keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=keras_model.layers[:-1],
local_layers=keras_model.layers[-1:],
input_spec=input_spec)
batch_input = [
tf.ones(shape=[10, 784], dtype=tf.float32),
tf.zeros(shape=[10, 1], dtype=tf.int32)
]
batch_output = recon_model.forward_pass(batch_input)
self.assertIsInstance(batch_output, model_lib.BatchOutput)
self.assertEqual(batch_output.num_examples, 10)
self.assertAllEqual(batch_output.labels,
tf.zeros(shape=[10, 1], dtype=tf.int32))
def test_from_keras_model_forward_pass_fails_bad_input_keys(self):
keras_model = _create_keras_model()
input_spec = _create_input_spec()
recon_model = keras_utils.from_keras_model(
keras_model=keras_model,
global_layers=keras_model.layers,
local_layers=[],
input_spec=input_spec)
batch_input = collections.namedtuple('Batch', ['a', 'b'])(
a=tf.ones(shape=[10, 784], dtype=tf.float32),
b=tf.zeros(shape=[10, 1], dtype=tf.int32))
with self.assertRaisesRegex(KeyError, 'keys'):
recon_model.forward_pass(batch_input)
def test_mean_loss_metric_from_keras_loss(self):
mse_loss = tf.keras.losses.MeanSquaredError()
mse_metric = keras_utils.MeanLossMetric(mse_loss)
y_true = tf.ones([10, 1], dtype=tf.float32)
y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
mse_metric.update_state(y_true, y_pred)
self.assertEqual(mse_loss(y_true, y_pred), mse_metric.result())
def test_mean_loss_metric_multiple_weighted_batches(self):
mse_loss = tf.keras.losses.MeanSquaredError()
mse_metric = keras_utils.MeanLossMetric(mse_loss)
y_true = tf.ones([10, 1], dtype=tf.float32)
y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
mse_metric.update_state(y_true, y_pred)
y_true = tf.ones([40, 1], dtype=tf.float32)
y_pred = tf.ones([40, 1], dtype=tf.float32)
mse_metric.update_state(y_true, y_pred)
# Final weighted loss is (10 * 0.5^2 + 40 * 0.0) / 50
self.assertEqual(mse_metric.result(), 0.05)
def test_mean_loss_metric_from_fn(self):
"""Ensures the mean loss metric also works with a callable."""
def mse_loss(y_true, y_pred):
return tf.reduce_mean(tf.square(y_true - y_pred))
mse_metric = keras_utils.MeanLossMetric(mse_loss)
y_true = tf.ones([10, 1], dtype=tf.float32)
y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
mse_metric.update_state(y_true, y_pred)
self.assertEqual(mse_loss(y_true, y_pred), mse_metric.result())
def test_recreate_mean_loss_from_keras_loss(self):
"""Ensures we can create a metric from config, as is done in aggregation."""
mse_loss = tf.keras.losses.MeanSquaredError()
mse_metric = keras_utils.MeanLossMetric(mse_loss)
recreated_mse_metric = type(mse_metric).from_config(mse_metric.get_config())
y_true = tf.ones([10, 1], dtype=tf.float32)
y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
mse_metric.update_state(y_true, y_pred)
recreated_mse_metric.update_state(y_true, y_pred)
self.assertEqual(recreated_mse_metric.result(), mse_metric.result())
def test_recreate_mean_loss_from_fn(self):
def mse_loss(y_true, y_pred):
return tf.reduce_mean(tf.square(y_true - y_pred))
mse_metric = keras_utils.MeanLossMetric(mse_loss)
recreated_mse_metric = type(mse_metric).from_config(mse_metric.get_config())
y_true = tf.ones([10, 1], dtype=tf.float32)
y_pred = tf.ones([10, 1], dtype=tf.float32) * 0.5
mse_metric.update_state(y_true, y_pred)
recreated_mse_metric.update_state(y_true, y_pred)
self.assertEqual(recreated_mse_metric.result(), mse_metric.result())
if __name__ == '__main__':
tf.test.main()
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.3 on 2018-11-09 05:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restapi', '0010_annotatedrecording_recitation_mode'),
]
operations = [
migrations.CreateModel(
name='TajweedInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_id', models.CharField(blank=True, max_length=32)),
('recording_id', models.CharField(max_length=32)),
('platform', models.CharField(default='web', max_length=32)),
('letter', models.CharField(max_length=1)),
('letter_position', models.IntegerField(default=0)),
('degree', models.CharField(choices=[('jali', 'Jali'), ('khafi', 'Khafi')], default='jali', max_length=32)),
('category', models.CharField(choices=[('madd', 'Prolongation'), ('tafkheem', 'Fattening'), ('tarqeeq', 'Thinning'), ('makharij', 'Emission'), ('noon', 'Noon'), ('meem', 'Meem'), ('qalqala', 'Echo'), ('other', 'Other')], default='madd', max_length=32)),
],
),
]
|
nilq/baby-python
|
python
|
# MIT License
#
# Copyright (c) 2021 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Generic,
Iterable,
Type,
TypeVar,
cast,
overload,
)
if TYPE_CHECKING: # pragma: no cover
from .connection import Connection
from .field import BaseField
from .model import Model
from .utils.lazy_list import LazyList
_REF = TypeVar("_REF", bound="Model")
_THROUGH = TypeVar("_THROUGH", bound="Model")
class ManyToMany(Generic[_REF, _THROUGH]):
"""A useful tool to simplify many-to-many references.
Args:
here (str): The field name on the current model.
here_ref (str): The model and field name on the "middle" table (in the
example below, the middle table is Player) in the format of
"model.field".
other_ref (str): The model and field name on the middle table that
references the final table (or other table).
other (str): The model and field name on the final table referenced by
the middle table.
    Note: Although unnecessary, it is highly recommended to use ForeignKeys on
    the middle table where it references the initial and final tables. You may
    get unexpected behaviour if you don't.
Example Usage:
```
class User(Model):
username = VarChar(32).field()
primary_key = (username,)
games = ManyToMany["Game", "Player"]( # the typehints are optional
# the column on this table referenced in Player
"username",
# the column on Player that references "username"
"players.username",
# the column on Player that references Game.gameid
"players.gameid",
# the column on Game referenced by Player
"games.gameid",
)
class Game(Model):
gameid = Serial().field()
primary_key = (gameid,)
users = ManyToMany["User", "Player"](
"gameid",
"players.gameid",
"players.username",
"users.username",
)
class Player(Model):
username = VarChar(32).field()
gameid = Int().field()
primary_key = (username, gameid)
username_fk = ForeignKey(username, User.username)
gameid_fk = ForeignKey(gameid, Game.gameid)
class MyDatabase(Database):
users = User
games = Game
players = Player
...
circuit = await User.fetch(username="Circuit")
circuits_games = await circuit.games.fetchmany()
```
If you want typehints to work properly, use
`games = ManyToMany["Game"](...)`.
"""
__slots__: Iterable[str] = (
"_here",
"_here_ref",
"_other_ref",
"_other",
"_attribute_name",
)
_attribute_name: str
# populated by Model on __init_subclass__
def __init__(
self, here: str, here_ref: str, other_ref: str, other: str
) -> None:
self._here = here
self._here_ref = here_ref
self._other_ref = other_ref
self._other = other
@overload
def __get__(
self, inst: Model, cls: Type[Model]
) -> _RealManyToMany[_REF, _THROUGH]:
...
@overload
def __get__(
self, inst: None, cls: Type[Model]
) -> ManyToMany[_REF, _THROUGH]:
...
def __get__(
self, inst: Model | None, cls: type[Model]
) -> ManyToMany[_REF, _THROUGH] | _RealManyToMany[_REF, _THROUGH]:
if inst is None:
return self
real_m2m = self._generate_mtm(inst)
setattr(inst, self._attribute_name, real_m2m)
return real_m2m
def _generate_mtm(self, inst: Model) -> _RealManyToMany[_REF, _THROUGH]:
return _RealManyToMany(self, inst)
class _RealManyToMany(Generic[_REF, _THROUGH]):
__slots__: Iterable[str] = (
"orig",
"model",
"field",
"mm_model",
"mm_h_field",
"mm_o_field",
"ot_model",
"ot_field",
)
def __init__(
self, orig: ManyToMany[_REF, _THROUGH], model_inst: Model
) -> None:
# NOTE: all these casts are ugly, but truthfully
# there isn't a better way to do this. You can't
# actually check that these are Models and Fields
# without creating circular imports (since model.py
# imports this file)
self.orig = orig
mm_h_model, _mm_h_field = self.orig._here_ref.split(".")
mm_o_model, _mm_o_field = self.orig._other_ref.split(".")
assert mm_h_model == mm_o_model
mm_model = cast(
"Type[Model]", getattr(model_inst.database, mm_h_model)
)
mm_h_field = cast(
"BaseField[Any, Any, Any]", getattr(mm_model, _mm_h_field)
)
mm_o_field = cast(
"BaseField[Any, Any, Any]", getattr(mm_model, _mm_o_field)
)
_ot_model, _ot_field = self.orig._other.split(".")
ot_model = cast("Type[Model]", getattr(model_inst.database, _ot_model))
ot_field = cast(
"BaseField[Any, Any, Any]", getattr(ot_model, _ot_field)
)
self.model = model_inst
self.field = cast(
"BaseField[Any, Any, Any]",
getattr(model_inst.__class__, self.orig._here),
)
self.mm_model = mm_model
self.mm_h_field = mm_h_field
self.mm_o_field = mm_o_field
self.ot_model = ot_model
self.ot_field = ot_field
def __getattr__(self, name: str) -> Any:
return getattr(self.orig, name)
async def fetchmany(
self, con: Connection | None = None
) -> LazyList[dict[str, Any], Model]:
"""Fetch all rows from the final table that belong to this instance.
Returns:
LazyList[dict, Model]: A lazy-list of returned Models.
"""
return (
await self.ot_model.fetch_query(con=con)
.where(
self.mm_model.fetch_query()
.where(
self.mm_h_field.eq(
self.model._raw_values[self.field.name]
),
self.mm_o_field.eq(self.ot_field),
)
.exists()
)
.fetchmany()
)
async def count(self, con: Connection | None = None) -> int:
"""Returns the count.
Warning: To be efficient, this returns the count of *middle* models,
which may differ from the number of final models if you did not use
ForeignKeys properly.
Returns:
int: The count.
"""
return (
await self.mm_model.fetch_query(con=con)
.where(self.mm_h_field.eq(self.model._raw_values[self.field.name]))
.count()
)
async def clear(
self, con: Connection | None = None
) -> LazyList[dict[str, Any], Model]:
"""Remove all instances of the other model from this instance.
Both of these lines do the same thing:
```
deleted_players = await user.games.clear()
deleted_players = await Player.delete_query().where(
username=user.name
).execute()
```
Returns:
LazyList[dict, _REF]: A lazy-list of deleted through models (in
the example, it would be a list of Player).
"""
return (
await self.mm_model.delete_query(con=con)
.where(self.mm_h_field.eq(self.model._raw_values[self.field.name]))
.execute()
)
async def add(self, other: Model, con: Connection | None = None) -> Model:
"""Add one or more models to this ManyToMany.
Each of these lines does the exact same thing:
```
player = await user.games.add(game)
# OR
player = await games.users.add(user)
# OR
player = await Player(username=user.name, gameid=game.id).create()
```
Returns:
            Model: The reference model that links this model and the other
model. In the example, the return would be a Player.
"""
values = {
self.mm_h_field.name: self.model._raw_values[self.field.name],
self.mm_o_field.name: other._raw_values[self.ot_field.name],
}
return await self.mm_model(**values).create(con=con)
async def remove(
self, other: Model, con: Connection | None = None
) -> LazyList[dict[str, Any], Model]:
"""Remove one or models from this ManyToMany.
Each of these lines does the exact same thing:
```
deleted_players = await user.games.remove(game)
# OR
deleted_players = await games.user.remove(user)
# OR
deleted_players = await Player.delete_query().where(
username=user.name, gameid=game.id
).execute()
```
Note: The fact that .remove() returns a list instead of a single model
was intentional. The reason is ManyToMany does not enforce uniqueness
in any way, so there could be multiple Players that link a single user
to a single game. Thus, user.remove(game) could actually end up
deleting multiple players.
"""
values = {
self.mm_h_field.name: self.model._raw_values[self.field.name],
self.mm_o_field.name: other._raw_values[self.ot_field.name],
}
return (
await self.mm_model.delete_query(con=con).where(**values).execute()
)
|
nilq/baby-python
|
python
|
import torch
from numpy import histogram, random
from scipy.stats import skewnorm
from torch import Tensor, from_numpy
from torch.nn.functional import softmax
class WGAN:
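    # Wasserstein GAN losses (arXiv:1701.07875): the critic maximises
    # E[D(real)] - E[D(fake)], so both losses below are simple negations.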
def __init__(self) -> None:
super().__init__()
def discriminator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
real_loss = -real_scores
fake_loss = fake_scores
loss = real_loss.mean() + fake_loss.mean()
return loss
def generator_loss(self, fake_scores: Tensor) -> Tensor:
fake_loss = -fake_scores
loss = fake_loss.mean()
return loss
class RaHinge:
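    # Relativistic average hinge loss (arXiv:1807.00734): each score is taken
    # relative to the mean score of the opposite batch before the hinge.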
def __init__(self) -> None:
super().__init__()
def discriminator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
relativistic_real_validity = real_scores - fake_scores.mean()
relativistic_fake_validity = fake_scores - real_scores.mean()
real_loss = torch.relu(1.0 - relativistic_real_validity)
fake_loss = torch.relu(1.0 + relativistic_fake_validity)
loss = (real_loss.mean() + fake_loss.mean()) / 2
return loss
def generator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
relativistic_real_validity = real_scores - fake_scores.mean()
relativistic_fake_validity = fake_scores - real_scores.mean()
real_loss = torch.relu(1.0 - relativistic_fake_validity)
fake_loss = torch.relu(1.0 + relativistic_real_validity)
loss = (fake_loss.mean() + real_loss.mean()) / 2
return loss
class RaLSGAN:
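    # Relativistic average least-squares loss: same relativistic scores as
    # RaHinge, but penalised quadratically around +/-1 targets.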
def __init__(self) -> None:
super().__init__()
def discriminator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
relativistic_real_scores = real_scores - fake_scores.mean()
relativistic_fake_scores = fake_scores - real_scores.mean()
real_loss = (relativistic_real_scores - 1.0) ** 2
fake_loss = (relativistic_fake_scores + 1.0) ** 2
loss = (fake_loss.mean() + real_loss.mean()) / 2
return loss.unsqueeze(0)
def generator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
relativistic_real_scores = real_scores - fake_scores.mean()
relativistic_fake_scores = fake_scores - real_scores.mean()
real_loss = (relativistic_real_scores + 1.0) ** 2
fake_loss = (relativistic_fake_scores - 1.0) ** 2
loss = (fake_loss.mean() + real_loss.mean()) / 2
return loss
def js_div(p, q, reduce=True):
    # Jensen-Shannon divergence: symmetrised KL against the mixture m = (p + q) / 2.
    m = 0.5 * (p + q)
    jsd = 0.5 * (kl_div(p, m, reduce=False) + kl_div(q, m, reduce=False))
    return torch.mean(jsd) if reduce else jsd
def kl_div(p, q, epsilon=1e-12, reduce=True):
    # Kullback-Leibler divergence KL(p || q), summed over the score dimension;
    # epsilon keeps the ratio finite when q has zero entries.
    kld = torch.sum(
        p * (p / (q + epsilon)).log(),
        dim=1
    )
    return torch.mean(kld) if reduce else kld
class Realness:
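    # RealnessGAN-style loss: the discriminator outputs a distribution over
    # score_dim bins, which is compared against fixed "anchor" distributions
    # (real vs. fake) with a KL or JS divergence.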
def __init__(self, score_dim) -> None:
super().__init__()
self.score_dim = score_dim
self.gauss_uniform = True
self.measure = 'kl'
if self.measure == 'js':
self.distance = js_div
elif self.measure == 'kl':
self.distance = kl_div
else:
raise NotImplementedError()
if self.gauss_uniform:
gauss = random.normal(0.0, 0.1, size=1000)
count, _ = histogram(gauss, self.score_dim)
self.anchor0 = from_numpy(count / sum(count)).float()
uniform = random.uniform(-1.0, 1.0, size=1000)
count, _ = histogram(uniform, self.score_dim)
self.anchor1 = from_numpy(count / sum(count)).float()
else:
skew_left = skewnorm.rvs(-5.0, size=1000)
count, _ = histogram(skew_left, self.score_dim)
self.anchor0 = from_numpy(count / sum(count)).float()
skew_right = skewnorm.rvs(5.0, size=1000)
count, _ = histogram(skew_right, self.score_dim)
self.anchor1 = from_numpy(count / sum(count)).float()
def discriminator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
self.anchor0 = self.anchor0.to(real_scores)
self.anchor1 = self.anchor1.to(real_scores)
real_probabilities = softmax(real_scores, dim=1)
fake_probabilities = softmax(fake_scores, dim=1)
loss = self.distance(self.anchor1, real_probabilities) + self.distance(self.anchor0, fake_probabilities)
# loss -= self.div(self.anchor1, fake_probabilities) + self.div(self.anchor0, real_probabilities)
return loss
def generator_loss(self, real_scores: Tensor, fake_scores: Tensor) -> Tensor:
self.anchor0 = self.anchor0.to(real_scores)
self.anchor1 = self.anchor1.to(real_scores)
real_probabilities = softmax(real_scores, dim=1)
fake_probabilities = softmax(fake_scores, dim=1)
# No relativism
# loss = self.distance(self.anchor0, fake_probabilities)
# EQ19 (default)
loss = self.distance(real_probabilities, fake_probabilities) - self.distance(self.anchor0, fake_probabilities)
# EQ20
# loss = self.distance(self.anchor1, fake_probabilities) - self.distance(self.anchor0, fake_probabilities)
return loss
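# Hedged usage sketch (generator, discriminator, the optimisers and `noise`
# are hypothetical placeholders, not part of this module):
#
#   criterion = WGAN()
#   d_loss = criterion.discriminator_loss(discriminator(real_batch),
#                                         discriminator(generator(noise).detach()))
#   d_loss.backward(); d_opt.step()
#   g_loss = criterion.generator_loss(discriminator(generator(noise)))
#   g_loss.backward(); g_opt.step()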
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-01 13:53
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0003_auto_20170801_1819'),
]
operations = [
migrations.AlterField(
model_name='sales',
name='description',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
),
migrations.AlterField(
model_name='sales',
name='discount',
field=models.IntegerField(blank=True, null=True, verbose_name='Скидка'),
),
migrations.AlterField(
model_name='sales',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='shops/sales', verbose_name='Изображение'),
),
]
|
nilq/baby-python
|
python
|
import pytest
from pathlib import Path
from coolcmp.cmp.source_code import *
from unit_tests.utils import run_test_codegen
tests = []
with open('unit_tests/compiled_files.txt') as f:
for line in f:
tests.append(Path(line.rstrip()).resolve())
@pytest.mark.complete
@pytest.mark.parametrize('file', tests, ids=map(str, tests))
def test_complete(file):
run_test_codegen(file)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'shouke'
from common.log import logger
from common.globalvar import db_related_to_project_dic
from unittesttestcase import MyUnittestTestCase
__all__ = ['DBUnittestTestCase']
class DBUnittestTestCase(MyUnittestTestCase):
def test_select_one_record(self):
        if self.input_params != '':
            # the trailing comma makes a single value parse as a 1-tuple; eval()
            # is only reached for non-empty input, avoiding a SyntaxError on ''
            self.input_params = eval(self.input_params + ',')  # convert the string-form tuple into a tuple
try:
flag, query_result = db_related_to_project_dic[self.op_object].select_one_record(self.url_or_sql, self.input_params)
            logger.info('query result returned by the database server: query_result:%s, flag:%s' % (query_result, flag))
if flag:
if query_result:
                    logger.info('saving the target content to user-defined variables')
                    # if the user defined an "output" parameter, also save the target value there
                    self.save_result(query_result) # save the queried record
                    logger.info('running result assertions')
self.assert_result(query_result)
else:
msg = 'fail#%s' % query_result
self.assertEqual(1, 0, msg=msg)
except Exception as e:
msg = 'fail#%s' % e
logger.error(msg)
self.assertEqual(1, 0, msg=msg)
def test_update_record(self):
        if self.input_params != '':
            # eval() is only reached for non-empty input, avoiding a SyntaxError on ''
            self.input_params = eval(self.input_params)  # convert the string-form tuple into a tuple
try:
flag, execute_result = db_related_to_project_dic[self.op_object].execute_update(self.url_or_sql, self.input_params)
if not flag:
msg = 'fail#%s' % execute_result
self.assertEqual(1, 0, msg=msg)
except Exception as e:
msg = 'fail#%s' % e
logger.error(msg)
self.assertEqual(1, 0, msg=msg)
def test_delete_record(self):
        if self.input_params != '':
            # eval() is only reached for non-empty input, avoiding a SyntaxError on ''
            self.input_params = eval(self.input_params)  # convert the string-form tuple into a tuple
try:
flag, execute_result = db_related_to_project_dic[self.op_object].execute_update(self.url_or_sql, self.input_params)
if not flag:
msg = 'fail#%s' % execute_result
self.assertEqual(1, 0, msg=msg)
except Exception as e:
msg = 'fail#%s' % e
logger.error(msg)
self.assertEqual(1, 0, msg=msg)
def test_call_proc(self):
        if self.input_params != '':
            # eval() is only reached for non-empty input, avoiding a SyntaxError on ''
            self.input_params = eval(self.input_params)  # convert the string-form tuple into a tuple
try:
flag, execute_result = db_related_to_project_dic[self.op_object].call_proc(self.url_or_sql, self.input_params)
if not flag:
msg = 'fail#%s' % execute_result
self.assertEqual(1, 0, msg=msg)
except Exception as e:
msg = 'fail#%s' % e
logger.error(msg)
self.assertEqual(1, 0, msg=msg)
def test_truncate_table(self):
        if self.input_params != '':
            # eval() is only reached for non-empty input, avoiding a SyntaxError on ''
            self.input_params = eval(self.input_params)  # convert the string-form tuple into a tuple
try:
flag, execute_result = db_related_to_project_dic[self.op_object].execute_update(self.url_or_sql, self.input_params)
if not flag:
msg = 'fail#%s' % execute_result
self.assertEqual(1, 0, msg=msg)
except Exception as e:
msg = 'fail#%s' % e
logger.error(msg)
self.assertEqual(1, 0, msg=msg)
def test_insert_record(self):
        if self.input_params != '':
            # eval() is only reached for non-empty input, avoiding a SyntaxError on ''
            self.input_params = eval(self.input_params)  # convert the string-form tuple into a tuple
try:
temp_sql = self.url_or_sql % self.input_params
flag, execute_result = db_related_to_project_dic[self.op_object].execute_insert(temp_sql, '')
if not flag:
msg = 'fail#%s' % execute_result
self.assertEqual(1, 0, msg=msg)
except Exception as e:
msg = 'fail#%s' % e
logger.error(msg)
self.assertEqual(1, 0, msg=msg)
|
nilq/baby-python
|
python
|
"""
Simple HTTP Server with GET that waits for given seconds.
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
import time
ENCODING = 'utf-8'
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
"""Simple multi-threaded HTTP Server."""
pass
class MyRequestHandler(BaseHTTPRequestHandler):
"""Very simple request handler. Only supports GET."""
def do_GET(self):
"""Respond after seconds given in path.
"""
try:
seconds = float(self.path[1:])
except ValueError:
seconds = 0.0
if seconds < 0:
seconds = 0.0
text = "Waited for {:4.2f} seconds.\nThat's all.\n"
msg = text.format(seconds).encode(ENCODING)
time.sleep(seconds)
self.send_response(200)
self.send_header("Content-type", "text/plain; charset=utf-8")
self.send_header("Content-length", str(len(msg)))
self.end_headers()
self.wfile.write(msg)
def run(server_class=ThreadingHTTPServer,
handler_class=MyRequestHandler,
port=8888):
"""Run the simple server on a given port."""
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print(("Serving from port {}...".format(port)))
httpd.serve_forever()
if __name__ == '__main__':
run()
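# Example request against a hypothetical local run: a GET to /2.5 sleeps for
# ~2.5 seconds before responding:
#   curl http://localhost:8888/2.5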
|
nilq/baby-python
|
python
|
from .regressor import CrossLgbRegression
|
nilq/baby-python
|
python
|
from django.urls import path, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
admin.autodiscover()
import autobot.views
# To add a new path, first import the app:
# import blog
#
# Then add the new path:
# path('blog/', blog.urls, name="blog")
#
# Learn more here: https://docs.djangoproject.com/en/2.1/topics/http/urls/
urlpatterns = [
path("", autobot.views.index, name="index"),
path("login/", autobot.views.login, name="login"),
path("logout/", auth_views.LogoutView.as_view(), name="logout"),
path("social-auth/", include('social_django.urls', namespace="social")),
path("db/", autobot.views.db, name="db"),
path("admin/", admin.site.urls),
]
|
nilq/baby-python
|
python
|
# This imports all that is listed in __init__.py of current directory:
from __init__ import *
#-------------------------------------------------------------------------------------- MAIN WINDOW CLASS ----------------------------------------------------------------------------------#
class Window(QtGui.QMainWindow): # defines a subclass of QMainWindow named Window
# This method defines everything that is to be executed automatically when the class is initialized:
def __init__(self):
super(Window, self).__init__() # This makes the class inherit functions from its upper class (Here, QMainWindow Class).
#---------------------------------- RESTORING THE LAST SETTINGS --------------------------------#
# Note: settings is an imported variable (see Variables_Module.py)
if settings.value("Runs").isNull(): # if a value named Runs does not exist in settings (this is the first time the code is run)
# then, set these custom values as the x, y positions and height, width of the Window class:
# Note: QRect converts integer values into a rectangle form which can be used to visualize a window:
self.setGeometry(QtCore.QRect(50, 50, 800, 450))
else:
self.restore_last_settings_MWindow() # restore the last settings...
self.setWindowTitle("EditOS")
self.setWindowIcon(QtGui.QIcon("Icons/Icon.ico")) # sets the icon to the icon present in icons folder of current directory...
self.initialize_editor() # (for initialize_editor, see __init__ RELATED FUNCTIONS in Objects_Module.py)
#--------------------------------- MENU BAR CONFIGURATION ---------------------------------------#
# Here, we add a menubar using the built-in .menuBar() function of QMainWindow class in PyQt and store it in self.main_menu variable for future use:
self.main_menu = self.menuBar()
self.file_menu = self.main_menu.addMenu("&File")
self.edit_menu = self.main_menu.addMenu("&Edit")
self.view_menu = self.main_menu.addMenu("&View")
self.help_menu = self.main_menu.addMenu("&Help")
# Here, we add a statusbar using the built-in .statusBar() function of QMainWindow class in PyQt and store it in self.statusbar variable for future use:
self.statusbar = self.statusBar()
self.statusbar.hide() # we hide it currently so that it can be invoked by our statusbar option in view menu (see Functions_Modules.py for details)
#------------------------------- FINAL TOUCHES TO THE WINDOW --------------------------------------#
# Here, we add a style named cleanlooks using QStyleFactory and set that style to our QApplication:
# Note: Qt has a number of built-in styles with names such as plastique, cleanlooks, motif, windows vista, cde etc.
QtGui.QApplication.setStyle(QtGui.QStyleFactory.create("Cleanlooks"))
self.show()
#-------------------------------------------------------------- __init__ RELATED FUNCTIONS ------------------------------------------------------------------------#
def restore_last_settings_MWindow(self):
if (settings.value("Runs").toInt()) >= 1: # check if the number of runs is equal to or greater than 1:
if (settings.value("State").toString()) == "Maximized": # if yes, check the settings if the last time window was maximized
# then, set these custom values as the x, y positions and height, width of the Window class and maximize the window:
# Note: QRect converts integer values into a rectangle form which can be used to visualize a window:
self.setGeometry(QtCore.QRect(50, 50, 800, 450))
self.showMaximized()
else:
# if no, last time the window was not maximized,
# then set the position and size of the window according to the last values present in the settings named Geometry:
# Note: .toSize and .toPoint converts the values in settings to QSize and QPoint
# Which are compatible values to be used for moving and resizing the main window.
self.resize(settings.value("Main_Size").toSize())
self.move(settings.value("Main_Position").toPoint())
        else: # the else command is just a safety measure for any unexpected exceptions or faults...
self.setGeometry(QtCore.QRect(50, 50, 800, 450))
def initialize_editor(self):
#-------------------------------- SETTING THE DEFAULT SETTINGS (EDITOR WINDOW) ---------------------------------------#
self.text_editor = QtGui.QPlainTextEdit() # QPlainTextEdit gives us that big bald white space we call editor.
tmr = QtGui.QFont("times new roman")
tmr.setPointSize(16) # set font size to 16pt
self.text_editor.setFont(tmr)
# self.back_color and self.font_color hold the background and font colors of our editor
# They are defaulted to none but we will allow the user to change them later:
self.back_color = "none"
self.font_color = "none"
# sets the default stylesheet of our editor using css. it has no border, no background and no font color currently.
self.text_editor.setStyleSheet("border: none;background: %s;color: %s;" %(self.back_color, self.font_color))
#-------------------------------- RESTORING THE LAST SETTINGS (EDITOR WINDOW) -------------------------------------------#
# Note: settings is an imported variable (see Variables_Module.py)
if settings.value("Runs").isNull(): # if a value named Runs does not exist in settings (this is the first time the code is run)
pass # do nothing.
else: # else, if a value named Runs does exist (this is not the first time the code is run)
# (for restore_last_settings_editor, see SUPPORT FUNCTIONS in Objects_Module.py)
self.restore_last_settings_editor() # restore the last settings of the text editor's window...
#-------------------------------- FINAL TOUCHES TO THE WINDOW (EDITOR WINDOW) -------------------------------------------#
self.text_editor.cursorPositionChanged.connect(self.position_print) # (for position_print, see SUPPORT FUNCTIONS in Objects_Module.py)
self.setCentralWidget(self.text_editor) # makes our window a text editor.
#-------------------------------------------------------------- FUNCTIONS FOR MENUBAR OPTIONS-------------------------------------------------------------------------------#
def new_file(self):
        warning = QtGui.QMessageBox.question(self, "Warning!!!", "Are you sure?\nPlease save all work first...", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if warning == QtGui.QMessageBox.Yes:
self.text_editor.clear()
else:
pass
def open_file(self):
# note: File_Dialog_Window is a team created class (see OTHER WINDOW CLASSES in Objects_Module.py)
# get the name and location of the file to be opened:
self.open_file_name = QtGui.QFileDialog.getOpenFileName(File_Dialog_Window(), "Open File", "", ("Text Files (*.txt);;Any File (*.*)"))
with open(self.open_file_name, "r") as self.current_file: # open the file in read only mode and as variable self.current_file
text = self.current_file.read()
self.text_editor.setPlainText(text) # set the text in the file as the text in the editor window
self.current_file = open(self.open_file_name, "r+") # open file for future purposes
def save_file(self):
try: # try checking if there is a variable named self.current_file
self.current_file
except AttributeError: # except, if there is an attribute error (no file was opened).
self.save_as_file() # (for save_as_file function, see below)
else: # else if everything goes ok (there is a self.current_file variable), then:
            if self.current_file.closed:
                self.save_as_file() # (for save_as_file function, see below)
            else:
text = self.text_editor.toPlainText() # get the text currently in the editor...
try: # try saving the file:
self.current_file.write(text)
except IOError: # except, if there is an input output error
self.save_as_file() # (for save_as_file function, see below)
def save_as_file(self):
# note: File_Dialog_Window is a team created class (see OTHER WINDOW CLASSES in Objects_Module.py)
# get the name and location of the file to be saved:
self.save_file_name = QtGui.QFileDialog.getSaveFileName(File_Dialog_Window(), "Save As File", "Document", ("Text Files (*.txt);;Any File (*.*)"))
with open(self.save_file_name, "w") as self.current_file:
text = self.text_editor.toPlainText() # get the text currently in the editor...
self.current_file.write(text) # write the text in the editor to the file
self.current_file = open(self.save_file_name, "r+") # open file for future purposes
def print_preview(self):
# note: paint_page_view is a team created function, see SUPPORT FUNCTIONS in Objects_Module.py
print_preview_dialog = QtGui.QPrintPreviewDialog()
print_preview_dialog.paintRequested.connect(self.paint_page_view) # whenever print_preview_dialog is created supply current page_view via paint_page_view method...
print_preview_dialog.exec_()
def print_doc(self):
print_dialog = QtGui.QPrintDialog()
print_dialog.exec_()
if print_dialog.Accepted: # if a printer is selected successfully,
self.text_editor.print_(print_dialog.printer()) # print everything in the text_editor by the printer selected by user in print_dialog
def exit_app(self):
# note: save_current_settings() is a team created function, see SUPPORT FUNCTIONS in Objects_Module.py
self.save_current_settings()
        warning = QtGui.QMessageBox.question(self, "Warning!!!", "Are you sure you want to quit?\nPlease save all work before closing...", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if warning == QtGui.QMessageBox.Yes:
try: # try closing the current file:
self.current_file.close()
except AttributeError: # except, if there is an attribute error (no file was opened).
QtCore.QCoreApplication.instance().quit()
else: # else, if self.current_file is sucessfully closed:
QtCore.QCoreApplication.instance().quit()
else: # if, No button is clicked:
pass # do, nothing.
def find(self):
editor = self.text_editor # allows us to access editor window through a variable named editor
# note: File_Dialog is a team created class (see OTHER WINDOW CLASSES in Objects_Module.py)
find_dialog = Find_Dialog(self)
self.find_dialog = find_dialog # allows us to access Find_Dialog class through a variable named self.find_dialog
text = self.text_editor.toPlainText() # gets the text currently in the editor and stores it in text variable
def find_text(self):
# gets the word to be found from the find dialog and stores it in find_word:
find_word = find_dialog.find_input.text()
# Note: case_sensitive, whole_words_only and direction are all imported variables (see Variables_Module.py)
# These variables are manipulated from the check boxes in the find dialog window
# flag contains the settings which makes the find function work.
if case_sensitive == False and whole_words_only == False and direction == "Backward":
flag = QtGui.QTextDocument.FindBackward
elif case_sensitive == True and whole_words_only == False and direction == "Backward":
flag = QtGui.QTextDocument.FindBackward and QtGui.QTextDocument.FindCaseSensitively
elif case_sensitive == True and whole_words_only == True and direction == "Backward":
flag = QtGui.QTextDocument.FindBackward and QtGui.QTextDocument.FindCaseSensitively and QtGui.QTextDocument.FindWholeWords
elif case_sensitive == False and whole_words_only == True and direction == "Backward":
flag = QtGui.QTextDocument.FindBackward and QtGui.QTextDocument.FindWholeWords
elif case_sensitive == False and whole_words_only == False and direction == "Forward":
flag = QtGui.QTextDocument.FindCaseSensitively
elif case_sensitive == True and whole_words_only == False and direction == "Forward":
flag = QtGui.QTextDocument.FindCaseSensitively
elif case_sensitive == True and whole_words_only == True and direction == "Forward":
flag = QtGui.QTextDocument.FindCaseSensitively and QtGui.QTextDocument.FindWholeWords
elif case_sensitive == False and whole_words_only == True and direction == "Forward":
flag = QtGui.QTextDocument.FindWholeWords
else:
flag = QtGui.QTextDocument.FindBackward
editor.find(find_word, flag)
def replace_text(self):
# gets the word to be found from the find dialog and stores it in replace_word:
replace_word = find_dialog.replace_input.text()
#-------------------------------------------------------------------------#
#-- when the find button is clicked in the find dialog, if a word is --#
#-- found. Then, it is selected automatically and thus, the cursor --#
#-- has a selection. However, if no matching word is found the cursor --#
#-- will have no selection --#
#-------------------------------------------------------------------------#
if editor.textCursor().hasSelection():
editor.insertPlainText(replace_word) # replace the selection with the word to be replaced with
else:
# show a message:
message = QtGui.QMessageBox(QtGui.QMessageBox.Information, "Error!!!", "No text was found to be replaced, \nTry finding the word again then replace it!", QtGui.QMessageBox.Ok)
message.setWindowIcon(QtGui.QIcon("Icons/Icon.ico"))
message.exec_()
def replace_all(self):
# get the words to be found and replaced from the find dialog:
find_word = find_dialog.find_input.text()
replace_word = find_dialog.replace_input.text()
            new_text = editor.toPlainText().replace(find_word, replace_word) # re-read the editor so edits made after opening the dialog are included
editor.clear()
editor.insertPlainText(new_text) # add the new text to the editor window
self.find_dialog.find_btn.clicked.connect(find_text)
self.find_dialog.find_next_btn.clicked.connect(find_text)
self.find_dialog.replace_btn.clicked.connect(replace_text)
self.find_dialog.replace_all_btn.clicked.connect(replace_all)
return self.find_dialog
def get_font_choice(self):
font, valid = QtGui.QFontDialog.getFont()
if valid:
self.text_editor.setFont(font)
# add a value named "Editor_Font" to settings and set that value to the font chosen by user in QFontDialog:
settings.setValue("Editor_Font", font)
def get_font_color(self):
color_dialog = QtGui.QColorDialog.getColor()
# change the value of self.font_color to the name of the color chosen by user:
self.font_color = color_dialog.name()
# set the stylesheet of the text editor with the same background color but new font color:
self.text_editor.setStyleSheet("border: none;background: %s;color: %s;" %(self.back_color, self.font_color))
def get_back_color(self):
bgcolor_dialog = QtGui.QColorDialog.getColor()
# change the value of self.back_color to the name of the color chosen by user:
self.back_color = bgcolor_dialog.name()
# set the stylesheet of the text editor with the same font color but new background color:
self.text_editor.setStyleSheet("border: none;background: %s;color: %s;" %(self.back_color, self.font_color))
def set_night_theme(self, isChecked):
#----------------------------------------------------------#
#-- This function is called by the checkbox of view menu --#
#-- named "Night Theme" and this function acts according --#
#-- to the current state of that checkbox. For more info --#
#-- see add_night_theme_option_view_menu() function in --#
#-- Functions_Module.py --#
#----------------------------------------------------------#
# Note: isChecked is a property of checkboxes that returns true if it is checked or false otherwise:
if isChecked:
# change the values of self.back_color and self.font_color variables to black and white respectively:
self.back_color = "black"
self.font_color = "white"
# set the stylesheet of the text editor according to the changed values of self.back_color and self.font_color:
self.text_editor.setStyleSheet("border: none;background: %s;color: %s;" %(self.back_color, self.font_color))
# add a value named "Night_Btn" to settings and set that value to the current status of the checkbox that is "checked":
settings.setValue("Night_Btn", "checked")
else:
# set the stylesheet of the text editor back to default :
self.text_editor.setStyleSheet("border: none;background: none;color: none;")
# add a value named "Night_Btn" to settings and set that value to the current status of the checkbox that is "unchecked":
settings.setValue("Night_Btn", "unchecked")
def add_statusbar(self, isChecked):
#----------------------------------------------------------#
#-- This function is called by the checkbox of view menu --#
#-- named "Statusbar" and this function acts according --#
#-- to the current state of that checkbox. For more info --#
#-- see add_statusbar_checkbox_view_menu() function in --#
#-- Functions_Module.py --#
#----------------------------------------------------------#
# Note: isChecked is a property of checkboxes that returns true if it is checked or false otherwise:
if isChecked:
self.statusbar.show()
# add a value named "Status_Btn" to settings and set that value to the current status of the checkbox that is "checked":
settings.setValue("Status_Btn", "checked")
else:
self.statusbar.hide()
# add a value named "Status_Btn" to settings and set that value to the current status of the checkbox that is "unchecked":
settings.setValue("Status_Btn", "unchecked")
def about(self):
# note: About_Window is a team created class (see OTHER WINDOW CLASSES in Objects_Module.py)
self.about = About_Window()
return self.about
#------------------------------------------------------------- SUPPORT FUNCTIONS ---------------------------------------------------------------------#
def save_current_settings(self):
# Note: settings is an imported variable (see Variables_Module.py)
settings.setValue("Main_Size", self.size()) # add a value named "Main_Size" to settings and set that value to the current size of the main window.
settings.setValue("Main_Position", self.pos()) # add a value named "Main_Position" to settings and set that value to the current position of the main window.
settings.setValue("StyleSheet", self.text_editor.styleSheet()) # add a value named "StyleSheet" to settings and set that value to the current stylesheet of the editor
if settings.value("Runs").isNull(): # if a value named Runs does not exist in settings (this is the first time the code is run)
settings.setValue("Runs", int(1)) # create a value named Runs in settings and set its value to integer 1 (the current no. of runs)
elif (settings.value("Runs").toInt()) >= 1: # else if a value named "Runs" does exist in settings, check if its value is greater than 1.
runs, can_convert = (settings.value("Runs").toInt()) # can_convert is a property of integer setting values that returns true if it can be converted
if can_convert == True:
settings.setValue("Runs", int(runs + 1)) # add 1 to the number of runs before closing the app.
else:
settings.setValue("Runs", int(1)) # the else command is just a safety measure for any unexpected exceptions or falts...
if self.isMaximized(): # if self (here, our Window Class) is maximized:
settings.setValue("State", "Maximized") # create a value named State in settings and set its value to "Maximized"
else:
settings.setValue("State", "False")
def paint_page_view(self, printer):
self.text_editor.print_(printer) # print current page view using the given printer
def restore_last_settings_editor(self):
if (settings.value("Runs").toInt()) >= 1: # check if the number of runs is equal to or greater than 1,
self.text_editor.setStyleSheet(settings.value("StyleSheet").toString())
if settings.value("Editor_Font").isValid():
self.text_editor.setFont(QtGui.QFont(settings.value("Editor_Font")))
else:
pass # do nothing...
def position_print(self):
line = self.text_editor.textCursor().blockNumber()
col = self.text_editor.textCursor().columnNumber()
cursor_position = ("Line: %s | Column: %s" %(str(line), str(col)))
self.statusbar.showMessage(cursor_position) # shows the cursor position on statusbar
def closeEvent(self, event): # if user tries to close self (here, our Window class) this function is executed.
event.ignore()
self.exit_app() # note: exit_app() is a team created function, see FUNCTIONS FOR MENUBAR OPTIONS in Objects_Module.py
#------------------------------------------------------------------------------------- OTHER WINDOW CLASSES --------------------------------------------------------------------------#
class File_Dialog_Window(QtGui.QWidget): # defines a subclass of QWidget named File_Dialog_Window
# This method defines everything that is to be executed automatically when the class is initialized:
def __init__(self):
super(File_Dialog_Window, self).__init__() # This makes the class inherit functions from its upper class (Here, QWidget Class):
self.move(50, 50)
self.setWindowIcon(QtGui.QIcon("Icons/Icon.ico"))
self.show()
class About_Window(QtGui.QWidget): # defines a subclass of QWidget named About_Window
# This method defines everything that is to be executed automatically when the class is initialized:
def __init__(self):
super(About_Window, self).__init__() # This makes the class inherit functions from its upper class (Here, QWidget Class):
if settings.value("Runs").isNull(): # if a value named Runs does not exist in settings (this is the first time the code is run)
# then, set these custom values as the x, y positions and height, width of the Window class:
# Note: QRect converts integer values into a rectangle form which can be used to visualize a window:
self.setGeometry(QtCore.QRect(50, 50, 350, 110))
else:
self.restore_last_settings_AbWindow() # restore the last settings of About_Window...
pass
self.setWindowTitle("About")
self.setWindowIcon(QtGui.QIcon("Icons/Icon.ico"))
self.interface()
self.show()
def restore_last_settings_AbWindow(self):
if (settings.value("Runs").toInt()) >= 1: # check if the number of runs is equal to or greater than 1:
# If yes, then set the position of the window according to the last values present in the settings named About_Position:
# Note: .toPoint converts the values in settings to a QPoint which can be used to move the window.
self.move(settings.value("About_Position").toPoint())
self.resize(350, 110)
        else: # the else branch is just a safety measure for any unexpected exceptions or faults...
self.setGeometry(QtCore.QRect(50, 50, 350, 110))
def interface(self):
para = "An open source project by A.E.R.T team. \nIts a fully functional text editor coded in python \nand licensed under unlicense."
name = QtGui.QLabel(self)
about_text = QtGui.QLabel(self)
name.setStyleSheet("font-family: georgia;color: blue;font: 18pt")
about_text.setStyleSheet("font-family: georgia;font: 12pt")
name.setText("EditOS")
about_text.setText(para)
name.resize(name.sizeHint())
about_text.resize(about_text.sizeHint())
name.move(40, 7)
about_text.move(5, 45)
logo = QtGui.QLabel(self)
logo.setPixmap(QtGui.QPixmap("Icons/Large_Icon.ico"))
logo.move(5, 5)
logo.resize(logo.sizeHint())
logo.show()
# this saves the current settings in registry and then, closes the window:
def close_window(self):
# Note: settings is an imported variable (see Variables_Module.py)
settings.setValue("About_Position", self.pos()) # add a value named "About_Position" to settings and set that value to the current position of the window
self.hide()
def closeEvent(self, event): # if user tries to close self (here, our About_Window class) this function is executed.
event.ignore()
self.close_window()
class Find_Dialog(QtGui.QDialog): # defines a subclass of QDailog named Find_Dialog
def __init__(self, parent = None):
QtGui.QDialog.__init__(self, parent) # This makes the class inherit functions from its upper class (Here, QDialog Class):
if settings.value("Runs").isNull(): # if a value named Runs does not exist in settings (this is the first time the code is run)
# then, set these custom values as the x, y positions and height, width of the Window class:
# Note: QRect converts integer values into a rectangle form which can be used to visualize a window:
self.setGeometry(QtCore.QRect(50, 50, 400, 220))
else:
self.restore_last_settings_FdWindow() # restore the last settings of Find_Dialog...
pass
self.setWindowTitle("Find")
self.setWindowIcon(QtGui.QIcon("Icons/Icon.ico"))
self.add_find_interface()
self.show()
def restore_last_settings_FdWindow(self):
if (settings.value("Runs").toInt()) >= 1: # check if the number of runs is equal to or greater than 1:
# If yes, then set the position of the window according to the last values present in the settings named Find_Position:
# Note: .toPoint converts the values in settings to a QPoint which can be used to move the window.
self.move(settings.value("Find_Position").toPoint())
self.resize(400, 220)
        else: # the else branch is just a safety measure for any unexpected exceptions or faults...
self.setGeometry(QtCore.QRect(50, 50, 400, 220))
def width(self):
return float(self.frameGeometry().width())
def height(self):
return float(self.frameGeometry().height())
def paintEvent(self, event): # creates the line for direction option
painter = QtGui.QPainter()
painter.begin(self)
pen = QtGui.QPen(QtGui.QColor(211, 211, 211))
painter.setPen(pen)
painter.drawLine(QtCore.QPoint(int(self.width() / 40.0), int(self.height() /3.5)), QtCore.QPoint(int(self.width() / 20.0), int(self.height() /3.5)))
painter.drawLine(QtCore.QPoint(int(self.width() / 5.7), int(self.height() /3.5)), QtCore.QPoint(int(self.width() / 1.16), int(self.height() /3.5)))
painter.drawLine(QtCore.QPoint(int(self.width() / 40.0), int(self.height() /3.5)), QtCore.QPoint(int(self.width() / 40.0), int(self.height() / 2.15)))
painter.drawLine(QtCore.QPoint(int(self.width() / 40.0), int(self.height() / 2.15)), QtCore.QPoint(int(self.width() / 1.16), int(self.height() / 2.15)))
painter.drawLine(QtCore.QPoint(int(self.width() / 1.16), int(self.height() /3.5)), QtCore.QPoint(int(self.width() / 1.16), int(self.height() / 2.15)))
painter.end()
def add_find_interface(self):
find_label = QtGui.QLabel("Search For: ", self)
self.find_label = find_label
self.find_label.move(int(self.width() / 20.0), int(self.height() / 15.5))
self.find_label.resize(self.find_label.sizeHint())
find_input = QtGui.QLineEdit(self)
self.find_input = find_input
self.find_input.setGeometry(int(self.width() / 4.0), int(self.height() / 20.0), int(self.width() / 1.6), int(self.height() / 8.8))
find_btn = QtGui.QPushButton("Find", self)
self.find_btn = find_btn
self.find_btn.move(int(self.width() / 2.9), int(self.height() / 5.0))
self.find_btn.resize(self.find_btn.sizeHint())
find_next_btn = QtGui.QPushButton("Find Next", self)
self.find_next_btn = find_next_btn
self.find_next_btn.move(int(self.width() / 1.7), int(self.height() / 5.0))
self.find_next_btn.resize(self.find_next_btn.sizeHint())
direction_label = QtGui.QLabel("Direction: ", self)
self.direction_label = direction_label
self.direction_label.move(int(self.width() / 17.0), int(self.height() / 3.2))
self.direction_label.resize(self.direction_label.sizeHint())
backwards_radio_btn = QtGui.QRadioButton("Backward", self)
self.backwards_radio_btn = backwards_radio_btn
self.backwards_radio_btn.move(int(self.width() / 4.5), int(self.width() / 4.4))
self.backwards_radio_btn.resize(self.backwards_radio_btn.sizeHint())
self.backwards_radio_btn.toggle()
self.backwards_radio_btn.toggled.connect(self.set_direction)
forwards_radio_btn = QtGui.QRadioButton("Forward", self)
self.forwards_radio_btn = forwards_radio_btn
self.forwards_radio_btn.move(int(self.width() / 2.0), int(self.width() / 4.4))
self.forwards_radio_btn.resize(self.forwards_radio_btn.sizeHint())
self.forwards_radio_btn.toggled.connect(self.set_direction)
replace_label = QtGui.QLabel("Replace By: ", self)
self.replace_label = replace_label
self.replace_label.move(int(self.width() / 20.0), int(self.height() / 1.65))
self.replace_label.resize(self.replace_label.sizeHint())
replace_input = QtGui.QLineEdit(self)
self.replace_input = replace_input
self.replace_input.setGeometry(int(self.width() / 4.0), int(self.height() / 1.7), int(self.width() / 1.6), int(self.height() / 8.8))
replace_btn = QtGui.QPushButton("Replace", self)
self.replace_btn = replace_btn
self.replace_btn.move(int(self.width() / 2.9), int(self.height() / 1.34))
self.replace_btn.resize(self.replace_btn.sizeHint())
replace_all_btn = QtGui.QPushButton("Replace All", self)
self.replace_all_btn = replace_all_btn
self.replace_all_btn.move(int(self.width() / 1.7), int(self.height() / 1.34))
self.replace_all_btn.resize(self.replace_all_btn.sizeHint())
case_check = QtGui.QCheckBox("Case sensitive", self)
self.case_check = case_check
self.case_check.move(int(self.width() / 40.0), int(self.height() / 1.1))
self.case_check.stateChanged.connect(self.case_sense)
whole_word_opt = QtGui.QCheckBox("Whole words only",self)
self.whole_word_opt = whole_word_opt
self.whole_word_opt.move(int(self.width() / 3.7), int(self.height() / 1.1))
self.whole_word_opt.stateChanged.connect(self.whole_word_sense)
def set_direction(self, isChecked):
#--------------------------------------------------#
#-- This function is called by either --#
#-- backwards_radio_btn or forwards_radio_btn --#
#-- and this function acts according --#
#-- to the current state of those radio buttons. --#
#-- For the coding of these buttons, see up --#
#-- In add_find_interface of Find_Dialog --#
#--------------------------------------------------#
# Note: isChecked is a property of radio buttons that returns true if it is checked or false otherwise:
global direction # Note: direction is an imported variable (see Variables_Module.py)
if isChecked:
if direction == "Forward":
direction = "Backward"
elif direction == "Backward":
direction = "Forward"
else:
direction = "Backward"
else:
pass
def case_sense(self, state):
#--------------------------------------------------------#
        #-- This function is called by case_check checkbox      --#
#-- and this function acts according to --#
#-- the current state of that checkbox. --#
#-- For the coding of this checkbox, --#
#-- See up in add_find_interface of Find_Dialog --#
#--------------------------------------------------------#
# Note: state is a property of checkboxes that equals QtCore.Qt.Checked if checkbox is checked or false otherwise:
global case_sensitive # Note: case_sensitive is an imported variable (see Variables_Module.py)
if state == QtCore.Qt.Checked:
case_sensitive = True
else:
case_sensitive = False
def whole_word_sense(self, state):
#----------------------------------------------------#
        #-- This function is called by whole_word_opt checkbox --#
#-- and this function acts according to --#
#-- the current state of that checkbox. --#
#-- For the coding of this checkbox, --#
#-- See up in add_find_interface of Find_Dialog --#
#----------------------------------------------------#
# Note: state is a property of checkboxes that equals QtCore.Qt.Checked if checkbox is checked or false otherwise:
global whole_words_only # Note: whole_words_only is an imported variable (see Variables_Module.py)
if state == QtCore.Qt.Checked:
whole_words_only = True
else:
whole_words_only = False
# this saves the current settings in registry and then, closes the window:
def close_window(self):
# Note: settings is an imported variable (see Variables_Module.py)
settings.setValue("Find_Position", self.pos()) # add a value named "Find_Position" to settings and set that value to the current position of the window
self.hide()
def closeEvent(self, event): # if user tries to close self (here, our Find_Dialog class) this function is executed.
event.ignore()
self.close_window()
#-------x-----------------------x-------------------------x--------------------------THE END--------------------x---------------------x-----------------------------------x---------------------x------------------x------------#
|
nilq/baby-python
|
python
|
import os
# Database connection setup
class Config(object):
SERVER = ''
DATABASE = ''
DRIVER = ''
USERNAME = ''
PASSWORD = ''
SQLALCHEMY_DATABASE_URI = f'mssql+pyodbc://{USERNAME}:{PASSWORD}@{SERVER}/{DATABASE}?driver={DRIVER}'
SQLALCHEMY_TRACK_MODIFICATIONS = False
DEBUG = True
    SECRET_KEY = os.environ.get('SECRET_KEY') or '' # set the SECRET_KEY config value (falls back to an empty string)
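
# A minimal wiring sketch, assuming Flask and Flask-SQLAlchemy are installed:
# from flask import Flask
# from flask_sqlalchemy import SQLAlchemy
# app = Flask(__name__)
# app.config.from_object(Config)
# db = SQLAlchemy(app)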
|
nilq/baby-python
|
python
|
import os
import sys
filename = __file__[:-5] + '-input'
with open(filename) as f:
lines = f.read().splitlines()
lines = list(map(lambda s: s.split('-'), lines))
connections = {}
for line in lines:
if line[0] not in connections and line[1] != 'start' and line[0] != 'end':
connections[line[0]] = [line[1]]
elif line[1] != 'start' and line[0] != 'end':
connections[line[0]].append(line[1])
if line[1] not in connections and line[0] != 'start' and line[1] != 'end':
connections[line[1]] = [line[0]]
elif line[0] != 'start' and line[1] != 'end':
connections[line[1]].append(line[0])
routes = []
def find_all_paths(start, end, path, twice):
path.append(start)
if start == end:
routes.append(path)
else:
for neighbor in connections[start]:
if (neighbor in path and neighbor.isupper()) or neighbor not in path:
find_all_paths(neighbor, end, path[:], twice)
elif (neighbor in path and not twice):
find_all_paths(neighbor, end, path[:], not twice)
find_all_paths('start', 'end', [], False)
print(len(routes))
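
# A minimal self-check sketch, assuming the search above: it rebuilds the
# AoC 2021 day 12 sample cave system in the adjacency format produced by the
# loop above and re-runs the search. With a single small cave allowed twice,
# this sample is expected to yield 36 paths.
def _demo_sample_cave():
    global connections, routes
    saved_connections, saved_routes = connections, routes
    connections = {'start': ['A', 'b'], 'A': ['c', 'b', 'end'],
                   'b': ['A', 'd', 'end'], 'c': ['A'], 'd': ['b']}
    routes = []
    find_all_paths('start', 'end', [], False)
    assert len(routes) == 36, len(routes)
    connections, routes = saved_connections, saved_routes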
|
nilq/baby-python
|
python
|
import numpy as np
from . InverterException import InverterException
from . InputData import InputData
class Image(InputData):
"""
This class represents a camera image and can be used as input to the
various inversion algorithms. Images can be created directly, or by
importing and filtering a video.
"""
def __init__(self, data):
"""
Constructor.
Args:
data (numpy.ndarray): Raw image data, or Image object to copy.
"""
if data.ndim != 2:
raise InverterException("Invalid dimensions of image: {}. Image must have exactly two dimensions.".format(data.ndim))
self.data = data
self.pixels = data.shape
self.subset = (slice(None), slice(None))
def get(self):
"""
Returns:
numpy.ndarray: the image data, or the previously specified subset of the image data.
"""
return self.data[self.subset]
def setSubset(self, x, y=None, w=None, h=None):
"""
Specifies which subset of the image to return when
'get()' is called. Calling this method as 'setSubset(None)'
resets any previously set subset.
Args:
x (int): X axis offset.
y (int): Y axis offset.
w (int): Number of pixels to pick along X axis.
h (int): Number of pixels to pick along Y axis.
"""
if (x is None) and (y is None) and (w is None) and (h is None):
self.subset = (slice(None), slice(None))
else:
self.subset = (slice(x, x+w), slice(y, y+h))
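
if __name__ == "__main__":
    # A minimal usage sketch (run from within the package, since the imports
    # above are relative): wrap a 2-D array, select a 64x32 window at offset
    # (10, 20), read it back, then reset to the full image.
    img = Image(np.zeros((480, 640)))
    img.setSubset(10, 20, 64, 32)
    assert img.get().shape == (64, 32)   # data[10:74, 20:52]
    img.setSubset(None)
    assert img.get().shape == (480, 640)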
|
nilq/baby-python
|
python
|
"""Support for Avanaza Stock sensor."""
|
nilq/baby-python
|
python
|
import os
import re
import codecs
def isValidLine(line):
    # Keep lines that are not local '#include "..."' directives (their content
    # is inlined into the merged header), and keep platform-specific include
    # lines (.PSVita/.PS4/.Switch/.XBoxOne) as-is.
    if re.search('include \"', line) is None or line.find('.PSVita') != -1 or line.find('.PS4') != -1 or line.find('.Switch') != -1 or line.find('.XBoxOne') != -1:
        return True
    return False
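# For example, under the rule above:
#   isValidLine('#include "Effekseer.Base.h"')  -> False (local include, dropped)
#   isValidLine('#include "Effekseer.PS4.h"')   -> True  (platform include, kept)
#   isValidLine('class Manager;')               -> True  (ordinary code, kept)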
class CreateHeader:
def __init__(self):
self.lines = []
def addLine(self,line):
self.lines.append(line)
def readLines(self,path):
f = codecs.open(path, 'r','utf-8_sig')
line = f.readline()
while line:
if isValidLine(line):
self.lines.append(line.strip(os.linesep))
line = f.readline()
f.close()
def output(self,path):
f = codecs.open(path, 'w','utf-8_sig')
for line in self.lines:
f.write(line + os.linesep)
f.close()
effekseerHeader = CreateHeader()
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Base.Pre.h')
effekseerHeader.readLines('Effekseer/Effekseer/Utils/Effekseer.CustomAllocator.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Vector2D.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Vector3D.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Color.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.RectF.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Matrix43.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Matrix44.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.File.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.DefaultFile.h')
effekseerHeader.readLines('Effekseer/Effekseer/Backend/GraphicsDevice.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Resource.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Effect.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Manager.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Setting.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Server.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Client.h')
effekseerHeader.addLine('')
effekseerHeader.addLine('#include "Effekseer.Modules.h"')
effekseerHeader.addLine('')
effekseerHeader.output('Effekseer/Effekseer.h')
effekseerSimdHeader = CreateHeader()
effekseerSimdHeader.addLine('#pragma once')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Base.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec2f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec3f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec4f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Mat43f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Mat44f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Quaternionf.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Utils.h')
effekseerSimdHeader.output('Effekseer/Effekseer.SIMD.h')
effekseerModulesHeader = CreateHeader()
effekseerModulesHeader.addLine('#pragma once')
effekseerModulesHeader.addLine('')
effekseerModulesHeader.addLine('#include "Effekseer.h"')
effekseerModulesHeader.addLine('#include "Effekseer.SIMD.h"')
effekseerModulesHeader.addLine('')
effekseerModulesHeader.addLine('// A header to access internal data of effekseer')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Parameter/Effekseer.Parameters.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.SpriteRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.RibbonRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.RingRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.ModelRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.TrackRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.EffectLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.TextureLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/Model.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/ModelLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.MaterialLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/Model.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.Curve.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.CurveLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Sound/Effekseer.SoundPlayer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.SoundLoader.h')
effekseerModulesHeader.output('Effekseer/Effekseer.Modules.h')
effekseerRendererDX9Header = CreateHeader()
effekseerRendererDX9Header.readLines('EffekseerRendererDX9/EffekseerRenderer/EffekseerRendererDX9.Base.Pre.h')
effekseerRendererDX9Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX9Header.readLines('EffekseerRendererDX9/EffekseerRenderer/EffekseerRendererDX9.Renderer.h')
effekseerRendererDX9Header.output('EffekseerRendererDX9/EffekseerRendererDX9.h')
effekseerRendererDX11Header = CreateHeader()
effekseerRendererDX11Header.readLines('EffekseerRendererDX11/EffekseerRenderer/EffekseerRendererDX11.Base.Pre.h')
effekseerRendererDX11Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX11Header.readLines('EffekseerRendererDX11/EffekseerRenderer/EffekseerRendererDX11.Renderer.h')
effekseerRendererDX11Header.output('EffekseerRendererDX11/EffekseerRendererDX11.h')
effekseerRendererDX12Header = CreateHeader()
effekseerRendererDX12Header.readLines('EffekseerRendererDX12/EffekseerRenderer/EffekseerRendererDX12.Base.Pre.h')
effekseerRendererDX12Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX12Header.readLines('EffekseerRendererDX12/EffekseerRenderer/EffekseerRendererDX12.Renderer.h')
effekseerRendererDX12Header.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererDX12Header.output('EffekseerRendererDX12/EffekseerRendererDX12.h')
effekseerRendererVulkanHeader = CreateHeader()
effekseerRendererVulkanHeader.readLines('EffekseerRendererVulkan/EffekseerRenderer/EffekseerRendererVulkan.Base.Pre.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererVulkan/EffekseerRenderer/EffekseerRendererVulkan.Renderer.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererVulkanHeader.output('EffekseerRendererVulkan/EffekseerRendererVulkan.h')
effekseerRendererGLHeader = CreateHeader()
effekseerRendererGLHeader.readLines('EffekseerRendererGL/EffekseerRenderer/EffekseerRendererGL.Base.Pre.h')
effekseerRendererGLHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererGLHeader.readLines('EffekseerRendererGL/EffekseerRenderer/EffekseerRendererGL.Renderer.h')
effekseerRendererGLHeader.output('EffekseerRendererGL/EffekseerRendererGL.h')
effekseerRendererMetalHeader = CreateHeader()
effekseerRendererMetalHeader.readLines('EffekseerRendererMetal/EffekseerRenderer/EffekseerRendererMetal.Base.Pre.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererMetal/EffekseerRenderer/EffekseerRendererMetal.Renderer.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererMetalHeader.output('EffekseerRendererMetal/EffekseerRendererMetal.h')
|
nilq/baby-python
|
python
|
from __future__ import annotations
from typing import List, Tuple
import ujson
import os.path as path
import stargazing.pomodoro.pomodoro_controller as pomo_pc
CONFIG_FILE_PATH = f"{path.dirname(path.abspath(__file__))}/../config/settings.json"
def get_saved_youtube_player_urls() -> List[str]:
with open(CONFIG_FILE_PATH) as file:
data = ujson.load(file)
return data["saved_youtube_player_urls"]
def get_interval_times() -> List[List[int]]:
with open(CONFIG_FILE_PATH) as file:
data = ujson.load(file)
return data["interval_times"]
def get_last_session_data() -> Tuple[str, pomo_pc.PomodoroIntervalSettings, bool, int]:
with open(CONFIG_FILE_PATH) as file:
data = ujson.load(file)
return (data["last_project_name"], pomo_pc.PomodoroIntervalSettings(*data["last_interval_time"]),
data["last_autostart"], data["last_volume"])
def update_last_session_data(project_name: str, interval_settings: pomo_pc.PomodoroIntervalSettings,
autostart: bool, volume: int) -> None:
with open(CONFIG_FILE_PATH, 'r+') as file:
data = ujson.load(file)
data["last_project_name"] = project_name
data["last_interval_time"] = [
interval_settings.work_secs, interval_settings.break_secs]
data["last_autostart"] = autostart
data["last_volume"] = volume
file.seek(0)
ujson.dump(data, file, indent=4)
file.truncate()
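
# A minimal usage sketch, assuming PomodoroIntervalSettings(work_secs,
# break_secs) as implied by the attribute access above:
# interval = pomo_pc.PomodoroIntervalSettings(25 * 60, 5 * 60)
# update_last_session_data("thesis", interval, autostart=True, volume=80)
# name, interval, autostart, volume = get_last_session_data()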
|
nilq/baby-python
|
python
|
OUTPUT_ON = b'1'
OUTPUT_OFF = b'0'
OUTPUT_PULSE = b'P'
OUTPUT_CURRENT = b'O'
INPUT_DELTA = b'D'
INPUT_CURRENT = b'C'
TURNOUT_NORMAL = b'N'
TURNOUT_REVERSE = b'R'
IDENTIFY = b'Y'
SERVO_ANGLE = b'A'
SET_TURNOUT = b'T'
GET_TURNOUT = b'G'
CONFIG = b'F'
ACKNOWLEDGE = b'!'
STORE = b'W'
ERRORRESPONSE = b'E'
WARNINGRESPONSE = b'e'
_COMMAND_NAMES = {
    OUTPUT_ON: "OUTPUT_ON",
    OUTPUT_OFF: "OUTPUT_OFF",
    OUTPUT_PULSE: "OUTPUT_PULSE",
    OUTPUT_CURRENT: "OUTPUT_CURRENT",
    INPUT_DELTA: "INPUT_DELTA",
    INPUT_CURRENT: "INPUT_CURRENT",
    TURNOUT_NORMAL: "TURNOUT_NORMAL",
    TURNOUT_REVERSE: "TURNOUT_REVERSE",
    SERVO_ANGLE: "SERVO_ANGLE",
    SET_TURNOUT: "SET_TURNOUT",
    GET_TURNOUT: "GET_TURNOUT",
    IDENTIFY: "IDENTIFY",
    CONFIG: "CONFIG",
    ACKNOWLEDGE: "ACKNOWLEDGE",
    STORE: "STORE",
    ERRORRESPONSE: "ERRORRESPONSE",
    WARNINGRESPONSE: "WARNINGRESPONSE",
}

def commandName(cmd):
    # look up the human-readable name for a protocol command byte
    return _COMMAND_NAMES.get(cmd, "UNKNOWN COMMAND: %s" % str(cmd))
|
nilq/baby-python
|
python
|
import tensorflow as tf
# Stolen from magenta/models/shared/events_rnn_graph
def make_rnn_cell(rnn_layer_sizes,
                  dropout_keep_prob=1.0,
                  attn_length=0,
                  base_cell=tf.contrib.rnn.BasicLSTMCell,
                  state_is_tuple=False):
    """Build a stacked RNN cell with per-layer dropout and optional attention.

    Args:
        rnn_layer_sizes: hidden-unit count for each layer of the stack.
        dropout_keep_prob: output keep probability applied to every layer.
        attn_length: if non-zero, wrap the stack in an AttentionCellWrapper
            with an attention window of this many steps.
        base_cell: cell class instantiated for each layer.
        state_is_tuple: passed through to the underlying TF cell constructors.

    Returns:
        A single cell object representing the whole stack.
    """
    cells = []
for num_units in rnn_layer_sizes:
cell = base_cell(num_units, state_is_tuple=state_is_tuple)
cell = tf.contrib.rnn.DropoutWrapper(
cell, output_keep_prob=dropout_keep_prob)
cells.append(cell)
cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=state_is_tuple)
if attn_length:
cell = tf.contrib.rnn.AttentionCellWrapper(
cell, attn_length, state_is_tuple=state_is_tuple)
return cell
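
# A minimal usage sketch (TensorFlow 1.x, where tf.contrib is available):
# a two-layer LSTM stack with 75% keep probability and a 40-step attention
# window, fed to dynamic_rnn; `inputs` is assumed to be a
# [batch, time, features] float tensor.
# cell = make_rnn_cell([128, 128], dropout_keep_prob=0.75, attn_length=40)
# outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)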
|
nilq/baby-python
|
python
|
from block import Block
from transaction import Transaction
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
import bitcoin
class BlockChain:
def __init__(self):
self.chain = []
self.tx_pool = []
self.bits = 2
self.reward = 50
genesis_block = Block(None, self.bits, [])
genesis_block.bits = self.bits
genesis_block.gen_hash()
self.chain.append(genesis_block)
def make_transaction(self, value, receiver_address, sender_address, sender_private_key):
try:
transaction = Transaction(self, value, receiver_address, sender_address, sender_private_key)
except Exception as e:
return str(e)
self.tx_pool.append(transaction)
return transaction.hash
def mining(self, miner_address):
tx_list = self.tx_pool
tx_list.insert(0, Transaction(self, self.reward, miner_address))
self.tx_pool = []
new_block = Block(self.chain[-1].hash, self.bits, tx_list)
try:
new_block.gen_hash()
except:
            return 'Block creation failed'
self.chain.append(new_block)
return new_block.hash
def get_utxo_list(self, address):
utxo_list = []
for block in self.chain:
for tx in block.transactions:
for i in range(len(tx.outputs)):
if address == tx.outputs[i].to:
utxo_list.append((tx.hash, i, tx.outputs[i].to, tx.outputs[i].value))
                for i in range(len(tx.inputs)):
                    for utxo in utxo_list:
                        if tx.inputs[i].hash == utxo[0] and tx.inputs[i].n == utxo[1] and tx.inputs[i].address == utxo[2] and tx.inputs[i].value == utxo[3]:
                            utxo_list.remove(utxo) # remove the spent output and stop scanning the list we just mutated
                            break
return utxo_list
def get_balance(self, address):
utxo_list = self.get_utxo_list(address)
balance = 0
for utxo in utxo_list:
balance += utxo[3]
return balance
def increase_bits(self):
self.bits += 1
def decrease_bits(self):
self.bits -= 1
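
# A minimal usage sketch, assuming plain string addresses are acceptable to
# the Transaction class for the coinbase path:
# chain = BlockChain()
# block_hash = chain.mining('miner-address')   # mines a block with the reward tx
# print(chain.get_balance('miner-address'))    # expected: 50 (one block reward)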
|
nilq/baby-python
|
python
|
'''
File name:
    +procesamientodatos.py
Description:
    +Library with functions for data processing
Methods:
    |--+cargar_datos
    |--+generar_tablas
    |--+almacenar_tablas
'''
# required libraries
import sys, os, glob, datetime as dt
from pyspark.sql import SparkSession, functions as F, window as W, DataFrame as DF
from pyspark.sql.types import (DateType, IntegerType, FloatType, DoubleType, LongType, StringType, StructField, StructType, TimestampType)
from functools import reduce
# Spark session
spark = SparkSession.builder\
.master("local")\
.appName("App#1")\
.config('spark.ui.port', '4050')\
.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
# function to load data (a list of .json files)
def cargar_datos(files=[]):
try:
        # read the .json files
df1 = spark.read.json(files, multiLine=True)
        # apply the transformations needed to obtain each element of the schema
df1 = df1.withColumn('viajes', F.explode(F.col('viajes'))).select('identificador','viajes.*').orderBy('identificador')
df1.collect()
return [df1]
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)
# function to generate the tables with the processed-data results
def generar_tablas(df=[]):
try:
        # create temporary dataframes that serve as intermediate tables for filtering and aggregating the data
df1a = df[0].withColumnRenamed('codigo_postal_origen','codigo_postal').withColumn('tipo', F.lit('origen'))\
.groupBy('codigo_postal', 'tipo').agg(F.count('codigo_postal').alias('cantidad_viajes'), F.sum(F.col('kilometros')*F.col('precio_kilometro')).alias('ingresos'))
df1b = df[0].withColumnRenamed('codigo_postal_destino','codigo_postal').withColumn('tipo', F.lit('destino'))\
.groupBy('codigo_postal', 'tipo').agg(F.count('codigo_postal').alias('cantidad_viajes'), F.sum(F.col('kilometros')*F.col('precio_kilometro')).alias('ingresos'))
df1c = df[0].select('identificador', 'kilometros', 'precio_kilometro')\
.groupBy('identificador').agg(F.sum('kilometros').alias('cantidad_kms'), F.sum(F.col('kilometros')*F.col('precio_kilometro')).alias('ingresos'))
        # table with the number of trips per postal code
df2 = df1a.union(df1b).select('codigo_postal', 'tipo', 'cantidad_viajes').orderBy(F.col('codigo_postal'), F.col('tipo').desc())
        # table with the total income per postal code
df3 = df1a.union(df1b).select('codigo_postal', 'tipo', F.round('ingresos',2).alias('ingresos')).orderBy(F.col('codigo_postal'), F.col('tipo').desc())
        # table with the number of kms and income per driver identifier
df4 = df1c.select('identificador', F.round('cantidad_kms',2).alias('cantidad_kms'), F.round('ingresos',2).alias('ingresos')).orderBy(F.col('identificador'))
        # table with specific metrics
data = [('persona_con_mas_kilometros', df4.groupBy('identificador').agg(F.max('cantidad_kms')).orderBy(F.col('max(cantidad_kms)').desc()).collect()[0][0]),\
('persona_con_mas_ingresos', df4.groupBy('identificador').agg(F.max('ingresos')).orderBy(F.col('max(ingresos)').desc()).collect()[0][0]),\
('percentil_25', df4.select(F.percentile_approx('ingresos', .25)).collect()[0][0]),\
('percentil_50', df4.select(F.percentile_approx('ingresos', .50)).collect()[0][0]),\
('percentil_75', df4.select(F.percentile_approx('ingresos', .75)).collect()[0][0]),\
('codigo_postal_origen_con_mas_ingresos', df1a.groupBy('codigo_postal').agg(F.max('ingresos')).orderBy(F.col('max(ingresos)').desc()).collect()[0][0]),\
('codigo_postal_destino_con_mas_ingresos', df1b.groupBy('codigo_postal').agg(F.max('ingresos')).orderBy(F.col('max(ingresos)').desc()).collect()[0][0])]
schema = StructType(\
[StructField('tipo_metrica',StringType()),
StructField('valor',StringType()),])
df5 = spark.createDataFrame(data, schema)
        # add the dataframes to a list for iteration
proceso = [df2, df3, df5]
#
        if 'fecha' in df[0].columns: # code for the metrics table in the Extra Part (a 'fecha' date column exists)
window = W.Window.partitionBy('fecha')
dfe1a = df[0].withColumnRenamed('codigo_postal_origen','codigo_postal').withColumn('tipo', F.lit('origen'))\
.groupBy('codigo_postal', 'tipo', 'fecha').agg(F.count('codigo_postal').alias('cantidad_viajes'), F.sum(F.col('kilometros')*F.col('precio_kilometro')).alias('ingresos'))
dfe1b = df[0].withColumnRenamed('codigo_postal_destino','codigo_postal').withColumn('tipo', F.lit('destino'))\
.groupBy('codigo_postal', 'tipo', 'fecha').agg(F.count('codigo_postal').alias('cantidad_viajes'), F.sum(F.col('kilometros')*F.col('precio_kilometro')).alias('ingresos'))
dfe1c = df[0].select('identificador', 'kilometros', 'precio_kilometro', 'fecha')\
.groupBy('identificador', 'fecha').agg(F.sum('kilometros').alias('cantidad_kms'), F.sum(F.col('kilometros')*F.col('precio_kilometro')).alias('ingresos'))
            # table with the number of trips per postal code
dfe2 = dfe1a.union(dfe1b).select('codigo_postal', 'tipo', 'cantidad_viajes', 'fecha').orderBy(F.col('codigo_postal'), F.col('tipo').desc(), F.col('fecha'))
            # table with the total income per postal code
dfe3 = dfe1a.union(dfe1b).select('codigo_postal', 'tipo', F.round('ingresos',2).alias('ingresos'), 'fecha').orderBy(F.col('codigo_postal'), F.col('tipo').desc(), F.col('fecha'))
            # table with the number of kms and income per driver identifier
dfe4 = dfe1c.select('identificador', F.round('cantidad_kms',2).alias('cantidad_kms'), F.round('ingresos',2).alias('ingresos'), 'fecha').orderBy(F.col('identificador'), F.col('fecha'))
            # table with specific metrics
met1 = dfe4.groupBy(F.lit('persona_con_mas_kilometros').alias('tipo_metrica'), 'fecha', F.col('identificador').alias('valor')).agg(F.max('cantidad_kms')).orderBy(F.col('max(cantidad_kms)').desc())\
.withColumn('row',F.row_number().over(W.Window.partitionBy('fecha').orderBy(F.col('fecha').desc()))).filter(F.col('row')<=1).drop('row').drop('max(cantidad_kms)').orderBy(F.col('fecha').desc())
met2 = dfe4.groupBy(F.lit('persona_con_mas_ingresos').alias('tipo_metrica'), 'fecha', F.col('identificador').alias('valor')).agg(F.max('ingresos')).orderBy(F.col('max(ingresos)').desc())\
.withColumn('row',F.row_number().over(W.Window.partitionBy('fecha').orderBy(F.col('fecha').desc()))).filter(F.col('row')<=1).drop('row').drop('max(ingresos)').orderBy(F.col('fecha').desc())
met3 = dfe4.groupBy(F.lit('percentil_25').alias('tipo_metrica'), 'fecha').agg(F.percentile_approx('ingresos', .25).alias('valor')).orderBy(F.col('fecha').desc())
met4 = dfe4.groupBy(F.lit('percentil_50').alias('tipo_metrica'), 'fecha').agg(F.percentile_approx('ingresos', .50).alias('valor')).orderBy(F.col('fecha').desc())
met5 = dfe4.groupBy(F.lit('percentil_75').alias('tipo_metrica'), 'fecha').agg(F.percentile_approx('ingresos', .75).alias('valor')).orderBy(F.col('fecha').desc())
met6 = dfe3.where('tipo like "origen"').groupBy(F.lit('codigo_postal_origen_con_mas_ingresos').alias('tipo_metrica'), 'fecha', F.col('codigo_postal').alias('valor')).agg(F.max('ingresos')).orderBy(F.col('max(ingresos)').desc())\
.withColumn('row',F.row_number().over(W.Window.partitionBy('fecha').orderBy(F.col('fecha').desc()))).filter(F.col('row')<=1).drop('row').drop('max(ingresos)').orderBy(F.col('fecha').desc())
met7 = dfe3.where('tipo like "destino"').groupBy(F.lit('codigo_postal_destino_con_mas_ingresos').alias('tipo_metrica'), 'fecha', F.col('codigo_postal').alias('valor')).agg(F.max('ingresos')).orderBy(F.col('max(ingresos)').desc())\
.withColumn('row',F.row_number().over(W.Window.partitionBy('fecha').orderBy(F.col('fecha').desc()))).filter(F.col('row')<=1).drop('row').drop('max(ingresos)').orderBy(F.col('fecha').desc())
dfe5 = reduce(DF.unionAll, [met1, met2, met3, met4, met5, met6, met7])
proceso.append(dfe5)
#
        # the operations are executed by iterating over the created dataframes via list-map-lambda functions
        list(map(lambda x: {x.printSchema(), x.show(50, truncate=False)}, proceso)) # display the schema and the data for each table
return proceso
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)
# function to store the dataframes in .csv format
def almacenar_tablas(df=[], files_name=[]):
try:
        # write the files
csv_files=[]
if (len(df)==len(files_name)):
            # execute the write operations by iterating over each object
list(map(lambda x, y: {x.write.csv(y, mode='overwrite')}, df, files_name))
            # run a verification step, reading back each file created
[csv_files.append(spark.read.csv(files_name[i])) for i in range(len(files_name))]
            if csv_files: print('Stored tables: ' + str(files_name))
return csv_files
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(exc_type, os.path.split(exc_tb.tb_frame.f_code.co_filename)[1], exc_tb.tb_lineno, exc_obj)
#
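# A minimal end-to-end sketch with hypothetical file names (generar_tablas
# returns a fourth metrics table when the input has a 'fecha' column, and
# almacenar_tablas only writes when both lists have the same length):
# dfs = cargar_datos(['viajes_2021.json'])
# tablas = generar_tablas(dfs)
# almacenar_tablas(tablas, ['viajes.csv', 'ingresos.csv', 'metricas.csv'])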
|
nilq/baby-python
|
python
|
"""VIC Emergency Incidents feed entry."""
from typing import Optional, Tuple
import logging
import re
from time import strptime
import calendar
from datetime import datetime
import pytz
from aio_geojson_client.feed_entry import FeedEntry
from geojson import Feature
from markdownify import markdownify
from .consts import ATTR_CATEGORY1, ATTR_CATEGORY2, ATTR_ID, \
ATTR_PUB_DATE, ATTR_SOURCE_TITLE, ATTR_SOURCE_ORG, ATTR_ESTA_ID, \
ATTR_RESOURCES, ATTRIBUTION, ATTR_SIZE, ATTR_SIZE_FMT, ATTR_LOCATION, \
ATTR_STATEWIDE, ATTR_TEXT, ATTR_STATUS, ATTR_TYPE, \
ATTR_WEBBODY, CUSTOM_ATTRIBUTE
_LOGGER = logging.getLogger(__name__)
class VICEmergencyIncidentsFeedEntry(FeedEntry):
"""VIC Emergency Incidents feed entry."""
def __init__(self,
home_coordinates: Tuple[float, float],
feature: Feature):
"""Initialise this service."""
super().__init__(home_coordinates, feature)
@property
def attribution(self) -> Optional[str]:
"""Return the attribution of this entry."""
return ATTRIBUTION
@property
def title(self) -> Optional[str]:
"""Return the attribution of this entry."""
return ATTR_SOURCE_TITLE
@property
def category1(self) -> str:
"""Return the category of this entry."""
return self._search_in_properties(ATTR_CATEGORY1)
@property
def category2(self) -> str:
"""Return the category of this entry."""
return self._search_in_properties(ATTR_CATEGORY2)
@property
def external_id(self) -> str:
"""Return the external id of this entry."""
return self._search_in_properties(ATTR_ID)
@property
def publication_date(self) -> datetime:
"""Return the publication date of this entry."""
publication_date = self._search_in_properties(ATTR_PUB_DATE)
if publication_date:
            # Parse the date. Sometimes it has 'Z' as the timezone, which isn't accepted by %z.
            # Stripping the fractional seconds and the 'Z' lets strptime handle it.
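            # e.g. '2021-01-15T06:30:00.000Z' takes the first branch (the
            # trailing '.000Z' is sliced off), while '2021-01-15T06:30:00+1100'
            # is parsed directly by the '%z' branch.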
if publication_date[-1] == 'Z':
date_struct = strptime(publication_date[:-5], "%Y-%m-%dT%H:%M:%S")
else:
date_struct = strptime(publication_date, "%Y-%m-%dT%H:%M:%S%z")
publication_date = datetime.fromtimestamp(calendar.timegm(date_struct), tz=pytz.utc)
return publication_date
@property
def description(self) -> str:
"""Return the description of this entry."""
return self._search_in_properties(ATTR_TEXT)
def _search_in_description(self, regexp):
"""Find a sub-string in the entry's description."""
if self.description:
match = re.search(regexp, self.description)
if match:
return match.group(CUSTOM_ATTRIBUTE)
return None
@property
def location(self) -> str:
"""Return the location of this entry."""
return self._search_in_properties(ATTR_LOCATION)
@property
def status(self) -> str:
"""Return the status of this entry."""
return self._search_in_properties(ATTR_STATUS)
@property
def type(self) -> str:
"""Return the type of this entry."""
return self._search_in_properties(ATTR_TYPE)
@property
def size(self) -> str:
"""Return the size of this entry."""
return self._search_in_properties(ATTR_SIZE)
@property
def size_fmt(self) -> str:
"""Return the size of this entry."""
return self._search_in_properties(ATTR_SIZE_FMT)
@property
def statewide(self) -> str:
"""Return the size of this entry."""
return self._search_in_properties(ATTR_STATEWIDE)
@property
def source_organisation(self) -> str:
"""Return the responsible agency of this entry."""
return self._search_in_properties(ATTR_SOURCE_ORG)
@property
def source_organisation_title(self) -> str:
"""Return the responsible agency of this entry."""
return self._search_in_properties(ATTR_SOURCE_TITLE)
@property
def resources(self) -> str:
"""Return the responsible agency of this entry."""
return self._search_in_properties(ATTR_RESOURCES)
@property
def etsa_id(self) -> str:
"""Return the responsible agency of this entry."""
return self._search_in_properties(ATTR_ESTA_ID)
@property
def advice_html(self) -> str:
"""Return the responsible agency of this entry."""
return self._search_in_properties(ATTR_WEBBODY)
@property
def advice_markdown(self) -> str:
"""Return the responsible agency of this entry."""
if self._search_in_properties(ATTR_WEBBODY) is None:
return None
return markdownify(self._search_in_properties(ATTR_WEBBODY))
|
nilq/baby-python
|
python
|
import networkx as nx
from . import utils
# ===== asexual lineage metrics =====
def get_asexual_lineage_length(lineage):
"""Get asexual lineage length.
Will check that given lineage is an asexual lineage.
Args:
lineage (networkx.DiGraph): an asexual lineage
Returns:
length (int) of given lineage
"""
if not utils.is_asexual_lineage(lineage): raise Exception("the given lineage is not an asexual lineage")
return len(lineage.nodes)
def get_asexual_lineage_num_discrete_state_changes(lineage, attribute_list):
"""Get the number of discrete state changes from an asexual lineage.
State is described by the aggregation of all attributes give by attribute list.
Args:
lineage (networkx.DiGraph): an asexual lineage
attribute_list (list): list of attributes (strings) to use when defining
a state
Returns:
Returns the number of discrete states along the lineage.
"""
# Check that lineage is an asexual lineage.
if not utils.is_asexual_lineage(lineage): raise Exception("the given lineage is not an asexual lineage")
# Check that all nodes have all given attributes in the attribute list
if not utils.all_taxa_have_attributes(lineage, attribute_list): raise Exception("given attributes are not universal among all taxa along the lineage")
# get the first state (root node)
lineage_id = utils.get_root_ids(lineage)[0]
num_states = 1
cur_state = [lineage.nodes[lineage_id][attr] for attr in attribute_list]
# count the number of state changes moving down the lineage
while True:
successor_ids = list(lineage.successors(lineage_id))
        if len(successor_ids) == 0: break # reached the end of the lineage
lineage_id = successor_ids[0]
state = [lineage.nodes[lineage_id][attr] for attr in attribute_list]
if cur_state != state:
cur_state = state
num_states += 1
return num_states
def get_asexual_lineage_num_discrete_unique_states(lineage, attribute_list):
"""Get the number of discrete unique states along a lineage where what it
means to be a state is defined by attribute_list.
Args:
lineage (networkx.DiGraph): an asexual lineage
attribute_list (list): list of attributes (strings) to use when defining
a state
Returns:
The number of discrete unique states found along the lineage.
"""
# Check that lineage is an asexual lineage.
if not utils.is_asexual_lineage(lineage): raise Exception("the given lineage is not an asexual lineage")
# Check that all nodes have all given attributes in the attribute list
if not utils.all_taxa_have_attributes(lineage, attribute_list): raise Exception("given attributes are not universal among all taxa along the lineage")
# get the first state (root node)
lineage_id = utils.get_root_ids(lineage)[0]
unique_states = set()
unique_states.add(tuple([lineage.nodes[lineage_id][attr] for attr in attribute_list]))
while True:
successor_ids = list(lineage.successors(lineage_id))
        if len(successor_ids) == 0: break # reached the end of the lineage
lineage_id = successor_ids[0]
unique_states.add(tuple([lineage.nodes[lineage_id][attr] for attr in attribute_list]))
return len(unique_states)
def get_asexual_lineage_mutation_accumulation(lineage, mutation_attributes, skip_root=False):
"""Get the distribution of mutation type accumulations over an asexual lineage.
Args:
lineage (networkx.DiGraph): an asexual lineage
mutation_attributes (list of str): what are the mutation count attributes
that we should accumulate over the lineage?
        skip_root (bool): Should we include root node mutation count values in
            our accumulation? Defaults to False.
Returns:
A dictionary indexed by mutation types (mutation_attributes) where each
value in the dictionary is the sum of that type of mutation along the lineage.
"""
# Check that lineage is an asexual lineage.
if not utils.is_asexual_lineage(lineage): raise Exception("the given lineage is not an asexual lineage")
# Check that all nodes have all given attributes in the attribute list
if not utils.all_taxa_have_attributes(lineage, mutation_attributes): raise Exception("given mutation attributes are not universal among all taxa along the lineage")
# initialize
mut_accumulators = {mut_attr:0 for mut_attr in mutation_attributes}
# get the root node
lineage_id = utils.get_root_ids(lineage)[0]
if not skip_root:
for mut_attr in mutation_attributes:
mut_accumulators[mut_attr] += lineage.nodes[lineage_id][mut_attr]
while True:
successor_ids = list(lineage.successors(lineage_id))
        if len(successor_ids) == 0: break # reached the end of the lineage
        # advance to the (single) offspring and accumulate its mutation counts
lineage_id = successor_ids[0]
for mut_attr in mutation_attributes:
mut_accumulators[mut_attr] += lineage.nodes[lineage_id][mut_attr]
return mut_accumulators
# ===== asexual phylogeny metrics =====
def get_mrca_tree_depth_asexual(phylogeny, ids=None):
"""Get the tree depth of the most recent common ancestor shared by the specified
taxa ids (ids) in an asexual phylogeny (phylogeny).
"""
# Get the id of the most recent common ancestor
mrca_id = utils.get_mrca_id_asexual(phylogeny, ids)
if mrca_id == -1: raise Exception("phylogeny has no common ancestor")
# Calculate distance from root to mrca
cur_id = mrca_id
depth = 0
while True:
ancestor_ids = list(phylogeny.predecessors(cur_id))
if len(ancestor_ids) == 0: break
depth+=1
cur_id = ancestor_ids[0]
return depth
# ===== phylogenetic richness =====
def calc_phylogenetic_diversity_asexual(phylogeny, ids=None):
"""Calculate phylogenetic diversity (i.e., the number of nodes in the minimum
spanning tree from the MRCA to all extant taxa). Currently only for asexual
phylogenies.
(Faith, 1992)
ids gives the set we want to calculate phylogenetic diversity on. i.e.,
we'll get the mrca for those ids and compute the minimum spanning tree
none defaults to including all leaf nodes
"""
# if given no ids, default to leaf taxa; otherwise, validate given ids
    if ids is None:
# Find MRCA on leaf nodes
ids = utils.get_leaf_taxa_ids(phylogeny)
# (1) get the mrca
mrca_id = utils.get_mrca_id_asexual(phylogeny, ids)
if mrca_id == -1: raise Exception("given ids have no common ancestor")
# (2) collect paths from each id to mrca
canopy = set([i for i in ids] + [mrca_id])
for i in ids:
cur_id = i
while True:
ancestor_ids = list(phylogeny.predecessors(cur_id))
if len(ancestor_ids) == 0: break
cur_id = ancestor_ids[0]
# If we've encountered this path before, we can skip the rest because
# we're guaranteed an asexual phylogeny.
if cur_id in canopy: break
canopy.add(cur_id)
# Build a subgraph with only the canopy
canopy_phylo = nx.subgraph(phylogeny, list(canopy))
# Okay, now we can compute the minimum spanning tree.
return len(nx.minimum_spanning_tree(canopy_phylo.to_undirected()).nodes)
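
# A minimal usage sketch, assuming taxa annotated with a 'genotype' attribute:
# lineage = nx.DiGraph()
# lineage.add_nodes_from([(0, {'genotype': 'AA'}), (1, {'genotype': 'AB'})])
# lineage.add_edge(0, 1)   # parent 0 -> offspring 1
# get_asexual_lineage_length(lineage)                                     # -> 2
# get_asexual_lineage_num_discrete_state_changes(lineage, ['genotype'])   # -> 2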
|
nilq/baby-python
|
python
|
import tqdm
from multiprocessing import Pool
import logging
from dsrt.config.defaults import DataConfig
class Filter:
def __init__(self, properties, parallel=True, config=DataConfig()):
self.properties = properties
self.config = config
self.parallel = parallel
self.init_logger()
def init_logger(self):
self.logger = logging.getLogger()
self.logger.setLevel(self.config['logging-level'])
def transform(self, dialogues):
chunksize=self.config['chunksize']
p = Pool() if self.parallel else Pool(1)
if self.config['filter-long-dialogues']:
self.max_dl = self.config['max-dialogue-length']
self.log('info', 'Filtering long dialogues (> {} utterances) ...'.format(self.max_dl))
res = []
total = len(dialogues)
self.log('info', '[filter running on {} cores]'.format(p._processes))
for d in tqdm.tqdm(p.imap(self.filter_long_dialogues, dialogues, chunksize=chunksize), total=total):
res.append(d)
dialogues = list(filter(None, res))
if self.config['filter-dialogues-with-long-utterances']:
self.max_ul = self.config['max-utterance-length']
self.log('info', 'Filtering dialogues with long utterances (> {} tokens) ...'.format(self.max_ul))
res = []
total = len(dialogues)
self.log('info', '[filter running on {} cores]'.format(p._processes))
for d in tqdm.tqdm(p.imap(self.filter_dialogues_with_long_utterances, dialogues, chunksize=chunksize), total=total):
res.append(d)
dialogues = list(filter(None, res))
p.close()
p.join()
return dialogues
    def filter_long_dialogues(self, dialogue):
        if len(dialogue) > self.max_dl:
            return None
        return dialogue
def filter_dialogues_with_long_utterances(self, dialogue):
for utterance in dialogue:
if len(utterance) > self.max_ul:
return None
return dialogue
####################
# UTILITIES #
####################
def log(self, priority, msg):
"""
Just a wrapper, for convenience.
NB1: priority may be set to one of:
- CRITICAL [50]
- ERROR [40]
- WARNING [30]
- INFO [20]
- DEBUG [10]
- NOTSET [0]
Anything else defaults to [20]
NB2: the levelmap is a defaultdict stored in Config; it maps priority
strings onto integers
"""
        # map the priority string onto a logging level, defaulting to 20 (INFO) as documented above
        level = {'CRITICAL': 50, 'ERROR': 40, 'WARNING': 30,
                 'INFO': 20, 'DEBUG': 10, 'NOTSET': 0}.get(str(priority).upper(), 20)
        self.logger.log(level, msg)
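
# A minimal usage sketch, assuming the default DataConfig provides the keys
# referenced above ('filter-long-dialogues', 'max-dialogue-length', ...):
# f = Filter(properties=None, parallel=False)
# dialogues = [[['hi', 'there'], ['hello']], [['word'] * 500]]
# kept = f.transform(dialogues)   # drops whatever exceeds the configured caps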
|
nilq/baby-python
|
python
|
import os
import numba
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from optimizer import *
from trainer_callbacks import *
from utils import *
#%% #################################### Model Trainer Class ####################################
class ModelTrainer():
def __init__(self,
model=None,
Loaders=[None,[]],
metrics=None,
fold=None,
lr=None,
epochsTorun=None,
checkpoint_saving_path=None,
resume_train_from_checkpoint=False,
resume_checkpoint_path=None,
test_run_for_error=False,
batch_size=None,
do_grad_accum=False,
grad_accum_steps=4,
use_fp16=True,
problem_name=None
):
super(ModelTrainer, self).__init__()
self.problem_name = problem_name
self.model = model.cuda()
self.trainLoader = Loaders[0]
self.valLoader = Loaders[1]
self.info_bbx = store_info(metrics)
self.fold = fold
if self.fold != None:
self.checkpoint_saving_path = checkpoint_saving_path + '/fold' + str(self.fold) + '/'
else:
self.checkpoint_saving_path = checkpoint_saving_path + '/'
self.fold = 0
os.makedirs(self.checkpoint_saving_path,exist_ok=True)
self.lr = lr
self.epochsTorun = epochsTorun
self.init_epoch = -1
self.test_run_for_error = test_run_for_error
self.current_checkpoint_save_count = 1
self.resume_checkpoint_path = resume_checkpoint_path
self.best_loss = 9999
self.best_f1_score = -9999
self.best_rmse = 9999
self.batch_size = batch_size
self.optimizer = Over9000(params=self.model.parameters(),lr=self.lr)
self.scheduler = ReduceLROnPlateau(self.optimizer, factor=0.5, mode='min', patience=5, verbose=True)
self.do_grad_accum = do_grad_accum
self.grad_accum_steps = grad_accum_steps
self.trainer_settings_dict = {
'do_grad_accum': self.do_grad_accum,
'grad_accum_steps':self.grad_accum_steps,
'epochsTorun':self.epochsTorun,
'lr':self.lr,
'batch_size':batch_size,
}
self.use_fp16 = use_fp16
self.scheduler_flag = 9999
self.criterion = RMSELoss().cuda()
self.criterion_2 = nn.CrossEntropyLoss().cuda()
self.scaler = torch.cuda.amp.GradScaler()
if resume_train_from_checkpoint:
if os.path.isfile(resume_checkpoint_path):
print("=> Loading checkpoint from '{}'".format(resume_checkpoint_path))
checkpoint_dict = torch.load(resume_checkpoint_path)
self.model.load_state_dict(checkpoint_dict['Model_state_dict'])
self.scheduler.load_state_dict(checkpoint_dict['Scheduler_state_dict'])
self.optimizer.load_state_dict(checkpoint_dict['Optimizer_state_dict'])
self.best_loss = checkpoint_dict['Best_val_loss']
self.best_f1_score = checkpoint_dict['Best_val_f1_score']
self.info_bbx.all_info = checkpoint_dict['All_info']
self.init_epoch = checkpoint_dict['Epoch']
print('Best Val loss is {}'.format(self.best_loss))
print('Best Val f1_score is {}'.format(self.best_f1_score))
print('Current val loss is {}'.format(checkpoint_dict['Current_val_Loss']))
print('Current val f1 score is {}'.format(checkpoint_dict['Current_val_f1_score']))
self.scheduler_flag = checkpoint_dict['Scheduler_flag']
del checkpoint_dict
torch.cuda.empty_cache()
else:
print("=> No checkpoint found at '{}' !".format(resume_checkpoint_path))
#%% train part starts here
def fit(self):
with TQDM() as pbar:
pbar.on_train_begin({'num_batches':len(self.trainLoader),'num_epoch':self.epochsTorun})
pbar.on_val_begin({'num_batches':len(self.valLoader),'num_epoch':self.epochsTorun})
self.train_metric_meter = Metric_Meter()
self.val_metric_meter = Metric_Meter()
for epoch in range(self.epochsTorun):
current_epoch_no = epoch+1
if current_epoch_no <= self.init_epoch:
continue
pbar.on_epoch_train_begin(self.fold,current_epoch_no)
self.info_bbx._init_new_epoch(current_epoch_no)
self.model.train()
torch.set_grad_enabled(True)
#self.optimizer.zero_grad()
self.train_metric_meter.reset()
self.val_metric_meter.reset()
for itera_no, data in enumerate(self.trainLoader):
pbar.on_train_batch_begin()
self.optimizer.zero_grad()
images, targets = data
images = images.cuda()
targets = targets.cuda()
with torch.cuda.amp.autocast():
out = self.model(images)
batch_loss = self.criterion(out['LOGITS'], targets[:,None]) + self.criterion_2(out['LOGITS_2'], targets.long())
self.scaler.scale(batch_loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
self.train_metric_meter.update(out['LOGITS'].clone(), targets, 'single')
self.info_bbx.update_train_info({'Loss':[(batch_loss.detach().item()),images.shape[0]]})
pbar.on_train_batch_end(logs=self.info_bbx.request_current_epoch_train_metric_info())
torch.cuda.empty_cache()
if self.test_run_for_error:
if itera_no==5:
break
#%% validation part starts here
f1_score, rmse = self.train_metric_meter.feedback()
self.info_bbx.update_train_info({'f1_score': f1_score, 'rmse': rmse})
pbar.on_epoch_train_end(self.info_bbx.request_current_epoch_train_metric_info())
pbar.on_epoch_val_begin(self.fold,current_epoch_no)
self.model.eval()
torch.set_grad_enabled(False)
with torch.no_grad():
for itera_no, data in enumerate(self.valLoader):
pbar.on_val_batch_begin()
images, targets = data
images = images.cuda()
targets = targets.cuda()
with torch.cuda.amp.autocast():
out = self.model(images)
batch_loss = self.criterion(out['LOGITS'], targets[:,None]) + self.criterion_2(out['LOGITS_2'], targets.long())
self.val_metric_meter.update(out['LOGITS'].clone(), targets, 'single')
self.info_bbx.update_val_info({'Loss':[(batch_loss.detach().item()),images.shape[0]]})
pbar.on_val_batch_end(logs=self.info_bbx.request_current_epoch_val_metric_info())
torch.cuda.empty_cache()
if self.test_run_for_error:
if itera_no==5:
break
f1_score, rmse = self.val_metric_meter.feedback()
self.info_bbx.update_val_info({'f1_score': f1_score, 'rmse': rmse})
pbar.on_epoch_val_end(self.info_bbx.request_current_epoch_val_metric_info())
#%% Update best parameters
if self.best_loss > self.info_bbx.get_info(current_epoch_no, 'Loss', 'Val'):
    print(' Val loss improved from {:.4f} to {:.4f}! '.format(
        self.best_loss, self.info_bbx.get_info(current_epoch_no, 'Loss', 'Val')))
    self.best_loss = self.info_bbx.get_info(current_epoch_no, 'Loss', 'Val')
    is_best_loss = True
else:
    print(' Val loss did not improve from {:.4f}! '.format(self.best_loss))
    is_best_loss = False
if self.best_f1_score < self.info_bbx.get_info(current_epoch_no, 'f1_score', 'Val'):
    print(' Val f1 score improved from {:.4f} to {:.4f}! '.format(
        self.best_f1_score, self.info_bbx.get_info(current_epoch_no, 'f1_score', 'Val')))
    self.best_f1_score = self.info_bbx.get_info(current_epoch_no, 'f1_score', 'Val')
    is_best_f1_score = True
else:
    print(' Val f1 score did not improve from {:.4f}! '.format(self.best_f1_score))
    is_best_f1_score = False
#%% Learning rate scheduler
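# The scheduler (assumed to be ReduceLROnPlateau-style) is driven by a
# synthetic metric: on improvement the flag is decremented so the scheduler
# always sees a new minimum; otherwise it is fed flag + 1, which registers
# as a plateau and advances the patience counter.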
if is_best_loss or is_best_f1_score:
self.scheduler_flag = self.scheduler_flag - 1
self.scheduler.step(self.scheduler_flag)
else:
self.scheduler.step(self.scheduler_flag+1)
#%%checkpoint dict creation
checkpoint_dict = {
'Epoch': current_epoch_no,
'Model_state_dict': self.model.state_dict(),
'Current_val_Loss': self.info_bbx.get_info(current_epoch_no,'Loss','Val'),
'Current_train_Loss': self.info_bbx.get_info(current_epoch_no,'Loss','Train'),
'Current_val_f1_score':self.info_bbx.get_info(current_epoch_no,'f1_score','Val'),
'Current_train_f1_score':self.info_bbx.get_info(current_epoch_no,'f1_score','Train'),
'Current_val_rmse':self.info_bbx.get_info(current_epoch_no,'rmse','Val'),
'Current_train_rmse':self.info_bbx.get_info(current_epoch_no,'rmse','Train'),
'Best_val_loss' : self.best_loss,
'Best_val_f1_score': self.best_f1_score,
'Best_val_rmse': self.best_rmse,
}
#%%checkpoint dict saving
if is_best_f1_score:
torch.save(checkpoint_dict, self.checkpoint_saving_path+'checkpoint_best_f1_score_fold{}.pth'.format(self.fold))
del checkpoint_dict
torch.cuda.empty_cache()
from backend.stage import ready_stage
from backend import message
from backend import helpers
class JobStage(ready_stage.ReadyStage):
stage_type = 'Job'
def __init__(self, game) -> None:
super().__init__(game)
self._job_selected = {} # facility selected indexed by player
@classmethod
def title(cls) -> str:
return "Select your Job"
### stage event handling ###
def begin(self):
# clear player jobs
for player in self.game.players:
if player.current_facility:
player.current_facility.leave(player)
super().begin()
def end(self):
super().end()
# update player jobs
for player in self.game.players:
# the client is responsible for checking that player has a job selected when clicking ready
assert player in self._job_selected
self._job_selected[player].join(player)
### action handling ###
def job_selected(self, sender, job: str) -> None:
if job is None:
# handle deselection
self._job_selected.pop(sender, None)
else:
# find facility using `job` string
try:
facility = self.game.facilities[job]
except KeyError:
raise message.InvalidArgumentError("job_selected called with invalid job %s" % job, method='job_selected', args=(job,))
else:
# change/add job selections
self._job_selected[sender] = facility
# notify all players of change
self._update_job_selections_to_all()
### convenience ###
@property
def _job_selections_id(self):
"""Return dictionary of player ids indexed by jobs"""
inverted = helpers.invert(self._job_selected, codomain=self.game.facilities.values())
return {f.name: [p.id for p in players] for f, players in inverted.items()}
def _update_job_selections_to_all(self) -> None:
for player in self.game.players:
player.update_job_selections(job_selections=self._job_selections_id)
### player handling ###
def handle_add_player(self, new_player) -> None:
super().handle_add_player(new_player)
# update new player's job selection
new_player.update_job_selections(job_selections=self._job_selections_id)
def handle_remove_player(self, player_removed) -> None:
super().handle_remove_player(player_removed)
# remove player from job selections
self._job_selected.pop(player_removed, None)
# update everyone else's job selection data
self._update_job_selections_to_all()
""" Robot http server and interface handler
Approach to operations:
This http server module is conceptualized as a gateway between a robot,
with private, internal operations, and the web. Incoming requests for
actions to be executed by the robot and requests for information such as
telemetry data arrive via http post and get.
Requests posted for execution are forwarded to the robot process using
the robot's internal communications framework. (In general, a move toward
all json messaging is being considered.)
When the robot makes information available for consumption by web users, it
sends the information to this server. Users retrieve the information in the
replies to their POST and GET requests.
"""
__author__ = "Tal G. Ball"
__copyright__ = "Copyright (C) 2009-2020 Tal G. Ball"
__license__ = "Apache License, Version 2.0"
__version__ = "1.0"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import os
import ssl
import time
from time import time as robtimer
import json
import logging
import multiprocessing
import threading
import socket
from lbrsys.settings import robhttpLogFile, robhttpAddress, USE_SSL
from lbrsys import feedback
from robcom import robauth
proc = multiprocessing.current_process()
if proc.name == "Robot Http Service":
    logging.basicConfig(level=logging.DEBUG, filename=robhttpLogFile,
                        format='[%(levelname)s] (%(processName)-10s) %(message)s')
class RobHTTPService(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
daemon_threads = True
def __init__(self, address, handler, receiveQ, sendQ):
HTTPServer.__init__(self, address, handler)
self.receiveQ = receiveQ
self.sendQ = sendQ
self.currentTelemetry = {'Ranges':{'Left':1,'Right':2,'Forward':3, 'Back':4, 'Bottom':5}}
self.newTelemetry = True
self.t0 = robtimer()
self.motors_powered = 0
self.telemetry_sent = 0
self.heartbeat_thread = None
self.heartbeat = False
self.dockSignal_state = {
'time_to_live': 3.0,
'left': 0.0, # timestamp of last left signal
'right': 0.0,
}
self.set_security_mode()
def check_dockSignal(self):
'''Monitor time to live for docksignals. Todo - generalize for any signals needing ttl'''
for signal in ['left', 'right']:
try:
state = self.currentTelemetry['dockSignal'][signal]
if state == 1:
if time.time() - self.dockSignal_state[signal] \
> self.dockSignal_state['time_to_live']:
# print("Clearing dockSignal: %s" % signal)
self.currentTelemetry['dockSignal'][signal] = 0
self.dockSignal_state[signal] = 0.0
except KeyError:
pass
return
def set_security_mode(self):
try:
if USE_SSL:
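# note: ssl.wrap_socket() is deprecated and was removed in Python 3.12;
# on newer interpreters use an ssl.SSLContext and its wrap_socket() instead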
self.socket = ssl.wrap_socket(
self.socket,
server_side=True,
certfile=os.environ['ROBOT_CERT'],
keyfile=os.environ['ROBOT_KEY']
)
except Exception as e:
logging.error("Exception securing http server: {}".format(str(e)))
# todo simplify heartbeat management using threading.Timer
def set_heartbeat(self):
if self.motors_powered > 0 and not self.heartbeat:
self.heartbeat_thread = threading.Thread(target=self.check_heartbeat)
self.heartbeat_thread.start()
self.heartbeat = True
def check_heartbeat(self, pulse=2.0):
time.sleep(pulse)
self.heartbeat = False
if self.motors_powered > 0 and time.time() - self.telemetry_sent > pulse:
self.sendQ.put('/r/0/0')
self.motors_powered = 0
logging.debug("Heartbeat timeout - cutting motor power")
print("Hearbeat timeout - cutting motor power at %s" % time.asctime())
else:
# print('\ttelemetry age: %.3f' % (time.time() - self.telemetry_sent))
self.set_heartbeat()
def updateTelemetry(self):
"""Telemetry updater - run in a separate thread."""
while True:
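# note: the dockSignal TTL check only runs when a new message arrives,
# since receiveQ.get() blocks until there is one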
self.check_dockSignal()
msg = self.receiveQ.get()
# print("Updating telemetry: {}".format(str(msg)))
self.receiveQ.task_done()
if msg == "Shutdown":
break
if type(msg) is feedback: # todo - reexamine and look at voltages
if type(msg.info) is dict:
for k, v in msg.info.items():
# for dockSignal updates, only replace the part of the telemetry
# provided by the current feedback message
# and note the time of the 1 signals to facilitate state /
# time to live management
if k == 'dockSignal':
if k not in self.currentTelemetry:
self.currentTelemetry[k] = {}
for signal in v.keys():
self.currentTelemetry[k][signal] = v[signal]
if signal == 'time':
continue
if v[signal] == 1:
self.dockSignal_state[signal] = v['time']
else:
# for all other updates, replace the entire telemetry entry
# with the current message
self.currentTelemetry[k] = v
else:
print("Please send telemetry feedback as dict: %s" % (msg.info))
self.newTelemetry = True
return
class RobHandler(BaseHTTPRequestHandler):
buffering = 1 # line buffering mode
http_log_file = open(robhttpLogFile, 'w', buffering)
def log_message(self, format, *args):
self.http_log_file.write("%s - - [%s] %s\n" %
(self.client_address[0],
self.log_date_time_string(),
format % args))
def handle_power(self, msgD):
command = None
# todo msgD type checking
if 'heading' in msgD and msgD['heading'] != '':
command = "/h/%.1f" % float(msgD['heading'])
elif 'turn' in msgD and msgD['turn'] != '':
command = "/t/%.1f" % float(msgD['turn'])
elif 'level' in msgD and msgD['level'] != '':
# print("POWER msgD: {}".format(str(msgD)))
level = float(msgD['level'])
angle = float(msgD['angle'])
sensor_range = 0  # renamed from `range` to avoid shadowing the built-in
sensor = 'Forward'
duration = 0
if 'range' in msgD and msgD['range'] != '':
    sensor_range = int(msgD['range'])
if 'sensor' in msgD and msgD['sensor'] != '':
    sensor = msgD['sensor']
if 'duration' in msgD and msgD['duration'] != '':
    duration = int(msgD['duration'])
command = "/r/%.2f/%d/%d/%s/%d" % (level, angle, sensor_range, sensor, duration)
# compare the parsed float rather than the raw JSON value,
# which may arrive as a string and would not compare against ints
if level > 0:
    self.server.motors_powered = time.time()
elif level == 0:
    self.server.motors_powered = 0
if command is not None:
# print("\tSENDING: {}".format(command))
self.server.sendQ.put(command)
self.send_response(200)
else:
if 'speech' not in msgD:
self.send_response(400)
if 'speech' in msgD:
self.handle_say_noreply(msgD)
if command is None:
self.send_response(200)
if self.server.newTelemetry:
self.server.newTelemetry = False
# for now, always send telemetry
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
buffer = json.dumps(self.server.currentTelemetry).encode()
self.wfile.write(buffer)
if self.server.motors_powered > 0:
# todo track heartbeats on a per client basis, otherwise client 2 could accidentally keep alive client 1
self.server.telemetry_sent = time.time()
self.server.set_heartbeat()
if self.server.currentTelemetry == "Shutdown":
logging.info("Shutting down robot http gateway service.")
shutdownThread = threading.Thread(target=self.server.shutdown,
name="Shutdown Thread")
shutdownThread.start()
shutdownThread.join()
return
def handle_telemetry(self):
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
buffer = json.dumps(self.server.currentTelemetry).encode()
# json.dump(buffer, self.wfile)
self.wfile.write(buffer)
self.server.telemetry_sent = time.time()
# print("GET path: %s" % self.path)
def handle_docksignal(self, msgD):
self.server.receiveQ.put(feedback(msgD))
self.send_response(204)
self.send_header("Access-Control-Allow-Origin", "*")
self.end_headers()
return
def handle_say_noreply(self, msgD):
try:
if 'text' in msgD['speech']:
speech_command = f"/s/{msgD['speech']['text']}"
else:
speech_command = f"/s/{msgD['speech']}"
except KeyError:
speech_command = f"/s/Bad speech post: {str(msgD)}"
except Exception as e:
speech_command = f"/s/Unexpected error in speech command: {str(msgD)}\n{str(e)}"
self.server.sendQ.put(speech_command)
return
def handle_say(self, msgD):
self.handle_say_noreply(msgD)
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", "*")
self.end_headers()
return
def is_user_authorized(self):
try:
# print(str(self.headers))
user = self.headers['User']
token_type, token = self.headers['Authorization'].split(':')
if token_type == 'TOK' and robauth.is_authorized(user, token):
return True
else:
raise Exception
except Exception as e:
logging.info("Failed authorization. Headers:\n%s\n%s" %
(str(self.headers), str(e)))
return False
def do_OPTIONS(self):
"""" Setup to support ajax queries from client"""
# print("Headers: %s" % str(self.headers))
self.send_response(200, 'ok')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header('Access-Control-Allow-Headers', 'X-Requested-With, Content-type, User, Authorization')
self.end_headers()
return
def do_GET(self):
if self.path.startswith('/validate'):
logging.debug("/validate with headers %s" % str(self.headers))
if not self.is_user_authorized():
self.send_response(401)
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(b'Validation failed.\r\n')
print("Validation for %s failed" % self.headers['User'])
self.log_message("Validation for %s failed", self.headers['User'])
else:
print("Validation for %s succeeded" % self.headers['User'])
self.log_message("Validation for %s succeeded", self.headers['User'])
self.send_response(200, 'ok')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(b'Validation succeeded.\r\n')
return
if self.path.startswith('/telemetry'):
self.handle_telemetry()
return
self.send_response(404)
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(b'Service not available.\r\n')
return
def do_POST(self):
"""
post power, turn or heading json for operating the motors
post to /docksignal path to communicate receipt of docking signals
post replies:
200 - post reply contains telemetry data
204 - post reply is status only, i.e. no new data.
400 - bad post request, i.e. no power level provided (for now)
401 - authentication failure
"""
tpstart = time.time()
if not self.is_user_authorized():
self.send_response(401)
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
return
#assume json for now, one obj per line.
msgS = self.rfile.readline()
if type(msgS) is bytes:
msgS = msgS.decode()
msgD = json.loads(msgS)
if self.path == '/':
self.handle_power(msgD)
elif self.path == '/docksignal':
self.handle_docksignal(msgD)
elif self.path == '/say':
self.handle_say(msgD)
return
def startService(receiveQ, sendQ, addr=robhttpAddress):
server = RobHTTPService(addr, RobHandler, receiveQ, sendQ)
# server = RobHTTPService(('', 9145), RobHandler, receiveQ, sendQ)
telUpdateThread = threading.Thread(target=server.updateTelemetry,
name = "TelemetryUpdateThread")
logging.debug("Starting Telemetry Updater.")
telUpdateThread.start()
logging.debug("Starting robot http gateway service.")
server.serve_forever()
telUpdateThread.join()
# todo refactor this close
RobHandler.http_log_file.close()
if __name__ == '__main__':
sendQ = multiprocessing.JoinableQueue()
receiveQ = multiprocessing.JoinableQueue()
#address = robhttpAddress
p = multiprocessing.Process(target=startService,
args=(receiveQ, sendQ,
# ('',9145)),
('lbr2a.ballfamily.org',9145)),
#('127.0.0.1',9145)),
name='Robot Http Service')
p.start()
print("Service started..")
cn = 0
while True:
comm = sendQ.get()
sendQ.task_done()
print("%d - %s: %s" % (cn,time.asctime(), comm))
cn += 1
if cn >= 20:
receiveQ.put("Shutdown")
break
else:
receiveQ.put("[{'Return':(%d,%d,%d)}]" % (cn,cn,cn))
print("Joining Queues..")
sendQ.join()
receiveQ.join()
print("Done.")
print("Stopping service process..")
#p.join()
p.terminate()
print("Done.")
"""
How plugins work
----------------
From a user's perspective, plugins are enabled and disabled through the command
line interface or through a UI. Users can also configure a plugin's behavior
through the main Kolibri interface.
.. note::
We have not yet written a configuration API, for now just make sure
configuration-related variables are kept in a central location of your
plugin.
It's up to the plugin to provide configuration ``Form`` classes and register
them.
We should aim for a configuration style in which data can be pre-seeded,
dumped and exported easily.
From a developer's perspective, plugins are Django applications listed
in ``INSTALLED_APPS`` and are initialized once when the server starts, that
is, at load time of the Django project, i.e. Kolibri.
Loading a plugin
~~~~~~~~~~~~~~~~
In general, a plugin should **never** modify internals of Kolibri or other
plugins without using the hooks API or normal conventional Django scenarios.
.. note::
Each app in ``INSTALLED_APPS`` is searched for the special
``kolibri_plugin`` module.
Everything that a plugin does is expected to be defined through
``<myapp>/kolibri_plugin.py``.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import importlib
import logging
from django.conf.urls import include
from django.conf.urls import url
from .base import KolibriPluginBase
logger = logging.getLogger(__name__)
# : Main registry is private for now, as we figure out if there is any external
# : module that has a legitimate business
__registry = []
__initialized = False
def initialize(apps=None):
"""
Called once at load time to register hook callbacks.
"""
global __initialized, __registry
if not apps:
from django.conf import settings
apps = settings.INSTALLED_APPS
if not __initialized:
logger.debug("Loading kolibri plugin registry...")
for app in apps:
try:
# Handle AppConfig INSTALLED_APPS string
if ".apps." in app:
# strip the trailing .apps.<Config> part of the string
import_string = app.split('.apps.')[0]
else:
import_string = app
import_string += ".kolibri_plugin"
plugin_module = importlib.import_module(import_string)
logger.debug("Loaded kolibri plugin: {}".format(app))
# Load a list of all class types in module
all_classes = [cls for cls in plugin_module.__dict__.values() if isinstance(cls, type)]
# Filter the list to only match the ones that belong to the module
# and not the ones that have been imported
plugin_package = plugin_module.__package__ if plugin_module.__package__ else \
plugin_module.__name__.rpartition('.')[0]
all_classes = filter(lambda x: plugin_package + ".kolibri_plugin" == x.__module__, all_classes)
plugin_classes = []
for Klass in all_classes:
if type(Klass) == type and issubclass(Klass, KolibriPluginBase):
plugin_classes.append(Klass)
for PluginClass in plugin_classes:
# Initialize the class, nothing more happens for now.
logger.debug("Initializing plugin: {}".format(PluginClass.__name__))
__registry.append(PluginClass())
except ImportError:
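# the app simply does not ship a kolibri_plugin module; it is not a plugin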
pass
__initialized = True
def get_urls():
global __initialized, __registry
assert __initialized, "Registry not initialized"
urlpatterns = []
for plugin_instance in __registry:
url_module = plugin_instance.url_module()
if url_module:
urlpatterns.append(
url(
plugin_instance.url_slug(),
include(
url_module,
namespace=plugin_instance.url_namespace()
)
)
)
return urlpatterns
# -*- coding: utf-8 -*-
"""
Base classes for Models.
"""
import typing as tp
from uuid import UUID
ModelType = tp.TypeVar("ModelType", bound='ModelBase')
class Model(tp.Protocol):
"""
Interface for base model class.
"""
uid: tp.Optional[UUID]
class ModelBase(object):
"""
Model storage ultimate base class.
"""
def __init__(self, *args: tp.Any, **kwargs: tp.Any) -> None:
    self.uid: tp.Optional[UUID] = kwargs.pop('uid', None)
    super().__init__()
def __repr__(self) -> str:
return f"{self.__class__.__name__}(uid={self.uid!r})"
# pylint doesn't know about pytest fixtures
# pylint: disable=unused-argument
import datetime
import os
import time
import uuid
import boto3
import pytest
from dagster_k8s.test import wait_for_job_and_get_raw_logs
from dagster_k8s_test_infra.integration_utils import (
can_terminate_run_over_graphql,
image_pull_policy,
launch_run_over_graphql,
terminate_run_over_graphql,
)
from dagster_test.test_project import cleanup_memoized_results, get_test_project_environments_path
from dagster_test.test_project.test_pipelines.repo import define_memoization_pipeline
from dagster import DagsterEventType
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.storage.tags import DOCKER_IMAGE_TAG
from dagster.utils.merger import deep_merge_dicts, merge_dicts
from dagster.utils.yaml_utils import merge_yamls
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
def get_celery_engine_config(dagster_docker_image, job_namespace):
return {
"execution": {
"celery-k8s": {
"config": merge_dicts(
(
{
"job_image": dagster_docker_image,
}
if dagster_docker_image
else {}
),
{
"job_namespace": job_namespace,
"image_pull_policy": image_pull_policy(),
},
)
}
},
}
def get_celery_job_engine_config(
dagster_docker_image, job_namespace, include_dagster_pipeline_env=False
):
return {
"execution": {
"config": merge_dicts(
(
{
"job_image": dagster_docker_image,
}
if dagster_docker_image
else {}
),
{
"job_namespace": job_namespace,
"image_pull_policy": image_pull_policy(),
},
(
{"env_config_maps": ["dagster-pipeline-env"]}
if include_dagster_pipeline_env
else {}
),
)
},
}
def test_execute_on_celery_k8s_default( # pylint: disable=redefined-outer-name
dagster_docker_image,
dagster_instance,
helm_namespace,
dagit_url,
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
run_id = launch_run_over_graphql(
dagit_url, run_config=run_config, pipeline_name="demo_pipeline_celery"
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
updated_run = dagster_instance.get_run_by_id(run_id)
assert updated_run.tags[DOCKER_IMAGE_TAG] == dagster_docker_image
def test_execute_on_celery_k8s_job_api( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace, dagit_url
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_job_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
run_id = launch_run_over_graphql(
dagit_url, run_config=run_config, pipeline_name="demo_job_celery"
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
updated_run = dagster_instance.get_run_by_id(run_id)
assert updated_run.tags[DOCKER_IMAGE_TAG] == dagster_docker_image
def test_execute_on_celery_k8s_job_api_with_legacy_configmap_set( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace, dagit_url
):
# Originally, jobs needed to include "dagster-pipeline-env" to pick up needed config when
# using the helm chart - it's no longer needed, but verify that nothing breaks if it's included
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_job_engine_config(
dagster_docker_image=dagster_docker_image,
job_namespace=helm_namespace,
include_dagster_pipeline_env=True,
),
)
run_id = launch_run_over_graphql(
dagit_url, run_config=run_config, pipeline_name="demo_job_celery"
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
updated_run = dagster_instance.get_run_by_id(run_id)
assert updated_run.tags[DOCKER_IMAGE_TAG] == dagster_docker_image
def test_execute_on_celery_k8s_image_from_origin( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace, dagit_url
):
# Like the previous test, but the image is found from the pipeline origin
# rather than the executor config
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(dagster_docker_image=None, job_namespace=helm_namespace),
)
run_id = launch_run_over_graphql(
dagit_url, run_config=run_config, pipeline_name="demo_pipeline_celery"
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
updated_run = dagster_instance.get_run_by_id(run_id)
assert updated_run.tags[DOCKER_IMAGE_TAG] == dagster_docker_image
def test_execute_subset_on_celery_k8s( # pylint: disable=redefined-outer-name
dagster_docker_image, helm_namespace, dagit_url
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_subset.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
run_id = launch_run_over_graphql(
dagit_url,
run_config=run_config,
pipeline_name="demo_pipeline_celery",
solid_selection=["count_letters"],
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
def test_execute_on_celery_k8s_retry_pipeline( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace, dagit_url
):
run_config = merge_dicts(
merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml")]),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
run_id = launch_run_over_graphql(
dagit_url, run_config=run_config, pipeline_name="retry_pipeline"
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
stats = dagster_instance.get_run_stats(run_id)
assert stats.steps_succeeded == 1
assert DagsterEventType.STEP_START in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run_id)
if event.is_dagster_event
]
assert DagsterEventType.STEP_UP_FOR_RETRY in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run_id)
if event.is_dagster_event
]
assert DagsterEventType.STEP_RESTARTED in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run_id)
if event.is_dagster_event
]
assert DagsterEventType.STEP_SUCCESS in [
event.dagster_event.event_type
for event in dagster_instance.all_logs(run_id)
if event.is_dagster_event
]
def test_execute_on_celery_k8s_with_resource_requirements( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace, dagit_url
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
run_id = launch_run_over_graphql(
dagit_url, run_config=run_config, pipeline_name="resources_limit_pipeline"
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
def _test_termination(dagit_url, dagster_instance, run_config):
run_id = launch_run_over_graphql(
dagit_url, run_config=run_config, pipeline_name="resource_pipeline"
)
# Wait for pipeline run to start
timeout = datetime.timedelta(0, 120)
start_time = datetime.datetime.now()
while True:
assert datetime.datetime.now() < start_time + timeout, "Timed out waiting for can_terminate"
pipeline_run = dagster_instance.get_run_by_id(run_id)
if can_terminate_run_over_graphql(dagit_url, run_id):
break
time.sleep(5)
# Wait for step to start
step_start_found = False
start_time = datetime.datetime.now()
while datetime.datetime.now() < start_time + timeout:
event_records = dagster_instance.all_logs(run_id)
for event_record in event_records:
if (
event_record.dagster_event
and event_record.dagster_event.event_type == DagsterEventType.STEP_START
):
step_start_found = True
break
if step_start_found:
break
time.sleep(5)
assert step_start_found
# Terminate run
assert can_terminate_run_over_graphql(dagit_url, run_id=run_id)
terminate_run_over_graphql(dagit_url, run_id=run_id)
# Check that pipeline run is marked as canceled
pipeline_run_status_canceled = False
start_time = datetime.datetime.now()
while datetime.datetime.now() < start_time + timeout:
pipeline_run = dagster_instance.get_run_by_id(run_id)
if pipeline_run.status == PipelineRunStatus.CANCELED:
pipeline_run_status_canceled = True
break
time.sleep(5)
assert pipeline_run_status_canceled
# Check that terminate cannot be called again
assert not can_terminate_run_over_graphql(dagit_url, run_id=run_id)
# Check for step failure and resource tear down
expected_events_found = False
start_time = datetime.datetime.now()
while datetime.datetime.now() < start_time + timeout:
step_failures_count = 0
resource_tear_down_count = 0
resource_init_count = 0
termination_request_count = 0
termination_success_count = 0
event_records = dagster_instance.all_logs(run_id)
for event_record in event_records:
if event_record.dagster_event:
if event_record.dagster_event.event_type == DagsterEventType.STEP_FAILURE:
step_failures_count += 1
elif event_record.dagster_event.event_type == DagsterEventType.PIPELINE_CANCELING:
termination_request_count += 1
elif event_record.dagster_event.event_type == DagsterEventType.PIPELINE_CANCELED:
termination_success_count += 1
elif event_record.message:
if "initializing s3_resource_with_context_manager" in event_record.message:
resource_init_count += 1
if "tearing down s3_resource_with_context_manager" in event_record.message:
resource_tear_down_count += 1
if (
step_failures_count == 1
and resource_init_count == 1
and resource_tear_down_count == 1
and termination_request_count == 1
and termination_success_count == 1
):
expected_events_found = True
break
time.sleep(5)
assert expected_events_found
s3 = boto3.resource("s3", region_name="us-west-1", use_ssl=True, endpoint_url=None).meta.client
bucket = "dagster-scratch-80542c2"
key = "resource_termination_test/{}".format(run_id)
assert s3.get_object(Bucket=bucket, Key=key)
def test_execute_on_celery_k8s_with_termination( # pylint: disable=redefined-outer-name
dagster_docker_image,
dagster_instance,
helm_namespace,
dagit_url,
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
_test_termination(dagit_url, dagster_instance, run_config)
@pytest.fixture(scope="function")
def set_dagster_k8s_pipeline_run_namespace_env(helm_namespace):
old_value = None
try:
    old_value = os.getenv("DAGSTER_K8S_PIPELINE_RUN_NAMESPACE")
    os.environ["DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"] = helm_namespace
    yield
finally:
    if old_value is not None:
        os.environ["DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"] = old_value
    else:
        # the variable did not exist before the test; remove it so it
        # does not leak into subsequent tests
        os.environ.pop("DAGSTER_K8S_PIPELINE_RUN_NAMESPACE", None)
def test_execute_on_celery_k8s_with_env_var_and_termination( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, set_dagster_k8s_pipeline_run_namespace_env, dagit_url
):
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image,
job_namespace={"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
),
)
_test_termination(dagit_url, dagster_instance, run_config)
def test_execute_on_celery_k8s_with_hard_failure( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, set_dagster_k8s_pipeline_run_namespace_env, dagit_url
):
run_config = merge_dicts(
merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image,
job_namespace={"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
),
),
{"solids": {"hard_fail_or_0": {"config": {"fail": True}}}},
)
run_id = launch_run_over_graphql(dagit_url, run_config=run_config, pipeline_name="hard_failer")
# Check that pipeline run is marked as failed
pipeline_run_status_failure = False
start_time = datetime.datetime.now()
timeout = datetime.timedelta(0, 120)
while datetime.datetime.now() < start_time + timeout:
pipeline_run = dagster_instance.get_run_by_id(run_id)
if pipeline_run.status == PipelineRunStatus.FAILURE:
pipeline_run_status_failure = True
break
time.sleep(5)
assert pipeline_run_status_failure
# Check for step failure for hard_fail_or_0.compute
start_time = datetime.datetime.now()
step_failure_found = False
while datetime.datetime.now() < start_time + timeout:
event_records = dagster_instance.all_logs(run_id)
for event_record in event_records:
if event_record.dagster_event:
if (
event_record.dagster_event.event_type == DagsterEventType.STEP_FAILURE
and event_record.dagster_event.step_key == "hard_fail_or_0"
):
step_failure_found = True
break
time.sleep(5)
assert step_failure_found
def _get_step_events(event_logs):
return [
event_log.dagster_event
for event_log in event_logs
if event_log.dagster_event is not None and event_log.dagster_event.is_step_event
]
def test_memoization_on_celery_k8s( # pylint: disable=redefined-outer-name
dagster_docker_image, dagster_instance, helm_namespace, dagit_url
):
ephemeral_prefix = str(uuid.uuid4())
run_config = deep_merge_dicts(
merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml")]),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
run_config = deep_merge_dicts(
run_config,
{"resources": {"io_manager": {"config": {"s3_prefix": ephemeral_prefix}}}},
)
try:
run_ids = []
for _ in range(2):
run_id = launch_run_over_graphql(
dagit_url,
run_config=run_config,
pipeline_name="memoization_pipeline",
mode="celery",
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
run_ids.append(run_id)
unmemoized_run_id = run_ids[0]
step_events = _get_step_events(dagster_instance.all_logs(unmemoized_run_id))
assert len(step_events) == 4
memoized_run_id = run_ids[1]
step_events = _get_step_events(dagster_instance.all_logs(memoized_run_id))
assert len(step_events) == 0
finally:
cleanup_memoized_results(
define_memoization_pipeline(), "celery", dagster_instance, run_config
)
@pytest.mark.integration
def test_volume_mounts(dagster_docker_image, dagster_instance, helm_namespace, dagit_url):
run_config = deep_merge_dicts(
merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml")]),
get_celery_engine_config(
dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
),
)
run_id = launch_run_over_graphql(
dagit_url,
run_config=run_config,
pipeline_name="volume_mount_pipeline",
mode="celery",
)
result = wait_for_job_and_get_raw_logs(
job_name="dagster-run-%s" % run_id, namespace=helm_namespace
)
assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
from __future__ import annotations
import src.globe.hexasphere as hexasphere
if __name__ == "__main__":
hs = hexasphere.Hexsphere(50, 1, 0.8)
print(hs)
import numpy as np
from rllab.envs.base import Env
from rllab.misc import logger
from rllab.spaces import Box
from rllab.spaces import Discrete
from utils import flat_to_one_hot, np_seed
class DiscreteEnv(Env):
def __init__(self, transition_matrix, reward, init_state, terminate_on_reward=False):
super(DiscreteEnv, self).__init__()
dX, dA, dXX = transition_matrix.shape
self.nstates = dX
self.nactions = dA
self.transitions = transition_matrix
self.init_state = init_state
self.reward = reward
self.terminate_on_reward = terminate_on_reward
self.__observation_space = Box(0, 1, shape=(self.nstates,))
#max_A = 0
#for trans in self.transitions:
# max_A = max(max_A, len(self.transitions[trans]))
self.__action_space = Discrete(dA)
def reset(self):
self.cur_state = self.init_state
obs = flat_to_one_hot(self.cur_state, ndim=self.nstates)
return obs
def step(self, a):
transition_probs = self.transitions[self.cur_state, a]
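# sample s' ~ P(. | s, a) from the corresponding row of the transition tensor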
next_state = np.random.choice(np.arange(self.nstates), p=transition_probs)
# the reward table may be (s, a)- or (s, a, s')-shaped (random_env below
# builds a 2-D table); support both
if self.reward.ndim == 3:
    r = self.reward[self.cur_state, a, next_state]
else:
    r = self.reward[self.cur_state, a]
self.cur_state = next_state
obs = flat_to_one_hot(self.cur_state, ndim=self.nstates)
done = False
if self.terminate_on_reward and r>0:
done = True
return obs, r, done, {}
def tabular_trans_distr(self, s, a):
return self.transitions[s, a]
def reward_fn(self, s, a):
return self.reward[s, a]
def log_diagnostics(self, paths):
#Ntraj = len(paths)
#acts = np.array([traj['actions'] for traj in paths])
obs = np.array([np.sum(traj['observations'], axis=0) for traj in paths])
state_count = np.sum(obs, axis=0)
#state_count = np.mean(state_count, axis=0)
state_freq = state_count/float(np.sum(state_count))
for state in range(self.nstates):
logger.record_tabular('AvgStateFreq%d'%state, state_freq[state])
@property
def transition_matrix(self):
return self.transitions
@property
def rew_matrix(self):
return self.reward
@property
def initial_state_distribution(self):
return flat_to_one_hot(self.init_state, ndim=self.nstates)
@property
def action_space(self):
return self.__action_space
@property
def observation_space(self):
return self.__observation_space
def random_env(Nstates, Nact, seed=None, terminate=False, t_sparsity=0.75):
assert Nstates >= 2
if seed is None:
seed = 0
reward_state=0
start_state=1
with np_seed(seed):
transition_matrix = np.random.rand(Nstates, Nact, Nstates)
transition_matrix = np.exp(transition_matrix)
for s in range(Nstates):
for a in range(Nact):
zero_idxs = np.random.randint(0, Nstates, size=int(Nstates*t_sparsity))
transition_matrix[s, a, zero_idxs] = 0.0
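# note: the random sparsification could in principle zero an entire row,
# which would make the normalization below divide by zero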
transition_matrix = transition_matrix/np.sum(transition_matrix, axis=2, keepdims=True)
reward = np.zeros((Nstates, Nact))
reward[reward_state, :] = 1.0
#reward = np.random.randn(Nstates,1 ) + reward
stable_action = seed % Nact #np.random.randint(0, Nact)
transition_matrix[reward_state, stable_action] = np.zeros(Nstates)
transition_matrix[reward_state, stable_action, reward_state] = 1
return DiscreteEnv(transition_matrix, reward=reward, init_state=start_state, terminate_on_reward=terminate)
if __name__ == '__main__':
env = random_env(5, 2, seed=0)
print(env.transitions)
print(env.transitions[0,0])
print(env.transitions[0,1])
env.reset()
for _ in range(100):
print(env.step(env.action_space.sample()))
from setuptools import setup
setup(
name="turkishnlp",
version="0.0.61",
packages=['turkishnlp'],
description="A python script that processes Turkish language",
long_description=open('README.md', encoding="utf8").read(),
long_description_content_type='text/markdown',
url="https://github.com/MeteHanC/turkishnlp",
author="Metehan Cetinkaya",
author_email="metehancet@gmail.com",
maintainer="Metehan Cetinkaya",
maintainer_email="metehancet@gmail.com",
keywords=['turkishnlp', 'python', 'nlp', 'language processing'],
classifiers=[
'Programming Language :: Python',
'Environment :: MacOS X',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
]
)
"""Frontend for spectra group project"""
__author__ = """Group01"""
__version__ = '0.1.0'
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.