content: stringlengths (0 to 1.05M)
origin: stringclasses (2 values)
type: stringclasses (2 values)
import time
from adafruit_servokit import ServoKit

kit = ServoKit(channels=16)

def mouth(action):
    if action == 0:
        print("openMouth")
        kit.servo[0].angle = 40
    if action == 1:
        print("close")
        kit.servo[0].angle = 180
    if action == 2:
        print("talk")
        #kit.servo[0].angle = 180
        while True:
            kit.servo[0].angle = 140
            time.sleep(0.5)
            kit.servo[0].angle = 100
            time.sleep(1)
            kit.servo[0].angle = 180
            time.sleep(0.75)
            kit.servo[0].angle = 120
            time.sleep(0.5)
    if action == 3:
        print("munch")
        while True:
            #kit.servo[0].angle = 180
            kit.continuous_servo[0].throttle = 1
            time.sleep(1.25)
            kit.continuous_servo[0].throttle = -1
            #time.sleep(1)
            #kit.servo[0].angle = 0
            kit.continuous_servo[0].throttle = 0
            time.sleep(1.25)
    elif (action < 0) or (action > 4):
        print("No action.")
nilq/baby-python
python
""" Perceptual decision-making task, loosely based on the random dot motion task. """ import numpy as np from pycog import Model, RNN, tasktools #------------------------------------------------------------------------------- # Network structure #------------------------------------------------------------------------------- Nin = 2 N = 100 Nout = 2 # E/I ei, EXC, INH = tasktools.generate_ei(N) # Output connectivity: read out from excitatory units only Cout = np.zeros((Nout, N)) Cout[:,EXC] = 1 #------------------------------------------------------------------------------- # Task structure #------------------------------------------------------------------------------- cohs = [1, 2, 4, 8, 16] left_rights = [1, -1] nconditions = len(cohs)*len(left_rights) pcatch = 1/(nconditions + 1) SCALE = 3.2 def scale(coh): return (1 + SCALE*coh/100)/2 def generate_trial(rng, dt, params): #--------------------------------------------------------------------------- # Select task condition #--------------------------------------------------------------------------- if params.get('catch', rng.rand() < pcatch): catch_trial = True else: catch_trial = False coh = params.get('coh', rng.choice(cohs)) left_right = params.get('left_right', rng.choice(left_rights)) #--------------------------------------------------------------------------- # Epochs #--------------------------------------------------------------------------- if catch_trial: epochs = {'T': 2000} else: fixation = 100 stimulus = 800 decision = 300 T = fixation + stimulus + decision epochs = { 'fixation': (0, fixation), 'stimulus': (fixation, fixation + stimulus), 'decision': (fixation + stimulus, T) } epochs['T'] = T #--------------------------------------------------------------------------- # Trial info #--------------------------------------------------------------------------- t, e = tasktools.get_epochs_idx(dt, epochs) # Time, task epochs trial = {'t': t, 'epochs': epochs} # Trial if catch_trial: trial['info'] = {} else: # Correct choice if left_right > 0: choice = 0 else: choice = 1 # Trial info trial['info'] = {'coh': coh, 'left_right': left_right, 'choice': choice} #--------------------------------------------------------------------------- # Inputs #--------------------------------------------------------------------------- X = np.zeros((len(t), Nin)) if not catch_trial: X[e['stimulus'],choice] = scale(+coh) X[e['stimulus'],1-choice] = scale(-coh) trial['inputs'] = X #--------------------------------------------------------------------------- # Target output #--------------------------------------------------------------------------- if params.get('target_output', False): Y = np.zeros((len(t), Nout)) # Output M = np.zeros_like(Y) # Mask if catch_trial: Y[:] = 0.2 M[:] = 1 else: # Fixation Y[e['fixation'],:] = 0.2 # Decision Y[e['decision'],choice] = 1 Y[e['decision'],1-choice] = 0.2 # Only care about fixation and decision periods M[e['fixation']+e['decision'],:] = 1 # Outputs and mask trial['outputs'] = Y trial['mask'] = M #--------------------------------------------------------------------------- return trial # Performance measure: two-alternative forced choice performance = tasktools.performance_2afc # Terminate training when psychometric performance exceeds 85% def terminate(performance_history): return np.mean(performance_history[-5:]) > 85 # Validation dataset size n_validation = 100*(nconditions + 1) #/////////////////////////////////////////////////////////////////////////////// if __name__ == '__main__': # Train model model = 
Model(Nin=Nin, N=N, Nout=Nout, ei=ei, Cout=Cout, generate_trial=generate_trial, performance=performance, terminate=terminate, n_validation=n_validation) model.train('savefile.pkl') # Run the trained network with 16*3.2% = 51.2% coherence for choice 1 rnn = RNN('savefile.pkl', {'dt': 0.5}) trial_func = generate_trial trial_args = {'name': 'test', 'catch': False, 'coh': 16, 'left_right': 1} info = rnn.run(inputs=(trial_func, trial_args))
nilq/baby-python
python
from OpenGL.GL import *
import threading
import random
import time

class Block:
    """
    Block

    * Base block class
    """
    def __init__(self, name, renderer):
        """
        Block.__init__

        :name: name of the block
        :texture: texture of the block
        :parent: the parent window
        """
        self.name = name
        self.renderer = renderer
        self.tex_coords = {}

        self.preloads = []
        self.preloads_per_frame = 1
        self.preloaded = 0
        self.added_data = []

    def preload(self, position, chunk, storage):
        """
        preload

        * Preloads the textures of the block
        """
        self.add(position, chunk, storage)

    def add(self, position, chunk, storage):
        """
        add

        * Adds a block to the world

        :position: the position of the block
        """
        x, y, z = position
        X, Y, Z = (x + 1, y + 1, z + 1)

        if not chunk.world.block_exists((x, Y, z)):
            storage.add((x, Y, Z, X, Y, Z, X, Y, z, x, Y, z), self.tex_coords["top"])
        if not chunk.world.block_exists((x, y - 1, z)):
            storage.add((x, y, z, X, y, z, X, y, Z, x, y, Z), self.tex_coords["bottom"])
        if not chunk.world.block_exists((x - 1, y, z)):
            storage.add((x, y, z, x, y, Z, x, Y, Z, x, Y, z), self.tex_coords["left"])
        if not chunk.world.block_exists((X, y, z)):
            storage.add((X, y, Z, X, y, z, X, Y, z, X, Y, Z), self.tex_coords["right"])
        if not chunk.world.block_exists((x, y, Z)):
            storage.add((x, y, Z, X, y, Z, X, Y, Z, x, Y, Z), self.tex_coords["front"])
        if not chunk.world.block_exists((x, y, z - 1)):
            storage.add((X, y, z, x, y, z, x, Y, z, X, Y, z), self.tex_coords["back"])

def all_blocks(renderer):
    """
    all_blocks

    * Returns a list of all blocks
    """
    # List all files in the blocks folder
    # Then import each file as a module
    # Then get the block class from the module
    # Then add the block class to the dictionary
    import os
    import sys
    import importlib

    sys.path.append(os.path.dirname(os.path.abspath(__file__)))

    blocks = {}
    for file in os.listdir("./terrain/blocks"):
        if file.endswith(".py") and file != "__init__.py":
            module = importlib.import_module("blocks." + file[:-3])
            _block = module.block(renderer)
            blocks[_block.name] = _block
    return blocks
nilq/baby-python
python
from django.conf import settings

from .filebased import FileBackend
from .s3 import S3Backend

DEFAULT_CLASS = FileBackend

def get_backend_class():
    if settings.FILE_STORAGE_BACKEND == "s3":
        return S3Backend
    elif settings.FILE_STORAGE_BACKEND == "file":
        return FileBackend
    else:
        return DEFAULT_CLASS
nilq/baby-python
python
from setuptools import setup

setup(name='data_loader',
      version='0.1',
      description='Hackathon data loader',
      url='https://github.com/snowch-labs/or60-ocado-ibm-hackathon',
      author='Chris Snow',
      author_email='chris.snow@uk.ibm.com',
      license='Apache 2.0',
      packages=['data_loader'],
      zip_safe=False)
nilq/baby-python
python
from json import loads
from gtts import gTTS
import urllib.request
import time
import random

link = "http://suggestqueries.google.com/complete/search?client=firefox&q="
rap = ""

def editLinkWithUserInput(link):
    magicWord = str(input("What do you want your starting words to be? "))
    if " " in magicWord:
        magicWord = magicWord.replace(" ", "%20")
    return link + magicWord

def editLink(link, results):
    # Pick a uniformly random suggestion. The original used
    # random.randint(0, len(results)) - 1, which could yield -1 (wrapping to
    # the last element) and double-weighted the final suggestion.
    index = random.randrange(len(results))
    print(index)
    wordChoice = results[index]
    if " " in wordChoice:
        wordChoice = wordChoice.replace(" ", "%20")
    return link + wordChoice, index

editedLink = editLinkWithUserInput(link)

while True:
    response = urllib.request.urlopen(editedLink)
    results = loads((response.read()).decode("utf-8"))[1]
    print(results)
    editedLink, indexOfPhrase = editLink(link, results)
    rap += " " + results[indexOfPhrase]
    print(rap)
    userInput = int(input("Do you want to continue or change the magic word? 0 for no, 1 yes, 2 for change? "))
    if userInput == 0:
        break
    elif userInput == 1:
        editedLink, indexOfPhrase = editLink(link, results)
    elif userInput == 2:
        editedLink = editLinkWithUserInput(link)

tts = gTTS(text=rap, lang='en', slow=False)
tts.save("Google Rap.mp3")
nilq/baby-python
python
import sys
import os

cur = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur)
sys.path.append(cur + "/..")
sys.path.append(cur + "/../common")

from SearchRepository import ISearchRepository
import unittest
from IQRServer import QRContext
from IQRRepository import IQRRepository

from search_service import *
from TokenManager import MockTokenManager

class MockSearchRepository(ISearchRepository, IQRRepository):
    def connect_repository(self, config):
        pass

    def get_full_author(self, id):
        if id == 1:
            return {'id': id}
        return None

    def get_full_series(self, id):
        if id == 1:
            return {'id': id}
        return None

    def get_full_book(self, id):
        if id == 1:
            return {'id': id}
        return None

    def get_filtered_books(self, filters: dict, offset=0, limit=100):
        return [{'id': 1}]

    def get_filtered_authors(self, filters: dict, offset=0, limit=100):
        return [{'id': 1}]

    def get_filtered_series(self, filters: dict, offset=0, limit=100):
        return [{'id': 1}]

def create_context(json_data=dict(), params=dict(), headers=dict(), form=dict(), files=dict()):
    ctx = QRContext(json_data, params, headers, form, files, repository=MockSearchRepository())
    ctx.add_manager(MockTokenManager.get_name(), MockTokenManager())
    return ctx

class TestGetBook(unittest.TestCase):
    def test_success(self):
        res = book(create_context(params={'id': 1}))
        self.assertEqual(200, res.status_code)
        self.assertEqual({'id': 1}, res.result)

    def test_not_found(self):
        res = book(create_context(params={'id': 2}))
        self.assertEqual(500, res.status_code)

class TestGetSeries(unittest.TestCase):
    def test_success(self):
        res = series(create_context(params={'id': 1}))
        self.assertEqual(200, res.status_code)
        self.assertEqual({'id': 1}, res.result)

    def test_not_found(self):
        res = series(create_context(params={'id': 2}))
        self.assertEqual(500, res.status_code)

class TestGetAuthor(unittest.TestCase):
    def test_success(self):
        res = author(create_context(params={'id': 1}))
        self.assertEqual(200, res.status_code)
        self.assertEqual({'id': 1}, res.result)

    def test_not_found(self):
        res = author(create_context(params={'id': 2}))
        self.assertEqual(500, res.status_code)

class TestMain(unittest.TestCase):
    # todo add filters test
    def test_find_all(self):
        res = main(create_context(params={'find_book': True, 'find_series': True, 'find_author': True}))
        self.assertEqual(200, res.status_code)
        self.assertEqual(3, len(res.result))

    def test_find_none(self):
        res = main(create_context(params={}))
        self.assertEqual(200, res.status_code)
        self.assertEqual(0, len(res.result))
nilq/baby-python
python
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

# ============= enthought library imports =======================
from envisage.ui.tasks.preferences_pane import PreferencesPane
from traits.api import Str, Bool, Int
from traitsui.api import View, Item, HGroup, VGroup

from pychron.core.helpers.strtools import to_bool
from pychron.core.pychron_traits import BorderVGroup
from pychron.database.tasks.connection_preferences import ConnectionPreferences, ConnectionPreferencesPane, \
    ConnectionFavoriteItem
from pychron.envisage.tasks.base_preferences_helper import BasePreferencesHelper


class DVCConnectionItem(ConnectionFavoriteItem):
    organization = Str
    meta_repo_name = Str
    meta_repo_dir = Str
    attributes = ('name', 'kind', 'username', 'host', 'dbname',
                  'password', 'enabled', 'default', 'path',
                  'organization', 'meta_repo_name', 'meta_repo_dir',
                  'timeout')

    def __init__(self, schema_identifier='', attrs=None, load_names=False):
        super(ConnectionFavoriteItem, self).__init__()
        self.schema_identifier = schema_identifier

        if attrs:
            attrs = attrs.split(',')
            try:
                (self.name, self.kind, self.username, self.host, self.dbname,
                 self.password, enabled, default, path) = attrs
            except ValueError:
                try:
                    (self.name, self.kind, self.username, self.host, self.dbname,
                     self.password, enabled, default, self.path,
                     self.organization, self.meta_repo_name, self.meta_repo_dir) = attrs
                except ValueError:
                    (self.name, self.kind, self.username, self.host, self.dbname,
                     self.password, enabled, default, self.path,
                     self.organization, self.meta_repo_name, self.meta_repo_dir,
                     timeout) = attrs
                    self.timeout = int(timeout)

            self.enabled = to_bool(enabled)
            self.default = to_bool(default)

        if load_names:
            self.load_names()


class DVCConnectionPreferences(ConnectionPreferences):
    preferences_path = 'pychron.dvc.connection'
    _adapter_klass = 'pychron.dvc.dvc_database.DVCDatabase'
    _schema_identifier = 'AnalysisTbl'
    _fav_klass = DVCConnectionItem


class DVCConnectionPreferencesPane(ConnectionPreferencesPane):
    model_factory = DVCConnectionPreferences
    category = 'DVC'

    def traits_view(self):
        ev = View(Item('organization'),
                  Item('meta_repo_name'),
                  Item('meta_repo_dir'))
        fav_grp = self.get_fav_group(edit_view=ev)
        return View(fav_grp)


class DVCPreferences(BasePreferencesHelper):
    preferences_path = 'pychron.dvc'
    use_cocktail_irradiation = Bool
    use_cache = Bool
    max_cache_size = Int
    update_currents_enabled = Bool
    use_auto_pull = Bool(True)


class DVCPreferencesPane(PreferencesPane):
    model_factory = DVCPreferences
    category = 'DVC'

    def traits_view(self):
        v = View(VGroup(BorderVGroup(Item('use_cocktail_irradiation',
                                          tooltip='Use the special cocktail.json for defining the '
                                                  'irradiation flux and chronology',
                                          label='Use Cocktail Irradiation')),
                        BorderVGroup(Item('use_auto_pull',
                                          label='Auto Pull',
                                          tooltip='If selected, automatically '
                                                  'update your version to the '
                                                  'latest version. Deselect if '
                                                  'you want to be asked to pull '
                                                  'the official version.')),
                        BorderVGroup(Item('update_currents_enabled', label='Enabled'),
                                     label='Current Values'),
                        BorderVGroup(HGroup(Item('use_cache', label='Enabled'),
                                            Item('max_cache_size', label='Max Size')),
                                     label='Cache')))
        return v


class DVCExperimentPreferences(BasePreferencesHelper):
    preferences_path = 'pychron.dvc.experiment'
    use_dvc_persistence = Bool


class DVCExperimentPreferencesPane(PreferencesPane):
    model_factory = DVCExperimentPreferences
    category = 'Experiment'

    def traits_view(self):
        v = View(BorderVGroup(Item('use_dvc_persistence', label='Use DVC Persistence'),
                              label='DVC'))
        return v


class DVCRepositoryPreferences(BasePreferencesHelper):
    preferences_path = 'pychron.dvc.repository'
    check_for_changes = Bool


class DVCRepositoryPreferencesPane(PreferencesPane):
    model_factory = DVCRepositoryPreferences
    category = 'Repositories'

    def traits_view(self):
        v = View(BorderVGroup(Item('check_for_changes', label='Check for Changes'),
                              label=''))
        return v

# ============= EOF =============================================
nilq/baby-python
python
#!/usr/bin/python
from __future__ import division, print_function
import multiprocessing
from subprocess import call
import numpy as np
import pandas as pd
import numpy.linalg as linalg
from math import sqrt
import ld.ldscore as ld
import ld.parse as ps
from ldsc_thin import __filter_bim__
from scipy.stats import norm
from collections import OrderedDict


def nearest_Corr(input_mat):
    d, v = linalg.eigh(input_mat)
    A = (v * np.maximum(d, 0)).dot(v.T)
    A = (A + A.T) / 2
    multiplier = 1 / np.sqrt(np.diag(A))
    A = A * multiplier
    A = (A.T * multiplier).T
    return A


def calLocalCov(i, partition, geno_array, coords, bps, gwas_snps, ld_scores, n1, n2, pheno_corr, pheno_corr_var):
    m = len(gwas_snps)
    CHR = partition.iloc[i, 0]
    START = partition.iloc[i, 1]
    END = partition.iloc[i, 2]
    idx = np.logical_and(np.logical_and(gwas_snps['CHR'] == CHR, bps <= END), bps >= START)
    m0 = np.sum(idx)
    if m0 < 120:
        df = pd.DataFrame(OrderedDict({"chr": [], "start": [], "end": [], "rho": [], "corr": [],
                                       "h2_1": [], "h2_2": [], "var": [], "p": [], "m": []}))
        return df
    tmp_coords = coords[idx]
    block_gwas_snps = gwas_snps[idx]
    block_ld_scores = ld_scores[idx]
    max_dist = 0.03
    block_left = ld.getBlockLefts(tmp_coords, max_dist)
    lN, blockLD = geno_array.ldCorrVarBlocks(block_left, idx)
    lN = block_ld_scores["L2"]
    meanLD = np.mean(lN)
    local_LD = nearest_Corr(blockLD)
    d, v = linalg.eigh(local_LD)
    order = d.argsort()[::-1]
    d = d[order]
    v = v[:, order]
    if np.sum(d > 0) < 120:
        df = pd.DataFrame(OrderedDict({"chr": [], "start": [], "end": [], "rho": [], "corr": [],
                                       "h2_1": [], "h2_2": [], "var": [], "p": [], "m": []}))
        return df
    sub_d = d[d > 0]
    sub_v = v[:, d > 0]
    tz1 = np.dot(sub_v.T, block_gwas_snps['Z_x'])
    tz2 = np.dot(sub_v.T, block_gwas_snps['Z_y'])
    y = tz1 * tz2 - pheno_corr * sub_d

    Localh1 = (np.mean(block_gwas_snps['Z_x'] ** 2) - 1) / meanLD * m0 / n1
    Localh2 = (np.mean(block_gwas_snps['Z_y'] ** 2) - 1) / meanLD * m0 / n2
    Z_x = gwas_snps['Z_x']
    Z_y = gwas_snps['Z_y']
    h1 = (np.mean(Z_x ** 2) - 1) / np.mean(ld_scores['L2']) * m / n1
    h2 = (np.mean(Z_y ** 2) - 1) / np.mean(ld_scores['L2']) * m / n2
    wh1 = h1 * m0 / m
    wh2 = h2 * m0 / m
    #wh12 = np.max([Localh1, 0])
    #wh22 = np.max([Localh2, 0])
    #wh1 = (wh11 + wh12) / 2
    #wh2 = (wh21 + wh22) / 2
    Localrho = (np.sum(block_gwas_snps['Z_x'] * block_gwas_snps['Z_y']) - pheno_corr * m0) / meanLD / sqrt(n1 * n2)

    threshold = 1
    cur_d = sub_d[sub_d > threshold]
    cur_y = y[sub_d > threshold]
    cur_dsq = cur_d ** 2
    denominator = (wh1 * cur_d / m0 + 1 / n1) * (wh2 * cur_d / m0 + 1 / n2)
    cur_v1 = np.sum(cur_dsq / denominator)
    cur_v2 = np.sum(cur_y / sqrt(n1 * n2) / denominator)
    cur_v3 = np.sum(cur_y ** 2 / (n1 * n2) / (denominator * cur_dsq))
    emp_var = [(cur_v3 - (cur_v2 ** 2) / cur_v1) / (cur_v1 * (len(cur_d) - 1))]
    theo_var = [1 / cur_v1]
    for K in range(len(cur_d), len(sub_d)):
        eig = sub_d[K]
        tmp_y = y[K]
        cur_v1 += eig ** 2 / ((wh1 * eig / m0 + 1 / n1) * (wh2 * eig / m0 + 1 / n2))
        cur_v2 += tmp_y / sqrt(n1 * n2) / ((wh1 * eig / m0 + 1 / n1) * (wh2 * eig / m0 + 1 / n2))
        cur_v3 += tmp_y ** 2 / (n1 * n2) / ((wh1 * eig ** 2 / m0 + eig / n1) * (wh2 * eig ** 2 / m0 + eig / n2))
        emp_var.append((cur_v3 - (cur_v2 ** 2) / cur_v1) / (cur_v1 * K))
        theo_var.append(1 / cur_v1)
    max_emp_theo = np.maximum(emp_var, theo_var)
    min_idx = np.argmin(max_emp_theo)
    y = y[:(len(cur_d) + min_idx - 1)]
    sub_d = sub_d[:(len(cur_d) + min_idx - 1)]
    sub_dsq = sub_d ** 2
    var_rho = m0 ** 2 * min(max_emp_theo)

    q = (wh1 * sub_d / m0 + 1 / n1) * (wh2 * sub_d / m0 + 1 / n2)
    v4 = np.sum(sub_d / q) / np.sum(sub_dsq / q)
    var_phencorr = pheno_corr_var / (n1 * n2) * m0 ** 2 * v4 ** 2
    var_rho += var_phencorr

    se_rho = sqrt(var_rho)
    p_value = norm.sf(abs(Localrho / se_rho)) * 2
    if Localh1 < 0 or Localh2 < 0:
        corr = np.nan
    else:
        corr = Localrho / sqrt(Localh1 * Localh2)
    df = pd.DataFrame(OrderedDict({"chr": [CHR], "start": [START], "end": [END], "rho": [Localrho],
                                   "corr": [corr], "h2_1": [Localh1], "h2_2": [Localh2],
                                   "var": [var_rho], "p": [p_value], "m": [m0]}))
    return df


def _supergnova(bfile, partition, thread, gwas_snps, ld_scores, n1, n2, pheno_corr, pheno_corr_var):
    m = len(gwas_snps)
    snp_file, snp_obj = bfile + '.bim', ps.PlinkBIMFile
    ind_file, ind_obj = bfile + '.fam', ps.PlinkFAMFile
    array_file, array_obj = bfile + '.bed', ld.PlinkBEDFile

    # read bim/snp
    array_snps = snp_obj(snp_file)
    chr_bfile = list(set(array_snps.df['CHR']))
    tmp_partition = partition[partition.iloc[:, 0].isin(chr_bfile)]
    tmp_gwas_snps = gwas_snps[gwas_snps.iloc[:, 0].isin(chr_bfile)].reset_index(drop=True)
    tmp_ld_scores = ld_scores[ld_scores.iloc[:, 0].isin(chr_bfile)].reset_index(drop=True)
    blockN = len(tmp_partition)

    # snp list
    annot_matrix, annot_colnames, keep_snps = None, None, None
    n_annot = 1
    keep_snps = __filter_bim__(tmp_gwas_snps, array_snps)

    array_indivs = ind_obj(ind_file)
    n = len(array_indivs.IDList)
    keep_indivs = None

    ## reading genotype
    geno_array = array_obj(array_file, n, array_snps, keep_snps=keep_snps,
                           keep_indivs=keep_indivs, mafMin=None)

    coords = np.array(array_snps.df['CM'])[geno_array.kept_snps]
    bps = np.array(array_snps.df['BP'])[geno_array.kept_snps]

    ## Calculating local genetic covariance
    results = []

    def collect_results(result):
        results.append(result)

    pool = multiprocessing.Pool(processes=thread)
    for i in range(blockN):
        pool.apply_async(calLocalCov,
                         args=(i, tmp_partition, geno_array, coords, bps, tmp_gwas_snps,
                               tmp_ld_scores, n1, n2, pheno_corr, pheno_corr_var),
                         callback=collect_results)
    pool.close()
    pool.join()

    df = pd.concat(results, ignore_index=True)
    #df = pd.DataFrame(results)
    #df.columns = ["chr", "start", "end", "rho", "corr", "h1", "h2", "var", "p", "m"]
    convert_dict = {"chr": int, "start": int, "end": int, "m": int}
    df = df.astype(convert_dict)
    return df


def calculate(bfile, partition, thread, gwas_snps, ld_scores, n1, n2, pheno_corr, pheno_corr_var):
    if thread is None:
        thread = multiprocessing.cpu_count()
        print('{C} CPUs are detected. Using {C} threads in computation ... '.format(C=str(thread)))
    else:
        cpuNum = multiprocessing.cpu_count()
        thread = min(thread, cpuNum)
        print('{C} CPUs are detected. Using {N} threads in computation ... '.format(C=str(cpuNum), N=str(thread)))
    df = None
    if '@' in bfile:
        all_dfs = []
        chrs = list(set(partition.iloc[:, 0]))
        for i in range(len(chrs)):
            cur_bfile = bfile.replace('@', str(chrs[i]))
            all_dfs.append(_supergnova(cur_bfile, partition, thread, gwas_snps, ld_scores,
                                       n1, n2, pheno_corr, pheno_corr_var))
            print('Computed local genetic covariance for chromosome {}'.format(chrs[i]))
        df = pd.concat(all_dfs, ignore_index=True)
    else:
        df = _supergnova(bfile, partition, thread, gwas_snps, ld_scores, n1, n2, pheno_corr, pheno_corr_var)
    return df
nilq/baby-python
python
file1 = open("./protein1.pdb", "r")
file2 = open("./protein2.pdb", "r")

from math import *

model1 = []
model2 = []

for line in file1:
    line = line.rstrip()
    if "CA" in line:
        list1 = line.split()
        model1.append([float(list1[6]), float(list1[7]), float(list1[8])])

for line in file2:
    line = line.rstrip()
    if "CA" in line:
        list2 = line.split()
        model2.append([float(list2[5]), float(list2[6]), float(list2[7])])

# this first part generates two lists of lists, one for each file,
# in which every element represents the coordinates of each alpha carbon

def rmsd(a, b):
    """This function computes the RMSD of two structures, given the two
    lists of alpha-carbon coordinates"""
    Di = []
    for i in range(len(a)):
        D = ((a[i][0] - b[i][0]) ** 2 + (a[i][1] - b[i][1]) ** 2 + (a[i][2] - b[i][2]) ** 2)
        Di.append(D)
    # Divide by the number of atoms; the original hard-coded 0.01 factor
    # silently assumed exactly 100 alpha carbons.
    RMSD = sqrt(fsum(Di) / len(Di))
    return RMSD

print(rmsd(model1, model2))

file1.close()
file2.close()
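# A quick synthetic sanity check of rmsd(), added here for illustration: two
# point sets offset by (1, 0, 0) everywhere should give an RMSD of exactly 1.
assert rmsd([[0, 0, 0], [1, 1, 1]], [[1, 0, 0], [2, 1, 1]]) == 1.0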
nilq/baby-python
python
""" 关键点解析 链表的基本操作(删除指定节点) 虚拟节点dummy 简化操作 其实设置dummy节点就是为了处理特殊位置(头节点),这这道题就是如果头节点是给定的需要删除的节点呢? 为了保证代码逻辑的一致性,即不需要为头节点特殊定制逻辑,才采用的虚拟节点。 如果连续两个节点都是要删除的节点,这个情况容易被忽略。 eg: // 只有下个节点不是要删除的节点才更新current if (!next || next.val !== val) { current = next; } """ """ Before writing any code, it's good to make a list of edge cases that we need to consider. This is so that we can be certain that we're not overlooking anything while coming up with our algorithm, and that we're testing all special cases when we're ready to test. These are the edge cases that I came up with. The linked list is empty, i.e. the head node is None. Multiple nodes with the target value in a row. The head node has the target value. The head node, and any number of nodes immediately after it have the target value. All of the nodes have the target value. The last node has the target value. """ # Definition for singly-linked list. class ListNode(object): def __init__(self, x): self.val = x self.next = None class Solution: def removeElements(self,head,val): dummy_head = ListNode(-1) dummy_head.next = head current_node = dummy_head while current_node.next != None: if current_node.next.val == val: current_node.next = current_node.next.next else: current_node = current_node.next return dummy_head.next def removeElements1(self,head:ListNode,val:int)->ListNode: prev = ListNode(0) prev.next = head cur = prev while cur.next: if cur.next.val == val: cur.next == cur.next.next else: cur = cur.next return prev.next
nilq/baby-python
python
""" Given two strings s and t, return true if t is an anagram of s, and false otherwise. An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once. Example 1: Input: s = "anagram", t = "nagaram" Output: true Example 2: Input: s = "rat", t = "car" Output: false Constraints: - 1 <= s.length, t.length <= 5 * 10^4 - s and t consist of lowercase English letters. Follow up: What if the inputs contain Unicode characters? How would you adapt your solution to such a case? """ def solution(s: str, t: str) -> bool: return "".join(sorted(s)) == "".join(sorted(t))
nilq/baby-python
python
import redis

rds = redis.StrictRedis('db', 6379)
nilq/baby-python
python
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.contrib.auth.forms import PasswordChangeForm


class CreateProject(forms.Form):
    projectname = forms.SlugField(label="Enter project name", max_length=50, required=True)
    helper = FormHelper()
    helper.form_method = 'POST'
    helper.add_input(Submit('submit', 'Create Project'))
    helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))


class DeleteProject(forms.Form):
    helper = FormHelper()
    helper.form_method = 'POST'
    helper.add_input(Submit('submit', 'Confirm'))
    helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))


class CreatePipeline(forms.Form):
    pipelinename = forms.SlugField(label="Pipeline name", max_length=50, required=True)
    pipelineorder = forms.IntegerField(label="Order", required=True, min_value=1, max_value=900)
    pipelinefunction = forms.CharField(label="Pipeline function:", required=False, widget=forms.Textarea)
    helper = FormHelper()
    helper.form_tag = False


class LinkGenerator(forms.Form):
    function = forms.CharField(label="Write your link generator function here:", required=False, widget=forms.Textarea)
    helper = FormHelper()
    helper.form_tag = False


class Scraper(forms.Form):
    function = forms.CharField(label="Write your scraper function here:", required=False, widget=forms.Textarea)
    helper = FormHelper()
    helper.form_tag = False


class ItemName(forms.Form):
    itemname = forms.SlugField(label="Enter item name", max_length=50, required=True)
    helper = FormHelper()
    helper.form_tag = False


class FieldName(forms.Form):
    fieldname = forms.SlugField(label="Field 1", max_length=50, required=False)
    extra_field_count = forms.CharField(widget=forms.HiddenInput())
    helper = FormHelper()
    helper.form_tag = False

    def __init__(self, *args, **kwargs):
        extra_fields = kwargs.pop('extra', 0)

        super(FieldName, self).__init__(*args, **kwargs)
        self.fields['extra_field_count'].initial = extra_fields

        for index in range(int(extra_fields)):
            # generate extra fields in the number specified via extra_fields
            self.fields['field_{index}'.format(index=index + 2)] = forms.CharField(required=False)


class ChangePass(PasswordChangeForm):
    helper = FormHelper()
    helper.form_method = 'POST'
    helper.add_input(Submit('submit', 'Change'))


class Settings(forms.Form):
    settings = forms.CharField(required=False, widget=forms.Textarea)
    helper = FormHelper()
    helper.form_tag = False


class ShareDB(forms.Form):
    username = forms.CharField(label="Enter the account name for the user with whom you want to share the database",
                               max_length=150, required=True)
    helper = FormHelper()
    helper.form_method = 'POST'
    helper.add_input(Submit('submit', 'Share'))
    helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))


class ShareProject(forms.Form):
    username = forms.CharField(label="Enter the account name for the user with whom you want to share the project",
                               max_length=150, required=True)
    helper = FormHelper()
    helper.form_method = 'POST'
    helper.add_input(Submit('submit', 'Share'))
    helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))
nilq/baby-python
python
from JumpScale import j

def cb():
    from .HttpClient import HttpClient
    return HttpClient()

j.base.loader.makeAvailable(j, 'clients')
j.clients._register('http', cb)
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Assumes: Python 3 (>= 3.6)
#          selenium ($ pip install selenium)
#          ChromeDriver (http://chromedriver.chromium.org)
#          Chrome binary (> v61)
#

__author__ = "Adam Mikeal <adam@tamu.edu>"
__version__ = "0.8"

import os
import sys
import logging
import subprocess

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException

# Module variables
CHROME_PATH = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
DRIVER_PATH = 'bin/chromedriver'
CHROME_MINVER = '61'
DRIVER_MINVER = '2.4'
LOG_LEVEL = logging.DEBUG
DUO_TIMEOUT = 15

# Set up logging
LOG = logging.getLogger('web_driver')
log_handler = logging.StreamHandler()
log_handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(levelname)s: %(message)s'))
LOG.addHandler(log_handler)
LOG.setLevel(LOG_LEVEL)


class AuthenticatedWeb(object):
    TARGET_URL = None
    DRIVER = None
    AUTH_URL = 'https://cas.tamu.edu'

    def __init__(self, url, chrome_path=None, chrome_driver=None, auth_url=None, duo_timeout=None, log_level=None):
        # Set the log level first (if specified)
        if log_level:
            self.set_log_level(log_level)

        # store object variables
        self.TARGET_URL = url
        LOG.info(f"Using target URL: {self.TARGET_URL}")

        # Override the default binary paths if specified
        if chrome_path:
            self.CHROME_PATH = os.path.abspath(chrome_path)
        else:
            self.CHROME_PATH = os.path.abspath(CHROME_PATH)
        LOG.info(f"Using Chrome binary location: {self.CHROME_PATH}")

        if chrome_driver:
            self.DRIVER_PATH = os.path.abspath(chrome_driver)
        else:
            self.DRIVER_PATH = os.path.abspath(DRIVER_PATH)
        LOG.info(f"Using selenium driver location: {self.DRIVER_PATH}")

        # Override the default CAS URL if specified
        if auth_url:
            self.AUTH_URL = auth_url

        if duo_timeout:
            if isinstance(duo_timeout, int):
                # fixed: the original assigned a local variable here, which
                # silently left the module-level timeout unchanged
                global DUO_TIMEOUT
                LOG.info(f"DUO_TIMEOUT set to {duo_timeout} seconds")
                DUO_TIMEOUT = duo_timeout
            else:
                LOG.error(f"Unable to set DUO_TIMEOUT to specified value ('{duo_timeout}'); must be an integer. Using default value ({DUO_TIMEOUT})")

        # Test paths and binaries
        if not os.path.isfile(self.CHROME_PATH):
            LOG.error(f"No binary found at CHROME_PATH: {self.CHROME_PATH}")
            return None
        if not self._check_version(self.CHROME_PATH, CHROME_MINVER, version_index=2):
            LOG.error(f"Chrome version specified is too old: must be >{CHROME_MINVER}")
            return None
        if not os.path.isfile(self.DRIVER_PATH):
            LOG.error(f"No binary found at DRIVER_PATH: {self.DRIVER_PATH}")
            return None
        if not self._check_version(self.DRIVER_PATH, DRIVER_MINVER):
            LOG.error(f"Chrome driver specified is too old: must be >{DRIVER_MINVER}")
            return None

        # Prep the headless Chrome
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.binary_location = self.CHROME_PATH
        self.DRIVER = webdriver.Chrome(executable_path=self.DRIVER_PATH, options=chrome_options)

        #
        # Attempt to get to the target site (expect CAS redirection)
        # https://selenium-python.readthedocs.io/api.html#selenium.webdriver.remote.webdriver.WebDriver
        #
        self.DRIVER.get(self.TARGET_URL)

        # Detect if CAS redirection happened
        if self.AUTH_URL in self.DRIVER.current_url:
            LOG.debug(f"Auth redirection detected; current URL: {self.DRIVER.current_url}")

    def __repr__(self):
        return f"Headless Chrome object for URL: {self.TARGET_URL} (currently at {self.DRIVER.current_url})"

    def __del__(self):
        # Close the connection to the headless browser (clean up resources)
        if self.DRIVER:
            LOG.debug("Calling close() on selenium driver...")
            self.DRIVER.close()

    def set_log_level(self, lvl):
        if not isinstance(lvl, int):
            LOG.error(f"Invalid log level: '{lvl}' (expects integer)")
            raise ValueError(f"Invalid log level: '{lvl}'")
        # fixed: the original assigned a local LOG_LEVEL that shadowed the
        # module-level value
        global LOG_LEVEL
        LOG_LEVEL = lvl
        LOG.setLevel(LOG_LEVEL)
        LOG.info(f"New log level set: {lvl} ({logging.getLevelName(lvl)})")

    def _check_version(self, binary_path, minimum_version, version_index=1, flag='--version'):
        try:
            # grab the version string by passing '--version' option to the binary
            output = subprocess.check_output(f"'{binary_path}' {flag}", shell=True)
            LOG.debug(f"Version output: {output.decode('utf-8')}")

            # split the output string into parts and grab the part specified by 'version_index'
            output_parts = output.decode('utf-8').split()
            LOG.debug(f"Version index: {version_index}; List element: '{output_parts[version_index]}'")

            # compare the version part to the 'minimum_version' string
            if output_parts[version_index] < minimum_version:
                return False
            else:
                return True
        except Exception as e:
            LOG.error(f"Unable to verify version for binary: {binary_path}")
            LOG.debug(f"{e.__class__.__name__}: {e}")
            return False

    def authenticate(self, netid, password, expect_duo=True):
        # Check for AUTH_URL and exit if not seen
        if self.AUTH_URL not in self.DRIVER.current_url:
            LOG.error(f"Unable to perform authentication (expected {self.AUTH_URL}; current_url={self.DRIVER.current_url} )")
            return False

        # Start the auth process
        LOG.info(f"Authenticating using NetID: {netid}")
        LOG.info(f"Authenticating using password: {password[0]}{'*'*(len(password)-2)}{password[-1]}")

        try:
            # Find the username field and enter the NetID
            u_fld = self.DRIVER.find_element_by_id("username")
            u_fld.clear()
            u_fld.send_keys(netid)
            u_fld.send_keys(Keys.RETURN)

            # Enter the password
            p_fld = self.DRIVER.find_element_by_id("password")
            p_fld.clear()
            p_fld.send_keys(password)
            p_fld.send_keys(Keys.RETURN)
        except NoSuchElementException as e:
            LOG.error(f"Unable to locate username or password field")
            LOG.debug(f"{e.__class__.__name__}: {e}")
            return False
        except Exception as e:
            LOG.error(f"Unable to access username or password field")
            LOG.debug(f"{e.__class__.__name__}: {e}")
            return False

        # return now if expect_duo is set to False
        if not expect_duo:
            LOG.debug(f"expect_duo=False; Not attempting 2FA")
            return True

        # Handle the Duo 2-factor auth
        try:
            # Enter the Duo iframe
            LOG.debug("Attempting to enter Duo <iframe> for 2FA")
            self.DRIVER.switch_to.frame(self.DRIVER.find_element_by_id("duo_iframe"))

            # Get the correct button and click it
            LOG.debug("Clicking button for default 2FA method (should be push notification)")
            button = self.DRIVER.find_element_by_xpath('//*[@id="auth_methods"]/fieldset[1]/div[1]/button')
            button.click()

            # Wait for the page to redirect
            LOG.info(f"Waiting {DUO_TIMEOUT} seconds for Duo 2FA...")
            WebDriverWait(self.DRIVER, DUO_TIMEOUT).until(EC.url_contains(self.TARGET_URL))
            LOG.debug(f"Detected redirect to target URL ('{self.TARGET_URL}')")
            return True
        except Exception as e:
            LOG.error("Could not complete Duo 2FA process.")
            LOG.debug(f"{e.__class__.__name__}: {e}")
            return False

    def by_xpath(self, xpath_str, find_all=False):
        LOG.debug(f"Called by_xpath() using expression: '{xpath_str}'")
        if find_all:
            return self.DRIVER.find_elements_by_xpath(xpath_str)
        else:
            return self.DRIVER.find_element_by_xpath(xpath_str)

    def by_name(self, elem_name, find_all=False):
        LOG.debug(f"Called by_name() using string: '{elem_name}'")
        if find_all:
            return self.DRIVER.find_elements_by_name(elem_name)
        else:
            return self.DRIVER.find_element_by_name(elem_name)

    def by_id(self, elem_id):
        LOG.debug(f"Called by_id() using string: '{elem_id}'")
        return self.DRIVER.find_element_by_id(elem_id)

    def send_keys(self, keys):
        #TODO: Don't think this method is valid here
        LOG.debug(f"Called send_keys() using string: '{keys}'")
        return self.DRIVER.send_keys(keys)

    def go(self, url):
        LOG.debug(f"Called get() with url: '{url}'")
        return self.DRIVER.get(url)
nilq/baby-python
python
""" Azdevman Consts This module contains constant variables that will not change """ # Environment Variables AZDEVMAN_ENV_PREFIX = "AZDEVMAN_" # Azure Devops AZ_BASE_URL = "https://dev.azure.com/" AZ_DEFAULT_ORG = "ORGANIZATION" AZ_DEFAULT_PAT = "UEFUCg==" AZ_DEFAULT_PROJECT = "PROJECT" # Config file CONFIG_DIR = ".azdevman" CONFIG_FILE_NAME = "config.json" CONFIG_FILE_DEFAULT_PROFILE = "default" CONFIG_FILE_DEFAULT_CONTENT = { "CurrentContext": CONFIG_FILE_DEFAULT_PROFILE, "Profiles": { "default": { "Azure DevOps Organization": AZ_DEFAULT_ORG, "Personal Access Token": AZ_DEFAULT_PAT, "Project": AZ_DEFAULT_PROJECT } } } # Azure DevOps build definition AZ_DEFAULT_BUILD_DEF_PROCESS = { "phases": [ { "condition": "succeeded()", "jobAuthorizationScope": "projectCollection", "name": "Agent job 1", "refName": "Job_1", "target": { "allowScriptsAuthAccessOption": False, "executionOptions": { "type": 0 }, "type": 1 } } ], "type": 1 } AZ_DEFAULT_BUILD_DEF_QUEUE = { "id": 12, "name": "Hosted VS2017", "pool": { "id": 3, "is_hosted": True, "name": "Hosted VS2017" } } AZ_DEFAULT_BRANCH = "refs/heads/master"
nilq/baby-python
python
class JintaroException(Exception):
    """Base class for Jintaro exceptions"""


class ConfigError(JintaroException):
    """Base class for config exceptions"""


class UnknownOptionError(ConfigError):
    """Raised when an unknown config option is encountered"""


class ConfigValueError(ConfigError):
    """Raised when a config option has an invalid value"""


class InputListError(JintaroException):
    """Raised when the input list cannot be processed"""


class OutputError(JintaroException):
    """Raised when output cannot be written"""


class HookRunError(JintaroException):
    """Raised when a hook fails to run"""
nilq/baby-python
python
#! /usr/bin/env python

def read_file():
    """Opens the Project Euler names file. Reads names, sorts, and converts
    the str into a list object"""
    a = open('names.txt', 'r')
    data = a.read()
    names = data.split(",")
    a.close()
    names.sort()
    return names

def name_score():
    """Calculates the total name score of each name in the sorted file"""
    names = read_file()
    total = 0
    for i in range(len(names)):
        score = 0
        for letter in names[i]:
            if letter != '"':
                score += (ord(letter) - 64)
        score = score * (i + 1)
        total += score
    return total

if __name__ == "__main__":
    print(name_score())
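# Worked example from the Project Euler 22 problem statement: once the list is
# sorted, COLIN is the 938th name and is worth 3 + 15 + 12 + 9 + 14 = 53, so
# its name score is 938 * 53 = 49714. A quick self-contained check:
assert sum(ord(c) - 64 for c in "COLIN") * 938 == 49714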
nilq/baby-python
python
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six
from abc import ABCMeta, abstractmethod
import numpy as np

XYZ_ORDER = 0
ZYX_ORDER = 1
XYZT_ORDER = 2
TZYX_ORDER = 3


@six.add_metaclass(ABCMeta)
class ChunkProcessor(object):
    def __init__(self):
        """
        A class that implements a chunk processor which outputs ndarrays for uploading

        Args:
        """
        self.parameters = None

    @abstractmethod
    def setup(self, parameters):
        """
        Method to initialize the chunk processor based on custom parameters from the configuration file

        e.g. Connect to a database, etc.

        Args:
            parameters (dict): Parameters for the dataset to be processed

        Returns:
            None
        """
        return NotImplemented

    @abstractmethod
    def process(self, file_path, x_index, y_index, z_index):
        """
        Method to take a chunk indices and return an ndarray with the correct data

        Args:
            file_path(str): An absolute file path for the specified chunk
            x_index(int): The tile index in the X dimension
            y_index(int): The tile index in the Y dimension
            z_index(int): The tile index in the Z dimension

        Returns:
            (np.ndarray, int): ndarray for the specified chunk, array order (XYZ_ORDER, TZYX_ORDER, etc)
        """
        return NotImplemented
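# A minimal concrete subclass, added here as an illustrative sketch only (the
# chunk shape and dtype are assumptions): it shows how the setup()/process()
# contract described in the docstrings above is meant to be satisfied.
class ZeroChunkProcessor(ChunkProcessor):
    def setup(self, parameters):
        # e.g. record per-dataset parameters from the ingest configuration
        self.parameters = parameters

    def process(self, file_path, x_index, y_index, z_index):
        # return an empty 16x512x512 chunk, flagged as Z-Y-X ordered
        return np.zeros((16, 512, 512), dtype=np.uint8), ZYX_ORDER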
nilq/baby-python
python
#!/usr/bin/env python

# J-Y Peterschmitt - LSCE - 09/2011 - pmip2web@lsce.ipsl.fr

# Test the use of hatches and patterns in the isofill
# and fill area graphics methods

# Import some standard modules
from os import path

# Import what we need from CDAT
import cdms2
import vcs

# Some data we can plot from the 'sample_data' directory
# supplied with CDAT
data_file = 'tas_ccsr-95a_1979.01-1979.12.nc'
var_name = 'tas'
# data_file = 'clt.nc'
# var_name = 'clt'

# Zone that we want to plot
#
# NOTE: the (latmin, latmax, lonmin, lonmax) information HAS TO be the
# same in the variable, the 'isof' isofill method and the 2 'cont_*'
# continents plotting methods! Otherwise, the data will not match the
# continents that are plotted over it...
(latmin, latmax, lonmin, lonmax) = (-90, 90, -180, 180)

# Use black on white continents (nicer with black and white plots) i.e
# we plot a 'large' white continent outline over the data, and then a
# smaller 'black' continent outline
bw_cont = False
# bw_cont = True

# Read one time step (the first one) from the data file
# and explicitely specify the lat/lon range we need. cdms2
# will retrieve the data in the order we want, regardless of the way
# it is stored in the data file
f = cdms2.open(path.join(vcs.sample_data, data_file))
v = f(var_name, time=slice(0, 1),
      latitude=(latmin, latmax),
      longitude=(lonmin, lonmax, 'co'), squeeze=1)
# v = f(var_name)
f.close()

# Initialize the graphics canvas
x = vcs.init()
x.setantialiasing(0)
x.setcolormap("rainbow")

# Create the isofill method
isof = x.createboxfill('test_hatch')
isof.boxfill_type = "custom"
# isof.datawc(latmin, latmax, lonmin, lonmax)
# isof.levels = [220, 230, 240, 250, 260, 270, 280, 290, 300, 310, 320]
isof.levels = [290, 300]
isof.fillareastyle = 'hatch'
# isof.fillareacolors = [241, 241, 241, 241, 241]  # All black
isof.fillareacolors = [10, 20, 30, 40, 50, 60, 70, 80, 90, 99, 45]  # Colors
# isof.fillareacolors = [50]  # Colors
# isof.fillareacolors = [242, 242, 242, 242]  # Colors
# isof.fillareaindices = [1, 2, 12, 13, 5, 6, 7, 8, 9, 10, 11, 12]
isof.fillareaindices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# isof.fillareaindices = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
# isof.fillareaindices = [4]
# isof.fillareaindices = [16, 19, 3, 4, 1, 2, 3, 4]
isof.fillareaopacity = [60, 30, 55, 63, 100, 20, 40, 50, 80, 60]
# isof.fillareapixelspacing = [10, 10]
# isof.fillareapixelscale = 10.0

boxf = x.createboxfill('test_solid')
boxf.boxfill_type = "custom"
boxf.levels = [220, 230]
boxf.fillareastyle = "solid"
boxf.fillareacolors = [99]

# Define some graphics methods for plotting black on white continents
if bw_cont:
    cont_black = x.createcontinents('black')
    cont_black.datawc(latmin, latmax, lonmin, lonmax)
    cont_black.linecolor = 241
    cont_black.linewidth = 2

    cont_white = x.createcontinents('white')
    cont_white.datawc(latmin, latmax, lonmin, lonmax)
    cont_white.linecolor = 240
    cont_white.linewidth = 6

    cont_type = 0  # Do not plot the default continents
else:
    cont_type = 1

# Plot the test data
#
# We have to make sure the data and the continents are plotted at the
# same place ('data' area) on the canvas, by using the same template!
# It's even better if we can use for the continents a template that
# will only plot the data area (the priority of the other elements of
# the canvas is set to zero)
tpl = x.createtemplate('tpl', 'default')
# x.plot(v, boxf, tpl, continents=cont_type)
x.plot(tpl, isof, v, continents=cont_type)
if bw_cont:
    tpl_data = x.createtemplate('tpl_data', 'default_dud')  # plots only data area
    x.plot(tpl_data, cont_white)
    x.plot(tpl_data, cont_black)

# Create a test plot for listing all the hatches and patterns
style_list = []
index_list = []
col_cycle = [243, 248, 254, 252, 255]
nb_cols = len(col_cycle)
color_list = []
x_list = []
y_list = []
txt_x_list = []
txt_y_list = []
txt_str_list = []

# shear_x = .05
shear_x = .0

# for j, style in enumerate(['hatch']):
for j, style in enumerate(['hatch', 'pattern']):
    slide_y = j * .4
    for i in range(20):
        slide_x = i * 0.04
        x1, y1 = (.05 + slide_x, .25 + slide_y)
        x2, y2 = (.08 + slide_x, .45 + slide_y)

        # Add (sheared) rectangles to the list of positions
        # NOTE: no need to close the fill area. Giving 4 vertices
        # for getting a filled rectangle is enough
        x_list.append([x1, x2, x2 + shear_x, x1 + shear_x])
        y_list.append([y1, y1, y2, y2])
        style_list.append(style)
        # Hatches/Patterns indices have to be in 1-20 range
        index_list.append(i % 20 + 1)
        col_idx = col_cycle[i % nb_cols]
        color_list.append(20 + i * 10)

        # Annotations
        txt_x_list.append(x1 + 0.015)
        txt_y_list.append(y1 - 0.015)
        txt_str_list.append('%s = %i - Color = %i' % (style, i + 1, col_idx))

# Create the fill area and the text annotations
fill_test = x.createfillarea('fill_test')
fill_test.style = style_list
fill_test.index = index_list
fill_test.color = color_list
fill_test.x = x_list
fill_test.y = y_list
fill_test.pixelspacing = [10, 10]
fill_test.pixelscale = 10

fill_info = x.createtext('fill_info')
fill_info.angle = 45
fill_info.height = 12
fill_info.color = 241  # Black
fill_info.string = txt_str_list
fill_info.x = txt_x_list
fill_info.y = txt_y_list

# Create a title
plot_title = x.createtext('plot_title')
plot_title.height = 40
plot_title.string = ['Testing hatches and patterns in VCS/CDAT']
plot_title.x = [.01]
plot_title.y = [.9]

# # Initialize and use a second graphics canvas
# y = vcs.init()
# y.setcolormap("rainbow")
# y.plot(plot_title)
# y.plot(fill_test)
# y.plot(fill_info)

# Save the plots
x.interact()
x.pdf('test_fillarea', textAsPaths=False)
x.png('test_fillarea')
# y.pdf('test_fillarea_list', textAsPaths=False)
# y.png('test_fillarea_list')

# Note: depending on the version of CDAT, text may not resize
# correctly when creating a bigger png
# x.png('test_fillarea_big', width=3*11, height=3*8)
# y.png('test_fillarea_list_big', width=3*11, height=3*8)

# The end
nilq/baby-python
python
from thresher.scraper import Scraper
from thresher.query_share import QueryShare
import furl
import csv
import os
from slugify import slugify
import wget
import json

### Possibly convert this to docopt script in the future ###

class Thresher:
    # assumes that links is a list of dictionaries with the keys as a content-link and mimetype
    def create_manifest(self, directory, filename, content_items):
        print('---begin writing manifest file---')
        # if directory exists just catch error
        try:
            os.mkdir(directory)
        except:
            pass
        # get current directory
        working_directory = os.getcwd()
        try:
            os.chdir(directory)
            with open(filename, 'w') as csvfile:
                i = 0
                for content in content_items:
                    if i == 0:
                        fieldnames = content.keys()
                        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore')
                        writer.writeheader()
                    writer.writerow(content)
                    i = i + 1
        except:
            pass
        # change back to working directory
        os.chdir(working_directory)
        print('---done writing manifest file---')

    def write_json_file(self, directory, filename, json_obj):
        print('---begin writing json metadata file---')
        # if directory exists just catch error
        try:
            os.mkdir(directory)
        except:
            pass
        # get current directory
        working_directory = os.getcwd()
        try:
            os.chdir(directory)
            with open(filename, 'w') as outfile:
                json.dump(json_obj, outfile, ensure_ascii=False)
                outfile.close()
        except Exception as inst:
            print("had exception on write to json: ", inst)
            pass
        # change back to working directory
        os.chdir(working_directory)
        print('---done writing json metadata file---')

    def prepare_link_data(self, links):
        # converts link hash to list of dictionaries with content-type and mime-type as keys
        link_list = []
        for link in links:
            link_dict = {}
            link_dict['content-link'] = link
            link_dict['mime-type'] = links[link]
            link_list.append(link_dict)
        return link_list

    def create_data_folder(self, dir_name):
        # if exists just catch error
        try:
            os.mkdir(dir_name)
        except:
            pass

    def download_content_file(self, dir_name, url):
        working_directory = os.getcwd()
        filename = None
        # if directory exists just catch error
        try:
            os.mkdir(dir_name)
        except:
            pass
        try:
            os.chdir(dir_name)
            filename = wget.download(url)
        except Exception as inst:
            print("had exception on wget: ", inst)
            pass
        # reset directory
        os.chdir(working_directory)
        return filename

    def thresher(self):
        return

## End Thresher class

#TODO move link parameters to separate configuration file
SHARE_API = 'https://staging-share.osf.io/api/v2/search/creativeworks/_search'
PROD_SHARE_API = 'https://share.osf.io/api/v2/search/creativeworks/_search'

search_url = furl.furl(PROD_SHARE_API)
search_url.args['size'] = 20
#recent_results = requests.get(search_url.url).json()

query_share = QueryShare()
#recent_results = recent_results['hits']['hits']
affiliation_query = query_share.generate_institution_query()
affiliation_results = query_share.query_share(search_url.url, affiliation_query)
records = affiliation_results['hits']['hits']

print('The request URL is {}'.format(search_url.url))
print('----------')

scrape = Scraper()
thresh = Thresher()
i = 0

#create data folder
print("--------------creating data folder-----------")
thresh.create_data_folder("data")
os.chdir("data")

for result in records:
    i += 1
    print("---------------------------------")
    print(
        'Getting Content for Record {}: {} -- from {}'.format(
            i,
            result['_source']['title'],
            result['_source']['identifiers']
        )
    )
    title = result['_source']['title']
    links = {}
    for identifier in result['_source']['identifiers']:
        if "http" in identifier:
            print("Getting links for identifer: ", identifier)
            links = scrape.get_content_urls_from_html_page(identifier, "curate.nd.edu")
            print("Links Found are: ", links)
    if links:
        link_list = thresh.prepare_link_data(links)
        identifier_directory = slugify(title + "_" + identifier)
        filename = identifier_directory + ".csv"
        downloaded_link_list = []
        for link in link_list:
            content_filename = None
            try:
                print("downloading file from: ", link['content-link'])
                content_filename = thresh.download_content_file(identifier_directory, link['content-link'])
                print(" downloaded file: ", content_filename)
            except:
                content_filename = None
            if content_filename is None:
                content_filename = "Failed to download"
            link['filename'] = content_filename
            downloaded_link_list.append(link)
        thresh.create_manifest(identifier_directory, filename, downloaded_link_list)
        #write json file
        thresh.write_json_file(identifier_directory, identifier_directory + ".json", result['_source'])

#TODO write JSON SHARE record to directory
#could use python wget module, but will just call wget at command line for now
#create folder for the record
#write out the json record file
#write a manifest of files to be downloaded
#write each file

# call query_share
# get list of records
# grab identifiers from records
# get content links for each record
# download content for each record
nilq/baby-python
python
from hknweb.academics.views.base_viewset import AcademicEntityViewSet

from hknweb.academics.models import Instructor
from hknweb.academics.serializers import InstructorSerializer


class InstructorViewSet(AcademicEntityViewSet):
    queryset = Instructor.objects.all()
    serializer_class = InstructorSerializer
nilq/baby-python
python
import sys

if len(sys.argv) == 2:
    print("hello, {}".format(sys.argv[1]))
    #print("hello," + (sys.argv[1]))
else:
    print("hello world")
nilq/baby-python
python
import pydot

# I like to use the full path for the image as it seems less error prone.
# Therefore, first we find the current path of this file and use that to
# locate the image - assuming the image is in the same folder as this file.
import pathlib
current_path = pathlib.Path(__file__).parent.resolve()

# Create the graph
graph = pydot.Dot("my_graph", graph_type="graph", overlap=False, splines='true')

"""
Use labelloc argument.
'b' = bottom
't' = top
Images must be .png format.
"""
graph.add_node(pydot.Node("Node 1", image=(str(current_path) + "/" + "Flower.png"), labelloc="b"))
graph.add_node(pydot.Node("Node 2"))

# Add edge
graph.add_edge(pydot.Edge("Node 1", "Node 2"))

# Save the output
graph.write_png("AddNodeImage.png")
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Copyright [2020] [Sinisa Seslak (seslaks@gmail.com)]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

---

Ratios file for CredPy package (https://github.com/seslak/CredPy)

@author: Sinisa Seslak
"""

# Ratios
def getratio(dataset, ratio, c, days=365):
    """
    These are liquidity ratios.

    Currently available: current, quick, cashr, nwc, cashta, salestor,
    dayssales, costsales, ctr

    If you plan to commit, please follow this structure.
    """
    if ratio == "current":  # Current ratio
        return dataset['tsta'] / dataset['tso']

    if ratio == "quick":  # Quick/Acid ratio
        return (dataset['tsta'] - dataset['inventory']) / dataset['tso']

    if ratio == "cashr":  # Cash ratio
        return dataset['cash'] / (dataset['tso'] + dataset['ltloansyear'] + dataset['otherstobl'] + dataset['ltloans'] + dataset['otherltobl'])

    if ratio == "nwc":  # Net-working capital
        return dataset['tsta'] - dataset['tso']

    if ratio == "cashta":  # Cash to assets ratio
        return dataset['cash'] / dataset['ta']

    if ratio == "salestor":  # Sales to receivables (or turnover ratio)
        return dataset['revenues'] / dataset['receivables']

    if ratio == "dayssales":  # Days sales outstanding
        return dataset['receivables'] / dataset['revenues'] * days

    if ratio == "costsales":  # Cost of sales
        return (dataset['cogs'] + dataset['gna'] + dataset['salaries']) / dataset['receivables']

    if ratio == "ctr":  # Cash turnover
        return dataset['revenues'] / dataset['cash']

    """
    These are leverage ratios.

    Currently available: debtequ, debt, fatonw, ebitint, earnings, equityr

    If you plan to commit, please follow this structure.
    """
    if ratio == "debtequ":  # Debt to equity ratio
        return dataset['tli'] / dataset['paidincap']

    if ratio == "debt":  # Debt ratio
        return dataset['tli'] / dataset['ta']

    if ratio == "fatonw":  # Fixed-assets to net-worth
        from errors import error
        error("fatonw")
        return (dataset['equipment'] + dataset['buildings'] + dataset['land'] - dataset['amortization'] * 2) / (dataset['equipment'] + dataset['buildings'] + dataset['land'] - dataset['tli'])

    if ratio == "ebitint":  # Interest coverage
        return dataset['ebit'] / dataset['interest']

    if ratio == "earnings":  # Retained earnings ratio compared to equity
        return dataset['retainedear'] / dataset['equity']

    if ratio == "equityr":  # Equity ratio
        return dataset['equity'] / dataset['ta']

    """
    These are efficiency ratios.

    Currently available: invtr, invhp, invta, acctr, acccp, dpo

    If you plan to commit, please follow this structure.
    """
    if ratio == "invtr":  # Inventory turnover
        return dataset['revenues'] / dataset['inventory']

    if ratio == "invhp":  # Inventory holding period
        return days / dataset['revenues'] / dataset['inventory']

    if ratio == "invta":  # Inventory to assets ratio
        return days / dataset['inventory'] / dataset['ta']

    if ratio == "acctr":  # Accounts receivable turnover
        return dataset['revenues'] / dataset['receivables']

    if ratio == "acccp":  # Accounts receivable collection period
        return days / dataset['revenues'] / dataset['receivables']

    if ratio == "dpo":  # Days payable outstanding
        return dataset['payables'] / dataset['cogs'] * days
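# A minimal usage sketch, added for illustration (it assumes `dataset` is a
# pandas DataFrame keyed by the balance-sheet columns above, and that the
# unused `c` argument can simply be passed as None; the figures are made up):
if __name__ == "__main__":
    import pandas as pd

    df = pd.DataFrame({"tsta": [120.0], "tso": [80.0], "inventory": [30.0]})
    print(getratio(df, "current", None))  # current ratio: 1.5
    print(getratio(df, "quick", None))    # quick ratio: 1.125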
nilq/baby-python
python
import numpy as np

class Kalman(object):
    DIMENSIONS = 3
    MEASUREMENT = 1

    def __init__(self, q, r):
        # initialise
        self.Q = np.matrix(np.eye(Kalman.DIMENSIONS) * q)
        self.R = np.matrix(np.eye(Kalman.MEASUREMENT) * r)
        self.H = np.matrix(np.zeros((Kalman.MEASUREMENT, Kalman.DIMENSIONS)))
        for i in range(Kalman.MEASUREMENT):
            self.H[i, i] = 1.0
        # state
        self.x = np.matrix(np.zeros((Kalman.DIMENSIONS, 1)))
        self.P = np.matrix(np.eye(Kalman.DIMENSIONS))

    def make_A(self, dt):
        A = np.matrix(np.eye(Kalman.DIMENSIONS))
        for i in range(Kalman.MEASUREMENT):
            A[i, Kalman.MEASUREMENT + i] = dt
        return A

    def predict(self, dt):
        A = self.make_A(dt)
        x = A * self.x
        P = A * self.P * A.T + self.Q
        return x, P

    def update(self, z, dt):
        x_p, P_p = self.predict(dt)
        K = P_p * self.H.T * (self.H * P_p * self.H.T + self.R).I
        self.x = x_p + K * (z - self.H * x_p)
        self.P = (np.matrix(np.eye(Kalman.DIMENSIONS)) - K * self.H) * P_p
        return self.position(), self.velocity()

    def update_without_measurement(self, dt):
        self.x, self.P = self.predict(dt)
        return self.position(), self.velocity()

    def position(self):
        return self.x[0:Kalman.MEASUREMENT]

    def velocity(self):
        return self.x[Kalman.MEASUREMENT:2 * Kalman.MEASUREMENT]

if __name__ == "__main__":
    """ Kalman Filter Test """
    k = Kalman(1, 0.2)
    for x in range(10):
        # z must have MEASUREMENT entries; the original passed a 3-element
        # measurement to a filter configured with MEASUREMENT = 1, which
        # breaks the matrix shapes in update()
        k.update([1.0], 0.1)
        print(x, " Pos: ", k.position())
        print(x, " Velo: ", k.velocity())
        print(25 * "=")
# Databricks notebook source
# MAGIC %run ./_databricks-academy-helper $lesson="dlt_demo"

# COMMAND ----------

try:
    dbutils.fs.unmount("/mnt/training")
except Exception:
    pass

# %run ./mount-datasets

# COMMAND ----------

class DataFactory:
    def __init__(self):
        self.source = f"{DA.paths.data_source}/tracker/streaming/"
        self.userdir = DA.paths.data_landing_location
        try:
            self.curr_mo = 1 + int(max([x[1].split(".")[0] for x in dbutils.fs.ls(self.userdir)]))
        except Exception:
            self.curr_mo = 1

    def load(self, continuous=False):
        if self.curr_mo > 12:
            print("Data source exhausted\n")
        elif continuous:
            while self.curr_mo <= 12:
                curr_file = f"{self.curr_mo:02}.json"
                target_dir = f"{self.userdir}/{curr_file}"
                print(f"Loading file {curr_file} to {target_dir}")
                dbutils.fs.cp(f"{self.source}/{curr_file}", target_dir)
                self.curr_mo += 1
        else:
            curr_file = f"{self.curr_mo:02}.json"
            target_dir = f"{self.userdir}/{curr_file}"
            print(f"Loading file {curr_file} to {target_dir}")
            dbutils.fs.cp(f"{self.source}/{curr_file}", target_dir)
            self.curr_mo += 1

# COMMAND ----------

# DA.cleanup()
DA.init()

DA.paths.data_source = "/mnt/training/healthcare"
DA.paths.storage_location = f"{DA.paths.working_dir}/storage"
DA.paths.data_landing_location = f"{DA.paths.working_dir}/source/tracker"

# bronzePath = f"{DA.paths.working_dir}/bronze"
# recordingsParsedPath = f"{DA.paths.working_dir}/silver/recordings_parsed"
# recordingsEnrichedPath = f"{DA.paths.working_dir}/silver/recordings_enriched"
# dailyAvgPath = f"{DA.paths.working_dir}/gold/daily_avg"

# checkpointPath = f"{DA.paths.working_dir}/checkpoints"
# bronzeCheckpoint = f"{DA.paths.checkpoints}/bronze"
# recordingsParsedCheckpoint = f"{DA.paths.checkpoints}/recordings_parsed"
# recordingsEnrichedCheckpoint = f"{DA.paths.checkpoints}/recordings_enriched"
# dailyAvgCheckpoint = f"{DA.paths.checkpoints}/dailyAvgPath"

DA.data_factory = DataFactory()
DA.conclude_setup()

# sqlContext.setConf("spark.sql.shuffle.partitions", spark.sparkContext.defaultParallelism)
r""" bilibili_api.live 直播相关 """ import time from enum import Enum import logging import json import struct import base64 import asyncio from typing import List import aiohttp import brotli from aiohttp.client_ws import ClientWebSocketResponse from .utils.Credential import Credential from .utils.network import get_session, request from .utils.utils import get_api from .utils.Danmaku import Danmaku from .utils.AsyncEvent import AsyncEvent from .exceptions.LiveException import LiveException API = get_api("live") class ScreenResolution(Enum): """ 直播源清晰度。 清晰度编号,4K 20000,原画 10000,蓝光(杜比)401,蓝光 400,超清 250,高清 150,流畅 80 + FOUR_K : 4K。 + ORIGINAL : 原画。 + BLU_RAY_DOLBY : 蓝光(杜比)。 + BLU_RAY : 蓝光。 + ULTRA_HD : 超清。 + HD : 高清。 + FLUENCY : 流畅。 """ FOUR_K = 20000 ORIGINAL = 10000 BLU_RAY_DOLBY = 401 BLU_RAY = 400 ULTRA_HD = 250 HD = 150 FLUENCY = 80 class LiveProtocol(Enum): """ 直播源流协议。 流协议,0 为 FLV 流,1 为 HLS 流。默认:0,1 + FLV : 0。 + HLS : 1。 + DEFAULT : 0,1 """ FLV = 0 HLS = 1 DEFAULT = '0,1' class LiveFormat(Enum): """ 直播源容器格式 容器格式,0 为 flv 格式;1 为 ts 格式(仅限 hls 流);2 为 fmp4 格式(仅限 hls 流)。默认:0,2 + FLV : 0。 + TS : 1。 + FMP4 : 2。 + DEFAULT : 2。 """ FLV = 0 TS = 1 FMP4 = 2 DEFAULT = '0,1,2' class LiveCodec(Enum): """ 直播源视频编码 视频编码,0 为 avc 编码,1 为 hevc 编码。默认:0,1 + AVC : 0。 + HEVC : 1。 + DEFAULT : 0,1。 """ AVC = 0 HEVC = 1 DEFAULT = '0,1' class LiveRoom: """ 直播类,获取各种直播间的操作均在里边。 """ def __init__(self, room_display_id: int, credential: Credential = None): """ Args: room_display_id (int) : 房间展示 ID(即 URL 中的 ID) credential (Credential, optional): 凭据. Defaults to None. """ self.room_display_id = room_display_id if credential is None: self.credential = Credential() else: self.credential = credential self.__ruid = None async def get_room_play_info(self): """ 获取房间信息(真实房间号,封禁情况等) Returns: API 调用返回结果 """ api = API["info"]["room_play_info"] params = { "room_id": self.room_display_id, } resp = await request(api['method'], api['url'], params=params, credential=self.credential) # 缓存真实房间 ID self.__ruid = resp['uid'] return resp async def __get_ruid(self): """ 获取真实房间 ID,若有缓存则使用缓存 """ if self.__ruid is None: await self.get_room_play_info() return self.__ruid async def get_chat_conf(self): """ 获取聊天弹幕服务器配置信息(websocket) """ api = API["info"]["chat_conf"] params = { "room_id": self.room_display_id } return await request(api['method'], api["url"], params, credential=self.credential) async def get_room_info(self): """ 获取直播间信息(标题,简介等) """ api = API["info"]["room_info"] params = { "room_id": self.room_display_id } return await request(api['method'], api["url"], params, credential=self.credential) async def get_user_info_in_room(self): """ 获取自己在直播间的信息(粉丝勋章等级,直播用户等级等) """ self.credential.raise_for_no_sessdata() api = API["info"]["user_info_in_room"] params = { "room_id": self.room_display_id } return await request(api['method'], api["url"], params, credential=self.credential) async def get_dahanghai(self, page: int = 1): """ 获取大航海列表 Args: page (int, optional): 页码. Defaults to 1 """ api = API["info"]["dahanghai"] params = { "roomid": self.room_display_id, "ruid": await self.__get_ruid(), "page_size": 30, "page": page } return await request(api['method'], api["url"], params, credential=self.credential) async def get_gaonengbang(self, page: int = 1): """ 获取高能榜列表 Args: page (int, optional): 页码. 
Defaults to 1 """ api = API["info"]["gaonengbang"] params = { "roomId": self.room_display_id, "ruid": await self.__get_ruid(), "pageSize": 50, "page": page } return await request(api['method'], api["url"], params, credential=self.credential) async def get_seven_rank(self): """ 获取七日榜 """ api = API["info"]["seven_rank"] params = { "roomid": self.room_display_id, "ruid": await self.__get_ruid(), } return await request(api['method'], api["url"], params, credential=self.credential) async def get_fans_medal_rank(self): """ 获取粉丝勋章排行 """ api = API["info"]["fans_medal_rank"] params = { "roomid": self.room_display_id, "ruid": await self.__get_ruid() } return await request(api['method'], api["url"], params, credential=self.credential) async def get_black_list(self): """ 获取黑名单列表 Args: page (int, optional): 页码. Defaults to 1 """ api = API["info"]["black_list"] params = { "room_id": self.room_display_id, "ps": 1 } return await request(api['method'], api["url"], params, credential=self.credential) async def get_room_play_url(self, screen_resolution: ScreenResolution = ScreenResolution.ORIGINAL): """ 获取房间直播流列表 Args: screen_resolution (ScreenResolution, optional): 清晰度. Defaults to ScreenResolution.ORIGINAL """ api = API["info"]["room_play_url"] params = { "cid": self.room_display_id, "platform": "web", "qn": screen_resolution.value, "https_url_req": "1", "ptype": "16" } return await request(api['method'], api["url"], params, credential=self.credential) async def get_room_play_info_v2(self, live_protocol: LiveProtocol = LiveProtocol.DEFAULT, live_format: LiveFormat = LiveFormat.DEFAULT, live_codec: LiveCodec = LiveCodec.DEFAULT, live_qn: ScreenResolution = ScreenResolution.ORIGINAL): """ 获取房间信息及可用清晰度列表 Args: live_protocol (LiveProtocol, optional) : 直播源流协议. Defaults to LiveProtocol.DEFAULT. live_format (LiveFormat, optional) : 直播源容器格式. Defaults to LiveFormat.DEFAULT. live_codec (LiveCodec, optional) : 直播源视频编码. Defaults to LiveCodec.DEFAULT. live_qn (ScreenResolution, optional): 直播源清晰度. Defaults to ScreenResolution.ORIGINAL. 
""" api = API["info"]["room_play_info_v2"] params = { "room_id": self.room_display_id, "platform": "web", "ptype": "16", "protocol": live_protocol.value, "format": live_format.value, "codec": live_codec.value, "qn": live_qn.value } return await request(api['method'], api['url'], params=params, credential=self.credential) async def ban_user(self, uid: int): """ 封禁用户 Args: uid (int): 用户 UID """ self.credential.raise_for_no_sessdata() api = API["operate"]["add_block"] data = { "room_id": self.room_display_id, "tuid": uid, "mobile_app": "web", "visit_id": "" } return await request(api['method'], api["url"], data=data, credential=self.credential) async def unban_user(self, block_id: int): """ 解封用户 Args: block_id (int): 封禁用户时会返回该封禁事件的 ID,使用该值 """ self.credential.raise_for_no_sessdata() api = API["operate"]["del_block"] data = { "roomid": self.room_display_id, "id": block_id, "visit_id": "", } return await request(api['method'], api["url"], data=data, credential=self.credential) async def send_danmaku(self, danmaku: Danmaku): """ 直播间发送弹幕 Args: danmaku (Danmaku): 弹幕类 """ self.credential.raise_for_no_sessdata() api = API["operate"]["send_danmaku"] data = { "mode": danmaku.mode.value, "msg": danmaku.text, "roomid": self.room_display_id, "bubble": 0, "rnd": int(time.time()), "color": int(danmaku.color, 16), "fontsize": danmaku.font_size.value } return await request(api['method'], api["url"], data=data, credential=self.credential) async def sign_up_dahanghai(self, task_id: int = 1447): """ 大航海签到 Args: task_id (int, optional): 签到任务 ID. Defaults to 1447 """ self.credential.raise_for_no_sessdata() self.credential.raise_for_no_bili_jct() api = API["operate"]["sign_up_dahanghai"] data = { "task_id": task_id, "uid": await self.__get_ruid(), } return await request(api['method'], api["url"], data=data, credential=self.credential) async def send_gift_from_bag(self, uid: int, bag_id: int, gift_id: int, gift_num: int, storm_beat_id: int = 0, price: int = 0): """ 赠送包裹中的礼物,获取包裹信息可以使用 get_self_bag 方法 Args: uid (int) : 赠送用户的 UID bag_id (int) : 礼物背包 ID gift_id (int) : 礼物 ID gift_num (int) : 礼物数量 storm_beat_id (int, optional) : 未知, Defaults to 0 price (int, optional) : 礼物单价,Defaults to 0 """ self.credential.raise_for_no_sessdata() self.credential.raise_for_no_bili_jct() api = API["operate"]["send_gift_from_bag"] data = { "uid": uid, "bag_id": bag_id, "gift_id": gift_id, "gift_num": gift_num, "platform": "pc", "send_ruid": 0, "storm_beat_id": storm_beat_id, "price": price, "biz_code": "live", "biz_id": self.room_display_id, "ruid": await self.__get_ruid(), } return await request(api['method'], api["url"], data=data, credential=self.credential) async def receive_reward(self, receive_type: int = 2): """ 领取自己在某个直播间的航海日志奖励 Args: receive_type (int) : 领取类型,Defaults to 2 """ self.credential.raise_for_no_sessdata() api = API["operate"]["receive_reward"] data = { "ruid": await self.__get_ruid(), "receive_type": receive_type, } return await request(api['method'], api["url"], data=data, credential=self.credential) async def get_general_info(self, act_id: int = 100061): """ 获取自己在该房间的大航海信息, 比如是否开通, 等级等 Args: act_id (int, optional) : 未知,Defaults to 100061 """ self.credential.raise_for_no_sessdata() api = API["info"]["general_info"] params = { "actId": act_id, "roomId": self.room_display_id, "uid": await self.__get_ruid() } return await request(api['method'], api["url"], params=params, credential=self.credential) async def get_gift_common(self): """ 获取当前直播间内的普通礼物列表 """ api_room_info = API["info"]["room_info"] params_room_info = { 
"room_id": self.room_display_id, } res_room_info = await request(api_room_info['method'], api_room_info["url"], params=params_room_info, credential=self.credential) area_id, area_parent_id = res_room_info["room_info"]["area_id"], res_room_info["room_info"]["parent_area_id"] api = API["info"]["gift_common"] params = { "room_id": self.room_display_id, "area_id": area_id, "area_parent_id": area_parent_id, "platform": "pc", "source": "live" } return await request(api['method'], api["url"], params=params, credential=self.credential) async def get_gift_special(self, tab_id: int): """ 获取当前直播间内的特殊礼物列表 Args: tab_id (int) : 2:特权礼物,3:定制礼物 """ api_room_info = API["info"]["room_info"] params_room_info = { "room_id": self.room_display_id, } res_room_info = await request(api_room_info['method'], api_room_info["url"], params=params_room_info, credential=self.credential) area_id, area_parent_id = res_room_info["room_info"]["area_id"], res_room_info["room_info"]["parent_area_id"] api = API["info"]["gift_special"] params = { "tab_id": tab_id, "area_id": area_id, "area_parent_id": area_parent_id, "room_id": await self.__get_ruid(), "source": "live", "platform": "pc", "build": 1 } return await request(api['method'], api["url"], params=params, credential=self.credential) async def send_gift_gold(self, uid: int, gift_id: int, gift_num: int, price: int, storm_beat_id: int = 0): """ 赠送金瓜子礼物 Args: uid (int) : 赠送用户的 UID gift_id (int) : 礼物 ID (可以通过 get_gift_common 或 get_gift_special 或 get_gift_config 获取) gift_num (int) : 赠送礼物数量 price (int) : 礼物单价 storm_beat_id (int, Optional): 未知,Defaults to 0 """ self.credential.raise_for_no_sessdata() self.credential.raise_for_no_bili_jct() api = API["operate"]["send_gift_gold"] data = { "uid": uid, "gift_id": gift_id, "gift_num": gift_num, "price": price, "ruid": await self.__get_ruid(), "biz_code": "live", "biz_id": self.room_display_id, "platform": "pc", "storm_beat_id": storm_beat_id, "send_ruid": 0, "coin_type": "gold", "bag_id": "0", "rnd": int(time.time()), "visit_id": "" } return await request(api['method'], api["url"], data=data, credential=self.credential) async def send_gift_silver(self, uid: int, gift_id: int, gift_num: int, price: int, storm_beat_id: int = 0,): """ 赠送银瓜子礼物 Args: uid (int) : 赠送用户的 UID gift_id (int) : 礼物 ID (可以通过 get_gift_common 或 get_gift_special 或 get_gift_config 获取) gift_num (int) : 赠送礼物数量 price (int) : 礼物单价 storm_beat_id (int, Optional): 未知, Defaults to 0 """ self.credential.raise_for_no_sessdata() self.credential.raise_for_no_bili_jct() api = API["operate"]["send_gift_silver"] data = { "uid": uid, "gift_id": gift_id, "gift_num": gift_num, "price": price, "ruid": await self.__get_ruid(), "biz_code": "live", "biz_id": self.room_display_id, "platform": "pc", "storm_beat_id": storm_beat_id, "send_ruid": 0, "coin_type": "silver", "bag_id": 0, "rnd": int(time.time()), "visit_id": "" } return await request(api['method'], api["url"], data=data, credential=self.credential) class LiveDanmaku(AsyncEvent): """ Websocket 实时获取直播弹幕 Events: + DANMU_MSG: 用户发送弹幕 + SEND_GIFT: 礼物 + COMBO_SEND:礼物连击 + GUARD_BUY:续费大航海 + SUPER_CHAT_MESSAGE:醒目留言(SC) + SUPER_CHAT_MESSAGE_JPN:醒目留言(带日语翻译?) 
+ WELCOME: 老爷进入房间 + WELCOME_GUARD: 房管进入房间 + NOTICE_MSG: 系统通知(全频道广播之类的) + PREPARING: 直播准备中 + LIVE: 直播开始 + ROOM_REAL_TIME_MESSAGE_UPDATE: 粉丝数等更新 + ENTRY_EFFECT: 进场特效 + ROOM_RANK: 房间排名更新 + INTERACT_WORD: 用户进入直播间 + ACTIVITY_BANNER_UPDATE_V2: 好像是房间名旁边那个 xx 小时榜 + =========================== + 本模块自定义事件: + ========================== + VIEW: 直播间人气更新 + ALL: 所有事件 + DISCONNECT: 断开连接(传入连接状态码参数) + TIMEOUT: 心跳响应超时 + VERIFICATION_SUCCESSFUL: 认证成功 """ PROTOCOL_VERSION_RAW_JSON = 0 PROTOCOL_VERSION_HEARTBEAT = 1 PROTOCOL_VERSION_BROTLI_JSON = 3 DATAPACK_TYPE_HEARTBEAT = 2 DATAPACK_TYPE_HEARTBEAT_RESPONSE = 3 DATAPACK_TYPE_NOTICE = 5 DATAPACK_TYPE_VERIFY = 7 DATAPACK_TYPE_VERIFY_SUCCESS_RESPONSE = 8 STATUS_INIT = 0 STATUS_CONNECTING = 1 STATUS_ESTABLISHED = 2 STATUS_CLOSING = 3 STATUS_CLOSED = 4 STATUS_ERROR = 5 def __init__(self, room_display_id: int, debug: bool = False, credential: Credential = None, max_retry: int = 5, retry_after: float = 1): """ Args: room_display_id (int) : 房间展示 ID debug (bool, optional) : 调试模式,将输出更多信息。. Defaults to False. credential (Credential, optional): 凭据. Defaults to None. max_retry (int, optional) : 连接出错后最大重试次数. Defaults to 5 retry_after (int, optional) : 连接出错后重试间隔时间(秒). Defaults to 1 """ super().__init__() self.credential = credential if credential is not None else Credential() self.room_display_id = room_display_id self.max_retry = max_retry self.retry_after = retry_after self.__room_real_id = None self.__status = 0 self.__ws = None self.__tasks = [] self.__debug = debug self.__heartbeat_timer = 30.0 self.err_reason = "" # logging self.logger = logging.getLogger(f"LiveDanmaku_{self.room_display_id}") self.logger.setLevel(logging.DEBUG if debug else logging.INFO) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter( "[" + str(room_display_id) + "][%(asctime)s][%(levelname)s] %(message)s")) self.logger.addHandler(handler) def get_status(self): """ 获取连接状态 Returns: int: 0 初始化,1 连接建立中,2 已连接,3 断开连接中,4 已断开,5 错误 """ return self.__status async def connect(self): """ 连接直播间 """ if self.get_status() == self.STATUS_CONNECTING: raise LiveException('正在建立连接中') if self.get_status() == self.STATUS_ESTABLISHED: raise LiveException('连接已建立,不可重复调用') if self.get_status() == self.STATUS_CLOSING: raise LiveException('正在关闭连接,不可调用') await self.__main() async def disconnect(self): """ 断开连接 """ if self.get_status() != self.STATUS_ESTABLISHED: raise LiveException('尚未连接服务器') self.__status = self.STATUS_CLOSING self.logger.info('连接正在关闭') # 取消所有任务 while len(self.__tasks) > 0: self.__tasks.pop().cancel() self.__status = self.STATUS_CLOSED await self.__ws.close() self.logger.info('连接已关闭') async def __main(self): """ 入口 """ self.__status == self.STATUS_CONNECTING room = LiveRoom(self.room_display_id, self.credential) self.logger.info(f'准备连接直播间 {self.room_display_id}') # 获取真实房间号 self.logger.debug("正在获取真实房间号") self.__room_real_id = (await room.get_room_play_info())["room_id"] self.logger.debug(f"获取成功,真实房间号:{self.__room_real_id}") # 获取直播服务器配置 self.logger.debug("正在获取聊天服务器配置") conf = await room.get_chat_conf() self.logger.debug("聊天服务器配置获取成功") # 连接直播间 self.logger.debug("准备连接直播间") session = get_session() available_hosts: List[dict] = conf["host_server_list"] retry = self.max_retry host = None @self.on('TIMEOUT') async def on_timeout(ev): # 连接超时 self.err_reason = '心跳响应超时' await self.__ws.close() while True: self.err_reason = '' # 重置心跳计时器 self.__heartbeat_timer = 0 if not available_hosts: self.err_reason = '已尝试所有主机但仍无法连接' break if host is None or retry <= 0: host = available_hosts.pop() retry = 
self.max_retry port = host['wss_port'] protocol = "wss" uri = f"{protocol}://{host['host']}:{port}/sub" self.__status == self.STATUS_CONNECTING self.logger.info(f"正在尝试连接主机: {uri}") try: async with session.ws_connect(uri) as ws: @self.on('VERIFICATION_SUCCESSFUL') async def on_verification_successful(data): # 新建心跳任务 self.__tasks.append( asyncio.create_task(self.__heartbeat(ws))) self.__ws = ws self.logger.debug(f"连接主机成功, 准备发送认证信息") await self.__send_verify_data(ws, conf['token']) async for msg in ws: if msg.type == aiohttp.WSMsgType.BINARY: self.logger.debug(f'收到原始数据:{msg.data}') await self.__handle_data(msg.data) elif msg.type == aiohttp.WSMsgType.ERROR: self.__status = self.STATUS_ERROR self.logger.error('出现错误') elif msg.type == aiohttp.WSMsgType.CLOSING: self.logger.debug('连接正在关闭') self.__status = self.STATUS_CLOSING elif msg.type == aiohttp.WSMsgType.CLOSED: self.logger.info('连接已关闭') self.__status = self.STATUS_CLOSED # 正常断开情况下跳出循环 if self.__status != self.STATUS_CLOSED or self.err_reason: # 非用户手动调用关闭,触发重连 raise LiveException( '非正常关闭连接' if not self.err_reason else self.err_reason) else: break except Exception as e: self.logger.exception(e) if retry <= 0 or len(available_hosts) == 0: self.logger.error('无法连接服务器') self.err_reason = '无法连接服务器' break self.logger.warning(f'将在 {self.retry_after} 秒后重新连接...') self.__status = self.STATUS_ERROR retry -= 1 await asyncio.sleep(self.retry_after) async def __handle_data(self, data): """ 处理数据 """ data = self.__unpack(data) self.logger.debug(f"收到信息:{data}") for info in data: callback_info = { 'room_display_id': self.room_display_id, 'room_real_id': self.__room_real_id } # 依次处理并调用用户指定函数 if info["datapack_type"] == LiveDanmaku.DATAPACK_TYPE_VERIFY_SUCCESS_RESPONSE: # 认证反馈 if info["data"]["code"] == 0: # 认证成功反馈 self.logger.info("连接服务器并认证成功") self.__status = self.STATUS_ESTABLISHED callback_info['type'] = 'VERIFICATION_SUCCESSFUL' callback_info['data'] = None self.dispatch('VERIFICATION_SUCCESSFUL', callback_info) self.dispatch('ALL', callback_info) elif info["datapack_type"] == LiveDanmaku.DATAPACK_TYPE_HEARTBEAT_RESPONSE: # 心跳包反馈,返回直播间人气 self.logger.debug("收到心跳包反馈") # 重置心跳计时器 self.__heartbeat_timer = 30.0 callback_info["type"] = 'VIEW' callback_info["data"] = info["data"]["view"] self.dispatch('VIEW', callback_info) self.dispatch('ALL', callback_info) elif info["datapack_type"] == LiveDanmaku.DATAPACK_TYPE_NOTICE: # 直播间弹幕、礼物等信息 callback_info["type"] = info["data"]["cmd"] # DANMU_MSG 事件名特殊:DANMU_MSG:4:0:2:2:2:0,需取出事件名,暂不知格式 if callback_info["type"].find('DANMU_MSG') > -1: callback_info["type"] = 'DANMU_MSG' info["data"]["cmd"] = 'DANMU_MSG' callback_info["data"] = info["data"] self.dispatch(callback_info["type"], callback_info) self.dispatch('ALL', callback_info) else: self.logger.warning("检测到未知的数据包类型,无法处理") async def __send_verify_data(self, ws: ClientWebSocketResponse, token: str): verifyData = {"uid": 0, "roomid": self.__room_real_id, "protover": 3, "platform": "web", "type": 2, "key": token} data = json.dumps(verifyData).encode() await self.__send(data, self.PROTOCOL_VERSION_HEARTBEAT, self.DATAPACK_TYPE_VERIFY, ws) async def __heartbeat(self, ws: ClientWebSocketResponse): """ 定时发送心跳包 """ HEARTBEAT = self.__pack(b'[object Object]', self.PROTOCOL_VERSION_HEARTBEAT, self.DATAPACK_TYPE_HEARTBEAT) while True: if self.__heartbeat_timer == 0: self.logger.debug("发送心跳包") await ws.send_bytes(HEARTBEAT) elif self.__heartbeat_timer <= -30: # 视为已异常断开连接,发布 TIMEOUT 事件 self.dispatch('TIMEOUT') break await asyncio.sleep(1.0) self.__heartbeat_timer -= 1 async def 
__send(self, data: bytes, protocol_version: int, datapack_type: int, ws: ClientWebSocketResponse): """ 自动打包并发送数据 """ data = self.__pack(data, protocol_version, datapack_type) self.logger.debug(f'发送原始数据:{data}') await ws.send_bytes(data) @staticmethod def __pack(data: bytes, protocol_version: int, datapack_type: int): """ 打包数据 """ sendData = bytearray() sendData += struct.pack(">H", 16) assert 0 <= protocol_version <= 2, LiveException("数据包协议版本错误,范围 0~2") sendData += struct.pack(">H", protocol_version) assert datapack_type in [2, 7], LiveException("数据包类型错误,可用类型:2, 7") sendData += struct.pack(">I", datapack_type) sendData += struct.pack(">I", 1) sendData += data sendData = struct.pack(">I", len(sendData) + 4) + sendData return bytes(sendData) @staticmethod def __unpack(data: bytes): """ 解包数据 """ ret = [] offset = 0 header = struct.unpack(">IHHII", data[:16]) if header[2] == LiveDanmaku.PROTOCOL_VERSION_BROTLI_JSON: realData = brotli.decompress(data[16:]) else: realData = data if header[2] == LiveDanmaku.PROTOCOL_VERSION_HEARTBEAT and header[3] == LiveDanmaku.DATAPACK_TYPE_HEARTBEAT_RESPONSE: realData = realData[16:] # 心跳包协议特殊处理 recvData = { "protocol_version": header[2], "datapack_type": header[3], "data": { "view": struct.unpack('>I', realData[0:4])[0] } } ret.append(recvData) return ret while offset < len(realData): header = struct.unpack(">IHHII", realData[offset:offset + 16]) length = header[0] recvData = { "protocol_version": header[2], "datapack_type": header[3], "data": None } chunkData = realData[(offset + 16):(offset + length)] if header[2] == 0: recvData["data"] = json.loads(chunkData.decode()) elif header[2] == 2: recvData["data"] = json.loads(chunkData.decode()) elif header[2] == 1: if header[3] == LiveDanmaku.DATAPACK_TYPE_HEARTBEAT_RESPONSE: recvData["data"] = { "view": struct.unpack(">I", chunkData)[0]} elif header[3] == LiveDanmaku.DATAPACK_TYPE_VERIFY_SUCCESS_RESPONSE: recvData["data"] = json.loads(chunkData.decode()) ret.append(recvData) offset += length return ret async def get_self_info(credential: Credential): """ 获取自己直播等级、排行等信息 """ credential.raise_for_no_sessdata() api = API["info"]["user_info"] return await request(api['method'], api["url"], credential=credential) async def get_self_live_info(credential: Credential): """ 获取自己的粉丝牌、大航海等信息 """ credential.raise_for_no_sessdata() api = API["info"]["live_info"] return await request(api['method'], api["url"], credential=credential) async def get_self_dahanghai_info(page: int = 1, page_size: int = 10, credential: Credential = None): """ 获取自己开通的大航海信息 Args: page (int, optional): 页数. Defaults to 1. page_size (int, optional): 每页数量. Defaults to 10. 总页数取得方法: ```python import math info = live.get_self_live_info(credential) pages = math.ceil(info['data']['guards'] / 10) ``` """ if credential is None: credential = Credential() credential.raise_for_no_sessdata() api = API["info"]["user_guards"] params = { "page": page, "page_size": page_size } return await request(api['method'], api["url"], params=params, credential=credential) async def get_self_bag(credential: Credential): """ 获取自己的直播礼物包裹信息 """ credential.raise_for_no_sessdata() api = API["info"]["bag_list"] return await request(api['method'], api["url"], credential=credential) async def get_gift_config(room_id: int = None, area_id: int = None, area_parent_id: int = None): """ 获取所有礼物的信息,包括礼物 id、名称、价格、等级等。 同时填了 room_id、area_id、area_parent_id,则返回一个较小的 json,只包含该房间、该子区域、父区域的礼物。 但即使限定了三个条件,仍然会返回约 1.5w 行的 json。不加限定则是 2.8w 行。 Args: room_id (int, optional) : 房间显示 ID. Defaults to None. 
area_id (int, optional) : 子分区 ID. Defaults to None. area_parent_id (int, optional) : 父分区 ID. Defaults to None. """ api = API["info"]["gift_config"] params = { "platform": "pc", "source": "live", "room_id": room_id if room_id is not None else "", "area_id": area_id if area_id is not None else "", "area_parent_id": area_parent_id if area_parent_id is not None else "" } return await request(api['method'], api["url"], params=params) async def get_area_info(): """ 获取所有分区信息 """ api = API["info"]["area_info"] return await request(api['method'], api["url"]) async def get_live_followers_info(need_recommend: bool = True, credential: Credential = None): """ 获取关注列表中正在直播的直播间信息,包括房间直播热度,房间名称及标题,清晰度,是否官方认证等信息。 Args: need_recommend (bool, optional): 是否接受推荐直播间,Defaults to True """ if credential is None: credential = Credential() credential.raise_for_no_sessdata() api = API["info"]["followers_live_info"] params = { "need_recommend": int(need_recommend), "filterRule": 0 } return await request(api['method'], api["url"], params=params, credential=credential) async def get_unlive_followers_info(page: int = 1, page_size: int = 30, credential: Credential = None): """ 获取关注列表中未在直播的直播间信息,包括上次开播时间,上次开播的类别,直播间公告,是否有录播等。 Args: page (int, optional): 页码, Defaults to 1. page_size (int, optional): 每页数量 Defaults to 30. """ if credential is None: credential = Credential() credential.raise_for_no_sessdata() api = API["info"]["followers_unlive_info"] params = { "page": page, "pagesize": page_size, } return await request(api['method'], api["url"], params=params, credential=credential)
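
# --- Usage sketch (illustrative only) ---
# A minimal way to wire LiveDanmaku up: the room id below is a placeholder,
# and the indexing of the DANMU_MSG payload assumes Bilibili's raw event
# layout, where info[1] carries the danmaku text.
if __name__ == "__main__":
    room = LiveDanmaku(22544798)  # placeholder room id

    @room.on("DANMU_MSG")
    async def on_danmaku(event):
        # assumption: raw DANMU_MSG payload keeps the text at info[1]
        print(event["data"]["info"][1])

    asyncio.get_event_loop().run_until_complete(room.connect())
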
import os
from typing import Tuple

import torch
import librosa
import soundfile as sf
import numpy as np
import openunmix
from PySide6 import QtCore


class Main(QtCore.QThread):
    def __init__(self):
        super(Main, self).__init__()
        self.global_objects = {}

    def add_ffmpeg_to_env(self):
        self.global_objects['PATH'] = os.environ['PATH']
        # Only extend PATH when a bundled ffmpeg is not already on it
        if 'ffmpeg' not in self.global_objects['PATH']:
            ffmpeg_path = os.path.dirname(os.path.abspath(__file__))
            ffmpeg_path = os.path.join(ffmpeg_path, 'ffmpeg')
            ffmpeg_path = os.path.join(ffmpeg_path, 'bin')
            os.environ['PATH'] += ';' + ffmpeg_path

    # Input: Tensor of shape (nb_samples, nb_channels, nb_timesteps)
    # Output: Tensor of shape (nb_channels, nb_timesteps) with the first
    # target's estimate (vocals for the umxhq model)
    def predict(self, wav: torch.Tensor) -> torch.Tensor:
        separator = openunmix.umxhq()
        estimates = separator(wav)
        return estimates[:, 0, :, :].squeeze()

    # Input: filename as a string
    # Output: wav Tensor of shape (nb_samples, nb_channels, nb_timesteps)
    # and the sample rate as an int
    def load(self, filename: str) -> Tuple[torch.Tensor, int]:
        wav, sample_rate = librosa.load(filename, sr=22050, mono=False, dtype=np.float64)
        wav = torch.Tensor(wav)
        if wav.ndim == 1:
            # duplicate a mono signal into two channels
            wav = torch.stack([wav, wav])
        wav = wav.reshape((1, wav.shape[0], wav.shape[1]))
        return wav, sample_rate

    # Input: output path as a string, Tensor of shape (nb_channels, nb_timesteps)
    def save(self, path: str, wav: torch.Tensor, sample_rate: int) -> None:
        dirname = os.path.dirname(path)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        sf.write(path, np.transpose(wav.detach().numpy()), sample_rate)

    def run(self):
        wav, rate = self.load(self.global_objects['filename'])
        wav_out = self.predict(wav)
        filename = os.path.basename(self.global_objects['filename'])
        path = os.path.dirname(self.global_objects['filename'])
        filename = 'extracted_' + filename
        filename, _ = os.path.splitext(filename)
        filename += '.wav'
        path = os.path.join(path, filename)
        self.save(path, wav_out, rate)
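
# --- Usage sketch (illustrative only; the input path is a placeholder) ---
# Running the worker synchronously writes extracted_song.wav next to the input.
if __name__ == "__main__":
    worker = Main()
    worker.add_ffmpeg_to_env()
    worker.global_objects['filename'] = 'song.mp3'  # placeholder path
    worker.run()
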
#!/usr/bin/env python # -*- coding: utf-8 -*- # File: train-atari.py # Author: Yuxin Wu import numpy as np import sys import os import uuid import argparse import cv2 import tensorflow as tf import six from six.moves import queue from tensorpack import * from tensorpack.tfutils import optimizer from tensorpack.utils.concurrency import ensure_proc_terminate, start_proc_mask_signal from tensorpack.utils.serialize import dumps from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient, FilterNoneGrad from tensorpack.utils.gpu import get_num_gpu import gym from simulator import SimulatorProcess, SimulatorMaster, TransitionExperience from common import Evaluator, eval_model_multithread, play_n_episodes from atari_wrapper import MapState, FrameStack, FireResetEnv, LimitLength from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope from tensorflow.python.ops import control_flow_ops, state_ops from tensorpack.utils.argtools import call_only_once, memoized from tensorpack.tfutils.tower import TowerFuncWrapper import functools import tensorflow.contrib.slim as slim if six.PY3: from concurrent import futures CancelledError = futures.CancelledError else: CancelledError = Exception GAMMA = 0.99 STATE_SHAPE = (4,) LOCAL_TIME_MAX = 5 STEPS_PER_EPOCH = 100 EVAL_EPISODE = 5 BATCH_SIZE = 32 PREDICT_BATCH_SIZE = 15 # batch for efficient forward SIMULATOR_PROC = 8 PREDICTOR_THREAD_PER_GPU = 3 PREDICTOR_THREAD = None NUM_ACTIONS = None ENV_NAME = None import trpo def get_player(train=False, dumpdir=None): env = gym.make(ENV_NAME) if dumpdir: env = gym.wrappers.Monitor(env, dumpdir, video_callable=lambda _: True) # env = FireResetEnv(env) # env = MapState(env, lambda im: cv2.resize(im, IMAGE_SIZE)) # env = FrameStack(env, 4) if train: env = LimitLength(env, 60000) return env class MySimulatorWorker(SimulatorProcess): def _build_player(self): return get_player(train=True) class Model(ModelDesc): def inputs(self): assert NUM_ACTIONS is not None return [tf.placeholder(tf.float32, (None,) + STATE_SHAPE, 'state'), tf.placeholder(tf.int64, (None,), 'action'), tf.placeholder(tf.float32, (None,), 'futurereward'), tf.placeholder(tf.float32, (None,), 'action_prob'), ] @auto_reuse_variable_scope def _get_NN_prediction(self, state, action, futurereward, action_prob): # image = tf.cast(image, tf.float32) / 255.0 with argscope(FullyConnected, activation=tf.nn.relu): l = state l = FullyConnected('fc', l, 64) for i in range(5): l = FullyConnected('fc%d' % i, l, 64) # l = FullyConnected('fc0', l, 64) # l = PReLU('prelu', l) policy = tf.nn.softmax(FullyConnected('fc-pi', l, NUM_ACTIONS), name='policy') # unnormalized policy return policy def build_graph(self, state, action, futurereward, action_prob): self.policy = self._get_NN_prediction(state, action, futurereward, action_prob) is_training = get_current_tower_context().is_training if not is_training: return pi_a_given_s = tf.reduce_sum(self.policy * tf.one_hot(action, NUM_ACTIONS), 1) # (B,) importance = tf.clip_by_value(pi_a_given_s / (action_prob + 1e-8), 0, 10) policy_loss = -tf.reduce_sum(futurereward * importance, name='policy_loss') cost = policy_loss self.cost = tf.truediv(cost, tf.cast(tf.shape(futurereward)[0], tf.float32), name='cost') # summary.add_moving_summary(advantage, cost, tf.reduce_mean(importance, name='importance')) return self.cost def optimizer(self): # opt = tf.train.AdamOptimizer() opt = trpo.ConjugateGradientOptimizer(self.policy, self.cost, delta=0.1) gradprocs = [SummaryGradient()] opt_proc = 
optimizer.apply_grad_processors(opt, gradprocs) return opt_proc, opt class MySimulatorMaster(SimulatorMaster, Callback): def __init__(self, pipe_c2s, pipe_s2c, gpus): super(MySimulatorMaster, self).__init__(pipe_c2s, pipe_s2c) self.queue = queue.Queue(maxsize=BATCH_SIZE * 8 * 2) self._gpus = gpus def _setup_graph(self): # create predictors on the available predictor GPUs. num_gpu = len(self._gpus) predictors = [self.trainer.get_predictor( ['state'], ['policy'], self._gpus[k % num_gpu]) for k in range(PREDICTOR_THREAD)] self.async_predictor = MultiThreadAsyncPredictor( predictors, batch_size=PREDICT_BATCH_SIZE) def _before_train(self): self.async_predictor.start() def _on_state(self, state, client): """ Launch forward prediction for the new state given by some client. """ def cb(outputs): try: distrib = outputs.result()[0] except CancelledError: logger.info("Client {} cancelled.".format(client.ident)) return assert np.all(np.isfinite(distrib)), distrib action = np.random.choice(len(distrib), p=distrib) client.memory.append(TransitionExperience( state, action, reward=None, prob=distrib[action])) self.send_queue.put([client.ident, dumps(action)]) self.async_predictor.put_task([state], cb) def _process_msg(self, client, state, reward, isOver): """ Process a message sent from some client. """ # in the first message, only state is valid, # reward&isOver should be discarded if len(client.memory) > 0: client.memory[-1].reward = reward if isOver: # should clear client's memory and put to queue self._parse_memory(0, client, True) # else: # if len(client.memory) == LOCAL_TIME_MAX + 1: # R = client.memory[-1].value # self._parse_memory(R, client, False) # feed state and return action self._on_state(state, client) def _parse_memory(self, init_r, client, isOver): mem = client.memory if not isOver: last = mem[-1] mem = mem[:-1] mem.reverse() R = float(init_r) for idx, k in enumerate(mem): R = k.reward + GAMMA * R self.queue.put([k.state, k.action, R, k.prob]) if not isOver: client.memory = [last] else: client.memory = [] class MyTrainer(SimpleTrainer): """ Single-GPU single-cost single-tower trainer. """ def __init__(self): super(MyTrainer, self).__init__() def setup_graph2(self, inputs_desc, input, get_cost_fn, get_policy_fn, get_opt_fn): get_cost_fn = TowerFuncWrapper(get_cost_fn, inputs_desc) get_policy_fn = TowerFuncWrapper(get_policy_fn, inputs_desc) get_opt_fn = memoized(get_opt_fn) self.tower_func = get_cost_fn # TODO setup may want to register monitor as well?? input_callbacks = self._setup_input(inputs_desc, input) train_callbacks = self._setup_graph2(input, get_cost_fn, get_policy_fn, get_opt_fn) self.register_callback(input_callbacks + train_callbacks) def _make_get_grad_fn(self, input, get_cost_fn, get_opt_fn): """ Returns: a get_grad_fn for GraphBuilder to use. 
""" # internal use only assert input.setup_done() def get_grad_fn(): ctx = get_current_tower_context() cost = get_cost_fn(*input.get_input_tensors()) if not ctx.is_training: return None # this is the tower function, could be called for inference if ctx.has_own_variables: varlist = ctx.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES) else: varlist = tf.trainable_variables() opt = get_opt_fn()[0] grads = opt.compute_gradients( cost, var_list=varlist, gate_gradients=self.GATE_GRADIENTS, colocate_gradients_with_ops=self.COLOCATE_GRADIENTS_WITH_OPS, aggregation_method=self.AGGREGATION_METHOD) grads = FilterNoneGrad().process(grads) return grads return get_grad_fn def _setup_graph2(self, input, get_cost_fn, get_policy_fn, get_opt_fn): logger.info("Building graph for a single training tower ...") with TowerContext('', is_training=True): grads = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)() opt_proc, self.opt = get_opt_fn() self.opt.cost_fn = functools.partial(get_cost_fn, *input.get_input_tensors()) self.opt.policy_fn = functools.partial(get_policy_fn, *input.get_input_tensors()) self.opt.cache_vars = [tf.Variable(v.initialized_value(), name=v.op.name + 'cache', trainable=False) for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)] self.opt.var2cache = control_flow_ops.group([state_ops.assign(c, v) for c, v in zip(self.opt.cache_vars, tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES))]) self.opt.cache2var = control_flow_ops.group([state_ops.assign(v, c) for c, v in zip(self.opt.cache_vars, tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES))]) with TowerContext('', is_training=True): self.train_op = opt_proc.apply_gradients(grads, name='min_op') return [] def launch_train_with_config2(config, trainer): assert isinstance(trainer, SingleCostTrainer), trainer assert isinstance(config, TrainConfig), config assert config.model is not None assert config.dataflow is not None or config.data is not None model = config.model inputs_desc = model.get_inputs_desc() input = config.data or config.dataflow input = apply_default_prefetch(input, trainer) trainer.setup_graph2( inputs_desc, input, model._build_graph_get_cost, lambda *inputs: model._get_NN_prediction(*inputs), model.get_optimizer) trainer.train_with_defaults( callbacks=config.callbacks, monitors=config.monitors, session_creator=config.session_creator, session_init=config.session_init, steps_per_epoch=config.steps_per_epoch, starting_epoch=config.starting_epoch, max_epoch=config.max_epoch, extra_callbacks=config.extra_callbacks) def train(): dirname = os.path.join('train_log', 'train-atari-{}'.format(ENV_NAME)) logger.set_logger_dir(dirname) # assign GPUs for training & inference num_gpu = get_num_gpu() global PREDICTOR_THREAD if num_gpu > 0: if num_gpu > 1: # use half gpus for inference predict_tower = list(range(num_gpu))[-num_gpu // 2:] else: predict_tower = [0] PREDICTOR_THREAD = len(predict_tower) * PREDICTOR_THREAD_PER_GPU train_tower = list(range(num_gpu))[:-num_gpu // 2] or [0] logger.info("[Batch-A3C] Train on gpu {} and infer on gpu {}".format( ','.join(map(str, train_tower)), ','.join(map(str, predict_tower)))) else: logger.warn("Without GPU this model will never learn! 
CPU is only useful for debug.") PREDICTOR_THREAD = 1 predict_tower, train_tower = [0], [0] # setup simulator processes name_base = str(uuid.uuid1())[:6] prefix = '@' if sys.platform.startswith('linux') else '' namec2s = 'ipc://{}sim-c2s-{}'.format(prefix, name_base) names2c = 'ipc://{}sim-s2c-{}'.format(prefix, name_base) procs = [MySimulatorWorker(k, namec2s, names2c) for k in range(SIMULATOR_PROC)] ensure_proc_terminate(procs) start_proc_mask_signal(procs) master = MySimulatorMaster(namec2s, names2c, predict_tower) dataflow = BatchData(DataFromQueue(master.queue), BATCH_SIZE) config = AutoResumeTrainConfig( model=Model(), dataflow=dataflow, callbacks=[ ModelSaver(), master, StartProcOrThread(master), PeriodicTrigger(Evaluator( EVAL_EPISODE, ['state'], ['policy'], get_player), every_k_epochs=1), ], steps_per_epoch=STEPS_PER_EPOCH, session_init=get_model_loader(args.load) if args.load else None, max_epoch=1000, ) trainer = MyTrainer() if config.nr_tower == 1 else AsyncMultiGPUTrainer(train_tower) launch_train_with_config2(config, trainer) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') parser.add_argument('--load', help='load model') parser.add_argument('--env', help='env', default='CartPole-v0') parser.add_argument('--task', help='task to perform', choices=['play', 'eval', 'train', 'dump_video'], default='train') parser.add_argument('--output', help='output directory for submission', default='output_dir') parser.add_argument('--episode', help='number of episode to eval', default=1, type=int) args = parser.parse_args() ENV_NAME = args.env logger.info("Environment Name: {}".format(ENV_NAME)) NUM_ACTIONS = get_player().action_space.n logger.info("Number of actions: {}".format(NUM_ACTIONS)) if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.task != 'train': assert args.load is not None pred = OfflinePredictor(PredictConfig( model=Model(), session_init=get_model_loader(args.load), input_names=['state'], output_names=['policy'])) if args.task == 'play': play_n_episodes(get_player(train=False), pred, args.episode, render=True) elif args.task == 'eval': eval_model_multithread(pred, args.episode, get_player) elif args.task == 'dump_video': play_n_episodes( get_player(train=False, dumpdir=args.output), pred, args.episode) else: train()
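
# Example invocations, mirroring the argparse options above (the checkpoint
# path is a placeholder):
#   python train-atari.py --env CartPole-v0 --task train
#   python train-atari.py --env CartPole-v0 --task play --load <checkpoint>
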
#!/usr/bin/env python import os import sys import socket import rospy from robotiq_control.cmodel_urscript import RobotiqCModelURScript from robotiq_msgs.msg import CModelCommand, CModelStatus def mainLoop(urscript_topic): # Gripper is a C-Model that is connected to a UR controller. # Commands should be published to ur_modern_driver's URScript topic. gripper = RobotiqCModelURScript(urscript_topic) # The Gripper status pub = rospy.Publisher('status', CModelStatus, queue_size=3) # The Gripper command rospy.Subscriber('command', CModelCommand, gripper.sendCommand) while not rospy.is_shutdown(): # Get and publish the Gripper status status = gripper.getStatus() pub.publish(status) # Wait a little rospy.sleep(0.05) if __name__ == '__main__': rospy.init_node('cmodel_urscript_driver') try: mainLoop(sys.argv[1]) except rospy.ROSInterruptException: pass
import torch
import torch.nn.functional as F


class DynamicsModel(torch.nn.Module):  # transition function
    def __init__(self, D_in, D_out, hidden_unit_num):
        print("[DynamicsModel] H =", hidden_unit_num)
        super(DynamicsModel, self).__init__()
        # zero hidden layer
        # self.l1 = torch.nn.Linear(D_in, D_out, bias=False)

        # one hidden layer
        self.l1 = torch.nn.Linear(D_in, hidden_unit_num)
        self.l2 = torch.nn.Linear(hidden_unit_num, D_out)  # , bias=False
        self.logvar = torch.nn.Parameter(torch.zeros(D_out), requires_grad=True)

        # two hidden layers
        # self.l1 = torch.nn.Linear(D_in, hidden_unit_num)
        # self.l2 = torch.nn.Linear(hidden_unit_num, hidden_unit_num)
        # self.l3 = torch.nn.Linear(hidden_unit_num, D_out)

    def forward(self, X):
        # predict the mean of the next state and a state-independent log-variance
        mu = self.l2(torch.tanh(self.l1(X)))
        return mu, self.logvar * torch.ones_like(mu)
        # return self.l2(F.relu(self.l1(X)))
        # return self.l3(torch.tanh(self.l2(torch.tanh(self.l1(X)))))
        # return self.l3(F.relu(self.l2(F.relu(self.l1(X)))))


class RatioModel(torch.nn.Module):  # density ratio
    def __init__(self, D_in, hidden_unit_num):
        super().__init__()
        print("[RatioModel] H =", hidden_unit_num)
        # self.l1 = torch.nn.Linear(D_in, hidden_unit_num)
        # self.l2 = torch.nn.Linear(hidden_unit_num, 1)  # output dimension is always 1.
        self.l1 = torch.nn.Linear(D_in, hidden_unit_num)
        self.l2 = torch.nn.Linear(hidden_unit_num, hidden_unit_num)
        self.l3 = torch.nn.Linear(hidden_unit_num, 1)  # output dimension is always 1.

    def forward(self, X):
        # softplus keeps the estimated density ratio non-negative
        # return F.softplus(self.l2(torch.tanh(self.l1(X))))
        return F.softplus(self.l3(torch.tanh(self.l2(torch.tanh(self.l1(X))))))


class GradLSDModel(torch.nn.Module):  # gradient of log-stationary distribution
    def __init__(self, D_in, D_out):
        super().__init__()
        self.l1 = torch.nn.Linear(D_in, D_out)

    def forward(self, X):
        return self.l1(X)


class NLLModel(torch.nn.Module):  # negative log-likelihood
    def __init__(self, D_in, hidden_unit_num):
        super().__init__()
        print("[NLLModel] H =", hidden_unit_num)
        self.l1 = torch.nn.Linear(D_in, hidden_unit_num)
        # self.l2 = torch.nn.Linear(hidden_unit_num, 1)  # , bias=False
        self.l2 = torch.nn.Linear(hidden_unit_num, hidden_unit_num)
        self.l3 = torch.nn.Linear(hidden_unit_num, 1)

    def forward(self, X):
        # return self.l2(torch.tanh(self.l1(X)))
        return self.l3(torch.tanh(self.l2(torch.tanh(self.l1(X)))))
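
# --- Usage sketch (illustrative only; shapes and data are placeholders) ---
# DynamicsModel parameterises a Gaussian over next states: forward() returns
# a mean and a broadcast log-variance, so one natural training objective is
# the Gaussian negative log-likelihood, sketched below.
if __name__ == "__main__":
    model = DynamicsModel(D_in=4, D_out=3, hidden_unit_num=64)
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)

    x = torch.randn(32, 4)  # state-action batch (placeholder data)
    y = torch.randn(32, 3)  # next-state targets (placeholder data)

    mu, logvar = model(x)
    # Gaussian NLL up to an additive constant
    nll = 0.5 * ((y - mu) ** 2 / logvar.exp() + logvar).sum(dim=1).mean()
    opt.zero_grad()
    nll.backward()
    opt.step()
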
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Script to plot storage IO timing usage from profiling data. This script requires the matplotlib and numpy Python modules. """ from __future__ import print_function from __future__ import unicode_literals import argparse import glob import os import sys import numpy # pylint: disable=import-error from matplotlib import pyplot # pylint: disable=import-error def Main(): """The main program function. Returns: bool: True if successful or False if not. """ argument_parser = argparse.ArgumentParser(description=( 'Plots storage IO timing from profiling data.')) argument_parser.add_argument( '--output', dest='output_file', type=str, help=( 'path of the output file to write the graph to instead of using ' 'interactive mode. The output format deduced from the extension ' 'of the filename.')) argument_parser.add_argument( '--process', dest='process', type=str, default='', help=( 'comma separated list of names of processes to graph.')) argument_parser.add_argument( 'profile_path', type=str, help=( 'path to the directory containing the profiling data.')) options = argument_parser.parse_args() if not os.path.isdir(options.profile_path): print('No such directory: {0:s}'.format(options.profile_path)) return False processes = [] if options.process: processes = options.process.split(',') names = [ 'time', 'name', 'operation', 'description', 'cpu', 'logical_size', 'size'] glob_expression = os.path.join(options.profile_path, 'storage-*.csv.gz') for csv_file_name in glob.glob(glob_expression): process_name = os.path.basename(csv_file_name) process_name = process_name.replace('storage-', '').replace('.csv.gz', '') if processes and process_name not in processes: continue data = numpy.genfromtxt( csv_file_name, delimiter='\t', dtype=None, encoding='utf-8', names=names, skip_header=1) if data.size > 0: for name in numpy.unique(data['name']): data_by_name = numpy.extract(data['name'] == name, data) data_bytes_per_second = numpy.divide( data_by_name['logical_size'], data_by_name['cpu']) label = '-'.join([name, process_name]) pyplot.plot(data_by_name['time'], data_bytes_per_second, label=label) pyplot.title('Bytes read/write over time') pyplot.xlabel('Time') pyplot.xscale('linear') pyplot.ylabel('Bytes per seconds') pyplot.yscale('linear') pyplot.legend() if options.output_file: pyplot.savefig(options.output_file) else: pyplot.show() return True if __name__ == '__main__': if not Main(): sys.exit(1) else: sys.exit(0)
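
# Example invocations, based on the argparse options above (the script name
# and paths are placeholders):
#   python plot_storage_io.py /path/to/profiling-data
#   python plot_storage_io.py --output io.png --process worker1,worker2 /path/to/profiling-data
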
# -*- coding: utf-8 -*-
import datetime

from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.forms.models import model_to_dict

from taxi_online_example.utils import date_now_or_future_validator, UTC


class TaxiLocation(models.Model):
    taxi_id = models.CharField(max_length=200, unique=True)
    lon = models.DecimalField(max_digits=9, decimal_places=6, db_index=True,
                              validators=[MinValueValidator(-180), MaxValueValidator(180)])
    lat = models.DecimalField(max_digits=9, decimal_places=6, db_index=True,
                              validators=[MinValueValidator(-90), MaxValueValidator(90)])
    is_busy = models.BooleanField(default=False)

    def change_activity(self, is_busy):
        self.is_busy = is_busy
        self.save()

    def description(self):
        return '<TaxiLocation %s>' % _get_model_object_description(self)


class PassengerOrder(models.Model):
    passenger_id = models.CharField(max_length=200, unique=True, db_index=True)
    lon = models.DecimalField(max_digits=9, decimal_places=6,
                              validators=[MinValueValidator(-180), MaxValueValidator(180)])
    lat = models.DecimalField(max_digits=9, decimal_places=6,
                              validators=[MinValueValidator(-90), MaxValueValidator(90)])
    time_to_pick_up = models.DateTimeField(null=True, blank=True, db_index=True,
                                           default=datetime.datetime.now,
                                           validators=[date_now_or_future_validator])
    taxi_id = models.CharField(max_length=200, null=True, blank=True, unique=True,
                               db_index=True, default=None)

    def is_waiting_for_taxi(self):
        return True if self.taxi_id else False

    def remove_taxi(self):
        self.taxi_id = None
        self.save()

    @classmethod
    def get_all_passengers_for_pick_up(cls):
        return cls.objects.filter(time_to_pick_up__lte=datetime.datetime.now(tz=UTC()),
                                  taxi_id__isnull=True).order_by('time_to_pick_up')

    def get_nearest_free_taxi(self, radius=10):
        # http://www.plumislandmedia.net/mysql/haversine-mysql-nearest-loc/
        sql = """SELECT tl.id, tl.taxi_id,
       p.distance_unit
           * DEGREES(ACOS(COS(RADIANS(p.latpoint))
           * COS(RADIANS(tl.lat))
           * COS(RADIANS(p.longpoint) - RADIANS(tl.lon))
           + SIN(RADIANS(p.latpoint))
           * SIN(RADIANS(tl.lat)))) AS distance_in_km
  FROM %(taxi_location_table_name)s AS tl
  JOIN (   /* these are the query parameters */
        SELECT %(latpoint)s AS latpoint, %(longpoint)s AS longpoint,
               %(radius)s AS radius, 111.045 AS distance_unit
    ) AS p ON 1=1
  WHERE tl.is_busy = false
    AND tl.lat
     BETWEEN p.latpoint  - (p.radius / p.distance_unit)
         AND p.latpoint  + (p.radius / p.distance_unit)
    AND tl.lon
     BETWEEN p.longpoint - (p.radius / (p.distance_unit * COS(RADIANS(p.latpoint))))
         AND p.longpoint + (p.radius / (p.distance_unit * COS(RADIANS(p.latpoint))))
  ORDER BY distance_in_km
  LIMIT 1""" % {'taxi_location_table_name': TaxiLocation._meta.db_table,
                'latpoint': self.lat, 'longpoint': self.lon, 'radius': radius}
        for p in TaxiLocation.objects.raw(sql):
            return p
        return False

    def description(self):
        return '<PassengerOrder %s>' % _get_model_object_description(self)


def _get_model_object_description(obj):
    return ' '.join([('%s=%s' % (k, str(v))) for k, v in model_to_dict(obj).items()])
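
# --- Illustration (not used by the models) ---
# The raw SQL above computes great-circle distance with the spherical law of
# cosines and 111.045 km per degree; the same computation in Python can serve
# as a sanity check for the query results.
import math


def distance_in_km(latpoint, longpoint, lat, lon):
    # clamp guards against acos() domain errors for (near-)identical points
    cos_angle = (math.cos(math.radians(latpoint)) * math.cos(math.radians(lat)) *
                 math.cos(math.radians(longpoint) - math.radians(lon)) +
                 math.sin(math.radians(latpoint)) * math.sin(math.radians(lat)))
    return 111.045 * math.degrees(math.acos(min(1.0, cos_angle)))
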
from get_data import *
import os
import gzip
import shutil
import struct
import urllib.request

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def read_data(filename):
    """Read the birth-rate / life-expectancy text file.

    :param filename: path to a tab-separated file with a header row
    :return: (array of shape (n_samples, 2), n_samples)
    """
    with open(filename, 'r') as f:
        text = f.readlines()[1:]
    data = [line[:-1].split('\t') for line in text]
    births = [float(line[1]) for line in data]
    life = [float(line[2]) for line in data]
    data = list(zip(births, life))
    n_samples = len(data)
    data = np.asarray(data, dtype=np.float32)
    return data, n_samples


def huber_loss(y, y_pred, delta):
    diff = tf.abs(y - y_pred)
    def f1(): return 0.5 * tf.square(diff)
    def f2(): return delta * diff - 0.5 * tf.square(delta)
    return tf.cond(diff < delta, f1, f2)


def download_mnist(path):
    safe_mkdir(path)
    url = 'http://yann.lecun.com/exdb/mnist'
    filenames = ['train-images-idx3-ubyte.gz',
                 'train-labels-idx1-ubyte.gz',
                 't10k-images-idx3-ubyte.gz',
                 't10k-labels-idx1-ubyte.gz']
    expected_bytes = [9912422, 28881, 1648877, 4542]

    for filename, byte in zip(filenames, expected_bytes):
        download_url = os.path.join(url, filename)
        download_url = download_url.replace('\\', '/')
        local_path = os.path.join(path, filename)
        download_file(download_url, local_path, byte, True)


def safe_mkdir(path):
    """ Create a directory if there isn't one already. """
    try:
        os.mkdir(path)
    except OSError:
        pass


def download_file(download_url, local_path, expected_byte=None, unzip_and_remove=False):
    """
    Download the file from download_url into local_path if the file doesn't
    already exist. If expected_byte is provided, check that the downloaded
    file has the expected number of bytes. If unzip_and_remove is True,
    unzip the file and remove the zip file.
    """
    if os.path.exists(local_path) or os.path.exists(local_path[:-3]):
        print('%s already exists' % local_path)
    else:
        print('Downloading %s' % download_url)
        local_file, _ = urllib.request.urlretrieve(download_url, local_path)
        file_stat = os.stat(local_path)
        if expected_byte:
            if file_stat.st_size == expected_byte:
                print('Successfully downloaded %s' % local_path)
                if unzip_and_remove:
                    with gzip.open(local_path, 'rb') as f_in, open(local_path[:-3], 'wb') as f_out:
                        shutil.copyfileobj(f_in, f_out)
                    os.remove(local_path)
            else:
                print('The downloaded file has unexpected number of bytes')


def read_mnist(path, flatten=True, num_train=55000):
    imgs, labels = parse_data(path, 'train', flatten)
    indices = np.random.permutation(labels.shape[0])
    train_idx, val_idx = indices[:num_train], indices[num_train:]
    train_img, train_labels = imgs[train_idx, :], labels[train_idx, :]
    val_img, val_labels = imgs[val_idx, :], labels[val_idx, :]
    test = parse_data(path, 't10k', flatten)
    return (train_img, train_labels), (val_img, val_labels), test


def parse_data(path, dataset, flatten):
    if dataset != 'train' and dataset != 't10k':
        raise NameError('dataset must be train or t10k')

    label_file = os.path.join(path, dataset + '-labels-idx1-ubyte')
    with open(label_file, 'rb') as file:
        _, num = struct.unpack(">II", file.read(8))
        labels = np.fromfile(file, dtype=np.int8)  # int8
        new_labels = np.zeros((num, 10))
        new_labels[np.arange(num), labels] = 1

    img_file = os.path.join(path, dataset + '-images-idx3-ubyte')
    with open(img_file, 'rb') as file:
        _, num, rows, cols = struct.unpack(">IIII", file.read(16))
        imgs = np.fromfile(file, dtype=np.uint8).reshape(num, rows, cols)  # uint8
        imgs = imgs.astype(np.float32) / 255.0
        if flatten:
            imgs = imgs.reshape([num, -1])

    return imgs, new_labels
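
# --- Usage sketch (illustrative only; the local path is a placeholder) ---
# Downloads MNIST into ./data/mnist and reads the standard train/val/test splits.
if __name__ == '__main__':
    mnist_path = 'data/mnist'
    download_mnist(mnist_path)
    (train_img, train_labels), (val_img, val_labels), (test_img, test_labels) = \
        read_mnist(mnist_path, flatten=True)
    print(train_img.shape, val_img.shape, test_img.shape)
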
#!/usr/bin/env python
"""
DBSCAN Project - M2 SSI - Istic, Univ. Rennes 1.
Andriamilanto Tompoariniaina <tompo.andri@gmail.com>

This module is an implementation of the K-mean algorithm, to confront it with
our implementation of the DBSCAN one.
"""

# -- Imports
import sys
import random
import operator
from pandas import DataFrame
from pathlib import Path
from datas import (read_dataset, dataframe_to_points, display_clusters,
                   Center, Cluster)


# -- Classes
class Kmean(object):
    """The class representation of our implementation of Kmean."""

    def __init__(self, dataset, k, precision=1):
        """Initialization function, called when creating a new object."""
        # Type checking the dataset
        if not isinstance(dataset, DataFrame) or dataset.empty:
            raise TypeError(
                'Dataset given to Kmean class has to be a non-empty '
                'pandas.DataFrame instance'
            )

        # If asking more clusters than the number of points
        if k > dataset.size:
            raise ValueError(
                'k cannot be superior than dataset size (> %d)' % dataset.size
            )

        # Initialize private attributes
        self._k = k
        self._precision = precision
        self._points = []
        self._clusters = []
        self._neighbour_counter = {}

        # Create the Point objects from the DataFrame one
        self._points = dataframe_to_points(dataset)

        # Initialize the neighbour counter
        for point in self._points:
            self._neighbour_counter[point] = 0

        # DEBUG: Display initial state of the algorithm
        # display_clusters(self._clusters, self._points)

    def _turn(self):
        """Run a turn of the algorithm till we reach the convergence point."""
        # Variable set to False only to enter the loop the first time
        converged = False
        nb_loop = 0

        # While we still haven't reached the point of convergence
        while not converged:

            # DEBUG: Display the state at each loop
            # display_clusters(self._clusters)

            # Put the converged value back to True; if a point changes its
            # cluster, we will know that we still haven't converged
            converged = True

            # For every point (we assume that they are already in a cluster)
            for p in self._points:

                # The closest is the current cluster of the point
                closest = p.cluster
                curr_dist = p.dist(closest.center)

                # Parse all the other clusters
                for cluster in self._clusters:

                    # If one is closer than the current one
                    if p.dist(cluster.center) < curr_dist:
                        closest = cluster
                        curr_dist = p.dist(closest.center)

                # If the closest cluster is different than the current one,
                # assign this point to this cluster and we know that we still
                # haven't converged
                if p.cluster != closest:
                    closest.assign(p)
                    converged = False

            # Reassign the center of the clusters
            self._update_cluster_center()

            # Simple counter
            nb_loop += 1

        # Return the number of loops that this turn took
        return nb_loop

    def run(self):
        """Run the algorithm a precision number of times."""
        # Do a precision number of turns
        nb_loop = 0
        for turn in range(self._precision):

            # Initialization with random centers
            self._initialization()

            # Execute the turn and count its number of loops
            nb_loop += self._turn()

            # Count the number of neighbour points of each point
            self._count_neighbours()

        # Execute the last turn with optimized centers
        opt_loop = self._optimized_turn()

        # At the end, print the final convergence time
        print('%d, %d, %d' % (self._k, nb_loop / self._precision, opt_loop))

        # Display the final state of the clusters
        display_clusters(self._clusters)
        # for c in self._clusters:
        #     print(c)

    def _optimized_turn(self):
        """Optimized turn to get the 'best' centers for clusters."""
        # Get k points with the max neighbours which will make better centers
        best_centers = []
        for i in range(self._k):

            # Get the id of the point with maximum neighbours (better center)
            new_max_point = max(
                self._neighbour_counter.items(), key=operator.itemgetter(1)
            )[0]

            # For every point in the cluster of the maximum one, reset its
            # counter in order to not select two centers from the same cluster
            cluster = new_max_point.cluster
            # closest = cluster.points[0]
            closest = new_max_point
            for point in cluster.points:
                # if point.dist(cluster.center) < closest.dist(cluster.center):
                #     closest = point
                self._neighbour_counter[point] = 0

            # Just add the created center into the center list
            best_centers.append(Center(i, closest.x, closest.y))

        # Clear the clusters
        self._clear_clusters()

        # Create the clusters with their optimized centers
        for center in best_centers:
            c = Cluster()
            c.center = center
            self._clusters.append(c)

        # Assign each point to its closest cluster
        self._assign_point_to_closest_cluster()

        # Reassign the center of the clusters
        self._update_cluster_center()

        # Execute the final and optimized turn and count its number of loops
        return self._turn()

    def _count_neighbours(self):
        """Count the number of neighbours of each point."""
        for point in self._points:
            self._neighbour_counter[point] += len(point.cluster.points)

    def _initialization(self):
        """Initialization part of the algorithm.

        Note that the points will be assigned to their nearest cluster and
        the center points of the clusters are scattered on the diagonal going
        from left bottom to top right.
        """
        # Clear the clusters
        self._clear_clusters()

        # Initialize the clusters
        self._init_clusters()

        # Assign each point to its closest cluster
        self._assign_point_to_closest_cluster()

        # Reassign the center of the clusters
        self._update_cluster_center()

    def _update_cluster_center(self):
        """Update the cluster's center."""
        # Update the center of each cluster if there are points in it
        for cluster in self._clusters:

            # Get the number of points in this cluster
            nb_points = len(cluster.points)
            if nb_points > 0:

                # Update the way of getting sums and centers for 3D points
                # Add all x and y values of each point of this cluster
                x_sum, y_sum = 0, 0
                for point in cluster.points:
                    x_sum += point.x
                    y_sum += point.y

                # Reassign the center of this cluster by getting the mean
                cluster.center.x = x_sum / nb_points
                cluster.center.y = y_sum / nb_points

                # DEBUG: Display the new centers approximations
                # print(
                #     'center.x=%s and center.y=%s' %
                #     (cluster.center.x, cluster.center.y)
                # )

    def _clear_clusters(self):
        """Clear the clusters between each turn."""
        for point in self._points:
            point.cluster = None
        self._clusters.clear()

    def _init_clusters(self):
        """Initialize the clusters."""
        # Select randomly k points and put them as cluster centers
        for index in range(self._k):

            # Select a random point
            random_point = random.choice(self._points)

            # Update what is needed for 3D centers using 3D points
            # Create a new cluster with a random point as its center
            c = Cluster()
            c.center = Center(index, random_point.x, random_point.y)
            self._clusters.append(c)

    def _assign_point_to_closest_cluster(self):
        """Assign each point to its closest cluster."""
        for p in self._points:

            # The closest is the first cluster in the list (for the moment)
            closest = self._clusters[0]
            curr_dist = p.dist(closest.center)

            # Parse all the other clusters
            for cluster in self._clusters[1:]:

                # If one is closer than the current one
                if p.dist(cluster.center) < curr_dist:
                    closest = cluster
                    curr_dist = p.dist(closest.center)

            # Assign this point to its closest cluster
            closest.assign(p)


# -- Private functions
def __get_params(argv):
    """Function to manage input parameters."""
    # Correct syntax
syntax = '%s filename k [precision]' % argv[0] # Not enough parameters if len(argv) not in (3, 4): print('Usage: %s' % syntax) exit() # Get the parameter k try: k = int(argv[2]) if k < 1: raise ValueError except ValueError: print( 'Parameter k as %s is invalid, must be a positive integer' % argv[2] ) exit() # Get the filename after checking that the file exists and is a .csv f = Path(argv[1]) if not f.is_file() or f.suffix != '.csv': print('The file %s was not found' % argv[1]) exit() # Get the precision value try: precision = int(argv[3]) if precision < 1: raise ValueError except IndexError: precision = 1 except ValueError: print( 'Parameter precision as %s is invalid, must be a positive integer' % argv[3] ) exit() # Return the parameters return argv[1], k, precision if __name__ == "__main__": """Main function to be launched when this script is called """ # -- Normal functionment # Get parameters and execute K-mean algorithm dataset, k, precision = __get_params(sys.argv) Kmean(read_dataset(dataset), k, precision).run() # -- Convergence measurement gives 3 columns csv file # => (k | normal convergence time | optimized version convergence time) # datasets = [ # 'carnet2.csv', # 'carnet_bis.csv', # 'circles.csv', # 'density_gap.csv', # 'example.csv', # 'stats_reseaux_ping_download.csv' # ] # # from contextlib import redirect_stdout # for ds in datasets: # with open('../Report/convergences/' + ds, 'w') as f: # with redirect_stdout(f): # print('k, convergence_time') # try: # for k in range(1, 100): # Kmean(read_dataset('../datasets/' + ds), k).run() # except ValueError: # pass
import csv

from stations import Station, Stations

csvfile = "./source/stations_aod.csv"


def readCSV(csv_file):
    """Read the stations file directly with the csv module."""
    stations_aod = {}
    # Python 3: text mode (the old 'rb' mode breaks csv.reader here)
    with open(csv_file, 'r', newline='') as f:
        spamreader = csv.reader(f, delimiter=",", quotechar='|')
        for row in spamreader:
            stations_aod[row[0]] = row
            # print(row[0])
            # print(", ".join(row))
    for key in stations_aod.keys():
        (stId, lat, lon, alt, stname, calibr) = stations_aod[key]
        print("stId={0},lat={1},lon={2},alt={3},stname={4},calibr={5}".format(
            stId, lat, lon, alt, stname, calibr))


def readCSV2(csv_file):
    """Read the stations file through the Stations helper class."""
    stations = Stations()
    stations.read(csv_file)
    for name in stations.getstNames():
        station = stations.get(name)
        if station is not None:
            if station.hasCalibr():
                print("stId={0},lat={1},lon={2},alt={3},stname={4},calibr={5}".format(
                    station.stId, station.lat, station.lon,
                    station.alt, station.stname, station.calibr))
            else:
                print("{0} has no calibr value".format(name))
        else:
            print("{0} does not exist".format(name))


readCSV2(csvfile)
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None


class Solution:
    # @param {TreeNode} root
    # @return {integer[]}
    def rightSideView(self, root):
        # Label every node with its heap-style position (root = 1,
        # children = 2*pos and 2*pos + 1); within a row, the rightmost
        # node has the largest position.
        def dfs(node, pos):
            if not node:
                return
            # pos.bit_length() - 1 is the row index; it avoids the
            # floating-point rounding issues of int(log(pos, 2))
            row = pos.bit_length() - 1
            if row >= len(ans):
                ans.append((node, pos))
            elif pos > ans[row][1]:
                ans[row] = (node, pos)
            dfs(node.left, pos * 2)
            dfs(node.right, pos * 2 + 1)

        ans = []
        dfs(root, 1)
        return [p[0].val for p in ans]
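# --------------------------------------------------------------------------
# Illustrative check (not part of the original solution): a local TreeNode
# is defined here because the LeetCode stub above is commented out.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.right = TreeNode(5)
    root.right.right = TreeNode(4)
    print(Solution().rightSideView(root))  # expected: [1, 3, 4]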
# Exercise 2 - Notebook 2

"""
Implement a modularized program that, reading the required values from the
keyboard, displays on screen the area of a circle, a square and a triangle.
Use the value 3.1416 as an approximation of pi, or import the value from the
"math" module.
"""

import math

print('Circle')
radio = float(input('Radius = '))
print('')

print('Square')
lado = float(input('Side = '))
print('')

print('Triangle')
base = float(input('Base = '))
altura = float(input('Height = '))


def area_circulo(radio):
    """
    float --> float
    OBJ: compute the area of a circle
    """
    area_circulo = math.pi * radio**2
    return area_circulo


print('Circle area = ', area_circulo(radio))


def area_cuadrado(lado):
    """
    float --> float
    OBJ: compute the area of a square
    """
    area_cuadrado = lado**2
    return area_cuadrado


print('Square area = ', area_cuadrado(lado))


def area_triangulo(base, altura):
    """
    float --> float
    OBJ: compute the area of a triangle
    """
    area_triangulo = base * altura / 2
    return area_triangulo


print('Triangle area = ', area_triangulo(base, altura))
import sys


def main():
    # For each record in the input file, count the number of non-comment
    # lines that follow its ">" header and print "<name>\t<count>".
    counter = 0
    tf = ""
    with open(sys.argv[1], "r") as infile:
        for l in infile:
            if ">" in l:
                s = l.split()
                if tf != "":
                    print(tf + '\t' + str(counter))
                    counter = 0
                tf = s[1].upper()
            elif "#" not in l:
                counter += 1
    print(tf + '\t' + str(counter))


main()
# -*- coding: utf-8 -*-
"""
@author: Hiromasa Kaneko
"""
# k-NN

import pandas as pd
from sklearn.neighbors import NearestNeighbors

k_in_knn = 5  # k in k-NN
rate_of_training_samples_inside_ad = 0.96  # fraction of training samples taken to be inside the AD; used to set the AD threshold

dataset = pd.read_csv('resin.csv', index_col=0, header=0)
x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0)

# split the data
y = dataset.iloc[:, 0]  # target variable
x = dataset.iloc[:, 1:]  # explanatory variables

# drop features whose standard deviation is 0
deleting_variables = x.columns[x.std() == 0]
x = x.drop(deleting_variables, axis=1)
x_prediction = x_prediction.drop(deleting_variables, axis=1)

# autoscaling (standardization)
autoscaled_x = (x - x.mean()) / x.std()
autoscaled_x_prediction = (x_prediction - x.mean()) / x.std()

# applicability domain (AD) via k-NN
ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean')  # declare the AD model
ad_model.fit(autoscaled_x)  # for a k-NN AD, fitting just stores the training x in the model

# kneighbors returns the distances to the k nearest neighbors together with their index numbers, hence two output variables
# within the training data each sample is its own nearest neighbor (distance 0), so k_in_knn + 1 neighbors are requested and the self-distance is excluded
knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1)
knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index)  # convert to a DataFrame
mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1),
                                          columns=['mean_of_knn_distance'])  # mean distance to the k_in_knn neighbors other than the sample itself
mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv')  # save to csv; note that an existing file with the same name is overwritten

# set the threshold so that rate_of_training_samples_inside_ad * 100 % of the training samples fall inside the AD
sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True)  # sort mean distances in ascending order
ad_threshold = sorted_mean_of_knn_distance_train.iloc[
    round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1]

# decide, for the training data, whether each sample is inside or outside the AD
inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold  # TRUE only for samples inside the AD
inside_ad_flag_train.columns = ['inside_ad_flag']
inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv')  # save to csv; note that an existing file with the same name is overwritten

# compute k-NN distances for the prediction data
knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction)
knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index)  # convert to a DataFrame
mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1),
                                               columns=['mean_of_knn_distance'])  # mean of the k_in_knn distances
mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv')  # save to csv; note that an existing file with the same name is overwritten

# decide, for the prediction data, whether each sample is inside or outside the AD
inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold  # TRUE only for samples inside the AD
inside_ad_flag_prediction.columns = ['inside_ad_flag']
inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv')  # save to csv; note that an existing file with the same name is overwritten
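# Worked example (illustrative numbers, not from the datasets above): with
# 100 training samples and rate_of_training_samples_inside_ad = 0.96, the
# threshold is the 96th smallest mean k-NN distance
# (index round(100 * 0.96) - 1 == 95), so 96 of the 100 training samples
# fall inside the applicability domain by construction.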
from BogoBogoSort import bogoBogoSort
from BogoSort import bogoSort
from BozoSort import bozoSort
from CommunismSort import communismSort
from MiracleSort import miracleSort
from StalinSort import stalinSort
from SlowSort import slowSort
import numpy as np
import time
import matplotlib.pyplot as plt
from pick import pick


def time_it(func):
    # Run the sort exactly once and time it; calling func() again after
    # timing would re-sort the list and skew the measurement.
    start = time.time()
    result = func()
    end = time.time()
    # print('sorted list: ' + str(result))
    print('Finished in {} seconds.'.format(end - start))
    if alg_name == 'miracleSort' and result is None:
        # miracleSort may give up without sorting; report 0 in that case
        return 0
    return end - start


algsList = [bogoBogoSort, bogoSort, bozoSort, communismSort, miracleSort,
            stalinSort, slowSort]

title = 'Please choose an algorithm: '
options = ['bogoBogoSort', 'bogoSort', 'bozoSort', 'communismSort',
           'miracleSort', 'stalinSort', 'slowSort']
option, index = pick(options, title)
alg_name = str(algsList[index].__name__)

times = []
max_n = int(input('Enter max n: '))
print('\n' + alg_name + '...')

for i in range(1, max_n + 1):
    randlist = np.random.randint(0, 100, i).tolist()
    print('\n' + 'unsorted list: ', randlist)
    times.append(time_it(lambda: algsList[index](randlist)))

n = range(1, max_n + 1)

fig, ax = plt.subplots()
ax.plot(n, times)
ax.set(xlabel='array length (n)', ylabel='time (s)', title=alg_name)
ax.grid()
fig.savefig("img/" + alg_name + ".png")
plt.show()
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Django model mixins and utilities."""


class RunTextFieldValidators:
    """Mixin to run all TextField validators on every save() call.

    This mixin should appear BEFORE Model in the class's bases.
    """

    def save(self, *args, **kwargs):
        """For all TextFields, run any default and specified validators before calling save."""
        for f in (
            c
            for c in self._meta.get_fields()
            if hasattr(self, c.name) and c.get_internal_type() == "TextField"
        ):
            val = getattr(self, f.name)
            if val is not None:
                val = str(val)
                f.run_validators(val)
        super().save(*args, **kwargs)
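# --------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical model, not part of this module;
# assumes it lives inside an installed Django app so the model machinery is
# configured):
#
# from django.core.validators import MaxLengthValidator
# from django.db import models
#
# class Note(RunTextFieldValidators, models.Model):
#     body = models.TextField(validators=[MaxLengthValidator(500)])
#
# Note(body="x" * 501).save()  # raises ValidationError before the DB write,
# because the mixin's save() runs the TextField validators first.
# --------------------------------------------------------------------------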
#! python3
# voicechannelcontrol.py

"""
==============================================================================
MIT License

Copyright (c) 2020 Jacob Lee

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================
"""

import asyncio
import json
import os

import discord
from discord.ext import commands


class VoiceChannelControl(commands.Cog):
    """
    Allows members to claim voice channels and control member properties
    """

    def __init__(self, bot):
        self.bot = bot
        self.emojis = [
            u'0\ufe0f\u20e3', u'1\ufe0f\u20e3', u'2\ufe0f\u20e3',
            u'3\ufe0f\u20e3', u'4\ufe0f\u20e3', u'5\ufe0f\u20e3',
            u'6\ufe0f\u20e3', u'7\ufe0f\u20e3', u'8\ufe0f\u20e3',
            u'9\ufe0f\u20e3']
        self.claims = {}
        self.disabled = []

    @commands.Cog.listener()
    async def on_voice_state_update(self, member, before, after):
        """
        Forcefully yields a voice channel claim
        """
        # Verify member has a voice channel claim
        if member.id not in self.claims:
            return
        direct_message = await member.create_dm()
        # Check if member disconnected from voice channels
        if before.channel and after.channel is None:
            await self.yield_control(member)
            await direct_message.send(
                f"{member.mention}: All claims forcefully yielded after voice channel disconnect"
            )
        # Check if member is AFK
        if after.afk:
            await self.yield_control(member)
            await direct_message.send(
                f"{member.mention}: All claims forcefully yielded after AFK"
            )

    @commands.command(
        name="claim", case_insensitive=True, pass_context=True
    )
    async def claim(self, ctx):
        """
        Invokes a request to claim voice channels.
        The member will be prompted with two embeds to claim voice channels:
        - The first embed designates a voice channel as a Game Lobby
        - The second embed optionally designates a voice channel as a Ghost Lobby
        """
        await ctx.message.delete()
        # Verify member does not have a voice channel claim
        if ctx.author.id in self.claims:
            await ctx.send("You already have a voice channel claim")
            return
        # Prompt member to select a voice channel for a Game Lobby
        game = await self.claim_voice_channel(ctx, style="Game Lobby")
        if game is None:
            return
        self.claims[ctx.author.id] = [game]
        # Prompt member to optionally select a voice channel for a Ghost Lobby
        ghost = await self.claim_voice_channel(ctx, style="Ghost Lobby")
        if ghost is None:
            await self.voice_control(ctx, game=game, ghost=None)
        else:
            self.claims[ctx.author.id].append(ghost)
            await self.voice_control(ctx, game=game, ghost=ghost)

    @commands.command(
        name="claimed", case_insensitive=True, pass_context=True
    )
    async def claimed(self, ctx):
        """
        Returns all members with claims and their respective claimed voice channels
        """
        # Construct embed to send data
        embed = discord.Embed(
            title="Claimed Voice Channels", color=0x0000ff
        )
        # Parse through all claims
        for claim in self.claims:
            # Get Game Lobby voice channel name
            game = self.bot.get_channel(
                id=self.claims[claim][0]
            )
            value = f"`Game`: {game.name}"
            # Get Ghost Lobby voice channel name, if applicable
            if len(self.claims[claim]) == 2:
                ghost = self.bot.get_channel(
                    id=self.claims[claim][1]
                )
                value += f"\n`Ghost`: {ghost.name}"
            # Add the claiming member's name and voice channels as an embed field
            embed.add_field(
                name=discord.utils.get(
                    ctx.guild.members, id=claim
                ).name,
                value=value
            )
        # Delete invocation message
        await ctx.message.delete()
        # Send constructed embed
        message = await ctx.channel.send(embed=embed)
        await asyncio.sleep(10)
        await message.delete()

    @commands.command(
        name="locked", case_insensitive=True, pass_context=True
    )
    async def locked(self, ctx):
        """
        Checks if MapDatabase commands are locked for the member
        """
        # Determine whether commands are locked for member
        locked = self.check_commands(ctx)
        # Construct embed to send data
        embed = discord.Embed(
            title="Commands Enabled/Disabled Check", color=0x0000ff
        )
        # Add member name as embed field
        embed.add_field(
            name="Member", value=ctx.author.mention
        )
        # Add whether commands are locked for member as embed field
        embed.add_field(
            name="`MapDatabase` Commands Locked?",
            value=f"`{locked}`"
        )
        # Delete invocation message
        await ctx.message.delete()
        # Send constructed embed
        message = await ctx.channel.send(embed=embed)
        await asyncio.sleep(10)
        await message.delete()

    async def claim_voice_channel(self, ctx, *, style):
        """
        Sends an embed with reactions for the member to designate a lobby VC
        """
        # Get all available voice channels, if any
        claimed = []
        for claim in self.claims.values():
            claimed.extend(claim)
        voice_channels = [
            c for c in ctx.guild.voice_channels if c.id not in claimed
        ][:10]
        if not voice_channels:
            await ctx.channel.send(
                "There are no available voice channels to claim."
            )
            return
        # Construct embed to send data
        embed = discord.Embed(
            title=f"Claim a Voice Channel for a {style}", color=0x0000ff
        )
        # Add embed fields
        fields = {
            "Channel Options": '\n'.join([
                f"{self.emojis[voice_channels.index(c)]} - {c}"
                for c in voice_channels
            ]),
            "Claim": "Use the reactions below to claim a voice channel",
            "Close": "React with :x:"
        }
        for field in fields:
            embed.add_field(name=field, value=fields[field])
        # Set embed footer
        embed.set_footer(
            text="This message will automatically close after 10s"
        )
        # Send constructed embed
        message = await ctx.channel.send(embed=embed)
        # Add a reaction for each available voice channel
        for chan in voice_channels:
            await message.add_reaction(
                self.emojis[voice_channels.index(chan)]
            )
        # Add reaction to close message
        await message.add_reaction(u"\u274c")
        # Wait for and handle member input
        try:
            payload = await self.bot.wait_for(
                "raw_reaction_add", timeout=10.0,
                check=lambda p: (
                    p.member.id == ctx.author.id
                    and p.message_id == message.id
                )
            )
            await message.delete()
            # Close message
            if payload.emoji.name == u"\u274c":
                return
            # Return corresponding voice channel
            return voice_channels[
                self.emojis.index(payload.emoji.name)
            ].id
        # Delete message if message times out
        except asyncio.TimeoutError:
            await message.delete()
            return

    async def voice_control(self, ctx, game, ghost):
        """
        Allows the member to control member properties in claimed voice channels
        """
        # Get Game Lobby voice channel
        game = self.bot.get_channel(id=game)
        # Get Game Lobby and Ghost Lobby reactions and fields
        with open(
            os.path.join("data", "VoiceChannelControl", "vcc_content.txt")
        ) as file:
            data = json.load(file)
        if ghost is None:
            reactions = data["game"]["reactions"]
            fields = data["game"]["fields"]
            fields["Claimed"] = fields["Claimed"].format(game.name)
        else:
            ghost = self.bot.get_channel(id=ghost)
            reactions = data["ghost"]["reactions"]
            fields = data["ghost"]["fields"]
            fields["Claimed"] = fields["Claimed"].format(game.name, ghost.name)
        # Construct embed to send data
        embed = discord.Embed(
            title="Voice Channel Control", color=0x0000ff)
        # Add embed fields
        for field in fields:
            embed.add_field(name=field, value=fields[field])
        # Send constructed embed
        message = await ctx.channel.send(embed=embed)
        # Add appropriate reactions
        for rxn in reactions:
            await message.add_reaction(rxn)
        # Process member input
        await self.process_input(message, ctx)

    async def process_input(self, message, ctx):
        """
        Handles member emoji usage and performs the corresponding action(s)
        """
        # Loop continuously until message times out
        while True:
            try:
                payload = await self.bot.wait_for(
                    "raw_reaction_add", timeout=600,
                    check=lambda p: (
                        (p.member.id == ctx.author.id
                         or p.member.server_permissions.administrator)
                        and p.message_id == message.id
                    )
                )
            # Verify member is actively using voice channel claim
            except asyncio.TimeoutError:
                if await self.verify_activity(ctx):
                    continue
                break
            # Handle member emoji usage
            if payload.emoji.name in [u"\U0001F507", u"\U0001F508"]:
                await self.manage_mute(payload)
            elif payload.emoji.name in [u"\U0001F515", u"\U0001F514"]:
                await self.manage_deafen(payload)
            elif payload.emoji.name == u"\U0001F47B":
                await self.move_member(payload, dest="Ghost Lobby")
            elif payload.emoji.name == u"\U0001F3E5":
                await self.move_member(payload, dest="Game Lobby")
            elif payload.emoji.name == u"\U0001F504":
                await self.reset_game(payload.member)
            elif payload.emoji.name == u"\U0001F3F3":
                await self.yield_control(payload.member)
                await ctx.channel.send(
                    f"{ctx.author.mention}: All claims yielded successfully"
                )
                break
            elif payload.emoji.name == u"\U0001F512":
                await self.manage_commands(payload.member)
            await message.remove_reaction(payload.emoji, payload.member)
        await message.delete()

    async def verify_activity(self, ctx):
        """
        Verifies that the member with a claim is still active
        """
        # Send message with member mention to alert member
        check = await ctx.channel.send(
            f"{ctx.author.mention}: React to confirm you're still active"
        )
        await check.add_reaction(u"\U0001F44D")
        # Wait for member response to inactivity warning
        try:
            await self.bot.wait_for(
                "raw_reaction_add", timeout=60.0,
                check=lambda p: (
                    p.member.id == ctx.author.id
                    and p.message_id == check.id
                )
            )
            await check.delete()
            return True
        # If message times out, forcefully yield voice channel claim
        except asyncio.TimeoutError:
            await check.clear_reactions()
            await self.yield_control(ctx.author)
            await check.edit(
                content=f"{ctx.author.mention}: All claims yielded due to inactivity"
            )
            return False

    async def manage_mute(self, payload):
        """
        Mutes/Un-Mutes members in the Game Lobby
        """
        # Process information in payload
        channel = self.bot.get_channel(payload.channel_id)
        voice_channel = self.bot.get_channel(
            id=self.claims.get(payload.member.id)[0]
        )
        # Verify members are present in the voice channel
        if not voice_channel.members:
            msg = await channel.send(
                f"There are no members in {voice_channel.name}"
            )
            await asyncio.sleep(2)
            await msg.delete()
        # Edit all members' mute status according to the emoji used
        else:
            emojis = {"\U0001F507": True, "\U0001F508": False}
            for member in voice_channel.members:
                await member.edit(
                    mute=emojis.get(payload.emoji.name)
                )

    async def manage_deafen(self, payload):
        """
        Deafens/Un-Deafens members in the Game Lobby
        """
        # Process information in payload
        channel = self.bot.get_channel(payload.channel_id)
        voice_channel = self.bot.get_channel(
            id=self.claims.get(payload.member.id)[0]
        )
        # Verify members are present in the voice channel
        if not voice_channel.members:
            msg = await channel.send(
                f"There are no members in {voice_channel.name}"
            )
            await asyncio.sleep(2)
            await msg.delete()
        # Edit all members' deafen status according to the emoji used
        else:
            emojis = {u"\U0001F515": True, u"\U0001F514": False}
            for member in voice_channel.members:
                await member.edit(
                    deafen=emojis.get(payload.emoji.name)
                )

    async def move_member(self, payload, dest):
        """
        Moves members between Game Lobby and Ghost Lobby voice channels
        """
        # Process information in payload
        channel = self.bot.get_channel(payload.channel_id)
        # Get Game Lobby and Ghost Lobby voice channels
        game, ghost = [
            self.bot.get_channel(id=c)
            for c in self.claims[payload.member.id]
        ]
        # Get destination voice channel and members who can be moved
        if dest == "Ghost Lobby":
            new_vc = ghost
            member_list = [m for m in game.members if m.id not in self.claims][:10]
        elif dest == "Game Lobby":
            new_vc = game
            member_list = ghost.members[:10]
        else:
            return
        # Verify members are present in the original voice channel
        if not member_list:
            await channel.send("There are no members who can be moved")
            return
        # Construct embed to send data
        embed = discord.Embed(
            title=f"Move members to `{dest}`", color=0x0000ff
        )
        # Add embed fields
        fields = {
            "Select Members": '\n'.join([
                f"{self.emojis[member_list.index(m)]} - {m}"
                for m in member_list
            ]),
            "Move Members": "Selected members will be moved once this message closes."
        }
        for field in fields:
            embed.add_field(name=field, value=fields[field])
        # Set embed footer
        embed.set_footer(
            text="This message will automatically close when stale for 5s."
        )
        # Send constructed embed
        message = await channel.send(embed=embed)
        # Add appropriate number of reactions
        for mem in member_list:
            await message.add_reaction(
                self.emojis[member_list.index(mem)]
            )
        # Wait for member to add all reactions
        while True:
            try:
                await self.bot.wait_for(
                    "raw_reaction_add", timeout=5.0,
                    check=lambda p: (
                        p.member.id == payload.member.id
                        and p.message_id == message.id
                        and p.emoji.name in self.emojis
                    )
                )
            except asyncio.TimeoutError:
                break
        # Move members according to message reactions
        message = await channel.fetch_message(message.id)
        for rxn in message.reactions:
            async for user in rxn.users():
                # Only act on reactions added by the claiming member,
                # ignoring the seed reactions added by the bot
                if user.id == payload.member.id:
                    await member_list[
                        self.emojis.index(rxn.emoji)
                    ].move_to(new_vc)
        await message.delete()

    async def reset_game(self, member):
        """
        Reverts member properties to defaults
        """
        # Get Game Lobby voice channel
        game = self.bot.get_channel(
            id=self.claims[member.id][0]
        )
        # If a Ghost Lobby exists, move all its members to the Game Lobby voice channel
        if len(self.claims[member.id]) == 2:
            ghost = self.bot.get_channel(
                id=self.claims[member.id][1]
            )
            for mem in ghost.members:
                await mem.move_to(game)
        # Un-Mute and Un-Deafen all members
        for mem in game.members:
            await mem.edit(mute=False, deafen=False)

    async def yield_control(self, member):
        """
        Yields control of voice channel claims
        """
        # Reset voice channel(s)
        await self.reset_game(member)
        # Delete channel from the list of locked voice channels
        game = self.claims[member.id][0]
        if game in self.disabled:
            self.disabled.remove(game)
        # Delete channel from claimed channels
        del self.claims[member.id]

    async def manage_commands(self, member):
        """
        Disables/Enables MapDatabase commands for members in voice channels
        """
        # Get Game Lobby voice channel
        game = self.claims[member.id][0]
        # Enable/Disable MapDatabase commands if previously disabled/enabled
        if game in self.disabled:
            self.disabled.remove(game)
        else:
            self.disabled.append(game)

    def check_commands(self, ctx):
        """
        Checks if MapDatabase commands are disabled for the member
        """
        # Parse through all voice channels with MapDatabase commands disabled
        for vcid in self.disabled:
            voice_channel = discord.utils.get(
                ctx.guild.voice_channels, id=vcid
            )
            if voice_channel is None:
                continue
            # Check if member is in voice channel
            if ctx.author in voice_channel.members:
                return True
        return False


def setup(bot):
    """
    Adds the VoiceChannelControl cog
    """
    bot.add_cog(VoiceChannelControl(bot))
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020

Lab 3B - Depth Camera Cone Parking
"""

########################################################################################
# Imports
########################################################################################

import sys
import cv2 as cv
import numpy as np

sys.path.insert(0, "../../library")
import racecar_core
import racecar_utils as rc_utils

########################################################################################
# Global variables
########################################################################################

rc = racecar_core.create_racecar()

# Add any global variables here

########################################################################################
# Functions
########################################################################################


def start():
    """
    This function is run once every time the start button is pressed
    """
    # Have the car begin at a stop
    rc.drive.stop()

    # Print start message
    print(">> Lab 3B - Depth Camera Cone Parking")


def update():
    """
    After start() is run, this function is run every frame until the back button
    is pressed
    """
    # TODO: Park the car 30 cm away from the closest orange cone.
    # Use both color and depth information to handle cones of multiple sizes.
    # You may wish to copy some of your code from lab2b.py
    pass


########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################

if __name__ == "__main__":
    rc.set_start_update(start, update, None)
    rc.go()
import pytest


class RespIs:
    @staticmethod
    async def no_content(resp):
        assert resp.status == 204

    @staticmethod
    async def bad_gateway(resp, message="Bad gateway"):
        """
        Check whether a response object is a valid Virtool ``bad_gateway``.
        """
        assert resp.status == 502
        assert await resp.json() == {"id": "bad_gateway", "message": message}

    @staticmethod
    async def bad_request(resp, message="Bad request"):
        """
        Check whether a response object is a valid Virtool ``bad_request``.
        """
        assert resp.status == 400
        assert await resp.json() == {"id": "bad_request", "message": message}

    @staticmethod
    async def insufficient_rights(resp, message="Insufficient rights"):
        """
        Check whether a response object is a valid Virtool ``insufficient_rights``.
        """
        assert resp.status == 403
        assert await resp.json() == {"id": "insufficient_rights", "message": message}

    @staticmethod
    async def not_permitted(resp, message="Not permitted"):
        """
        Check whether a response object is a valid Virtool ``not_permitted``.
        """
        return resp.status == 403 and await resp.json() == {
            "id": "not_permitted",
            "message": message,
        }

    @staticmethod
    async def not_found(resp, message="Not found"):
        """
        Check whether a response object is a valid Virtool ``not_found``.
        """
        assert resp.status == 404
        assert await resp.json() == {"id": "not_found", "message": message}

    @staticmethod
    async def conflict(resp, message="Conflict"):
        """
        Check whether a response object is a valid Virtool ``conflict``.
        """
        assert resp.status == 409
        assert await resp.json() == {"id": "conflict", "message": message}

    @staticmethod
    async def invalid_input(resp, errors):
        """
        Check whether a response object is a valid Virtool ``invalid_input``.
        """
        assert resp.status == 422
        assert await resp.json() == {
            "id": "invalid_input",
            "message": "Invalid input",
            "errors": errors,
        }

    @staticmethod
    async def invalid_query(resp, errors):
        """
        Check whether a response object is a valid Virtool ``invalid_query``.
        """
        assert resp.status == 422
        assert await resp.json() == {
            "id": "invalid_query",
            "message": "Invalid query",
            "errors": errors,
        }


@pytest.fixture(scope="session")
def resp_is():
    return RespIs()
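# --------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of this module). It assumes
# an aiohttp-style test ``client`` fixture defined elsewhere in the suite;
# the route and test name are hypothetical.
#
# async def test_get_missing_sample(client, resp_is):
#     resp = await client.get("/samples/does_not_exist")
#     await resp_is.not_found(resp)
# --------------------------------------------------------------------------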
""" Some examples playing around with yahoo finance data """ from datetime import datetime from pandas.compat import zip import matplotlib.finance as fin import numpy as np from pylab import show from pandas import Index, DataFrame from pandas.core.datetools import BMonthEnd from pandas import ols startDate = datetime(2008, 1, 1) endDate = datetime(2009, 9, 1) def getQuotes(symbol, start, end): quotes = fin.quotes_historical_yahoo(symbol, start, end) dates, open, close, high, low, volume = zip(*quotes) data = { 'open': open, 'close': close, 'high': high, 'low': low, 'volume': volume } dates = Index([datetime.fromordinal(int(d)) for d in dates]) return DataFrame(data, index=dates) msft = getQuotes('MSFT', startDate, endDate) aapl = getQuotes('AAPL', startDate, endDate) goog = getQuotes('GOOG', startDate, endDate) ibm = getQuotes('IBM', startDate, endDate) px = DataFrame({'MSFT': msft['close'], 'IBM': ibm['close'], 'GOOG': goog['close'], 'AAPL': aapl['close']}) returns = px / px.shift(1) - 1 # Select dates subIndex = ibm.index[(ibm['close'] > 95) & (ibm['close'] < 100)] msftOnSameDates = msft.reindex(subIndex) # Insert columns msft['hi-lo spread'] = msft['high'] - msft['low'] ibm['hi-lo spread'] = ibm['high'] - ibm['low'] # Aggregate monthly def toMonthly(frame, how): offset = BMonthEnd() return frame.groupby(offset.rollforward).aggregate(how) msftMonthly = toMonthly(msft, np.mean) ibmMonthly = toMonthly(ibm, np.mean) # Statistics stdev = DataFrame({ 'MSFT': msft.std(), 'IBM': ibm.std() }) # Arithmetic ratios = ibm / msft # Works with different indices ratio = ibm / ibmMonthly monthlyRatio = ratio.reindex(ibmMonthly.index) # Ratio relative to past month average filledRatio = ibm / ibmMonthly.reindex(ibm.index, method='pad')
#!/usr/bin/env python # -*- coding: utf-8 -*- import math import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo import torch.autograd as autograd from torch.autograd.variable import Variable from threading import Lock from torch.distributions import Categorical global_lock = Lock() model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out # ============================== # Original Model without Gating # ============================== class ResNet(nn.Module): def __init__(self, block, layers, num_classes=1000): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.avgpool = nn.AvgPool2d(7) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
                                / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x


def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model


def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return model


def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model


def resnet74(pretrained=False, **kwargs):
    """Constructs a ResNet-74 model."""
    model = ResNet(Bottleneck, [3, 4, 14, 3], **kwargs)
    return model


def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model


def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return model


# ======================
# Recurrent Gate Design
# ======================
def repackage_hidden(h):
    if type(h) == Variable:
        return Variable(h.data)
    else:
        return tuple(repackage_hidden(v) for v in h)


class RNNGate(nn.Module):
    """Given a fixed input size, a single-layer LSTM gate."""
    def __init__(self, input_dim, hidden_dim, rnn_type='lstm'):
        super(RNNGate, self).__init__()
        self.rnn_type = rnn_type
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        if self.rnn_type == 'lstm':
            self.rnn = nn.LSTM(input_dim, hidden_dim)
        else:
            self.rnn = None
        self.hidden = None

        # reduce dim
        self.proj = nn.Conv2d(in_channels=hidden_dim, out_channels=1,
                              kernel_size=1, stride=1)
        self.prob = nn.Sigmoid()

    def init_hidden(self, batch_size):
        # Before we've done anything, we don't have any hidden state.
        # Refer to the PyTorch documentation to see exactly
        # why they have this dimensionality.
        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
        return (autograd.Variable(torch.zeros(1, batch_size,
                                              self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(1, batch_size,
                                              self.hidden_dim).cuda()))

    def repackage_hidden(self):
        self.hidden = repackage_hidden(self.hidden)

    def forward(self, x):
        batch_size = x.size(0)
        self.rnn.flatten_parameters()
        out, self.hidden = self.rnn(x.view(1, batch_size, -1), self.hidden)

        out = out.squeeze()
        proj = self.proj(out.view(out.size(0), out.size(1), 1, 1)).squeeze()
        prob = self.prob(proj)

        # Straight-through estimator: hard 0/1 gate decisions in the forward
        # pass, sigmoid gradients in the backward pass
        disc_prob = (prob > 0.5).float().detach() - prob.detach() + prob
        disc_prob = disc_prob.view(batch_size, 1, 1, 1)
        return disc_prob, prob


# =======================
# Recurrent Gate Model
# =======================
class RecurrentGatedResNet(nn.Module):
    def __init__(self, block, layers, num_classes=1000,
                 embed_dim=10, hidden_dim=10, gate_type='rnn', **kwargs):
        self.inplanes = 64
        super(RecurrentGatedResNet, self).__init__()

        self.num_layers = layers
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim

        # There are 4 groups of layers. For ease of skipping, the sequential
        # of layers is broken into a list of layers.
        self._make_group(block, 64, layers[0], group_id=1, pool_size=56)
        self._make_group(block, 128, layers[1], group_id=2, pool_size=28)
        self._make_group(block, 256, layers[2], group_id=3, pool_size=14)
        self._make_group(block, 512, layers[3], group_id=4, pool_size=7)

        if gate_type == 'rnn':
            self.control = RNNGate(embed_dim, hidden_dim, rnn_type='lstm')
        else:
            print('gate type {} not implemented'.format(gate_type))
            self.control = None

        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(0) * m.weight.size(1)
                m.weight.data.normal_(0, math.sqrt(2. / n))

    def _make_group(self, block, planes, layers, group_id=1, pool_size=56):
        """ Create one group of gated layers """
        for i in range(layers):
            if group_id > 1 and i == 0:
                stride = 2
            else:
                stride = 1

            meta = self._make_layer_v2(block, planes, stride=stride,
                                       pool_size=pool_size)

            setattr(self, 'group{}_ds{}'.format(group_id, i), meta[0])
            setattr(self, 'group{}_layer{}'.format(group_id, i), meta[1])
            setattr(self, 'group{}_gate{}'.format(group_id, i), meta[2])

    def _make_layer_v2(self, block, planes, stride=1, pool_size=56):
        """ Create one block and optionally a gate module """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layer = block(self.inplanes, planes, stride, downsample)
        self.inplanes = planes * block.expansion

        # the pooling/projection gives the RNN gate a fixed input dimension
        gate_layer = nn.Sequential(
            nn.AvgPool2d(pool_size),
            nn.Conv2d(in_channels=planes * block.expansion,
                      out_channels=self.embed_dim,
                      kernel_size=1,
                      stride=1))
        if downsample:
            return downsample, layer, gate_layer
        else:
            return None, layer, gate_layer

    def repackage_hidden(self):
        self.control.hidden = repackage_hidden(self.control.hidden)

    def forward(self, x):
        """Forward pass that also returns the skip masks and gate probabilities."""
        batch_size = x.size(0)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        # reinitialize hidden units
        self.control.hidden = self.control.init_hidden(batch_size)

        masks = []
        gprobs = []
        # must pass through the first layer in the first group
        x = getattr(self, 'group1_layer0')(x)
        # the gate takes the output of the current layer
        gate_feature = getattr(self, 'group1_gate0')(x)

        mask, gprob = self.control(gate_feature)
        gprobs.append(gprob)
        masks.append(mask.squeeze())
        prev = x  # input of next layer

        for g in range(4):
            for i in range(0 + int(g == 0), self.num_layers[g]):
                if getattr(self, 'group{}_ds{}'.format(g+1, i)) is not None:
                    prev = getattr(self, 'group{}_ds{}'.format(g+1, i))(prev)
                x = getattr(self, 'group{}_layer{}'.format(g+1, i))(x)
                prev = x = mask.expand_as(x)*x + (1-mask).expand_as(prev)*prev
                gate_feature = getattr(self,
                                       'group{}_gate{}'.format(g+1, i))(x)
                mask, gprob = self.control(gate_feature)
                if not (g == 3 and i == (self.num_layers[3]-1)):
                    # do not add the last mask to masks
                    gprobs.append(gprob)
                    masks.append(mask.squeeze())

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x, masks, gprobs, self.control.hidden


def imagenet_rnn_gate_18(pretrained=False, **kwargs):
    """ Construct SkipNet-18 + SP """
    model = RecurrentGatedResNet(BasicBlock, [2, 2, 2, 2],
                                 embed_dim=10, hidden_dim=10, gate_type='rnn')
    return model


def imagenet_rnn_gate_34(pretrained=False, **kwargs):
    """ Construct SkipNet-34 + SP """
    model = RecurrentGatedResNet(BasicBlock, [3, 4, 6, 3],
                                 embed_dim=10, hidden_dim=10, gate_type='rnn')
    return model


def imagenet_rnn_gate_50(pretrained=False, **kwargs):
    """ Construct SkipNet-50 + SP """
    model = RecurrentGatedResNet(Bottleneck, [3, 4, 6, 3],
                                 embed_dim=10, hidden_dim=10, gate_type='rnn')
    return model


def imagenet_rnn_gate_101(pretrained=False, **kwargs):
    """ Construct SkipNet-101 + SP """
    model = RecurrentGatedResNet(Bottleneck, [3, 4, 23, 3],
                                 embed_dim=10, hidden_dim=10, gate_type='rnn')
    return model


def imagenet_rnn_gate_152(pretrained=False, **kwargs):
    """ Construct SkipNet-152 + SP """
    model = RecurrentGatedResNet(Bottleneck, [3, 8, 36, 3],
                                 embed_dim=10, hidden_dim=10, gate_type='rnn')
    return model


# =============================
# Recurrent Gate Model with RL
# =============================
class RNNGatePolicy(nn.Module):
    def __init__(self, input_dim, hidden_dim, rnn_type='lstm'):
        super(RNNGatePolicy, self).__init__()

        self.rnn_type = rnn_type
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        if self.rnn_type == 'lstm':
            self.rnn = nn.LSTM(input_dim, hidden_dim)
        else:
            self.rnn = None
        self.hidden = None

        self.proj = nn.Conv2d(in_channels=hidden_dim, out_channels=1,
                              kernel_size=1, stride=1)
        self.prob = nn.Sigmoid()

    def hotter(self, t):
        self.proj.weight.data /= t
        self.proj.bias.data /= t

    def init_hidden(self, batch_size):
        # Before we've done anything, we don't have any hidden state.
        # Refer to the PyTorch documentation to see exactly
        # why they have this dimensionality.
        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
        return (autograd.Variable(torch.zeros(1, batch_size,
                                              self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(1, batch_size,
                                              self.hidden_dim).cuda()))

    def repackage_hidden(self):
        self.hidden = repackage_hidden(self.hidden)

    def forward(self, x):
        batch_size = x.size(0)
        self.rnn.flatten_parameters()
        out, self.hidden = self.rnn(x.view(1, batch_size, -1), self.hidden)
        out = out.squeeze()
        out = out.view(out.size(0), out.size(1), 1, 1)
        proj = self.proj(out).squeeze()
        prob = self.prob(proj)
        bi_prob = torch.stack([1-prob, prob]).t()

        # do action selection in the forward pass
        if self.training:
            # action = bi_prob.multinomial()
            dist = Categorical(bi_prob)
            action = dist.sample()
        else:
            dist = None
            action = (prob > 0.5).float()
        action_reshape = action.view(action.size(0), 1, 1, 1).float()
        return action_reshape, prob, action, dist


# ================================
# Recurrent Gate Model with RL
# ================================
class RecurrentGatedRLResNet(nn.Module):
    def __init__(self, block, layers, num_classes=1000, embed_dim=10,
                 hidden_dim=10, **kwargs):
        self.inplanes = 64
        super(RecurrentGatedRLResNet, self).__init__()

        self.num_layers = layers
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim

        # There are 4 groups of layers. For ease of skipping, the sequential
        # of layers is broken into a list of layers.
        self._make_group(block, 64, layers[0], group_id=1, pool_size=56)
        self._make_group(block, 128, layers[1], group_id=2, pool_size=28)
        self._make_group(block, 256, layers[2], group_id=3, pool_size=14)
        self._make_group(block, 512, layers[3], group_id=4, pool_size=7)

        self.control = RNNGatePolicy(embed_dim, hidden_dim, rnn_type='lstm')

        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        self.softmax = nn.Softmax()

        # save everything
        self.saved_actions = {}
        self.saved_dists = {}
        self.saved_outputs = {}
        self.saved_targets = {}

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(0) * m.weight.size(1)
                m.weight.data.normal_(0, math.sqrt(2.
/ n)) m.bias.data.zero_() def _make_group(self, block, planes, layers, group_id=1, pool_size=56): """ Create the whole group""" for i in range(layers): if group_id > 1 and i == 0: stride = 2 else: stride = 1 meta = self._make_layer_v2(block, planes, stride=stride, pool_size=pool_size) setattr(self, 'group{}_ds{}'.format(group_id, i), meta[0]) setattr(self, 'group{}_layer{}'.format(group_id, i), meta[1]) setattr(self, 'group{}_gate{}'.format(group_id, i), meta[2]) def _make_layer_v2(self, block, planes, stride=1, pool_size=56): """ create one block and optional a gate module """ downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layer = block(self.inplanes, planes, stride, downsample) self.inplanes = planes * block.expansion gate_layer = nn.Sequential( nn.AvgPool2d(pool_size), nn.Conv2d(in_channels=planes * block.expansion, out_channels=self.embed_dim, kernel_size=1, stride=1)) return downsample, layer, gate_layer def forward(self, x, target_var, reinforce=False): batch_size = x.size(0) x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) # reinitialize hidden units self.control.hidden = self.control.init_hidden(batch_size) masks = [] gprobs = [] actions = [] dists = [] # must pass through the first layer in first group x = getattr(self, 'group1_layer0')(x) # gate takes the output of the current layer gate_feature = getattr(self, 'group1_gate0')(x) mask, gprob, action, dist = self.control(gate_feature) gprobs.append(gprob) masks.append(mask.squeeze()) prev = x # input of next layer current_device = torch.cuda.current_device() actions.append(action) dists.append(dist) for g in range(4): for i in range(0 + int(g == 0), self.num_layers[g]): if getattr(self, 'group{}_ds{}'.format(g+1, i)) is not None: prev = getattr(self, 'group{}_ds{}'.format(g+1, i))(prev) x = getattr(self, 'group{}_layer{}'.format(g+1, i))(x) prev = x = mask.expand_as(x)*x + (1-mask).expand_as(prev)*prev if not (g == 3 and (i == self.num_layers[g] - 1)): gate_feature = getattr(self, 'group{}_gate{}'.format(g+1, i))(x) mask, gprob, action, dist = self.control(gate_feature) gprobs.append(gprob) masks.append(mask.squeeze()) actions.append(action) dists.append(dist) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) if reinforce: softmax = self.softmax(x) # action = softmax.multinomial() dist = Categorical(softmax) action = dist.sample() actions.append(action) dists.append(dist) with global_lock: self.saved_actions[current_device] = actions self.saved_outputs[current_device] = x self.saved_targets[current_device] = target_var self.saved_dists[current_device] = dists return x, masks, gprobs, self.control.hidden def imagenet_rnn_gate_rl_18(pretrained=False, **kwargs): """ Construct SkipNet-18 + HRL. has the same architecture as SkipNet-18+SP """ model = RecurrentGatedRLResNet(BasicBlock, [2, 2, 2, 2], embed_dim=10, hidden_dim=10, gate_type='rnn') return model def imagenet_rnn_gate_rl_34(pretrained=False, **kwargs): """ Construct SkipNet-34 + HRL. has the same architecture as SkipNet-34+SP """ model = RecurrentGatedRLResNet(BasicBlock, [3, 4, 6, 3], embed_dim=10, hidden_dim=10, gate_type='rnn') return model def imagenet_rnn_gate_rl_50(pretrained=False, **kwargs): """ Construct SkipNet-50 + HRL. 
has the same architecture as SkipNet-50+SP """ model = RecurrentGatedRLResNet(Bottleneck, [3, 4, 6, 3], embed_dim=10, hidden_dim=10, gate_type='rnn') return model def imagenet_rnn_gate_rl_101(pretrained=False, **kwargs): """ Construct SkipNet-101 + HRL. has the same architecture as SkipNet-101+SP """ model = RecurrentGatedRLResNet(Bottleneck, [3, 4, 23, 3], embed_dim=10, hidden_dim=10, gate_type='rnn') return model def imagenet_rnn_gate_rl_152(pretrained=False, **kwargs): """ Construct SkipNet-152 + HRL. has the same architecture as SkipNet-152+SP """ model = RecurrentGatedRLResNet(Bottleneck, [3, 8, 36, 3], embed_dim=10, hidden_dim=10, gate_type='rnn') return model
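# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the straight-through
# estimator used by RNNGate, in miniature. The forward pass emits hard 0/1
# gate decisions while gradients flow through the soft sigmoid probabilities.
# Assumes a reasonably recent PyTorch where tensors take requires_grad.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    logits = torch.randn(4, requires_grad=True)
    prob = torch.sigmoid(logits)
    hard = (prob > 0.5).float().detach() - prob.detach() + prob
    hard.sum().backward()
    print(hard.detach())  # exact 0.0 / 1.0 gate values
    print(logits.grad)    # non-zero: gradients flow through `prob`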
from sqlalchemy import Table, Column, Integer, String, ForeignKey from utils import metadata category = Table( "category", metadata, Column("id", Integer, primary_key=True), Column("parent_fk", Integer, ForeignKey("category.id"), nullable=True), Column("label", String(length=60), unique=True, nullable=False), Column("one_liner", String(length=160), nullable=True), Column("description", String(length=500), nullable=True), Column("url", String(100), nullable=True) )
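# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): exercising the
# self-referential parent_fk against an in-memory SQLite database. Assumes
# SQLAlchemy 1.4+ and that utils.metadata is a plain MetaData instance.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    from sqlalchemy import create_engine, insert, select

    engine = create_engine("sqlite://")
    metadata.create_all(engine)
    with engine.begin() as conn:
        root_id = conn.execute(
            insert(category).values(label="Programming")
        ).inserted_primary_key[0]
        # A child row points back at its parent through parent_fk
        conn.execute(
            insert(category).values(parent_fk=root_id, label="Python")
        )
        for row in conn.execute(select(category)):
            print(row)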
import lark from foyer.exceptions import FoyerError GRAMMAR = r""" start: _string // Rules _string: _chain _nonlastbranch* _lastbranch? _chain: atom _chain | atom _nonlastbranch: "(" branch ")" _lastbranch: branch branch: _string atom: ("[" weak_and_expression "]" | atom_symbol) atom_label? atom_label: NUM ?weak_and_expression: (weak_and_expression ";")? or_expression ?or_expression: (or_expression ",")? and_expression ?and_expression: (and_expression "&")? (atom_id | not_expression) not_expression: "!" atom_id atom_id: atom_symbol | "#" atomic_num | "$(" matches_string ")" | has_label | "X" neighbor_count | "r" ring_size | "R" ring_count atom_symbol: SYMBOL | STAR atomic_num: NUM matches_string: _string has_label: LABEL neighbor_count: NUM ring_size: NUM ring_count: NUM // Terminals STAR: "*" NUM: /[\d]+/ LABEL: /\%[A-Za-z_0-9]+/ // Tokens for chemical elements // Optional, custom, non-element underscore-prefixed symbols are pre-pended SYMBOL: /{optional}C[laroudsemf]?|Os?|N[eaibdpos]?|S[icernbmg]?|P[drmtboau]?|H[eofgas]?|A[lrsgutcm]|B[eraik]?|Dy|E[urs]|F[erm]?|G[aed]|I[nr]?|Kr?|L[iaur]|M[gnodt]|R[buhenaf]|T[icebmalh]|U|V|W|Xe|Yb?|Z[nr]/ """ class SMARTS(object): """A wrapper class for parsing SMARTS grammar using lark. Provides functionality for injecting optional, custom, non-element symbols denoted by an underscore-prefix as additional tokens that the parser can recognize. Parameters ---------- optional_names: iterable, optional, default '' A list of optional names that expand the grammar's symbols beyond the canonical periodic table elements (the non-element types). The optional_names are relevant for creating grammar that includes custom elements that will belong in SMARTS definitions """ def __init__(self, optional_names=''): if optional_names: for n in optional_names: if not n.startswith('_'): raise FoyerError('Non-element types must start with an underscore, you passed {}'.format(', '.join(optional_names))) optional_names = sorted(optional_names, reverse=True) self.grammar = GRAMMAR.format(optional='{}|'.format( '|'.join(optional_names))) else: self.grammar = GRAMMAR.format(optional='') self.PARSER = lark.Lark(self.grammar, parser="lalr") def parse(self, smarts_string): return self.PARSER.parse(smarts_string)
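# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the SMARTS
# strings below are examples, not definitions from the source.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    parser = SMARTS()
    # a carbon (#6) that also has four bonded neighbors (X4)
    print(parser.parse('[#6;X4]').pretty())
    # a custom, underscore-prefixed non-element type injected into the grammar
    custom = SMARTS(optional_names=['_CH2'])
    print(custom.parse('[_CH2;X2]').pretty())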
""" Created on 13-Apr-2018 @author: jdrumgoole """ import unittest import pymongo from dateutil.parser import parse from pymongoimport.audit import Audit class Test_Audit(unittest.TestCase): def setUp(self): self._client = pymongo.MongoClient(host="mongodb://localhost/TEST_AUDIT") self._database = self._client["TEST_AUDIT"] self._audit = Audit(self._database) def tearDown(self): self._client.drop_database("TEST_AUDIT") # @unittest.skip def test_get_current_batch_id(self): self.assertFalse(self._audit.in_batch()) batch_id = self._audit.start_batch(doc={"test": "doc"}) self.assertTrue(self._audit.in_batch()) self._audit.end_batch(batch_id) self.assertTrue(self._audit.get_batch(batch_id)) self.assertFalse(self._audit.in_batch()) self.assertEqual(batch_id, self._audit.get_last_valid_batch_id()) def test_get_valid_batches(self): id1 = self._audit.start_batch(doc={"test": "doc"}) id2 = self._audit.start_batch(doc={"test": "doc"}) self.assertTrue(self._audit.in_batch()) self._audit.end_batch(id2) self.assertTrue(self._audit.in_batch()) self._audit.end_batch(id1) batch = self._audit.get_batch_end(id1) self.assertGreaterEqual(batch['end'], parse("1-Jun-2017", )) self.assertFalse(self._audit.in_batch()) idlist = list(self._audit.get_valid_batch_ids()) self.assertTrue(id1 in idlist) self.assertTrue(id2 in idlist) def test_get_last_batch_id(self): id1 = self._audit.start_batch(doc={"test": "doc"}) id2 = self._audit.start_batch(doc={"test": "doc"}) self.assertEqual(2, self._audit.get_last_batch_id()) self._audit.end_batch(id2) self.assertEqual(2, self._audit.get_last_batch_id()) self._audit.end_batch(id1) id1 = self._audit.start_batch(doc={"test": "doc"}) self.assertEqual(3, self._audit.get_last_batch_id()) self._audit.end_batch(id1) if __name__ == "__main__": # import sys;sys.argv = ['', 'Test.testName'] unittest.main()
# Search for a target number in the list.
class LinearSearch:
    def __init__(self, target, data):
        self.data = data
        self.target = target
        print(self.doSearch())

    def doSearch(self):
        for current in self.data:
            if current == self.target:
                return "Target Number %s is in the list" % str(self.target)
        return "Target Number %s is not in the list" % str(self.target)


LinearSearch(7, [1, 2, 3, 4, 5])

# How many comparisons occur here in the worst case?
# T(n) = n
import numpy as np
from flask import Flask, render_template, request
import jinja2
from MBScalc import *

# Init Flask App
app = Flask(__name__)


@app.route('/', methods=['GET', 'POST'])
def main():
    # Only read the form field on POST; a GET request carries no form data,
    # so accessing request.form['number'] there would abort with a 400.
    if request.method == 'POST':
        number = int(request.form['number'])
        result = number
    else:
        result = 0

    # mass = float(request.form['mass'])*9.11*10**-31
    # energy = float(request.form['energy'])*1.602*10**-19
    #
    # H = request.form.getlist('height')
    # D = request.form.getlist('thickness')
    #
    # for i in range(len(H)):
    #     H[i] = float(H[i])
    #     D[i] = float(D[i])
    #     i = i + 1
    #
    # N = int(len(H))
    # S = 2*(N+1)
    #
    # V = np.zeros(N+1)
    # X = np.zeros(N)
    # for i in range(N):
    #     V[i+1] = H[i]*1.602*10**-19
    #     X[i] = D[i]*10**-10
    #     i = i + 1
    # DX = LenBar(X)
    #
    # matrix = Calculateoeff(mass,energy,N,S,V,X)
    # T = matrix[0]
    # R = matrix[1]
    #
    # plot = plots(T,R,K(mass,energy,V[0]),DX)

    return render_template("view.html", num=result)
    # , mass=mass, energy=energy, number=number, out1=T, out2=R, image_data=plot)


if __name__ == "__main__":
    app.run()
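# Illustrative check (assumes the app is running locally on Flask's default
# port; the form field name 'number' matches the handler above):
#   curl -X POST -d "number=7" http://127.0.0.1:5000/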
# -*- coding: utf-8 -*-

import logging
from random import randint
import re
import six
import os
from datetime import datetime

__author__ = "Arun KR (kra3) <the1.arun@gmail.com>"
__license__ = "Simplified BSD"

# one to three digits per octet
RE_IP = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', re.I)

RE_PRIV_IP = re.compile(r'^(?:127\.0\.0\.1|10\.|192\.168\.|172\.(?:1[6-9]|2[0-9]|3[0-1])\.)')

RE_LOCALE = re.compile(r'(^|\s*,\s*)([a-zA-Z]{1,8}(-[a-zA-Z]{1,8})*)\s*(;\s*q\s*=\s*(1(\.0{0,3})?|0(\.[0-9]{0,3})))?', re.I)

RE_GA_ACCOUNT_ID = re.compile(r'^(UA|MO)-[0-9]*-[0-9]*$')

RE_FIRST_THREE_OCTETS_OF_IP = re.compile(r'^((\d{1,3}\.){3})\d{1,3}$')


def convert_ga_timestamp(timestamp_string):
    timestamp = float(timestamp_string)
    if timestamp > ((2 ** 31) - 1):
        timestamp /= 1000
    return datetime.utcfromtimestamp(timestamp)


def get_32bit_random_num():
    return randint(0, 0x7fffffff)


def is_valid_ip(ip):
    return True if RE_IP.match(str(ip)) else False


def is_private_ip(ip):
    return True if RE_PRIV_IP.match(str(ip)) else False


def validate_locale(locale):
    return RE_LOCALE.findall(str(locale))


def is_valid_google_account(account):
    return True if RE_GA_ACCOUNT_ID.match(str(account)) else False


def generate_hash(tmpstr):
    hash_val = 1

    if tmpstr:
        hash_val = 0
        for ordinal in map(ord, tmpstr[::-1]):
            hash_val = ((hash_val << 6) & 0xfffffff) + ordinal + (ordinal << 14)
            left_most_7 = hash_val & 0xfe00000
            if left_most_7 != 0:
                hash_val ^= left_most_7 >> 21

    return hash_val


def anonymize_ip(ip):
    if ip:
        match = RE_FIRST_THREE_OCTETS_OF_IP.findall(str(ip))
        if match:
            return '%s%s' % (match[0][0], '0')

    return ''


def encode_uri_components(value):
    '''Mimics Javascript's encodeURIComponent() function for consistency with the GA Javascript client.'''
    return convert_to_uri_component_encoding(six.moves.urllib.parse.quote(value))


def convert_to_uri_component_encoding(value):
    return value.replace('%21', '!').replace('%2A', '*').replace('%27', "'").replace('%28', '(').replace('%29', ')')


# Taken from expicient.com BJs repo.
def stringify(s, stype=None, fn=None):
    ''' Converts elements of a complex data structure to strings.

    The data structure can be a multi-tiered one - with tuples and lists etc.
    This method will loop through each element and convert everything to
    string. For example, it can be
    [[{'a1': {'a2': {'a3': ('a4', timedelta(0, 563)), 'a5': {'a6': datetime()}}}}]]
    which will be converted to
    [[{'a1': {'a2': {'a3': ('a4', '0:09:23'), 'a5': {'a6': '2009-05-27 16:19:52.401500'}}}}]]

    @param stype: If only one type of data element needs to be converted to
        string without affecting others, stype can be used.
        In the earlier example, if it is called with
        stringify(s, stype=datetime.timedelta) the result would be
        [[{'a1': {'a2': {'a3': ('a4', '0:09:23'), 'a5': {'a6': datetime()}}}}]]

    Also, even though the name is stringify, any function can be run on the
    elements via the parameter fn. If fn is None, elements are stringified.
    '''
    if type(s) in [list, set, dict, tuple]:
        if isinstance(s, dict):
            for k in s:
                s[k] = stringify(s[k], stype, fn)
        elif type(s) in [list, set]:
            for i, k in enumerate(s):
                s[i] = stringify(k, stype, fn)
        else:  # tuple
            tmp = []
            for k in s:
                tmp.append(stringify(k, stype, fn))
            s = tuple(tmp)
    else:
        if fn:
            if not stype or (stype == type(s)):
                return fn(s)
        else:
            # To do str(s). But, str() can fail on unicode. So, use .encode instead
            if not stype or (stype == type(s)):
                try:
                    return six.text_type(s)
                    # return s.encode('ascii', 'replace')
                except AttributeError:
                    return str(s)
                except UnicodeDecodeError:
                    return s.decode('ascii', 'replace')
    return s
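# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): stringify() on a
# nested structure, converting only timedelta values via the stype filter.
# --------------------------------------------------------------------------
if __name__ == '__main__':
    from datetime import timedelta

    nested = [{'a': ('b', timedelta(0, 563))}]
    print(stringify(nested, stype=timedelta))
    # -> [{'a': ('b', '0:09:23')}]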
from Cimpl import load_image, create_color, set_color, show, Image, save_as, copy

image = load_image('p2-original.jpg')  # loads the original colourless picture


def createBlue(image):
    """Displays the original image; once closed, displays and saves the image
    with a blue filter.
    -Emilio Lindia
    """
    image = copy(image)
    show(image)  # shows original image
    new_image = image
    for x, y, (r, g, b) in image:  # examines all pixels
        blue = create_color(0, 0, b)  # keeps only the blue component
        set_color(new_image, x, y, blue)
    save_as(new_image, 'blue_channel.jpg')  # saves the blue filter as a new image
    show(load_image('blue_channel.jpg'))  # shows image
    print('blue_channel saved as new_image')
    return new_image


def test_blue() -> None:
    '''Test function for the blue filter: checks that no pixel retains any
    trace of red or green. (The original returned after inspecting only the
    first pixel; this version checks every pixel.)
    -Emilio Lindia
    '''
    image1 = createBlue(image)
    for x, y, (r, g, b) in image1:
        if r != 0 or g != 0:  # any trace of red or green fails the test
            print('FAILS')
            return
    print('PASS')  # every pixel passed the check

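# Cimpl is a course-specific teaching library. For comparison only, a similar
# blue-channel filter with the more widely used Pillow package might look like
# the sketch below (Pillow, the function name, and the paths are assumptions,
# not part of the original assignment):
from PIL import Image as PILImage


def blue_channel_pillow(path: str, out_path: str) -> None:
    img = PILImage.open(path).convert('RGB')
    r, g, b = img.split()
    zero = r.point(lambda _: 0)  # blank out the red and green channels
    PILImage.merge('RGB', (zero, zero, b)).save(out_path)
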
import os
import re
import sys
from os.path import join
from random import Random

import numpy as np

from gpsr_command_understanding.generator.grammar import tree_printer
from gpsr_command_understanding.generator.loading_helpers import load, GRAMMAR_YEAR_TO_MODULE, load_paired
from gpsr_command_understanding.generator.tokens import ROOT_SYMBOL
from gpsr_command_understanding.generator.paired_generator import pairs_without_placeholders, PairedGenerator


def get_annotated_sentences(sentences_and_pairs):
    sentences, pairs = sentences_and_pairs
    expanded_pairs = {tree_printer(key): tree_printer(value) for key, value in pairs.items()}
    # These came straight from the grammar
    grammar_sentences = set([tree_printer(x) for x in sentences])
    # These came from expanding the semantics, so they may not be in the grammar
    annotated_sentences = set(expanded_pairs.keys())

    # Only keep annotations that cover sentences actually in the grammar
    out_of_grammar = annotated_sentences.difference(grammar_sentences)
    annotated_sentences.intersection_update(grammar_sentences)
    unannotated_sentences = grammar_sentences.difference(annotated_sentences)
    return annotated_sentences, unannotated_sentences, out_of_grammar


def main():
    year = int(sys.argv[1])
    task = sys.argv[2]
    out_root = os.path.abspath(os.path.dirname(__file__) + "/../../data/")

    generator = load_paired(task, GRAMMAR_YEAR_TO_MODULE[year])
    sentences = [pair[0] for pair in generator.generate(ROOT_SYMBOL, yield_requires_semantics=False)]
    [generator.extract_metadata(sentence) for sentence in sentences]
    sentences = set(sentences)

    out_path = join(out_root, "{}_{}_sentences.txt".format(year, task))
    with open(out_path, "w") as f:
        for sentence in sentences:
            f.write(tree_printer(sentence) + '\n')

    baked_sentences = [tree_printer(x) for x in sentences]

    all_pairs = pairs_without_placeholders(generator)
    baked_pairs = {tree_printer(key): tree_printer(value) for key, value in all_pairs.items()}
    annotated, unannotated, out_of_grammar = get_annotated_sentences((sentences, all_pairs))
    unique_sentence_parses = [baked_pairs[ann_sen] for ann_sen in annotated]
    unique_sentence_parses = set(unique_sentence_parses)

    out_path = join(out_root, "{}_{}_pairs.txt".format(year, task))
    with open(out_path, "w") as f:
        for sentence, parse in baked_pairs.items():
            f.write(sentence + '\n' + parse + '\n')

    meta_out_path = join(out_root, "{}_{}_annotations_meta.txt".format(year, task))
    with open(meta_out_path, "w") as f:
        f.write("Coverage:\n")
        f.write("{0}/{1} {2:.1f}%\n".format(len(annotated), len(baked_sentences),
                                            100.0 * len(annotated) / len(baked_sentences)))
        f.write("\t unique parses: {}\n".format(len(unique_sentence_parses)))

        sen_lengths = [len(sentence.split()) for sentence in baked_pairs.keys()]
        avg_sentence_length = np.mean(sen_lengths)

        parse_lengths = []
        filtered_parse_lengths = []
        for parse in unique_sentence_parses:
            parse_lengths.append(len(parse.split()))
            stop_tokens_removed = re.sub(r"(\ e\ |\"|\)|\()", "", parse)
            filtered_parse_lengths.append(len(stop_tokens_removed.split()))
        avg_parse_length = np.mean(parse_lengths)
        avg_filtered_parse_length = np.mean(filtered_parse_lengths)
        f.write(
            "\t avg sentence length (tokens): {:.1f} avg parse length (tokens): {:.1f} avg filtered parse length (tokens): {:.1f}\n".format(
                avg_sentence_length, avg_parse_length, avg_filtered_parse_length))

    """print("No parses for:")
    for sentence in sorted(unannotated):
        print(sentence)
    print("-----------------")"""


if __name__ == "__main__":
    main()

#!/usr/bin/env python3

from sys import argv, stderr, exit
import json, os, yaml, pynetbox, re, ipaddress
from collections import defaultdict
from pprint import pprint

doc = """
Get config context from netbox for specified device.

## Usage

    %s "FQDN"
""" % (argv[0])


def assume_ip_gateway(network):
    return str(ipaddress.ip_network(network, False)[1]).split('/')[0]


def warn(*msg):
    print(*msg, file=stderr)


def fail(*msg):
    print(*msg, file=stderr)
    exit(1)


if len(argv) != 2:
    fail("error, invalid number of args!", doc)

FQDN = argv[1]

nb = pynetbox.api(os.getenv('NETBOX_API_URL'), token=os.getenv('NETBOX_TOKEN'))

# find vm or device object
vm = nb.virtualization.virtual_machines.get(name=FQDN)
dev = nb.dcim.devices.get(name=FQDN)

if vm is None and dev is None:
    fail("no such device or vm")
if vm and dev:
    fail("make up your mind. conflicting naming detected!")

obj = vm if vm else dev

print("# generated from netbox. do not change manually")
print(yaml.dump(obj['config_context']))

from scipy.optimize import shgo
import numpy as np
from numpy.linalg import norm


class VectorCubicSpline:
    """a0, a1, a2, a3 are numpy vectors; they form the spline
    a0 + a1*s + a2*s^2 + a3*s^3
    """

    def __init__(self, a0, a1, a2, a3):
        self.a0 = np.array(a0)
        self.a1 = np.array(a1)
        self.a2 = np.array(a2)
        self.a3 = np.array(a3)

    def get_point(self, s):
        """Get point given parameter s."""
        return self.a0 + self.a1 * s + self.a2 * s**2 + self.a3 * s**3

    def get_s_distance(self, point):
        """Get the parameter s of the spline point closest to a given point.
        Returns (s, distance)."""
        def objective(s):
            point_on_spline = self.get_point(s)
            return norm(point_on_spline - point)

        bound = [(0, 1.0)]
        res = shgo(objective, bound)
        return res.x, objective(res.x)

    def get_velocity(self, s):
        """Get velocity on spline: the derivative of the spline with respect to s."""
        return self.a1 + 2 * self.a2 * s + 3 * self.a3 * s**2


def create_spline_start_end_point_velocity(start, start_vel, end, end_vel):
    """Construct a spline by specifying start point, start point velocity,
    end point, and end point velocity (a cubic Hermite segment)."""
    start = np.array(start)
    start_vel = np.array(start_vel)
    end = np.array(end)
    end_vel = np.array(end_vel)
    a0 = start
    a1 = start_vel
    a2 = -3 * start + 3 * end - 2 * start_vel - end_vel
    a3 = 2 * start - 2 * end + start_vel + end_vel
    return VectorCubicSpline(a0, a1, a2, a3)

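# A minimal usage sketch for the spline helpers above (values are illustrative):
if __name__ == '__main__':
    spline = create_spline_start_end_point_velocity(
        start=[0.0, 0.0], start_vel=[1.0, 0.0],
        end=[1.0, 1.0], end_vel=[0.0, 1.0])
    print(spline.get_point(0.5))       # point halfway along the parameter range
    print(spline.get_velocity(0.0))    # equals start_vel
    s, dist = spline.get_s_distance(np.array([0.5, 0.5]))
    print(s, dist)                     # parameter and distance of closest point
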
class Fonction:
    def calcul(self, x):
        pass


class Carre(Fonction):
    def calcul(self, x):
        return x * x


class Cube(Fonction):
    def calcul(self, x):
        return x * x * x


def calcul_n_valeur(l, f):
    res = [f(i) for i in l]
    return res


l = [0, 1, 2, 3]
l1 = calcul_n_valeur(l, Carre().calcul)  # l1 is [0, 1, 4, 9]
l2 = calcul_n_valeur(l, Cube().calcul)   # l2 is [0, 1, 8, 27]
print(l1)
print(l2)

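# Because Python functions are first-class, the same dispatch works without
# the class hierarchy; a plain-function sketch of the same idea:
def carre(x):
    return x * x


def cube(x):
    return x * x * x


print(calcul_n_valeur([0, 1, 2, 3], carre))  # [0, 1, 4, 9]
print(calcul_n_valeur([0, 1, 2, 3], cube))   # [0, 1, 8, 27]
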
import findspark
findspark.init()

from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row, SQLContext
import sys
import requests


def aggregate_tags_count(new_values, total_sum):
    return sum(new_values) + (total_sum or 0)


def get_sql_context_instance(spark_context):
    if 'sqlContextSingletonInstance' not in globals():
        globals()['sqlContextSingletonInstance'] = SQLContext(spark_context)
    return globals()['sqlContextSingletonInstance']


def printdata(time, rdd):
    try:
        sql_context = get_sql_context_instance(rdd.context)
        row_rdd = rdd.map(lambda w: Row(hashtag=w[0], hashtag_count=w[1]))
        hashtags_df = sql_context.createDataFrame(row_rdd)
        hashtags_df.registerTempTable("hashtags")
        hashtag_counts_df = sql_context.sql(
            "select hashtag, hashtag_count from hashtags "
            "order by hashtag_count desc, hashtag limit 5")
        # hashtag_counts_df.show()
        temp = hashtag_counts_df.collect()
        # Join the top-5 hashtags into one comma-separated line
        windowoutput = ''
        for i in temp:
            if windowoutput != '':
                windowoutput = windowoutput + ',' + i[0]
            else:
                windowoutput = windowoutput + i[0]
        print(windowoutput)
    except Exception:
        pass


conf = SparkConf()
conf.setAppName("TestFakeData")
sc = SparkContext(conf=conf)
ssc = StreamingContext(sc, int(sys.argv[2]))
ssc.checkpoint("/checkpoint_FAKEDATA")

dataStream = ssc.socketTextStream("localhost", 9009)
hashtags = dataStream.map(lambda w: w.split(';')[7])
hashtag = hashtags.flatMap(lambda w: w.split(','))
hashtag = hashtag.filter(lambda x: len(x) != 0)
countoftags = hashtag.map(lambda x: (x, 1))
countoftags = countoftags.reduceByKey(lambda x, y: x + y)
window = countoftags.reduceByKeyAndWindow(lambda x, y: x + y, None, int(sys.argv[1]), 1)
window.foreachRDD(printdata)

ssc.start()
ssc.awaitTermination(25)
ssc.stop()

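# How this is typically run (a sketch; the script reads the window length from
# argv[1] and the batch interval from argv[2], and expects a line-based feed
# of ';'-separated records on localhost:9009, e.g. from netcat):
#
#   $ nc -lk 9009                          # in one terminal, feed records
#   $ spark-submit this_script.py 10 2     # 10s window, 2s batch interval
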
""" API serializers """ from rest_framework import serializers from groups.models import CustomUser, Group, Link class CustomUserBaseSerializer(serializers.ModelSerializer): """ CustomUser base serializer """ class Meta: model = CustomUser fields = ('id', 'username', 'email', 'date_joined') class CustomUserAdminSerializer(CustomUserBaseSerializer): """ CustomUser serializer for admin """ class Meta(CustomUserBaseSerializer.Meta): fields = '__all__' class GroupBaseSerializer(serializers.ModelSerializer): """ Group base serializer """ owner = CustomUserBaseSerializer(read_only=True) linksLength = serializers.IntegerField(source='links.count') class Meta: model = Group fields = ('id', 'name', 'description', 'created', 'owner', 'linksLength') class LinkBaseSerializer(serializers.ModelSerializer): """ Link base serializer (for nesting in Group) """ isDone = serializers.BooleanField(source='is_done') class Meta: model = Link fields = ('id', 'url', 'description', 'isDone', 'added', 'group') class GroupWithNestedSerializer(GroupBaseSerializer): """ Group serializer with nested links """ links = LinkBaseSerializer(many=True, read_only=True) class Meta(GroupBaseSerializer.Meta): fields = GroupBaseSerializer.Meta.fields + ('links', )
import argparse
import json
from pathlib import Path
from typing import Iterable, Set

import pandas as pd
from hyperstyle.src.python.review.inspectors.inspector_type import InspectorType
from hyperstyle.src.python.review.inspectors.issue import BaseIssue, IssueType
from hyperstyle.src.python.review.reviewers.utils.print_review import convert_issue_to_json

from analysis.src.python.evaluation.common.pandas_util import (
    drop_duplicates,
    filter_df_by_iterable_value,
    get_solutions_df_by_file_path,
    write_df_to_file,
)
from analysis.src.python.evaluation.common.args_util import EvaluationRunToolArgument, parse_set_arg
from analysis.src.python.evaluation.common.csv_util import ColumnName
from analysis.src.python.evaluation.common.file_util import AnalysisExtension, get_parent_folder
from analysis.src.python.evaluation.qodana.util.issue_types import QODANA_CLASS_NAME_TO_ISSUE_TYPE
from analysis.src.python.evaluation.qodana.util.models import QodanaColumnName, QodanaIssue


def configure_arguments(parser: argparse.ArgumentParser) -> None:
    parser.add_argument(f'{EvaluationRunToolArgument.SOLUTIONS_FILE_PATH.value.long_name}_hyperstyle',
                        type=lambda value: Path(value).absolute(),
                        help=f'{EvaluationRunToolArgument.SOLUTIONS_FILE_PATH.value.description}'
                             f'\nAll code fragments from this file must be graded by hyperstyle tool'
                             f'(file contains traceback column)')

    parser.add_argument(f'{EvaluationRunToolArgument.SOLUTIONS_FILE_PATH.value.long_name}_qodana',
                        type=lambda value: Path(value).absolute(),
                        help=f'{EvaluationRunToolArgument.SOLUTIONS_FILE_PATH.value.description}'
                             f'\nAll code fragments from this file must be graded by qodana'
                             f'(file contains inspections column)')

    parser.add_argument('-i', '--issues-to-keep',
                        help='Set of issues to keep',
                        default='')


# Drop duplicates in the CODE column and delete rows that have ids from value_to_filter
# The new dataframe will be sorted by the ID column
def __preprocess_df(df: pd.DataFrame, ids_to_filter: Iterable) -> pd.DataFrame:
    df = drop_duplicates(df)
    df = filter_df_by_iterable_value(df, ColumnName.ID.value, ids_to_filter)
    return df.sort_values(ColumnName.ID.value).set_index(ColumnName.ID.value, drop=False)


# Check if all code fragments with the same ids are equal
def __check_code_by_ids(qodana_df: pd.DataFrame, hyperstyle_df: pd.DataFrame) -> None:
    assert qodana_df.shape[0] == hyperstyle_df.shape[0], (
        f'rows count {qodana_df.shape[0]} in the qodana df does not equal rows '
        f'count {hyperstyle_df.shape[0]} in the hyperstyle df'
    )
    for i in range(0, qodana_df.shape[0]):
        if qodana_df.iloc[i][ColumnName.CODE.value] != hyperstyle_df.iloc[i][ColumnName.CODE.value]:
            raise ValueError(f'Code fragments in the {i}th row do not equal!')


# Convert qodana inspections output to hyperstyle output
# Note: keep only <issues> json field in the result
def __qodana_to_hyperstyle_output(qodana_output: str, issues_to_keep: Set[str]) -> str:
    qodana_issues = QodanaIssue.parse_list_issues_from_json(qodana_output)
    filtered_issues = filter(lambda issue: issue.problem_id in issues_to_keep, qodana_issues)
    hyperstyle_issues = map(lambda issue: BaseIssue(origin_class=issue.problem_id,
                                                    type=QODANA_CLASS_NAME_TO_ISSUE_TYPE.get(issue.problem_id,
                                                                                             IssueType.INFO),
                                                    description=issue.description,
                                                    file_path=Path(),
                                                    line_no=issue.line,
                                                    column_no=issue.offset,
                                                    inspector_type=InspectorType.QODANA),
                            filtered_issues)
    hyperstyle_json = {'issues': list(map(lambda issue: convert_issue_to_json(issue), hyperstyle_issues))}
    return json.dumps(hyperstyle_json)


# Resort all fields in the qodana dataframe according to the hyperstyle dataframe
# Add column with hyperstyle output (convert qodana output to hyperstyle output)
# Add grade column with grades from hyperstyle dataframe (to gather statistics by diffs_between_df.py script)
def __prepare_qodana_df(qodana_df: pd.DataFrame, hyperstyle_df: pd.DataFrame,
                        issues_to_keep: Set[str]) -> pd.DataFrame:
    qodana_df = __preprocess_df(qodana_df, hyperstyle_df[ColumnName.ID.value])
    __check_code_by_ids(qodana_df, hyperstyle_df)
    qodana_df[ColumnName.TRACEBACK.value] = qodana_df.apply(
        lambda row: __qodana_to_hyperstyle_output(row[QodanaColumnName.INSPECTIONS.value], issues_to_keep), axis=1)
    qodana_df[ColumnName.GRADE.value] = hyperstyle_df[ColumnName.GRADE.value]
    return qodana_df


def __write_updated_df(old_df_path: Path, df: pd.DataFrame, name_prefix: str) -> None:
    output_path = get_parent_folder(Path(old_df_path))
    write_df_to_file(df, output_path / f'{name_prefix}_updated{AnalysisExtension.CSV.value}', AnalysisExtension.CSV)


def __reassign_ids(df: pd.DataFrame) -> pd.DataFrame:
    df = df.sort_values(ColumnName.CODE.value)
    df[ColumnName.ID.value] = df.index
    return df


def main() -> None:
    parser = argparse.ArgumentParser()
    configure_arguments(parser)
    args = parser.parse_args()
    issues_to_keep = parse_set_arg(args.issues_to_keep)

    qodana_solutions_file_path = args.solutions_file_path_qodana
    qodana_solutions_df = __reassign_ids(get_solutions_df_by_file_path(qodana_solutions_file_path))

    hyperstyle_solutions_file_path = args.solutions_file_path_hyperstyle
    hyperstyle_solutions_df = __reassign_ids(get_solutions_df_by_file_path(hyperstyle_solutions_file_path))

    hyperstyle_solutions_df = __preprocess_df(hyperstyle_solutions_df, qodana_solutions_df[ColumnName.ID.value])
    qodana_solutions_df = __prepare_qodana_df(qodana_solutions_df, hyperstyle_solutions_df, issues_to_keep)

    __write_updated_df(qodana_solutions_file_path, qodana_solutions_df, 'qodana')
    __write_updated_df(hyperstyle_solutions_file_path, hyperstyle_solutions_df, 'hyperstyle')


if __name__ == '__main__':
    main()

""" The STDIO interface for interactive CIS. Authors: Hamed Zamani (hazamani@microsoft.com) """ import time import traceback from macaw import util from macaw.interface.interface import Interface from macaw.core.interaction_handler.msg import Message class StdioInterface(Interface): def __init__(self, params): super().__init__(params) self.msg_id = int(time.time()) def run(self): while True: try: request = input('ENTER YOUR COMMAND: ').strip() if len(request) == 0: continue user_info = {'first_name': 'STDIO', 'is_bot': 'False' } msg_info = {'msg_id': self.msg_id, 'msg_type': 'command' if request.startswith('#') else 'text', 'msg_source': 'user'} self.msg_id += 1 msg = Message(user_interface='stdio', user_id=-1, user_info=user_info, msg_info=msg_info, text=request, timestamp=util.current_time_in_milliseconds()) output = self.params['live_request_handler'](msg) self.result_presentation(output, {}) except Exception as ex: traceback.print_exc() def result_presentation(self, response_msg, params): try: print('THE RESPONSE STARTS') print('----------------------------------------------------------------------') if response_msg.msg_info['msg_type'] == 'text': print(response_msg.text) elif response_msg.msg_info['msg_type'] == 'options': for (option_text, option_data, output_score) in response_msg.msg_info['options']: print(option_data, ' | ', option_text) elif response_msg.msg_info['msg_type'] == 'error': print('ERROR: NO RESULT!') else: raise Exception('The msg_type is not recognized:', response_msg.msg_info['msg_type']) print('----------------------------------------------------------------------') print('THE RESPONSE STARTS') except Exception as ex: traceback.print_exc()
from rest_framework import status

from webfront.tests.InterproRESTTestCase import InterproRESTTestCase
from webfront.models.interpro_new import Release_Note


class UtilsAccessionTest(InterproRESTTestCase):
    def test_can_read_structure_overview(self):
        response = self.client.get("/api/utils")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("available", response.data)
        self.assertIn("accession", response.data["available"])
        self.assertIn("release", response.data["available"])

    def test_accession_endpoint_doesnt_fail(self):
        response = self.client.get("/api/utils/accession")
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_accession_endpoint_with_unexisting_acc(self):
        response = self.client.get("/api/utils/accession/xxXx")
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_accession_endpoint_with_ipro(self):
        response = self.client.get("/api/utils/accession/IPR003165")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["endpoint"], "entry")
        self.assertEqual(response.data["source_database"], "interpro")

    def test_accession_endpoint_with_protein(self):
        response = self.client.get("/api/utils/accession/A1CUJ5")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["endpoint"], "protein")
        self.assertEqual(response.data["source_database"], "reviewed")

    def test_accession_endpoint_with_structure(self):
        response = self.client.get("/api/utils/accession/1JM7")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["endpoint"], "structure")
        self.assertEqual(response.data["source_database"], "pdb")

    def test_accession_endpoint_with_proteome(self):
        response = self.client.get("/api/utils/accession/UP000012042")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["endpoint"], "proteome")
        self.assertEqual(response.data["source_database"], "uniprot")

    def test_accession_endpoint_with_set(self):
        response = self.client.get("/api/utils/accession/CL0001")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["endpoint"], "set")
        self.assertEqual(response.data["source_database"], "pfam")

    def test_accession_endpoint_with_taxonomy(self):
        response = self.client.get("/api/utils/accession/344612")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["endpoint"], "taxonomy")
        self.assertEqual(response.data["source_database"], "uniprot")

    def test_accession_endpoint_with_protein_id(self):
        response = self.client.get("/api/utils/accession/CBPYA_ASPCL")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["endpoint"], "protein")
        self.assertEqual(response.data["source_database"], "reviewed")
        self.assertEqual(response.data["accession"], "A1CUJ5")


class UtilsReleaseTest(InterproRESTTestCase):
    def test_can_read_structure_overview(self):
        response = self.client.get("/api/utils")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn("available", response.data)
        self.assertIn("accession", response.data["available"])
        self.assertIn("release", response.data["available"])

    def test_release_endpoint_doesnt_fail(self):
        response = self.client.get("/api/utils/release")
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_release_version_endpoint_doesnt_fail(self):
        response = self.client.get("/api/utils/release/current")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self.client.get("/api/utils/release/70.0")
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_release_version_endpoint_fails(self):
        response = self.client.get("/api/utils/release/x")
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_the_fixtures_are_loaded(self):
        notes = Release_Note.objects.all()
        self.assertEqual(notes.count(), 2)

    def test_release_endpoint_returns_the_fixtures(self):
        notes = Release_Note.objects.all()
        response = self.client.get("/api/utils/release")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), len(notes))
        for note in notes:
            self.assertIn(note.version, response.data)

    def test_release_current_is_same_as_accession(self):
        response1 = self.client.get("/api/utils/release/current")
        self.assertEqual(response1.status_code, status.HTTP_200_OK)
        response2 = self.client.get("/api/utils/release/70.0")
        self.assertEqual(response2.status_code, status.HTTP_200_OK)
        # The original compared response1 with itself; the intent is clearly
        # to compare the two payloads.
        self.assertEqual(response1.data, response2.data)

    def test_release_70_is_same_as_fixture(self):
        note_version = "70.0"
        note = Release_Note.objects.all().filter(version=note_version).first()
        response = self.client.get("/api/utils/release/" + note_version)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["content"], note.content)

import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

import warnings
warnings.filterwarnings('ignore')

import pickle
import datetime
import queue

import numpy as np
import matplotlib.pyplot as plt
import keras
import keras.backend as K
from keras import Model
from keras.layers import Dense, Activation, Dropout, Layer
from keras.layers import SimpleRNN, Embedding, Bidirectional, TimeDistributed
from keras.models import load_model

# Number of (binarized) input features per data year
FEANUMDIC = {
    2018: 282515,
    2017: 282515,
    2016: 282515,
    2015: 282515,
    2014: 282515,
    2013: 282515,
    2012: 282515,
    2011: 282515,
    2019: 282515,
}
BIT = 2
NowYear = 2019
TerminationLength = 10


class ActivePossible(Layer):
    """A pass-through layer that multiplies its input by a fixed
    activation-possibility vector `ac` (settable after construction)."""

    def __init__(self, ac=None, **kwargs):
        self.ac = ac
        super(ActivePossible, self).__init__(**kwargs)

    def call(self, x):
        return K.cast(x, K.floatx()) * self.ac

    def compute_output_shape(self, input_shape):
        return input_shape

    def set_ac(self, ac):
        self.ac = ac


class RuleStructure():
    def __init__(self, dataset, rule):
        self.dataset = dataset
        self.rule = rule
        self.predy = 0
        self.size = len(self.dataset)

    def decideRule(self):
        if TerminationLength == len(self.rule) or (self.predy > 0 and len(self.rule) >= 5):
            return True
        else:
            return False

    def SetPredy(self, predy):
        self.predy = predy

    def SplitNode(self, newpt):
        pos = int(newpt / BIT)
        val = newpt % BIT
        selindex = np.where(self.dataset[:, pos] == val)[0]
        if len(selindex) == 0:
            return None
        else:
            newrule = self.rule.copy()
            newrule.append(newpt)
            newdataset = self.dataset[selindex].copy()
            NewNode = RuleStructure(newdataset, newrule)
            self.dataset = np.delete(self.dataset, selindex, axis=0)
            self.size = len(self.dataset)
            return NewNode

    # Note: the original decorated __eq__ with @property, which would break
    # comparisons; the decorator has been removed.
    def __eq__(self, other):
        return len(self.dataset) == len(other.dataset)

    def __lt__(self, other):
        return -len(self.dataset) < -len(other.dataset)


def ReadData(year=NowYear, IsTrain=True):
    if year != 2019:
        if IsTrain == True:
            f = open("data\\" + str(year) + "\\train.pkl", "rb")
        else:
            f = open("data\\" + str(year) + "\\test.pkl", "rb")
        data = pickle.load(f)
        f.close()
        x = data[0]
        y = data[1]
        return x, y
    else:
        if IsTrain == True:
            x = np.load("../data/2019/x_train.npy")
            y = np.load("../data/2019/y_train.npy")
        else:
            x = np.load("../data/2019/x_test.npy")
            y = np.load("../data/2019/y_test.npy")
        y = y.reshape([len(y)])
        return x, y


def loadModel(year=NowYear, fileName=None):
    if fileName == None:
        model = load_model("../model/" + str(year) + "/MLP_model.h5")
    else:
        model = load_model("../model/" + fileName)
    return model


def set_acpos(model, ac, index):
    for i in range(len(ac)):
        model.layers[int(index[i])].set_ac(ac[i])
    return model


def getPuppetModel(modelname):
    # Rebuild the trained MLP with ActivePossible masks inserted after each
    # hidden Dense layer, copying the trained weights over.
    m = load_model(modelname)
    model = keras.Sequential()
    model.add(Dense(50, input_shape=[FEANUMDIC[NowYear]], activation=None))
    model.layers[-1].set_weights(m.layers[0].get_weights())
    model.add(ActivePossible(ac=np.ones([50])))
    model.add(Dense(50, activation=None))
    model.layers[-1].set_weights(m.layers[3].get_weights())
    model.add(ActivePossible(ac=np.ones([50])))
    model.add(Dense(1, activation=None))
    model.layers[-1].set_weights(m.layers[6].get_weights())
    return model


def getAvailableFeature(year=NowYear):
    f = open("rawdata\\" + str(year) + "\\feature_tag.txt", "r", encoding="utf8")
    lines = f.readlines()
    namelist = [0 for _ in range(FEANUMDIC[NowYear])]
    for line in lines:
        for i in range(1, 10):
            if line[-i] == ':':
                break
        name = line[0:-i]
        num = int(line[-i + 1:len(line)])
        namelist[num] = name
    f.close()
    return set([2 * i for i in range(len(namelist)) if namelist[i] != 0]).union(
        [2 * i + 1 for i in range(len(namelist)) if namelist[i] != 0])


def getNameList(year=NowYear):
    f = open("rawdata\\" + str(year) + "\\feature_tag.txt", "r", encoding="utf8")
    lines = f.readlines()
    namelist = [0 for _ in range(FEANUMDIC[NowYear])]
    for line in lines:
        for i in range(1, 10):
            if line[-i] == ':':
                break
        name = line[0:-i]
        num = int(line[-i + 1:len(line)])
        namelist[num] = name
    f.close()
    for i in range(FEANUMDIC[NowYear]):
        if namelist[i] == 0:
            namelist[i] = "*******************************"
    return namelist


def getActiveNode(lay_0, lay_3, seed):
    dataNum = len(seed)
    activationNode = np.zeros([dataNum, 100])
    activationNode[:, 0:50] = lay_0.predict(seed, batch_size=20000).reshape(dataNum, 50)
    activationNode[:, 50:100] = lay_3.predict(seed, batch_size=20000).reshape(dataNum, 50)
    return activationNode


def getActivateState(model, x):
    lay_0 = Model(inputs=model.input, outputs=model.layers[0].output)
    lay_3 = Model(inputs=model.input, outputs=model.layers[3].output)
    activationNode = getActiveNode(lay_0, lay_3, x)
    return activationNode


def calAcStateFromRule(nowrule, model, testNum=300):
    # Sample random inputs consistent with the rule, then estimate how often
    # each hidden unit is active.
    data = np.random.randint(0, BIT, [testNum, FEANUMDIC[NowYear]])
    for r in nowrule:
        pos = int(r / BIT)
        val = r % BIT
        data[:, pos] = val
    acstate = getActivateState(model, data) > 0
    acstate = np.mean(acstate, axis=0)
    return acstate


def calContributionVec(puppetModel, activationPossible):
    activationPossible = activationPossible.reshape([2, 50])
    puppetModel = set_acpos(puppetModel, activationPossible, [1, 3])
    contribution = getGradient(puppetModel)
    return contribution[0]


def getGradient(puppetModel):
    output = puppetModel.output
    input = puppetModel.input
    gradients = K.gradients(output, input)[0]
    out = K.function([input], [gradients])
    x = np.zeros([1, FEANUMDIC[NowYear]])
    y = out([x])[0]
    return y


def calPredy(contributionVec, rule, puppetModel, mean_vec):
    base = np.zeros([1, FEANUMDIC[NowYear]])
    base = puppetModel.predict(base)
    y = np.sum(base)
    rulepos = []
    for r in rule:
        rulepos.append(int(r / BIT))
    for i in range(FEANUMDIC[NowYear]):
        if i not in rulepos and mean_vec[i] != 0:
            if contributionVec[i] < 0:
                y += contributionVec[i] * mean_vec[i]
            else:
                y += (contributionVec[i] / BIT) * mean_vec[i]
    for r in rule:
        pos = int(r / BIT)
        val = r % BIT
        if val == 1:
            y += contributionVec[pos]
    return y


def PlotName(RuleSet):
    name = getNameList(year=2011)
    for rule in RuleSet:
        for r in rule:
            print(name[r[0]], r[1])
        print("#######################")
    return 0


def readRuleSetfromTXT(filename):
    RuleSet = []
    f = open(filename, 'r')
    StrSet = f.readlines()
    f.close()
    for strrule in StrSet:
        strrule = strrule[0:-2]
        rule = strrule.split(" ")
        for i in range(len(rule)):
            rule[i] = int(rule[i])
        RuleSet.append(rule)
    NewRuleSet = []
    for rule in RuleSet:
        newrule = []
        for r in rule:
            newrule.append([int(r / BIT), r % BIT])
        NewRuleSet.append(newrule)
    return NewRuleSet


def ReadRuleSet(fileName):
    f = open(fileName, "rb")
    RuleSet = pickle.load(f)
    f.close()
    return RuleSet


def transferRuleSet(RuleSet):
    NewRuleSet = []
    for rule in RuleSet:
        newrule = []
        for r in rule:
            newrule.append([int(r / BIT), r % BIT])
        NewRuleSet.append(newrule)
    return NewRuleSet

boot_list = [
    "anklet", "boots", "clogs", "feet", "footguards", "footpads", "footsteps", "footwraps",
    "greatboots", "greaves", "heels", "sabatons", "sandals", "slippers", "sneakers", "socks",
    "sprinters", "spurs", "stompers", "treads", "walkers", "warboots", "wraps", "zoomers"]

body_list = [
    "banded mail", "battleplate", "bone armor", "chestguard", "chestpiece", "chestplate",
    "coat", "cuirass", "garments", "gown", "great plate", "jacket", "jerkin", "mail",
    "raiment", "raiment", "robes", "shirt", "suit", "tanktop", "tunic", "vest", "vestment"]

gauntlet_list = [
    "fists", "gauntlets", "gloves", "grips", "handguards", "handguards", "hands",
    "knuckles", "mittens", "warfists", "wraps"]

helmet_list = [
    "armet", "aventail", "barbute", "bascinet", "cap", "close helmet", "duster",
    "enclosed helmet", "facemask", "falling buffe", "frog-mouth helm", "goggles",
    "great helm", "hat", "helm", "helmet", "kabuto", "kettle hat", "nasal helmet",
    "sallet", "shorwell helmet", "spangenhelm", "turban helmet", "visor", "wrap"]

leg_list = [
    "bone greaves", "breeches", "breeches", "dress", "greaves", "kilt", "leggings",
    "legguards", "legplates", "legwraps", "pants", "platelegs", "robes", "shorts",
    "skirt", "tassets", "trousers"]

shield_list = [
    "buckler", "enarmes", "greatshield", "gulge", "heater shield", "kite shield",
    "mantlet", "pavise", "rondache", "shield boss", "targe", "wall", "ward"]

creature_list = [
    "Aatxe", "Abaia", "Abarimon", "Abath", "Abura-sumashi", "Acephali", "Achlis",
    "Adar Llwch Gwin", "Adhene", "Adlet", "Aerico", "Afanc", "Agathodaemon", "Agloolik",
    "Agni", "Agogwe", "Ahkiyyini", "Ahura", "Airavata", "Aitu", "Aitvaras", "Ajatar",
    "Akhlut", "Akkorokamui", "Akurojin-no-hi", "Al Rakim", "Al-mi'raj", "Ala", "Alal",
    "Alicanto", "Alkonost", "Allocamelus", "Alphyn", "Alseid", "Alux", "Alû",
    "Amanojaku", "Amarum", "Amazake-babaa", "Amemasu", "Ammit", "Amorōnagu",
    "Amphiptere", "Amphisbaena", "Anakim", "Angel", "Ani Hyuntikwalaski", "Ankou",
    "Anqa", "Antaeus", "Antero Vipunen", "Anubis", "Anzû", "Ao Ao", "Aobōzu", "Apkallu",
    "Argus Panoptes", "Arikura-no-baba", "Arimaspi", "Arkan Sonney", "Asag", "Asanbosam",
    "Asena", "Ashi-magari", "Aspidochelone", "Asrai", "Astomi", "Asura", "Aswang",
    "Atomy", "Atshen", "Auloniad", "Avalerion", "Azukiarai", "Baba Yaga", "Badalisc",
    "Bahamut", "Bai Ze", "Bake-kujira", "Bakeneko", "Bakezōri", "Bakunawa", "Balaur",
    "Baloz", "Bannik", "Banshee", "Bar Juchne", "Barbegazi", "Bardha", "Barghest",
    "Basajaun", "Bashe", "Basilisco Chilote", "Basilisk", "Bathala", "Batibat", "Baykok",
    "Beast of Bray Road", "Behemoth", "Bennu", "Bestial beast", "Bestiary", "Bestiary",
    "Bestiary", "Bies", "Bigfoot", "Bishop-fish", "Biwa", "Black Annis", "Black Shuck",
    "Blafard", "Bloody Bones", "Blue Crow", "Bluecap", "Bodach", "Bogeyman", "Boggart",
    "Bogle", "Bolla", "Bonnacon", "Boo Hag", "Boobrie", "Broxa", "Buckriders", "Bugbear",
    "Buggane", "Bugul Noz", "Bukavac", "Bunyip", "Buraq", "Byangoma", "Bysen", "Błudnik",
    "Cabeiri", "Cacus", "Cadejo", "Cailleach", "Caipora", "Caladrius", "Calydonian Boar",
    "Calygreyhound", "Camahueto", "Cambion", "Campe", "Canotila", "Caoineag",
    "Catoblepas", "Ceffyl Dŵr", "Centaur", "Central America", "Cerastes", "Cerberus",
    "Cercopes", "Ceryneian Hind", "Cetan", "Chamrosh", "Chaneque", "Changeling",
    "Charybdis", "Chepi", "Cherufe", "Cheval Gauvin", "Cheval Mallet", "Chickcharney",
    "Chindi", "Chinthe", "Chollima", "Chonchon", "Chromandi", "Chrysaor", "Chupacabra",
    "Churel", "Ciguapa", "Cihuateteo", "Cikavac", "Cinnamon bird", "Cipactli",
    "Coblynau", "Cockatrice", "Corycian Cave", "Cretan Bull", "Crinaeae", "Crocotta",
    "Cuegle", "Curupira", "Cuélebre", "Cyhyraeth", "Cynocephaly", "Căpcăun", "Cŵn Annwn",
    "Daitya", "Daphnaie", "Datsue-ba", "Dead Sea Apes", "Ded Moroz", "Deer",
    "Deer Woman", "Deity", "Demigod", "Dhampir", "Di Penates", "Di sma undar jordi",
    "Dilong", "Dipsa", "Dirawong", "Diwata", "Djall", "Dokkaebi", "Doppelgänger",
    "Dragon", "Dragon turtle", "Drangue", "Draugr", "Drekavac", "Drude", "Druk",
    "Dryad", "Dullahan", "Dvorovoi", "Dybbuk", "Dzunukwa", "Samebito", "Easter Bilby",
    "Easter Bunny", "Edimmu", "Egbere", "Einherjar", "Eleionomae", "Elemental", "Elf",
    "Eloko", "Emere", "Empusa", "Engkanto", "Ent", "Epimeliad", "Erchitu", "Erinyes",
    "Erlking", "Erymanthian Boar", "Etiäinen", "Dahu", "Fafnir", "Fairy",
    "Familiar spirit", "Far darrig", "Faun", "Fear gorta", "Fenghuang", "Fenodyree",
    "Fenrir", "Fext", "Finfolk", "Fir Bolg", "Fish-man", "Fomorians", "Forest Bull",
    "Freybug", "Fuath", "Funayūrei", "Futakuchi-onna", "Fylgja", "Gaasyendietha",
    "Gagana", "Gallu", "Gamayun", "Gana", "Gancanagh", "Gandaberunda", "Gandharva",
    "Garmr", "Garuda", "Gashadokuro", "Gaueko", "Geb", "Gegenees", "Genius loci",
    "Geryon", "Ghillie Dhu", "Ghost", "Ghoul", "Gigelorum", "Gjenganger", "Glaistig",
    "Glashtyn", "Gnome", "Goblin", "God", "Gog and Magog", "Gold-digging ant", "Golem",
    "Gorgades", "Gorgon", "Goryō", "Gremlin", "Griffin", "Grindylow", "Gualichu",
    "Guardian angel", "Gulon", "Gurumapa", "Gwyllgi", "Gwyllion", "Gytrash", "Hades",
    "Hadhayosh", "Hag", "Haietlik", "Half-elf", "Haltija", "Hamadryad", "Hamingja",
    "Hanau epe", "Hantu Air", "Hantu Raya", "Harionago", "Harpy", "Headless Horseman",
    "Headless Mule", "Heikegani", "Heinzelmännchen", "Hellhound", "Heracles", "Hercinia",
    "Herensuge", "Hesperides", "Hidebehind", "Hiderigami", "Hieracosphinx", "Hiisi",
    "Hippogriff", "Hippopodes", "Hircocervus", "Hitodama", "Hitotsume-kozō",
    "Hobbididance", "Hobgoblin", "Hodag", "Hombre Gato", "Homunculus", "Hoop snake",
    "Hoopoe", "Horned Serpent", "Hotoke", "Houri", "Huldufólk", "Huli jing", "Humbaba",
    "Hundun", "Hupia", "Hypnalis", "Hākuturi", "Iannic-ann-ôd", "Ibong Adarna", "Iele",
    "Ifrit", "Ikiryō", "Iku-Turso", "Imp", "Inapertwa", "Indrik", "Inkanyamba",
    "Inugami", "Ipotane", "Iratxoak", "Isonade", "Ittan-momen", "Jack-o'-lantern",
    "Jackalope", "Jaculus", "Jatayu", "Jenglot", "Jengu", "Jentil", "Jenu",
    "Jersey Devil", "Jiangshi", "Jiaolong", "Jikininki", "Jinn", "Jogah", "Jorōgumo",
    "Jumbee", "Jörmungandr", "Jötunn", "Kabouter", "Kachina", "Kalakeyas", "Kamaitachi",
    "Kami", "Kangla Palace", "Kappa", "Kapre", "Karkadann", "Karura", "Karzełek",
    "Kasa-obake", "Ke'lets", "Kee-wakw", "Keelut", "Kelpie", "Keukegen", "Kholomodumo",
    "Kigatilik", "Kikimora", "Kin-u", "Kinnara", "Kitsune", "Kitsune", "Kiyohime",
    "Klabautermann", "Knucker", "Kobalos", "Kobold", "Komainu", "Koro-pok-guru",
    "Korrigan", "Kraken", "Krampus", "Krasnoludek", "Krasue", "Kubikajiri",
    "Kuchisake-onna", "Kuda-gitsune", "Kumakatok", "Kumiho", "Kupua", "Kurma", "Kurupi",
    "Kushtaka", "La Llorona", "Labbu", "Laestrygonians", "Lakanica", "Lake monster",
    "Lakhey", "Lamassu", "Lambton Worm", "Landvættir", "Lares", "Latin America",
    "Lauma", "Lava bear", "Lavellan", "Leontophone", "Leprechaun", "Leviathan", "Leyak",
    "Lidérc", "Likho", "Lilin", "Lilith", "Lilith", "Lindworm", "Ljubi",
    "Loch Ness Monster", "Lou Carcolh", "Lubber fiend", "Luduan", "Lugat", "Luison",
    "Lusca", "Lutin", "Maa-alused", "Machlyes", "Macrocephali", "Maero", "Mairu",
    "Majitu", "Mallt-y-Nos", "Mami Wata", "Manananggal", "Mandragora", "Manes",
    "Mannegishi", "Manticore", "Manx people", "Mapinguari", "Marabbecca", "Mareikura",
    "Mares of Diomedes", "Marid", "Marmennill", "Matagot", "Matsya", "Mazzikin",
    "Mbwiri", "Mbói Tu'ĩ", "Medusa", "Meliae", "Melusine", "Menehune", "Menninkäinen",
    "Merlin", "Merlion", "Mermaid", "Merman", "Merman", "Merrow", "Minka Bird",
    "Minokawa", "Minotaur", "Mizuchi", "Mohan", "Mokoi", "Mono Grande",
    "Mooinjer veggey", "Moroi", "Moss people", "Moñái", "Mujina", "Muldjewangk",
    "Muma Pădurii", "Mummy", "Muscaliet", "Musimon", "Myling", "Myrmecoleon",
    "Nachzehrer", "Nagual", "Naiad", "Namahage", "Napaeae", "Narasimha", "Nargun",
    "Nariphon", "Nasnas", "Nawao", "Negret", "Nekomata", "Nephilim", "Nereid", "Nereus",
    "Ngen", "Nguruvilu", "Nian", "Nightmarchers", "Nimerigar", "Ningyo", "Ninki Nanka",
    "Nocnitsa", "Noppera-bō", "Nuckelavee", "Nue", "Nuku-mai-tore", "Nuli", "Numen",
    "Nuno sa punso", "Nurarihyon", "Nure-onna", "Nyami Nyami", "Nymph", "Näkki",
    "Níðhöggr", "Nāga", "Obake", "Obayifo", "Oceanus", "Odei", "Og", "Ogopogo", "Ogre",
    "Ogun", "Ojáncanu", "Okuri-inu", "Onocentaur", "Onoskelis", "Onryō", "Onza",
    "Oozlum bird", "Ophiotaurus", "Orang Minyak", "Oread", "Orobas", "Orphan Bird",
    "Orthrus", "Oshun", "Osiris", "Otso", "Ouroboros", "Ovinnik", "Paasselkä devils",
    "Pamola", "Panis", "Panotti", "Parandrus", "Patagon", "Patagonia", "Patasola",
    "Pegaeae", "Pegasus", "Pelesit", "Peluda", "Penanggalan", "Penghou", "Peri",
    "Peryton", "Pesanta", "Peuchen", "Phi Tai Hong", "Phoenicia", "Piasa",
    "Pictish Beast", "Pillan", "Pishacha", "Pixie", "Pixiu", "Polevik", "Polong",
    "Poltergeist", "Pombero", "Ponaturi", "Poukai", "Preta", "Pricolici", "Psoglav",
    "Psotnik", "Psychai", "Psychopomp", "Pugot", "Pyrausta", "Púca", "Qalupalik",
    "Qilin", "Qiqirn", "Qliphoth", "Questing Beast", "Quetzalcoatl", "Quinotaur",
    "Rabisu", "Radande", "Raijū", "Rainbow Serpent", "Rainbow crow", "Rakshasa",
    "Ramidreju", "Raróg", "Ratatoskr", "Raven Mocker", "Redcap", "Reichsadler",
    "Rephaite", "Reptilian humanoid", "Robin Hood", "Rokurokubi", "Rompo", "Rougarou",
    "Rusalka", "Rå", "Sampati", "Sandman", "Santa Claus", "Santelmo", "Sarimanok",
    "Satan", "Satyr", "Sazae-oni", "Scitalis", "Scylla", "Sea monk", "Sea monster",
    "Sea serpent", "Selkie", "Serpent", "Serpopard", "Shachihoko", "Shaitan", "Shark",
    "Shedim", "Shellycoat", "Shenlong", "Shikigami", "Shikoku", "Shikome", "Shinigami",
    "Shisa", "Shtriga", "Shug Monkey", "Si-Te-Cah", "Sigbin", "Silenus", "Simargl",
    "Simurgh", "Sirin", "Sisiutl", "Skin-walker", "Skookum", "Skrzak", "Sleipnir",
    "Sleipnir", "Sluagh", "Soucouyant", "Spearfinger", "Sphinx", "Spirit", "Spriggan",
    "Squonk", "Stihi", "Strigoi", "Struthopodes", "Strzyga", "Stuhać",
    "Stymphalian birds", "Suangi", "Succubus", "Svartálfar", "Svaðilfari",
    "Swan maiden", "Sylph", "Syrbotae", "Syrictæ", "Sânziană", "Takam", "Talos",
    "Tangie", "Taniwha", "Taotao Mona", "Taotie", "Taotie", "Tapairu", "Tarasque",
    "Tartalo", "Tartaruchi", "Tatami", "Tavara", "Teju Jagua", "Tengu", "Tengu",
    "Tennin", "Tepegoz", "Terrible Monster", "Teumessian fox", "Theriocephaly",
    "Tiangou", "Tianlong", "Tibicena", "Tiddy Mun", "Tigmamanukan", "Tikbalang",
    "Tikoloshe", "Timingila", "Tipua", "Tiyanak", "Tizheruk", "Tlahuelpuchi",
    "Tlaxcaltec", "Tofu", "Topielec", "Toyol", "Trauco", "Trenti", "Trickster",
    "Tripura", "Tripurasura", "Troll", "Tsuchigumo", "Tsuchinoko", "Tsukumogami",
    "Tsul 'Kalu", "Tsurube-otoshi", "Tupilaq", "Turul", "Tylwyth Teg", "Typhon",
    "Tzitzimitl", "Türst", "Ubume", "Uchek Langmeidong", "Umibōzu", "Undead",
    "Underwater panther", "Undine", "Unhcegila", "Unicorn", "Urayuli", "Uriaș",
    "Urmahlullu", "Ushi-oni", "Uwan", "Vahana", "Valkyrie", "Valravn", "Vampire",
    "Vanara", "Varaha", "Vardøger", "Vetala", "Vishnu", "Vision Serpent", "Vodyanoy",
    "Vrykolakas", "Vâlvă", "Vântoase", "Vættir", "Víðópnir", "Wanyūdō",
    "Warak ngendog", "Warg", "Warlock", "Wati-kutjara", "Wekufe", "Wendigo", "Werecat",
    "Werehyena", "Werewolf", "Wild man", "Wirry-cow", "Witte Wieven", "Wolpertinger",
    "Wulver", "Wyvern", "Xana", "Xelhua", "Xhindi", "Xiuhcoatl", "Yacumama", "Yacuruna",
    "Yadōkai", "Yaksha", "Yakshini", "Yakshini", "Yama", "Yama-bito", "Yama-uba",
    "Yamata no Orochi", "Yaoguai", "Yara-ma-yha-who", "Yato-no-kami", "Yeti",
    "Ypotryll", "Yuki-onna", "Yuxa", "Yōkai", "Yōsei", "Yūrei", "Zahhak",
    "Zashiki-warashi", "Zduhać", "Zennyo Ryūō", "Zeus", "Zilant", "Ziz", "Zmeu",
    "Zombie", "Zuijin", "Zână"]

adjective_list = [
    "Olympian", "abhorrence", "abominable", "accurate", "accursed", "actual",
    "additional", "advanced", "afterlife", "agitation", "alarm", "american", "ancient",
    "angelic", "angelic", "angst", "annihilation", "antitank", "anxiety", "anxiety",
    "appropriate", "astral", "atomic", "atrocious", "atrocious", "automatic",
    "available", "aversion", "awe", "awe", "awesome", "awful", "barbarous", "barbarous",
    "basic", "beatific", "beautiful", "believing", "bereavement", "best", "bestial",
    "better", "big", "biggest", "biological", "bitter", "bladed", "blessed", "blessed",
    "bloodthirsty", "bloody", "blunt", "broken", "brutal", "brutish", "bête noire",
    "caliber", "callous", "carnal", "casualty", "celestial", "certain", "cessation",
    "chaste", "chemical", "chickenheartedness", "chief", "clean", "clumsy", "cold feet",
    "cold sweat", "cold-blooded", "common", "competitive", "concealed", "concern",
    "consecrated", "consternation", "consternation", "controversial", "convenient",
    "conventional", "cowardice", "creeps", "critical", "crude", "cruel", "cruel",
    "curtains", "customary", "damnable", "damned", "dangerous", "darkness", "deadliest",
    "deadly", "decease", "decisive", "dedicated", "defensive", "degenerate", "demise",
    "demoniac", "demonic", "departure", "depraved", "despair", "destruction",
    "destructive", "devastating", "devilish", "devoted", "devotional", "devout",
    "diabolical", "different", "discomposure", "dismay", "dismay", "disquietude",
    "dissolution", "distress", "divine", "divine", "doubt", "downfall", "drawn",
    "dread", "dread", "dreadful", "dying", "economic", "edged", "effective",
    "effectual", "efficient", "elysian", "empty", "empyral", "empyrean", "end",
    "ending", "eradication", "essential", "eternal", "eternal rest", "ethereal",
    "euthanasia", "evil", "excellent", "excruciating", "execution", "exit",
    "expiration", "extermination", "extinction", "faintheartedness", "faithful",
    "fallen", "familiar", "fatal", "fatality", "faultless", "favored", "favorite",
    "favourite", "fearful", "fearfulness", "fearsome", "ferocious", "fierce",
    "fighting", "final", "fine", "finis", "finish", "first", "flinty", "foreboding",
    "formidable", "fright", "fright", "funk", "german", "glorified", "god-fearing",
    "godlike", "godlike", "godly", "good", "good", "grave", "great", "greatest",
    "grim reaper", "hallowed", "hallowed", "handy", "hard", "hard-hearted", "harsh",
    "hateful", "heartless", "heaven", "heavier", "heavy", "hellish", "hidden",
    "holstered", "holy", "holy", "horrible", "horror", "horror", "huge", "human",
    "humble", "ideal", "ideological", "illegal", "immaculate", "immortal",
    "implacable", "important", "indispensable", "inexorable", "inferior", "infernal",
    "inhuman", "inhumane", "innocent", "intimidation", "invincible", "irresistible",
    "jitters", "just", "keen", "key", "large", "laser", "last", "latter", "legal",
    "legitimate", "lethal", "light", "like", "little", "loaded", "long", "looking",
    "loss", "made", "magic", "magical", "main", "major", "makeshift", "malevolent",
    "martyrdom", "massive", "merciless", "messianic", "mightiest", "mighty",
    "military", "misgiving", "modern", "monstrous", "monstrous", "moral", "more",
    "mortality", "murderous", "naked", "national", "natural", "nearest", "necessary",
    "necrosis", "nefarious", "new", "next", "nightmare", "nuclear", "obliteration",
    "oblivion", "offensive", "old", "only", "ordinary", "other", "otherworldly",
    "own", "painful", "panic", "panic", "paradise", "particular", "parting",
    "passing", "passing over", "peculiar", "perfect", "perfect", "pernicious",
    "personal", "phobia", "pietistic", "pious", "poignant", "pointed", "political",
    "ponderous", "poor", "popular", "possible", "potent", "potential", "powerful",
    "prayerful", "preferred", "presentiment", "primary", "primitive", "principal",
    "proper", "psychological", "pure", "qualm", "quietus", "radiological",
    "rancorous", "range", "ready", "real", "recreancy", "release", "relentless",
    "reliable", "repose", "revengeful", "revered", "reverence", "reverent",
    "revulsion", "rhetorical", "right", "righteous", "rude", "ruin", "ruination",
    "ruthless", "sacred", "sacrosanct", "sadistic", "sainted", "saintlike",
    "saintly", "same", "sanctified", "satanic", "scare", "second", "secret",
    "semiautomatic", "seraphic", "seraphic", "serviceable", "shaped", "sharp",
    "sharpest", "shock", "silence", "similar", "simple", "sinful", "single",
    "sleep", "small", "sole", "special", "specific", "spiritual", "spiritual",
    "spiritual", "spiteful", "splendid", "spotless", "standard", "strange",
    "strategic", "strong", "strongest", "sublime", "sublime", "successful", "such",
    "suffering", "suitable", "superior", "supernal", "supernatural", "supernatural",
    "sure", "suspicion", "tactical", "tank", "termination", "terrible", "terrible",
    "terror", "terrorist", "thermonuclear", "third", "timidity", "tomb", "torture",
    "traditional", "transcendental", "transmundane", "trembling", "tremendous",
    "tremor", "trepidation", "trepidation", "trepidity", "true", "tyrannical",
    "ultimate", "uncorrupt", "undefiled", "unease", "uneasiness", "unfeeling",
    "unique", "unkind", "unnatural", "unrelenting", "untainted", "unusual",
    "unworldly", "uplifted", "upright", "useful", "useless", "usual", "valuable",
    "venerable", "venerated", "vengeful", "very", "vicious", "vicious", "virtuous",
    "virulent", "warlike", "wicked", "wicked", "wooden", "worry"]

# -*- coding: utf-8 -*-
import cv2
import numpy as np
from typing import Tuple, List, Union

from image_registration.keypoint_matching.kaze import KAZE
from image_registration.exceptions import (CreateExtractorError, NoModuleError,
                                           NoEnoughPointsError)


class ORB(KAZE):
    METHOD_NAME = "ORB"

    def __init__(self, threshold: Union[int, float] = 0.8, rgb: bool = True,
                 nfeatures: int = 50000, scaleFactor: Union[int, float] = 1.2,
                 nlevels: int = 8, edgeThreshold: int = 31, firstLevel: int = 0,
                 WTA_K: int = 2, scoreType: int = cv2.ORB_HARRIS_SCORE,
                 patchSize: int = 31, fastThreshold: int = 20):
        super(ORB, self).__init__(threshold, rgb)
        # Initialize the extractor parameters
        self.extractor_parameters = dict(
            nfeatures=nfeatures, scaleFactor=scaleFactor, nlevels=nlevels,
            edgeThreshold=edgeThreshold, firstLevel=firstLevel, WTA_K=WTA_K,
            scoreType=scoreType, patchSize=patchSize, fastThreshold=fastThreshold,
        )
        try:
            # Create the ORB detector instance
            self.detector = cv2.ORB_create(**self.extractor_parameters)
        except Exception:
            raise CreateExtractorError('create orb extractor error')
        else:
            try:
                # https://docs.opencv.org/master/d7/d99/classcv_1_1xfeatures2d_1_1BEBLID.html
                # https://github.com/iago-suarez/beblid-opencv-demo
                self.descriptor = cv2.xfeatures2d.BEBLID_create(0.75)
            except AttributeError:
                raise NoModuleError

    def create_matcher(self) -> cv2.DescriptorMatcher_create:
        matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)
        return matcher

    def get_good_in_matches(self, matches: list) -> List[cv2.DMatch]:
        """
        Filter feature matches with the ratio test.

        :param matches: collection of candidate match pairs
        """
        good = []
        # Some entries come back with only one match, which would break the
        # ratio test below, so keep proper pairs only.
        for v in matches:
            if len(v) == 2:
                if v[0].distance < self.FILTER_RATIO * v[1].distance:
                    good.append(v[0])
        return good

    def get_keypoints_and_descriptors(self, image: np.ndarray) -> Tuple[List[cv2.KeyPoint], np.ndarray]:
        """
        Compute an image's keypoints and descriptors.

        :param image: grayscale image to analyze
        :raise NoEnoughPointsError: raised when fewer than two keypoints are detected
        :return: keypoints and descriptors
        """
        keypoints = self.detector.detect(image, None)
        keypoints, descriptors = self.descriptor.compute(image, keypoints)
        if len(keypoints) < 2:
            raise NoEnoughPointsError('{} detect not enough feature points in input images'.format(self.METHOD_NAME))
        return keypoints, descriptors

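# A standalone sketch of the same ORB + ratio-test matching flow using plain
# OpenCV only (no KAZE base class; file names are illustrative, and 0.75 is
# the classic Lowe's-ratio choice, not necessarily this project's FILTER_RATIO):
def match_orb_demo(path1: str, path2: str) -> List[cv2.DMatch]:
    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    orb = cv2.ORB_create(nfeatures=5000)
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)
    matches = matcher.knnMatch(des1, des2, k=2)
    # Keep only pairs that pass the ratio test, mirroring get_good_in_matches
    good = [pair[0] for pair in matches
            if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance]
    return good
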
""" functions for deterministically preprocessing 2D images (or 3D with color channels) mostly for the consumption of computer vision algorithms """ import math import numpy as np import skimage.transform from .. import utils def _center_coords_for_shape(shape): """ returns the center of an ndimage with a given shape """ return np.array(shape) / 2.0 - 0.5 def _warp_cv2(img, H, output_shape, mode, order, cval): """ returns warped image using OpenCV2 in a few tests, this was 5-10x faster than either of skimage's warp functions """ # import cv2 here so that entire file doesn't have to depend on it from .. import cv2_utils # TODO handle case for other types of interpolation assert order == 1 kwargs = dict( affine_matrix=H[:2], shape=output_shape, border_mode=mode, fill_value=cval, is_inverse_map=True ) if len(img.shape) < 3 or img.shape[2] <= 4: # warp_affine can handle images with up to 4 channels return cv2_utils.warp_affine(img, **kwargs) else: # handle the case for img with many channels channels = img.shape[2] result = np.empty(output_shape + (channels,), dtype=img.dtype) for i in range(int(np.ceil(channels / 4.0))): idx = slice(i * 4, (i + 1) * 4) result[:, :, idx] = cv2_utils.warp_affine(img[..., idx], **kwargs) return result def _warp_PIL(img, H, output_shape, mode, order, cval): """ in some tests, 5x slower than OpenCV's affine transform (converting to and from PIL seems to take almost as much as performing the transformation) """ from PIL import Image, ImageTransform # TODO handle filling assert cval == 0 # TODO handle other modes assert mode == "constant" if order == 0: resample = Image.NEAREST elif order == 1: resample = Image.BILINEAR else: raise AssertionError transform = ImageTransform.AffineTransform(H[:2].ravel()) return np.array(Image.fromarray(img).transform(output_shape, transform, resample=resample), dtype=img.dtype) def _warp_fast(img, **kwargs): """ returns warped image with proper dtype """ return skimage.transform._warps_cy._warp_fast( img, **kwargs ).astype(img.dtype) def _warp(img, **kwargs): """ returns warped image with proper dtype """ return skimage.transform.warp(img, **kwargs).astype(img.dtype) def affine_transform_fn(shape, zoom=None, stretch=None, rotation=None, shear=None, translation=None, output_shape=None, vertical_flip=False, horizontal_flip=False, mode="reflect", fill_value=0.0, crop_center=None, order=1, use_cv2=True, use_PIL=False): """ returns a function to transform images according to the given parameters automatically uses skimage.transform._warps_cy._warp_fast for images w/o channels differences: - less parameters / customizability - does not work for images with color - a little bit faster (~15%-ish when testing it) shape: shape of the images to transform stretch: vertical stretch (to warp the aspect ratio) output_shape: desired shape of the output (default: same as input shape) mode: how to treat points outside boundary (default: reflect - but can be much slower than constant depending on amount of points past boundary) fill_value: value to fill boundary with for mode="constant" crop_center: center of the region that will be cropped (default: center of the image) order: order of interpolation (eg. 0=nearest neighbor, 1=bi-linear, 2=...) 
see documentation of skimage.transform.warp (default: 1) use_cv2: whether or not to use OpenCV warping (can be 5-10x faster) use_PIL: whether ot not ro use PIL warping """ assert not (use_cv2 and use_PIL) if len(shape) == 2: fast_warp = True elif len(shape) == 3: # has color channels fast_warp = False else: raise ValueError shape = shape[:2] if output_shape is None: output_shape = shape # --------------------- # base affine transform # --------------------- if rotation is not None: rotation = utils.rotations_to_radians(rotation) if shear is not None: shear = utils.rotations_to_radians(shear) tf_kwargs = dict( rotation=rotation, shear=shear, ) if translation is not None: # the first argument of translation changes the second axis, # so switch back to make it more intuitive to numpy array syntax vertical_translation, horizontal_translation = translation tf_kwargs["translation"] = (horizontal_translation, vertical_translation) if ((zoom is not None) or (stretch is not None) or horizontal_flip or vertical_flip): if zoom is None: zoom = 1 if stretch is None: stretch = 1 scale_horizontal = 1.0 / zoom scale_vertical = 1.0 / (zoom * stretch) if horizontal_flip: scale_horizontal *= -1 if vertical_flip: scale_vertical *= -1 tf_kwargs["scale"] = (scale_horizontal, scale_vertical) base_tf = skimage.transform.AffineTransform(**tf_kwargs) # --------------------- # centering/uncentering # --------------------- # by default, rotation and shearing is done relative to (0, 0), which # is rarely desired transform_center = _center_coords_for_shape(shape) # reverse the coordinates # because scikit-image takes in (x,y) in position coordinates where # x = axis 1, y = axis 0 center_translation = np.array(transform_center)[::-1] # translate the image such that the provided center is at (0, 0) centering_tf = skimage.transform.SimilarityTransform( translation=center_translation, ) # to put the original image back to where it belongs uncentering_tf = skimage.transform.SimilarityTransform( translation=-center_translation, ) # apply the transformations tf = uncentering_tf + base_tf + centering_tf # -------------- # crop centering # -------------- # by default, cropping takes the top left corner, which is rarely desired # thus we want to translate the image such that the provided crop_center # will be at the center of the cropped image if shape != output_shape: if crop_center is None: crop_center = transform_center crop_center = np.array(crop_center) default_center = _center_coords_for_shape(output_shape) relative_diff = crop_center - default_center centering_tf = skimage.transform.SimilarityTransform( # reverse the order of coordinates translation=relative_diff[::-1], ) tf = centering_tf + tf # ---------------------- # applying to a function # ---------------------- base_kwargs = dict( output_shape=output_shape, mode=mode, order=order, cval=fill_value, ) if use_cv2: base_fn = _warp_cv2 base_kwargs["H"] = tf.params elif use_PIL: base_fn = _warp_PIL base_kwargs["H"] = tf.params elif fast_warp: base_fn = _warp_fast base_kwargs["H"] = tf.params else: base_fn = _warp base_kwargs["inverse_map"] = tf return utils.partial(base_fn, **base_kwargs) def affine_transform(img, **kwargs): """ transforms an img with the given parameters (see documentation of affine_transform_fn) """ fn = affine_transform_fn(img.shape, **kwargs) return fn(img) def multi_affine_transform(imgs, **kwargs): """ transforms a list of images with the given parameters (see documentation of affine_transform_fn) """ for i in range(len(imgs) - 1): assert 
imgs[i].shape == imgs[i + 1].shape fn = affine_transform_fn(imgs[0].shape, **kwargs) return map(fn, imgs)
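# A minimal usage sketch (illustrative values; this module uses relative
# imports, so it is meant to be exercised from within its package, and the
# use_cv2=False path relies on scikit-image's private fast-warp helper being
# available, which is an assumption about the installed version):
def _demo():
    img = np.random.rand(64, 64).astype(np.float32)
    out = affine_transform(img,
                           zoom=1.5,
                           rotation=0.125,  # in rotations, per utils.rotations_to_radians
                           output_shape=(32, 32),
                           mode="constant",
                           use_cv2=False)
    return out.shape  # -> (32, 32)
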
# This file is a part of Arjuna # Copyright 2015-2020 Rahul Verma # Website: www.RahulVerma.net # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import copy import abc import sys import time from arjuna.core.utils import sys_utils from arjuna.tpi.constant import * from arjuna.core.constant import * from arjuna.core.reader.textfile import TextResourceReader from arjuna.core.types import constants from arjuna.core.adv.py import * from arjuna.interface.enums import CommandEnum from .parser import * from .command import * class ArjunaCLI: def __init__(self, args): super().__init__() self._args = args self.arg_dict = None self.main_command = MainCommand() # Create parser for primary commands subparsers = self.main_command.create_subparsers() # Create re-usable parses for command arguments new_project_parser = NewProjectParser() project_parser = ProjectParser() run_parser = RunParser() run_default_group_parser = RunDefaultGroupParser() session_parser = SessionParser() stage_parser = StageParser() group_parser = GroupParser() pickers_parser = PickersParser() # Create primary command handlers self.create_project = CreateProject(subparsers, [new_project_parser]) self.run_project = RunProject(subparsers, [project_parser, run_parser, run_default_group_parser]) self.run_session = RunSession(subparsers, [project_parser, run_parser, session_parser]) self.run_stage = RunStage(subparsers, [project_parser, run_parser, stage_parser]) self.run_group = RunGroup(subparsers, [project_parser, run_parser, group_parser]) self.run_selected = RunSelected(subparsers, [project_parser, run_parser, run_default_group_parser, pickers_parser]) def init(self): time.sleep(0.1) self.arg_dict = self.main_command.convert_to_dict(self._args) #self.main_command.execute(self.arg_dict) def execute(self): command = self.arg_dict['command'] del self.arg_dict['command'] if not command: print("!!!Fatal Error!!! You did not provide any command.") print() self.main_command.print_help() sys.exit(1) # Delegation dictionary for primary command description desc_cases = { # CommandEnum.LAUNCH_SETU: "Launching Setu", CommandEnum.CREATE_PROJECT: "Creating new project", CommandEnum.RUN_PROJECT: "Running the project", CommandEnum.RUN_SESSION: "Running the selected test session", CommandEnum.RUN_STAGE: "Running the selected test stage", CommandEnum.RUN_GROUP: "Running the selected test group", CommandEnum.RUN_SELECTED: "Running tests based on selectors" } # Hyphens in commands are replaced with underscores for enum conversion # So, create-project is internally referred as CREATE_PROJECT command_enum = CommandEnum[command.upper().replace("-", "_")] print(desc_cases[command_enum] + "...") # Delegation dictionary for primary command choices # Respective command object's 'execute' method is the handler. 
        execute_cases = {
            # CommandEnum.LAUNCH_SETU: (self.launch_setu.execute,),
            CommandEnum.CREATE_PROJECT: (self.create_project.execute, ),
            CommandEnum.RUN_PROJECT: (self.run_project.execute, ),
            CommandEnum.RUN_SESSION: (self.run_session.execute, ),
            CommandEnum.RUN_STAGE: (self.run_stage.execute, ),
            CommandEnum.RUN_GROUP: (self.run_group.execute, ),
            CommandEnum.RUN_SELECTED: (self.run_selected.execute, )
        }

        # Delegation using Arjuna's Enum based switch-case equivalent
        switch = EnumSwitch(execute_cases, (self.arg_dict,))
        switch(command_enum)
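# ---------------------------------------------------------------------------
# EnumSwitch is imported from arjuna.core.adv.py and its source is not shown
# here. Judging purely from how it is called above, it behaves like the small
# dict-based dispatcher below; this is an illustrative re-creation, not
# Arjuna's actual implementation.
# ---------------------------------------------------------------------------
from enum import Enum, auto


class DemoCommand(Enum):
    CREATE_PROJECT = auto()
    RUN_PROJECT = auto()


class MiniEnumSwitch:
    # call the handler tuple registered for an enum value with fixed args
    def __init__(self, cases, args):
        self._cases = cases
        self._args = args

    def __call__(self, value):
        handler = self._cases[value][0]
        return handler(*self._args)


demo_cases = {
    DemoCommand.CREATE_PROJECT: (lambda d: print("creating", d),),
    DemoCommand.RUN_PROJECT: (lambda d: print("running", d),),
}
MiniEnumSwitch(demo_cases, ({"project": "demo"},))(DemoCommand.RUN_PROJECT)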
# Copyright (c) 2019, Ahmed M. Alaa # Licensed under the BSD 3-clause license (see LICENSE.txt) import pandas as pd import numpy as np def draw_ihdp_data(fn_data): # Read the covariates and treatment assignments from the original study # ---------------------------------------------------------------------- Raw_Data = pd.read_csv(fn_data) X = np.array(Raw_Data[['X5','X6','X7','X8','X9','X10', 'X11','X12','X13','X14','X15', 'X16','X17','X18','X19','X20', 'X21','X22','X23','X24','X25', 'X26','X27','X28','X29']]) W = np.array(Raw_Data['Treatment']) # Sample random coefficients # -------------------------- coeffs_ = [0, 0.1, 0.2, 0.3, 0.4] BetaB_c = np.random.choice(coeffs_, size=6, replace=True, p=[0.5,0.125,0.125,0.125,0.125]) BetaB_d = np.random.choice(coeffs_, size=19, replace=True, p=[0.6, 0.1, 0.1, 0.1,0.1]) BetaB = np.hstack((BetaB_d,BetaB_c)) # Simulating the two response surfaces # ------------------------------------ Y_0 = np.random.normal(size=len(X)) + np.exp(np.dot(X + 0.5, BetaB)) Y_1 = np.random.normal(size=len(X)) + np.dot(X, BetaB) AVG = np.mean(Y_1[W==1] - Y_0[W==1]) Y_1 = Y_1 - AVG + 4 TE = np.dot(X, BetaB) - AVG + 4 - np.exp(np.dot(X + 0.5, BetaB)) Y = np.transpose(np.array([W, (1-W)*Y_0 + W*Y_1, TE])) # Prepare the output dataset # -------------------------- DatasetX = pd.DataFrame(X,columns='X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25'.split()) DatasetY = pd.DataFrame(Y,columns='Treatment Response TE'.split()) Dataset = DatasetX.join(DatasetY) Dataset['Y_0'] = Y_0 Dataset['Y_1'] = Y_1 return Dataset def sample_IHDP(fn_data, test_frac=0.2): Dataset = draw_ihdp_data(fn_data) num_samples = len(Dataset) train_size = int(np.floor(num_samples * (1 - test_frac))) train_index = list(np.random.choice(range(num_samples), train_size, replace=False)) test_index = list(set(list(range(num_samples))) - set(train_index)) feat_name = 'X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25' Data_train = Dataset.loc[Dataset.index[train_index]] Data_test = Dataset.loc[Dataset.index[test_index]] X_train = np.array(Data_train[feat_name.split()]) W_train = np.array(Data_train['Treatment']) Y_train = np.array(Data_train['Response']) T_true_train = np.array(Data_train['TE']) Y_cf_train = np.array(Data_train['Treatment'] * Data_train['Y_0'] + (1- Data_train['Treatment']) * Data_train['Y_1']) Y_0_train = np.array(Data_train['Y_0']) Y_1_train = np.array(Data_train['Y_1']) X_test = np.array(Data_test[feat_name.split()]) W_test = np.array(Data_test['Treatment']) Y_test = np.array(Data_test['Response']) T_true_test = np.array(Data_test['TE']) Y_cf_test = np.array(Data_test['Treatment'] * Data_test['Y_0'] + (1- Data_test['Treatment']) * Data_test['Y_1']) Y_0_test = np.array(Data_test['Y_0']) Y_1_test = np.array(Data_test['Y_1']) train_data = (X_train, W_train, Y_train, Y_0_train, Y_1_train, Y_cf_train, T_true_train) test_data = (X_test, W_test, Y_test, Y_0_test, Y_1_test, Y_cf_test, T_true_test) return train_data, test_data
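# ---------------------------------------------------------------------------
# A typical driver for the functions above might look like the following.
# The CSV path is a placeholder for a local copy of the IHDP covariates, and
# the "estimator" is only the trivial mean-effect baseline; the snippet just
# illustrates the shape of the tuples returned by sample_IHDP.
# ---------------------------------------------------------------------------
train_data, test_data = sample_IHDP("ihdp_covariates.csv", test_frac=0.2)
X_tr, W_tr, Y_tr, Y0_tr, Y1_tr, Ycf_tr, tau_tr = train_data

# oracle quantities are available because both response surfaces are simulated
ate = np.mean(Y1_tr - Y0_tr)
# PEHE of a baseline that predicts the same (mean) effect for every unit
pehe = np.sqrt(np.mean((tau_tr - tau_tr.mean()) ** 2))
print(f"train ATE = {ate:.3f}, baseline PEHE = {pehe:.3f}")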
import numpy as np
from numba import njit, prange
from SZR_contact_tracing import nb_seed, update_cell, szr_sample, mszr_sample


@njit
def cluster_size(L: int, seed: int, alpha: float = 0.25, occupancy: float = 1, mszr: bool = True):
    # Initialize a lattice, run it, and return the cluster size.
    nb_seed(L)
    lattice = np.zeros((L, L, 3), dtype=np.uint16)
    v = int(np.floor(occupancy))
    lattice[:, :, 0] = v
    lattice[:, :, 0] += (np.random.rand(L, L) < occupancy-v)
    nb_seed(seed)
    occupancy = int(np.ceil(occupancy))
    s_buf64 = np.zeros((L*10*occupancy), dtype=np.uint64)
    z_buf64 = np.zeros((L*10*occupancy), dtype=np.uint64)
    s_buf16 = np.frombuffer(s_buf64, dtype=np.uint16).reshape(-1, 4)
    z_buf16 = np.frombuffer(z_buf64, dtype=np.uint16).reshape(-1, 4)
    update_cell(lattice, np.uint16(0), np.uint16(0), np.uint16(-lattice[0, 0, 0]),
                np.uint16(lattice[0, 0, 0]), np.uint16(0), s_buf64, z_buf64)
    for i in range(np.sum(lattice[:, :, :2])*2):
        if s_buf64[0] != 0 and z_buf64[0] != 0:
            if mszr is True:
                x, y, (ds, dz, dr), dt = mszr_sample(
                    lattice, s_buf16, z_buf16, alpha
                )
            else:
                x, y, (ds, dz, dr), dt = szr_sample(
                    lattice, s_buf16, z_buf16, alpha
                )
            update_cell(lattice, x, y, ds, dz, dr, s_buf64, z_buf64)
        else:
            assert z_buf64[0] == 0 and s_buf64[0] == 0
            break
    return np.sum(lattice[:, :, 1:])


@njit(parallel=True)  # type: ignore
def batch_clusters_size(L: int, seed_init: int, alpha: float = 0.25, occupancy: int = 1,
                        run_number: int = 128, mszr: bool = True):
    # Run multiple simulations of lattices with the same settings in parallel,
    # and return all cluster sizes.
    res_container = np.empty(run_number, dtype=np.int64)
    for i in prange(run_number):
        res_container[i] = cluster_size(
            L, seed_init+i, alpha=alpha, occupancy=occupancy, mszr=mszr)
    return res_container


def get_fit(sizes: np.ndarray, const=None):
    # Find the best linear fit of $s^{\tau-2}P_{\ge s}$ against $s^\sigma$ in the
    # plateau region; return the fitted params and the covariance matrix.
    n, s = np.histogram(sizes, bins=100)
    s = np.convolve(s, (0.5, 0.5), 'valid')
    p = n/n.sum()
    p[::-1] = np.cumsum(p[::-1])
    x = np.power(s, 36/91)
    y = p*np.power(s, 187/91-2)
    w = np.power(s, 187/91-2)*np.power(n*(n.sum()-n)+1, -0.5)
    best_cov = np.array([[np.inf, np.inf], [np.inf, np.inf]])
    best_fit = np.array([-1, -1])
    l = np.max(x)
    fit = np.nan
    for low in np.linspace(int(0.1*l), int(0.28*l)):
        for high in np.linspace(int(0.6*l), int(0.9*l)):
            mask = (high >= x) & (x >= low)
            if np.sum(mask) < 10:
                continue
            try:
                fit, cov = np.polyfit(
                    x[mask], y[mask], w=w[mask], deg=1, cov=True)
            except Exception:
                cov = np.array([[np.inf, np.inf], [np.inf, np.inf]])
            if cov[0, 0] < best_cov[0, 0]:
                best_cov = cov
                best_fit = fit
    return best_fit, best_cov


def alpha_search(L: int, seed_init: int, alpha_low: float, alpha_high: float, occupancy=1,
                 batch=1000, epsilon=0., max_step=20, mszr=True):
    # Binary search for the critical point; the upper and lower bounds must be
    # specified. If the slope changes in an unexpected way, the last update to
    # the boundary in the opposite direction is undone (e.g. decrease alpha ->
    # slope decreases -> the last lower-bound update is undone).
const = 2-187/91 last_delta = np.inf last_alpha_low = alpha_low last_alpha_high = alpha_high his = np.empty((max_step, 2)) for i in range(max_step): print('[{:.5f},{:.5f}]'.format(alpha_low, alpha_high)) alpha = 0.5*(alpha_high+alpha_low) sizes = batch_clusters_size( L, seed_init, alpha=alpha, occupancy=occupancy, run_number=batch, mszr=mszr) fit, cov = get_fit(sizes, const) delta = fit[0] # type:ignore sigma = np.sqrt(cov[0, 0]) thres = max(epsilon, 2.5*sigma) his[i] = alpha, delta if delta > thres: last_alpha_low = alpha_low alpha_low = alpha if delta > last_delta and last_delta > 0: alpha_high = last_alpha_high last_delta = delta print(alpha, delta) elif delta < -thres: last_alpha_high = alpha_high alpha_high = alpha if delta < last_delta and last_delta < 0: alpha_low = last_alpha_low last_delta = delta print(alpha, delta) else: print(alpha, delta) return alpha, (alpha_high-alpha), fit, sizes, his[:i+1], cov return alpha, (alpha_high-alpha), fit, sizes, his[:i+1], cov # type:ignore
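# ---------------------------------------------------------------------------
# A small driver tying the pieces above together; the lattice size, seed and
# batch count here are arbitrary. A plateau slope near zero over the fit
# window is the signal alpha_search homes in on.
# ---------------------------------------------------------------------------
sizes = batch_clusters_size(L=256, seed_init=0, alpha=0.25,
                            occupancy=1, run_number=256, mszr=True)
fit, cov = get_fit(sizes)
print(f"plateau slope = {fit[0]:+.4f} +/- {np.sqrt(cov[0, 0]):.4f}")
# |slope| ~ 0 means s^(tau-2) * P(>=s) is flat over the window, i.e. alpha is
# close to the critical point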
"""Constants for the Kostal Plenticore Solar Inverter integration.""" from typing import NamedTuple from homeassistant.components.sensor import ( ATTR_STATE_CLASS, SensorDeviceClass, SensorStateClass, ) from homeassistant.const import ( ATTR_DEVICE_CLASS, ATTR_ICON, ATTR_UNIT_OF_MEASUREMENT, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, PERCENTAGE, POWER_WATT, ) DOMAIN = "kostal_plenticore" ATTR_ENABLED_DEFAULT = "entity_registry_enabled_default" # Defines all entities for process data. # # Each entry is defined with a tuple of these values: # - module id (str) # - process data id (str) # - entity name suffix (str) # - sensor properties (dict) # - value formatter (str) SENSOR_PROCESS_DATA = [ ( "devices:local", "Inverter:State", "Inverter State", {ATTR_ICON: "mdi:state-machine"}, "format_inverter_state", ), ( "devices:local", "Dc_P", "Solar Power", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_ENABLED_DEFAULT: True, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local", "Grid_P", "Grid Power", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_ENABLED_DEFAULT: True, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local", "HomeBat_P", "Home Power from Battery", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, }, "format_round", ), ( "devices:local", "HomeGrid_P", "Home Power from Grid", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local", "HomeOwn_P", "Home Power from Own", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local", "HomePv_P", "Home Power from PV", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local", "Home_P", "Home Power", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local:ac", "P", "AC Power", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_ENABLED_DEFAULT: True, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local:pv1", "P", "DC1 Power", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local:pv1", "U", "DC1 Voltage", { ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_POTENTIAL_VOLT, ATTR_DEVICE_CLASS: SensorDeviceClass.VOLTAGE, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local:pv1", "I", "DC1 Current", { ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_CURRENT_AMPERE, ATTR_DEVICE_CLASS: SensorDeviceClass.CURRENT, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_float", ), ( "devices:local:pv2", "P", "DC2 Power", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local:pv2", "U", "DC2 Voltage", { ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_POTENTIAL_VOLT, ATTR_DEVICE_CLASS: SensorDeviceClass.VOLTAGE, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local:pv2", 
"I", "DC2 Current", { ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_CURRENT_AMPERE, ATTR_DEVICE_CLASS: SensorDeviceClass.CURRENT, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_float", ), ( "devices:local:pv3", "P", "DC3 Power", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local:pv3", "U", "DC3 Voltage", { ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_POTENTIAL_VOLT, ATTR_DEVICE_CLASS: SensorDeviceClass.VOLTAGE, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local:pv3", "I", "DC3 Current", { ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_CURRENT_AMPERE, ATTR_DEVICE_CLASS: SensorDeviceClass.CURRENT, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_float", ), ( "devices:local", "PV2Bat_P", "PV to Battery Power", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local", "EM_State", "Energy Manager State", {ATTR_ICON: "mdi:state-machine"}, "format_em_manager_state", ), ( "devices:local:battery", "Cycles", "Battery Cycles", {ATTR_ICON: "mdi:recycle", ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT}, "format_round", ), ( "devices:local:battery", "P", "Battery Power", { ATTR_UNIT_OF_MEASUREMENT: POWER_WATT, ATTR_DEVICE_CLASS: SensorDeviceClass.POWER, ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "devices:local:battery", "SoC", "Battery SoC", { ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_DEVICE_CLASS: SensorDeviceClass.BATTERY, }, "format_round", ), ( "scb:statistic:EnergyFlow", "Statistic:Autarky:Day", "Autarky Day", {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"}, "format_round", ), ( "scb:statistic:EnergyFlow", "Statistic:Autarky:Month", "Autarky Month", {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"}, "format_round", ), ( "scb:statistic:EnergyFlow", "Statistic:Autarky:Total", "Autarky Total", { ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut", ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "scb:statistic:EnergyFlow", "Statistic:Autarky:Year", "Autarky Year", {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"}, "format_round", ), ( "scb:statistic:EnergyFlow", "Statistic:OwnConsumptionRate:Day", "Own Consumption Rate Day", {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"}, "format_round", ), ( "scb:statistic:EnergyFlow", "Statistic:OwnConsumptionRate:Month", "Own Consumption Rate Month", {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"}, "format_round", ), ( "scb:statistic:EnergyFlow", "Statistic:OwnConsumptionRate:Total", "Own Consumption Rate Total", { ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut", ATTR_STATE_CLASS: SensorStateClass.MEASUREMENT, }, "format_round", ), ( "scb:statistic:EnergyFlow", "Statistic:OwnConsumptionRate:Year", "Own Consumption Rate Year", {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:chart-donut"}, "format_round", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHome:Day", "Home Consumption Day", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHome:Month", "Home Consumption Month", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( 
"scb:statistic:EnergyFlow", "Statistic:EnergyHome:Year", "Home Consumption Year", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHome:Total", "Home Consumption Total", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomeBat:Day", "Home Consumption from Battery Day", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomeBat:Month", "Home Consumption from Battery Month", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomeBat:Year", "Home Consumption from Battery Year", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomeBat:Total", "Home Consumption from Battery Total", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomeGrid:Day", "Home Consumption from Grid Day", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomeGrid:Month", "Home Consumption from Grid Month", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomeGrid:Year", "Home Consumption from Grid Year", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomeGrid:Total", "Home Consumption from Grid Total", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomePv:Day", "Home Consumption from PV Day", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomePv:Month", "Home Consumption from PV Month", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomePv:Year", "Home Consumption from PV Year", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyHomePv:Total", "Home Consumption from PV Total", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv1:Day", "Energy PV1 Day", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv1:Month", "Energy PV1 Month", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, 
ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv1:Year", "Energy PV1 Year", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv1:Total", "Energy PV1 Total", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv2:Day", "Energy PV2 Day", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv2:Month", "Energy PV2 Month", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv2:Year", "Energy PV2 Year", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv2:Total", "Energy PV2 Total", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv3:Day", "Energy PV3 Day", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv3:Month", "Energy PV3 Month", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv3:Year", "Energy PV3 Year", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:EnergyPv3:Total", "Energy PV3 Total", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:Yield:Day", "Energy Yield Day", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, ATTR_ENABLED_DEFAULT: True, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:Yield:Month", "Energy Yield Month", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:Yield:Year", "Energy Yield Year", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, }, "format_energy", ), ( "scb:statistic:EnergyFlow", "Statistic:Yield:Total", "Energy Yield Total", { ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR, ATTR_DEVICE_CLASS: SensorDeviceClass.ENERGY, ATTR_STATE_CLASS: SensorStateClass.TOTAL_INCREASING, }, "format_energy", ), ] # Defines all entities for settings. 
#
# Each entry is defined with a tuple of these values:
#  - module id (str)
#  - setting data id (str)
#  - entity name suffix (str)
#  - sensor properties (dict)
#  - value formatter (str)
SENSOR_SETTINGS_DATA = [
    (
        "devices:local",
        "Battery:MinHomeComsumption",
        "Battery min Home Consumption",
        {
            ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
            ATTR_DEVICE_CLASS: SensorDeviceClass.POWER,
        },
        "format_round",
    ),
    (
        "devices:local",
        "Battery:MinSoc",
        "Battery min Soc",
        {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE, ATTR_ICON: "mdi:battery-negative"},
        "format_round",
    ),
]


class SwitchData(NamedTuple):
    """Representation of a SwitchData tuple."""

    module_id: str
    data_id: str
    name: str
    is_on: str
    on_value: str
    on_label: str
    off_value: str
    off_label: str


# Defines all entities for switches.
#
# Each entry is defined with these values:
#  - module id (str)
#  - setting data id (str)
#  - entity name suffix (str)
#  - is-on value (str)
#  - on value (str)
#  - on label (str)
#  - off value (str)
#  - off label (str)
SWITCH_SETTINGS_DATA = [
    SwitchData(
        "devices:local",
        "Battery:Strategy",
        "Battery Strategy:",
        "1",
        "1",
        "Automatic",
        "2",
        "Automatic economical",
    ),
]


class SelectData(NamedTuple):
    """Representation of a SelectData tuple."""

    module_id: str
    data_id: str
    name: str
    options: list
    is_on: str


# Defines all entities for select widgets.
#
# Each entry is defined with these values:
#  - module id (str)
#  - setting data id (str)
#  - entity name suffix (str)
#  - options (list)
#  - value of the option that reports the entity as "on" (str)
SELECT_SETTINGS_DATA = [
    SelectData(
        "devices:local",
        "battery_charge",
        "Battery Charging / Usage mode",
        ["None", "Battery:SmartBatteryControl:Enable", "Battery:TimeControl:Enable"],
        "1",
    )
]
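# ---------------------------------------------------------------------------
# The entity classes that consume these tables live in the integration's
# sensor/switch/select platform modules, which are not part of this file. As
# a rough illustration only, the 5-tuples in SENSOR_PROCESS_DATA could be
# consumed like this; the formatter table below is made up for the sketch and
# is much smaller than the integration's real formatter registry.
# ---------------------------------------------------------------------------
def _demo_format_round(value):
    return str(round(float(value)))


_DEMO_FORMATTERS = {"format_round": _demo_format_round}


def describe_sensor(entry, raw_value):
    module_id, data_id, name, props, formatter = entry
    value = _DEMO_FORMATTERS.get(formatter, str)(raw_value)
    unit = props.get(ATTR_UNIT_OF_MEASUREMENT, "")
    return f"{name} [{module_id}/{data_id}] = {value} {unit}".rstrip()


# e.g. the "Solar Power" entry defined above
print(describe_sensor(SENSOR_PROCESS_DATA[1], "1234.56"))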
import pdb import uuid from decimal import Decimal from django.apps import apps from ahj_app.models import User, Edit, Comment, AHJInspection, Contact, Address, Location, AHJ, AHJUserMaintains from django.urls import reverse from django.utils import timezone import pytest import datetime from fixtures import create_user, ahj_obj, generate_client_with_webpage_credentials, api_client, create_minimal_obj, \ set_obj_field, get_obj_field, get_value_or_enum_row from ahj_app.models_field_enums import RequirementLevel, LocationDeterminationMethod from ahj_app import views_edits @pytest.fixture def user_obj(create_user): user = create_user(Username='someone') return user @pytest.fixture def add_enums(): RequirementLevel.objects.create(Value='ConditionallyRequired') RequirementLevel.objects.create(Value='Required') RequirementLevel.objects.create(Value='Optional') LocationDeterminationMethod.objects.create(Value='AddressGeocoding') LocationDeterminationMethod.objects.create(Value='GPS') def edit_is_pending(edit): return edit.ReviewStatus == 'P' and edit.ApprovedBy is None and edit.DateEffective is None and edit.IsApplied is False def filter_to_edit(edit_dict): search_dict = {k: v for k, v in edit_dict.items()} search_dict['DateRequested__date'] = search_dict.pop('DateRequested') search_dict['DateEffective__date'] = search_dict.pop('DateEffective') return Edit.objects.filter(**search_dict) def check_edit_exists(edit_dict): return filter_to_edit(edit_dict).exists() @pytest.mark.parametrize( 'user_type', [ 'Admin', 'AHJOfficial' ] ) @pytest.mark.django_db def test_edit_review__authenticated_normal_use(user_type, generate_client_with_webpage_credentials, ahj_obj): client = generate_client_with_webpage_credentials(Username='someone') user = User.objects.get(Username='someone') if user_type == 'Admin': user.is_superuser = True user.save() elif user_type == 'AHJOfficial': AHJUserMaintains.objects.create(UserID=user, AHJPK=ahj_obj, MaintainerStatus=True) edit_dict = {'ChangedBy': user, 'ApprovedBy': None, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': 'oldname', 'NewValue': 'newname', 'DateRequested': timezone.now(), 'DateEffective': None, 'ReviewStatus': 'P', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) url = reverse('edit-review') response = client.post(url, {'EditID': edit.EditID, 'Status': 'A'}) assert response.status_code == 200 edit = Edit.objects.get(EditID=edit.EditID) assert edit.ReviewStatus == 'A' assert edit.ApprovedBy == user tomorrow = timezone.now() + datetime.timedelta(days=1) assert edit.DateEffective.date() == tomorrow.date() @pytest.mark.django_db def test_edit_review__no_auth_normal_use(generate_client_with_webpage_credentials, ahj_obj): client = generate_client_with_webpage_credentials(Username='someone') user = User.objects.get(Username='someone') edit_dict = {'ChangedBy': user, 'ApprovedBy': None, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': 'oldname', 'NewValue': 'newname', 'DateRequested': timezone.now(), 'DateEffective': None, 'ReviewStatus': 'P', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) url = reverse('edit-review') response = client.post(url, {'EditID': edit.EditID, 'Status': 'A'}) assert response.status_code == 403 @pytest.mark.django_db def test_edit_review__invalid_status(generate_client_with_webpage_credentials, ahj_obj): client = generate_client_with_webpage_credentials(Username='someone') user = User.objects.get(Username='someone') 
edit_dict = {'ChangedBy': user, 'ApprovedBy': None, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': 'oldname', 'NewValue': 'newname', 'DateRequested': timezone.now(), 'DateEffective': None, 'ReviewStatus': 'P', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) url = reverse('edit-review') response = client.post(url, {'EditID': edit.EditID, 'Status': 'Z'}) assert response.status_code == 400 @pytest.mark.django_db def test_edit_review__edit_does_not_exist(generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') url = reverse('edit-review') response = client.post(url, {'EditID': 0, 'Status': 'A'}) assert response.status_code == 400 @pytest.mark.django_db @pytest.mark.parametrize( 'params', [ ({}), ({'EditID': '1'}), ({'Status': 'A'}), ] ) def test_edit_review__missing_param(params, generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') url = reverse('edit-review') response = client.post(url, params) assert response.status_code == 400 @pytest.mark.django_db def test_edit_addition__normal_use(ahj_obj, generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') user = User.objects.get(Username='someone') AHJInspection.objects.create(AHJPK=ahj_obj, AHJInspectionName='Inspection1', TechnicianRequired=1, InspectionStatus=True) url = reverse('edit-addition') response = client.post(url, { 'SourceTable': 'AHJInspection', 'AHJPK': ahj_obj.AHJPK, 'ParentTable': 'AHJ', 'ParentID': ahj_obj.AHJPK, 'Value': [ { 'AHJInspectionName': 'NewName'} ]}, format='json') assert response.status_code == 200 assert response.data[0]['AHJInspectionName']['Value'] == 'NewName' # confirm returned AHJInspection was updated edit = Edit.objects.get(AHJPK=ahj_obj.AHJPK) assert edit.EditType == 'A' assert edit.NewValue == 'True' assert edit.SourceRow == response.data[0]['InspectionID']['Value'] @pytest.mark.django_db @pytest.mark.parametrize( 'params', [ ({'SourceTable': 'AHJ', 'ParentID': '1', 'ParentTable': 'AHJ'}), ({'AHJPK': '1', 'ParentID': '1', 'ParentTable': 'AHJ'}), ({'SourceTable': 'AHJ', 'AHJPK': '1', 'ParentTable': 'AHJ'}), ({'SourceTable': 'AHJ', 'AHJPK': '1', 'ParentID': '1'}) ] ) def test_edit_addition__missing_param(params, generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') url = reverse('edit-addition') response = client.post(url, params) assert response.status_code == 400 @pytest.mark.django_db def test_edit_deletion__normal_use(ahj_obj, generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') user = User.objects.get(Username='someone') inspection = AHJInspection.objects.create(AHJPK=ahj_obj, AHJInspectionName='Inspection1', TechnicianRequired=1, InspectionStatus=True) url = reverse('edit-deletion') response = client.post(url, { 'SourceTable': 'AHJInspection', 'AHJPK': ahj_obj.AHJPK, 'ParentTable': 'AHJ', 'ParentID': ahj_obj.AHJPK, 'Value': [ inspection.InspectionID ]}, format='json') assert response.status_code == 200 edit = Edit.objects.get(AHJPK=ahj_obj.AHJPK) assert edit.EditType == 'D' assert edit.NewValue == 'False' assert edit.SourceRow == response.data[0]['InspectionID']['Value'] @pytest.mark.django_db @pytest.mark.parametrize( 'params', [ ({'SourceTable': 'AHJ'}), ({'AHJPK': '1'}), ] ) def test_edit_deletion__missing_param(params, 
generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') url = reverse('edit-deletion') response = client.post(url, params) assert response.status_code == 400 @pytest.mark.parametrize( 'ReviewStatus, DateEffective', [ ('A', timezone.now()), ('A', timezone.now() - datetime.timedelta(days=1)), ('A', timezone.now() + datetime.timedelta(days=1)), ('A', None), ('P', timezone.now()), ('D', timezone.now()) ] ) @pytest.mark.django_db def test_apply_edits(ReviewStatus, DateEffective, create_user, ahj_obj): field_name = 'AHJName' old_value = 'oldname' new_value = 'newname' user = create_user() set_obj_field(ahj_obj, field_name, old_value) edit_dict = {'ChangedBy': user, 'ApprovedBy': user if DateEffective is not None else None, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': field_name, 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': DateEffective, 'ReviewStatus': ReviewStatus, 'IsApplied': False, 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) views_edits.apply_edits() ahj = AHJ.objects.get(AHJPK=ahj_obj.AHJPK) is_date_effective = (DateEffective.date() == datetime.date.today()) if DateEffective is not None else False edit_should_apply = is_date_effective and ReviewStatus == 'A' edit_is_applied = getattr(ahj, field_name) == new_value assert edit_is_applied == edit_should_apply edit = Edit.objects.get(EditID=edit.EditID) assert edit.IsApplied == edit_should_apply @pytest.mark.django_db def test_edit_update__normal_use(ahj_obj, generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') user = User.objects.get(Username='someone') inspection = AHJInspection.objects.create(AHJPK=ahj_obj, AHJInspectionName='Inspection1', TechnicianRequired=1, InspectionStatus=True) url = reverse('edit-update') input = [ { 'AHJPK': ahj_obj.AHJPK, 'SourceTable': 'AHJInspection', 'SourceRow': inspection.pk, 'SourceColumn': 'AHJInspectionName', 'NewValue': 'NewName' } ] response = client.post(url, input, format='json') assert response.status_code == 200 edit = Edit.objects.get(AHJPK=ahj_obj.AHJPK) # Got newly created edit object and set it as approved edit.ReviewStatus = 'A' edit.DateEffective = timezone.now() edit.ApprovedBy = user edit.save() views_edits.apply_edits() # Now that it's approved, apply edits will apply it. 
Inspection = AHJInspection.objects.get(AHJPK=ahj_obj) assert Inspection.AHJInspectionName == 'NewName' @pytest.mark.django_db @pytest.mark.parametrize( 'params', [ ({'SourceTable': 'AHJ'}), ({'AHJPK': '1', 'SourceTable': 'AHJ', 'SourceRow': 'row', 'SourceColumn': 'column'}), ] ) def test_edit_update__missing_param(params, generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') url = reverse('edit-deletion') response = client.post(url, params) assert response.status_code == 400 @pytest.mark.django_db def test_edit_list__normal_use(ahj_obj, generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') user = User.objects.get(Username='someone') Edit.objects.create(EditID=1, AHJPK=ahj_obj, ChangedBy=user, EditType='A', SourceTable='AHJ', SourceColumn='BuildingCode', SourceRow='2118', DateRequested=timezone.now()) Edit.objects.create(EditID=2, AHJPK=ahj_obj, ChangedBy=user, EditType='A', SourceTable='AHJ', SourceColumn='BuildingCode', SourceRow='2118', DateRequested=timezone.now()) url = reverse('edit-list') response = client.get(url, {'AHJPK':'1'}) assert response.status_code == 200 assert len(response.data) == 2 @pytest.mark.django_db def test_edit_list__missing_param(generate_client_with_webpage_credentials): client = generate_client_with_webpage_credentials(Username='someone') url = reverse('edit-list') response = client.get(url) assert response.status_code == 200 assert len(response.data) == 0 @pytest.mark.parametrize( 'model_name, field_name, old_value, new_value, expected_value', [ ('AHJ', 'AHJName', 'oldname', 'newname', 'old_value'), ('Contact', 'FirstName', 'oldname', 'newname', 'old_value'), ('Address', 'Country', 'oldcountry', 'newcountry', 'old_value'), ('Location', 'Elevation', Decimal('0.00000000'), Decimal('10000.00000000'), 'old_value'), ('Location', 'LocationDeterminationMethod', '', 'AddressGeocoding', None), ('Location', 'LocationDeterminationMethod', 'AddressGeocoding', '', 'old_value'), ('EngineeringReviewRequirement', 'RequirementLevel', 'ConditionallyRequired', 'Required', 'old_value'), ('AHJInspection', 'FileFolderURL', 'oldurl', 'newurl', 'old_value'), ('FeeStructure', 'FeeStructureID', str(uuid.uuid4()), str(uuid.uuid4()), 'old_value') ] ) @pytest.mark.django_db def test_edit_revert__edit_update(model_name, field_name, old_value, new_value, create_user, ahj_obj, expected_value, create_minimal_obj, add_enums): user = create_user() obj = create_minimal_obj(model_name) set_obj_field(obj, field_name, new_value) edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': model_name, 'SourceRow': obj.pk, 'SourceColumn': field_name, 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) assert views_edits.revert_edit(user, edit) edit_dict['OldValue'], edit_dict['NewValue'] = edit.NewValue, edit.OldValue if expected_value: expected_value = get_value_or_enum_row(field_name, old_value) assert get_obj_field(obj, field_name) == expected_value assert check_edit_exists(edit_dict) @pytest.mark.django_db def test_edit_revert__edit_pending_do_nothing(create_user, ahj_obj): user = create_user() old_value = 'oldname' new_value = 'newname' set_obj_field(ahj_obj, 'AHJName', old_value) edit_dict = {'ChangedBy': user, 'ApprovedBy': None, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': 
old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': None, 'ReviewStatus': 'P', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) assert not views_edits.revert_edit(user, edit) edit_dict['OldValue'], edit_dict['NewValue'] = old_value, edit_dict['OldValue'] edit_dict['ReviewStatus'] = 'A' edit_dict['ApprovedBy'], edit_dict['DateEffective'] = user, timezone.now() assert not check_edit_exists(edit_dict) assert Edit.objects.all().count() == 1 @pytest.mark.django_db def test_edit_revert__current_value_is_old_value_do_nothing(create_user, ahj_obj): user = create_user() old_value = 'oldname' new_value = 'newname' set_obj_field(ahj_obj, 'AHJName', old_value) edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) assert not views_edits.revert_edit(user, edit) edit_dict['OldValue'], edit_dict['NewValue'] = old_value, edit_dict['OldValue'] assert not check_edit_exists(edit_dict) assert Edit.objects.all().count() == 1 @pytest.mark.django_db def test_edit_revert__revert_edit_old_value_uses_current_row_value(create_user, ahj_obj): user = create_user() old_value = 'oldname' middle_value = 'newername' new_value = 'newestname' edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': old_value, 'NewValue': middle_value, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) edit_dict['OldValue'], edit_dict['NewValue'] = edit_dict['NewValue'], new_value setattr(ahj_obj, 'AHJName', new_value) ahj_obj.save() newer_edit = Edit.objects.create(**edit_dict) assert views_edits.revert_edit(user, edit) edit_dict['OldValue'], edit_dict['NewValue'] = edit_dict['NewValue'], old_value reverting_edit = filter_to_edit(edit_dict) assert reverting_edit.exists() assert reverting_edit.first().OldValue == new_value assert get_obj_field(ahj_obj, 'AHJName') @pytest.mark.parametrize( 'parent_model_name, model_name', [ ('AHJ', 'Contact'), ('AHJInspection', 'Contact'), ('AHJ', 'EngineeringReviewRequirement'), ('AHJ', 'AHJInspection'), ('AHJ', 'DocumentSubmissionMethod'), ('AHJ', 'PermitIssueMethod'), ('AHJ', 'FeeStructure') ] ) @pytest.mark.django_db def test_edit_revert__edit_addition(parent_model_name, model_name, create_user, create_minimal_obj, ahj_obj): user = create_user() parent_obj = create_minimal_obj(parent_model_name) obj = create_minimal_obj(model_name) relation = obj.create_relation_to(parent_obj) set_obj_field(relation, relation.get_relation_status_field(), True) edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': relation.__class__.__name__, 'SourceRow': relation.pk, 'SourceColumn': relation.get_relation_status_field(), 'OldValue': None, 'NewValue': True, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'EditType': 'A', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) assert views_edits.revert_edit(user, edit) edit_dict['OldValue'], edit_dict['NewValue'] = edit_dict['NewValue'], False assert check_edit_exists(edit_dict) assert get_obj_field(relation, relation.get_relation_status_field()) == edit_dict['NewValue'] @pytest.mark.parametrize( 
'parent_model_name, model_name', [ ('AHJ', 'Contact'), ('AHJInspection', 'Contact'), ('AHJ', 'EngineeringReviewRequirement'), ('AHJ', 'AHJInspection'), ('AHJ', 'DocumentSubmissionMethod'), ('AHJ', 'PermitIssueMethod'), ('AHJ', 'FeeStructure') ] ) @pytest.mark.django_db def test_edit_revert__edit_deletion(parent_model_name, model_name, create_user, create_minimal_obj, ahj_obj): user = create_user() parent_obj = create_minimal_obj(parent_model_name) obj = create_minimal_obj(model_name) relation = obj.create_relation_to(parent_obj) set_obj_field(relation, relation.get_relation_status_field(), False) edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': relation.__class__.__name__, 'SourceRow': relation.pk, 'SourceColumn': relation.get_relation_status_field(), 'OldValue': True, 'NewValue': False, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'EditType': 'D', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) assert views_edits.revert_edit(user, edit) edit_dict['OldValue'], edit_dict['NewValue'] = edit_dict['NewValue'], edit_dict['OldValue'] assert check_edit_exists(edit_dict) assert get_obj_field(relation, relation.get_relation_status_field()) == edit_dict['NewValue'] @pytest.mark.parametrize( 'edit_status1, is_applied1, is_applied2, expected_outcome', [ # Rejected edits are resettable. ('R', False, True, True), # Approved, but not yet applied, edits are resettable. ('A', False, False, True), ('A', False, True, True), # Approved and applied edits where they are the latest applied are resettable. ('A', True, False, True), # Approved and applied edits where another edit was since applied are not resettable. ('A', True, True, False) ] ) @pytest.mark.django_db def test_edit_is_resettable(edit_status1, is_applied1, is_applied2, expected_outcome, create_user, ahj_obj): user = create_user() new_value = 'newname' old_value = 'oldname' edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': edit_status1, 'IsApplied': is_applied1, 'EditType': 'U', 'AHJPK': ahj_obj} edit_to_reset = Edit.objects.create(**edit_dict) tomorrow = timezone.now() + datetime.timedelta(days=1) edit_dict['DateRequested'], edit_dict['DateEffective'] = tomorrow, tomorrow edit_dict['ReviewStatus'], edit_dict['IsApplied'] = 'A', is_applied2 later_edit = Edit.objects.create(**edit_dict) assert expected_outcome == views_edits.edit_is_resettable(edit_to_reset) @pytest.mark.django_db def test_edit_make_pending(create_user, ahj_obj): user = create_user() set_obj_field(ahj_obj, 'AHJName', 'newername') edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': 'oldname', 'NewValue': 'newname', 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'R', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) views_edits.edit_make_pending(edit) edit = Edit.objects.get(EditID=edit.EditID) assert edit_is_pending(edit) @pytest.mark.parametrize( 'model_name, field_name, old_value, new_value', [ ('AHJ', 'AHJName', 'oldname', 'newname'), ('Contact', 'FirstName', 'oldname', 'newname'), ('Address', 'Country', 'oldcountry', 'newcountry'), ('Location', 'Elevation', Decimal('0.00000000'), Decimal('10000.00000000')), ('Location', 'LocationDeterminationMethod', '', 
'AddressGeocoding'), ('Location', 'LocationDeterminationMethod', 'AddressGeocoding', ''), ('EngineeringReviewRequirement', 'RequirementLevel', 'ConditionallyRequired', 'Required'), ('AHJInspection', 'FileFolderURL', 'oldurl', 'newurl'), ('FeeStructure', 'FeeStructureID', str(uuid.uuid4()), str(uuid.uuid4())) ] ) @pytest.mark.django_db def test_edit_update_old_value(model_name, field_name, old_value, new_value, create_user, ahj_obj, create_minimal_obj, add_enums): user = create_user() obj = create_minimal_obj(model_name) edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': model_name, 'SourceRow': obj.pk, 'SourceColumn': field_name, 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) views_edits.apply_edits(ready_edits=[edit]) views_edits.edit_update_old_value(edit) edit = Edit.objects.get(EditID=edit.EditID) assert edit.OldValue == str(new_value) @pytest.mark.parametrize( 'model_name, field_name, old_value, new_value', [ ('AHJ', 'AHJName', 'oldname', 'newname'), ('Contact', 'FirstName', 'oldname', 'newname'), ('Address', 'Country', 'oldcountry', 'newcountry'), ('Location', 'Elevation', Decimal('0.00000000'), Decimal('10000.00000000')), ('Location', 'LocationDeterminationMethod', '', 'AddressGeocoding'), ('Location', 'LocationDeterminationMethod', 'AddressGeocoding', ''), ('EngineeringReviewRequirement', 'RequirementLevel', 'ConditionallyRequired', 'Required'), ('AHJInspection', 'FileFolderURL', 'oldurl', 'newurl'), ('FeeStructure', 'FeeStructureID', str(uuid.uuid4()), str(uuid.uuid4())) ] ) @pytest.mark.django_db def test_edit_update_old_value_all_awaiting_apply_or_review(model_name, field_name, old_value, new_value, create_user, ahj_obj, create_minimal_obj, add_enums): user = create_user() obj = create_minimal_obj(model_name) edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': model_name, 'SourceRow': obj.pk, 'SourceColumn': field_name, 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'IsApplied': True, 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) edit_dict['IsApplied'] = False approved_edit = Edit.objects.create(**edit_dict) edit_dict['ReviewStatus'] = 'P' pending_edit = Edit.objects.create(**edit_dict) views_edits.apply_edits(ready_edits=[edit]) views_edits.edit_update_old_value_all_awaiting_apply_or_review(edit) approved_edit = Edit.objects.get(EditID=approved_edit.EditID) pending_edit = Edit.objects.get(EditID=pending_edit.EditID) assert approved_edit.OldValue == str(new_value) assert pending_edit.OldValue == str(new_value) @pytest.mark.parametrize( 'model_name, field_name, old_value, new_value, expected_value', [ ('AHJ', 'AHJName', 'oldname', 'newname', 'old_value'), ('Contact', 'FirstName', 'oldname', 'newname', 'old_value'), ('Address', 'Country', 'oldcountry', 'newcountry', 'old_value'), ('Location', 'Elevation', Decimal('0.00000000'), Decimal('10000.00000000'), 'old_value'), ('Location', 'LocationDeterminationMethod', '', 'AddressGeocoding', None), ('Location', 'LocationDeterminationMethod', 'AddressGeocoding', '', 'old_value'), ('EngineeringReviewRequirement', 'RequirementLevel', 'ConditionallyRequired', 'Required', 'old_value'), ('AHJInspection', 'FileFolderURL', 'oldurl', 'newurl', 'old_value'), ('FeeStructure', 'FeeStructureID', str(uuid.uuid4()), str(uuid.uuid4()), 'old_value') ] ) 
@pytest.mark.django_db def test_edit_undo_apply(model_name, field_name, old_value, new_value, create_user, ahj_obj, expected_value, create_minimal_obj, add_enums): user = create_user() obj = create_minimal_obj(model_name) edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': model_name, 'SourceRow': obj.pk, 'SourceColumn': field_name, 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) views_edits.apply_edits(ready_edits=[edit]) views_edits.edit_undo_apply(edit) if expected_value == 'old_value': expected_value = get_value_or_enum_row(field_name, old_value) assert get_obj_field(obj, field_name) == expected_value @pytest.mark.parametrize( 'model_name, field_name, old_value, new_value, expected_value', [ ('AHJ', 'AHJName', 'oldname', 'newname', 'old_value'), ('Contact', 'FirstName', 'oldname', 'newname', 'old_value'), ('Address', 'Country', 'oldcountry', 'newcountry', 'old_value'), ('Location', 'Elevation', Decimal('0.00000000'), Decimal('10000.00000000'), 'old_value'), ('Location', 'LocationDeterminationMethod', '', 'AddressGeocoding', None), ('Location', 'LocationDeterminationMethod', 'AddressGeocoding', '', 'old_value'), ('EngineeringReviewRequirement', 'RequirementLevel', 'ConditionallyRequired', 'Required', 'old_value'), ('AHJInspection', 'FileFolderURL', 'oldurl', 'newurl', 'old_value'), ('FeeStructure', 'FeeStructureID', str(uuid.uuid4()), str(uuid.uuid4()), 'old_value') ] ) @pytest.mark.django_db def test_edit_reset__edit_update(model_name, field_name, old_value, new_value, create_user, ahj_obj, create_minimal_obj, expected_value, add_enums): user = create_user() obj = create_minimal_obj(model_name) set_obj_field(obj, field_name, new_value) edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': model_name, 'SourceRow': obj.pk, 'SourceColumn': field_name, 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'IsApplied': True, 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) assert views_edits.reset_edit(user, edit) assert edit_is_pending(edit) if expected_value == 'old_value': expected_value = get_value_or_enum_row(field_name, old_value) assert get_obj_field(obj, field_name) == expected_value @pytest.mark.parametrize( 'parent_model_name, model_name, review_status', [ ('AHJ', 'Contact', 'A'), ('AHJInspection', 'Contact', 'A'), ('AHJ', 'EngineeringReviewRequirement', 'A'), ('AHJ', 'AHJInspection', 'A'), ('AHJ', 'DocumentSubmissionMethod', 'A'), ('AHJ', 'PermitIssueMethod', 'A'), ('AHJ', 'FeeStructure', 'A'), ('AHJ', 'Contact', 'R'), ('AHJInspection', 'Contact', 'R'), ('AHJ', 'EngineeringReviewRequirement', 'R'), ('AHJ', 'AHJInspection', 'R'), ('AHJ', 'DocumentSubmissionMethod', 'R'), ('AHJ', 'PermitIssueMethod', 'R'), ('AHJ', 'FeeStructure', 'R') ] ) @pytest.mark.django_db def test_edit_reset__edit_addition(parent_model_name, model_name, review_status, create_user, create_minimal_obj, ahj_obj): user = create_user() parent_obj = create_minimal_obj(parent_model_name) obj = create_minimal_obj(model_name) relation = obj.create_relation_to(parent_obj) set_obj_field(relation, relation.get_relation_status_field(), review_status == 'A') edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': relation.__class__.__name__, 'SourceRow': relation.pk, 'SourceColumn': relation.get_relation_status_field(), 
'OldValue': None, 'NewValue': True, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': review_status, 'IsApplied': review_status == 'A', 'EditType': 'A', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) assert views_edits.reset_edit(user, edit) assert edit_is_pending(edit) assert get_obj_field(relation, relation.get_relation_status_field()) == edit_dict['OldValue'] @pytest.mark.parametrize( 'parent_model_name, model_name, review_status', [ ('AHJ', 'Contact', 'A'), ('AHJInspection', 'Contact', 'A'), ('AHJ', 'EngineeringReviewRequirement', 'A'), ('AHJ', 'AHJInspection', 'A'), ('AHJ', 'DocumentSubmissionMethod', 'A'), ('AHJ', 'PermitIssueMethod', 'A'), ('AHJ', 'FeeStructure', 'A'), ('AHJ', 'Contact', 'R'), ('AHJInspection', 'Contact', 'R'), ('AHJ', 'EngineeringReviewRequirement', 'R'), ('AHJ', 'AHJInspection', 'R'), ('AHJ', 'DocumentSubmissionMethod', 'R'), ('AHJ', 'PermitIssueMethod', 'R'), ('AHJ', 'FeeStructure', 'R') ] ) @pytest.mark.django_db def test_edit_reset__edit_deletion(parent_model_name, model_name, review_status, create_user, create_minimal_obj, ahj_obj): user = create_user() parent_obj = create_minimal_obj(parent_model_name) obj = create_minimal_obj(model_name) relation = obj.create_relation_to(parent_obj) set_obj_field(relation, relation.get_relation_status_field(), review_status != 'A') edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': relation.__class__.__name__, 'SourceRow': relation.pk, 'SourceColumn': relation.get_relation_status_field(), 'OldValue': True, 'NewValue': False, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': review_status, 'IsApplied': review_status == 'A', 'EditType': 'A', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) assert views_edits.reset_edit(user, edit) edit = Edit.objects.get(EditID=edit.EditID) assert edit_is_pending(edit) assert get_obj_field(relation, relation.get_relation_status_field()) == edit_dict['OldValue'] @pytest.mark.django_db def test_edit_reset__edit_pending_do_nothing(create_user, ahj_obj): user = create_user() old_value = 'oldname' new_value = 'newname' set_obj_field(ahj_obj, 'AHJName', old_value) edit_dict = {'ChangedBy': user, 'ApprovedBy': None, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': None, 'ReviewStatus': 'P', 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) assert not views_edits.reset_edit(user, edit) edit_dict['OldValue'], edit_dict['NewValue'] = old_value, edit_dict['OldValue'] edit_dict['ReviewStatus'] = 'A' edit_dict['ApprovedBy'], edit_dict['DateEffective'] = user, timezone.now() assert not check_edit_exists(edit_dict) assert Edit.objects.all().count() == 1 @pytest.mark.parametrize( 'force_resettable, skip_undo', [ (True, False), (True, True) ] ) @pytest.mark.django_db def test_edit_reset__kwargs(force_resettable, skip_undo, create_user, ahj_obj): user = create_user() old_value = 'oldname' new_value = 'newname' later_value = 'newname_later' set_obj_field(ahj_obj, 'AHJName', later_value) edit_dict = {'ChangedBy': user, 'ApprovedBy': user, 'SourceTable': 'AHJ', 'SourceRow': ahj_obj.pk, 'SourceColumn': 'AHJName', 'OldValue': old_value, 'NewValue': new_value, 'DateRequested': timezone.now(), 'DateEffective': timezone.now(), 'ReviewStatus': 'A', 'IsApplied': True, 'EditType': 'U', 'AHJPK': ahj_obj} edit = Edit.objects.create(**edit_dict) edit_dict['OldValue'], 
edit_dict['NewValue'] = edit_dict['NewValue'], later_value later_edit = Edit.objects.create(**edit_dict) assert views_edits.reset_edit(user, edit, force_resettable=force_resettable, skip_undo=skip_undo) edit = Edit.objects.get(EditID=edit.EditID) if force_resettable and not skip_undo: assert get_obj_field(ahj_obj, 'AHJName') == old_value elif force_resettable and skip_undo: assert get_obj_field(ahj_obj, 'AHJName') == later_value assert edit.OldValue == later_value assert edit.NewValue == new_value assert edit_is_pending(edit)
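# ---------------------------------------------------------------------------
# The tests above rebuild nearly identical edit_dict literals in every case.
# A small factory such as this hypothetical helper could centralize the
# defaults; it is a refactoring sketch, not part of the test suite.
# ---------------------------------------------------------------------------
def make_edit_dict(user, ahj, **overrides):
    # default pending-update edit; override any field per test
    base = {'ChangedBy': user, 'ApprovedBy': None, 'SourceTable': 'AHJ',
            'SourceRow': ahj.pk, 'SourceColumn': 'AHJName',
            'OldValue': 'oldname', 'NewValue': 'newname',
            'DateRequested': timezone.now(), 'DateEffective': None,
            'ReviewStatus': 'P', 'EditType': 'U', 'AHJPK': ahj}
    base.update(overrides)
    return base

# usage inside a test body:
# edit = Edit.objects.create(**make_edit_dict(user, ahj_obj, ReviewStatus='A',
#                                             ApprovedBy=user,
#                                             DateEffective=timezone.now()))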
from flask import Blueprint, g, request, current_app
import json
import logging
from ..utils import datetime_to_json, get_time_string, get_default_runtime, match_movie
import datetime
from ..pick_algo import pick_movies_by_num, pick_movies_by_time
from .auth import login_required
import pandas
import pathlib
from .. import db

logger = logging.getLogger(__name__)

bp = Blueprint('movies', __name__, url_prefix='/movie')


@bp.route('/all', methods=['GET'])
@login_required
def get_all_movies():
    user_id = g.user.id
    user_movies_map = db.query_user_movies_map(user_id)
    res = []
    keys = ['likability', 'have_seen', 'comment', 'create_time']
    movie_keys = ['id', 'name', 'rating']
    if user_movies_map:
        for row in user_movies_map:
            temp = {k: getattr(row, k) for k in keys}
            movie = db.query_movie(row.movie_id)
            for key in movie_keys:
                temp[key] = getattr(movie, key)
            temp['runtime'] = get_default_runtime(movie.runtime).running_time
            temp['starring'] = [s.name for s in movie.starring]
            temp['genre'] = [g.genre for g in movie.genre]
            res.append(temp)
    data = {'statusCode': 0, 'message': 'query success', 'data': res}
    return json.dumps(data, default=datetime_to_json, ensure_ascii=False)


@bp.route('/', methods=['POST'])
@login_required
def insert_one_movie():
    r = request.get_json()
    if r is None:
        logger.warning('req_data is none, maybe content-type is not application/json!')
        return {'statusCode': -1, 'message': 'req data is not json'}
    req_params = {key: r.get(key) for key, _ in r.items()}
    if req_params.get('create_time') is not None:
        try:
            req_params['create_time'] = datetime.datetime.strptime(
                req_params.get('create_time'), '%Y-%m-%d %H:%M:%S')
        except ValueError as e:
            logger.warning(e)
            return {'statusCode': -1, 'message': 'date format must match %Y-%m-%d %H:%M:%S'}
    user_id = g.user.id
    # Match the movie against the library first; if no match is found, create one.
    # movie_id is either the matched movie's id or the newly created one.
    temp_l = db.query_movie_match_name(req_params['name'])
    matcher = match_movie(temp_l, {'rating': req_params['rating'], 'runtime': req_params['runtime']})
    if matcher is None:
        movie_id = db.insert_movie(req_params['name'],
                                   [db.RunningTime('default', int(req_params['runtime']))],
                                   req_params['rating'],
                                   starring=req_params['starring'],
                                   genre=req_params['genre'])
    else:
        movie_id = matcher.id
    db.insert_user_movie_map(user_id, movie_id, req_params['likability'],
                             req_params['have_seen'], req_params['comment'],
                             req_params['create_time'])
    data = db.query_movie_with_userinfo(user_id, movie_id)
    res = {'statusCode': 0, 'message': 'insert movie success', 'data': data}
    return json.dumps(res, default=datetime_to_json, ensure_ascii=False)


@bp.route('/', methods=['PUT'])
@login_required
def update_one_movie():
    r = request.get_json()
    if r is None:
        logger.warning('req_data is none, maybe content-type is not application/json!')
        return {'statusCode': -1, 'message': 'req data is not json'}
    elif r.get('id') is None:
        logger.warning('update data does not contain id')
        logger.debug(r)
        return {'statusCode': -1, 'message': 'update data must contain id'}
    r['movie_id'] = r['id']
    del r['id']
    db.update_user_movie_map(g.user.id, **r)
    return {'statusCode': 0, 'message': 'update movie success'}


@bp.route('/', methods=['DELETE'])
@login_required
def remove_one_movie():
    movie_id = request.args.get('id', None)
    if movie_id is None:  # was `if id is None`, which tested the builtin `id`
        logger.warning('id is None!')
        return {'statusCode': -1, 'message': 'delete method requires id param'}
    db.delete_user_movie_map(g.user.id, movie_id)
    return {'statusCode': 0, 'message': 'remove movie success'}


@bp.route('/pick', methods=['POST'])
@login_required
def pick_movie():
    r = request.get_json()
    if r is None:
        logger.warning('req_data is none, maybe content-type is not application/json!')
        return {'statusCode': -1, 'message': 'req data is not json'}
    pick_type = r.get('type')
    data = r.get('data')
    # Validate presence before dereferencing `data` (the original checked
    # data.get('value') first, which raises AttributeError when data is None).
    if pick_type is None or data is None:
        logger.error('pick_type or data is null, parameter error')
        return {'statusCode': -1, 'message': 'pick_type or data is null, parameter error'}
    if data.get('value') == '':
        logger.error('value can not be null')
        return {'statusCode': -1, 'message': 'value can not be null'}
    movies_havent_seen = db.query_all_movies_havent_seen_by_userid(g.user.id)
    starrings = data.get('starring')
    genres = data.get('genre')

    def filter_by_starring_and_genre(row):
        for s in starrings:
            if row['starring'] is None:
                return False
            temp = db.query_starring(s)
            if temp is None:
                return False
            elif temp.name not in row['starring']:
                return False
        for genre_id in genres:  # renamed from `g`, which shadowed flask.g
            if row['genre'] is None:
                return False
            temp = db.query_genre(genre_id)
            if temp is None:
                return False
            elif temp.genre not in row['genre']:
                return False
        return True

    movies_input = list(filter(filter_by_starring_and_genre, movies_havent_seen))
    # type=1: pick by time; type=2: pick by num
    pick_res = []
    if pick_type == 1:
        pick_res = pick_movies_by_time(int(data.get('value')), movies_input)
    elif pick_type == 2:
        pick_res = pick_movies_by_num(int(data.get('value')), movies_input)
    res = {'statusCode': 0, 'message': 'pick successful', 'data': pick_res}
    return json.dumps(res, default=datetime_to_json, ensure_ascii=False)


@bp.route('/export', methods=['GET'])
@login_required
def export_movies_data():
    userid = g.user.id
    movies = db.query_all_movies_with_userinfo(userid)
    export_filename = ''
    if movies:
        field_list = ['id', 'name', 'rating', 'starring', 'genre', 'runtime',
                      'likability', 'have_seen', 'comment', 'create_time']
        movies_input = []
        for m in movies:
            temp = {k: m.get(k) for k in field_list}
            movies_input.append(temp)
        df = pandas.DataFrame(movies_input, columns=field_list)
        columns_to_drop = ['id']
        for col in columns_to_drop:
            del df[col]

        def convert_list(m):
            if m:
                return '/'.join(m)
            return None

        def convert_haveseen(have_seen):
            # '是'/'否' are the yes/no values written to the exported sheet
            if have_seen is True:
                return '是'
            elif have_seen is False:
                return '否'
            return ''

        df['starring'] = df['starring'].apply(convert_list)
        df['genre'] = df['genre'].apply(convert_list)
        df['have_seen'] = df['have_seen'].apply(convert_haveseen)
        time_string = get_time_string()
        export_filename = f'{userid}-export-{time_string}.xlsx'
        export_path = pathlib.Path(current_app.config['DOWNLOAD_FOLDER'])
        if not export_path.exists():
            export_path.mkdir()
        df.to_excel(export_path.joinpath(export_filename))
    else:
        return {'statusCode': 0, 'message': 'there are no movies'}
    return {'statusCode': 0, 'message': 'export successful', 'data': {'filename': export_filename}}


@bp.route('/starrings', methods=['GET'])
@login_required
def get_starrings():
    filter_args = request.args.get('filter')
    if filter_args is None:
        starrings = db.query_all_starring()
    else:
        starrings = db.query_starring_by_filter(filter_args)
    res = []
    if starrings:
        keys = starrings[0].field_list
        for row in starrings:
            temp = {k: getattr(row, k) for k in keys}
            res.append(temp)
    data = {'statusCode': 0, 'message': 'query success', 'data': res}
    return data


@bp.route('/genres', methods=['GET'])
@login_required
def get_genres():
    filter_args = request.args.get('filter')
    if filter_args is None:
        genres = db.query_all_genre()
    else:
        genres = db.query_genre_by_filter(filter_args)
    res = []
    if genres:
        keys = genres[0].field_list
        for row in genres:
            temp = {k: getattr(row, k) for k in keys}
            res.append(temp)
    data = {'statusCode': 0, 'message': 'query success', 'data': res}
    return data


@bp.route('/movie', methods=['GET'])
@login_required
def get_match_movie():
    match_q = request.args.get('match')
    if match_q is None:
        logger.warning('match query parameter is missing')
        return {'statusCode': -1, 'message': 'parameter match is required'}
    match_res = db.query_movie_match_name(match_q)
    keys = ['id', 'name', 'starring', 'genre', 'rating', 'runtime']

    def filter_field(movie: db.Movie):
        temp = {k: getattr(movie, k) for k in keys}
        temp['starring'] = [s.name for s in movie.starring]
        temp['genre'] = [g.genre for g in movie.genre]
        temp['runtime'] = get_default_runtime(movie.runtime).running_time
        return temp

    map_res = list(map(filter_field, match_res))
    data = {'statusCode': 0, 'message': 'query success', 'data': map_res}
    return data
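# Illustrative only (not part of the blueprint above): a minimal client-side
# sketch of how the /movie/pick endpoint could be exercised with `requests`.
# The base URL and the logged-in session cookie are assumptions for the example.
import requests

session = requests.Session()  # assumed to already carry an authenticated session cookie
payload = {'type': 1, 'data': {'value': '120', 'starring': [], 'genre': []}}
resp = session.post('http://localhost:5000/movie/pick', json=payload)
print(resp.json())  # e.g. {'statusCode': 0, 'message': 'pick successful', 'data': [...]}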
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# from "SuperShape2D" (Daniel Shiffman)
# Video: https://youtu.be/ksRoh-10lak
# supershapes: http://paulbourke.net/geometry/supershape/

import sys, os
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import math
import numpy as np


def mapFromTo(x, a, b, c, d):
    """map() function of javascript"""
    y = (float(x) - float(a)) / (float(b) - float(a)) * \
        (float(d) - float(c)) + float(c)
    return y


class SuperShape(QWidget):

    def __init__(self, parent=None, nstars=500):
        QWidget.__init__(self, parent)
        self.myTimerId = None
        self.setWindowTitle("Coding Train - Supershape2D")
        self.setFixedSize(400, 400)
        # black background
        p = self.palette()
        p.setColor(self.backgroundRole(), Qt.black)
        self.setAutoFillBackground(True)
        self.setPalette(p)
        # parameters
        self.n1 = 0.3
        self.n2 = 0.3
        self.n3 = 0.3
        self.m = 5
        self.a = 1
        self.b = 1
        self.radius = 100

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.translate(self.width() / 2, self.height() / 2)
        painter.setPen(Qt.white)
        #painter.setBrush(Qt.NoBrush)
        painter.setBrush(Qt.darkGray)
        total = 200
        increment = 2 * math.pi / total
        points = []
        for angle in np.arange(0, 2 * math.pi, increment):
            r = self.supershape(angle)
            x = self.radius * r * math.cos(angle)
            y = self.radius * r * math.sin(angle)
            # QPoint takes integer coordinates, so truncate the floats
            points.append(QPoint(int(x), int(y)))
        painter.drawPolygon(QPolygon(points))
        # write some info
        painter.resetTransform()
        font = painter.font()
        font.setPixelSize(10)
        painter.setFont(font)
        text = ''
        for var in ['m', 'a', 'b', 'n1', 'n2', 'n3']:
            text += '%s = %f\n' % (var, getattr(self, var))
        rectangle = painter.viewport().adjusted(10, 10, -20, -20)
        boundingRect = painter.drawText(rectangle, 0, text)

    def supershape(self, theta):
        part1 = (1.0 / self.a) * math.cos(theta * self.m / 4.0)
        part1 = abs(part1)
        part1 = math.pow(part1, self.n2)
        part2 = (1.0 / self.b) * math.sin(theta * self.m / 4.0)
        part2 = abs(part2)
        part2 = math.pow(part2, self.n3)
        part3 = math.pow(part1 + part2, 1 / self.n1)
        if part3 == 0.0:
            return 0.0
        return 1.0 / part3


class Window(QWidget):

    def __init__(self):
        QWidget.__init__(self)
        self.initUI()

    def buildSlider(self, widget, rmin, rmax, stp, name):
        slider = QSlider(Qt.Horizontal)
        slider.setMinimumWidth(200)
        slider.setRange(0, stp)
        # QSlider.setValue expects an int
        slider.setValue(int(float(getattr(widget, name) - rmin) / (rmax - rmin) * stp))
        slider.valueChanged.connect(lambda x: setattr(widget, name, rmin + x * float(rmax - rmin) / stp))
        slider.valueChanged.connect(lambda x: widget.repaint())
        return slider

    def initUI(self):
        iconfile = os.path.join(os.path.dirname(__file__), 'coding_train_icon.png')
        self.setWindowIcon(QIcon(iconfile))
        widget = SuperShape()
        vbox = QFormLayout()
        vbox.addRow("m", self.buildSlider(widget, rmin=0, rmax=10, stp=100, name='m'))
        vbox.addRow("a", self.buildSlider(widget, rmin=1, rmax=10, stp=100, name='a'))
        vbox.addRow("b", self.buildSlider(widget, rmin=1, rmax=10, stp=100, name='b'))
        vbox.addRow("n1", self.buildSlider(widget, rmin=0.1, rmax=1, stp=100, name='n1'))
        vbox.addRow("n2", self.buildSlider(widget, rmin=0.1, rmax=1, stp=100, name='n2'))
        vbox.addRow("n3", self.buildSlider(widget, rmin=0.1, rmax=1, stp=100, name='n3'))
        vbox.addRow("radius", self.buildSlider(widget, rmin=1, rmax=500, stp=500, name='radius'))
        hbox = QHBoxLayout()
        hbox.addWidget(widget)
        hbox.addLayout(vbox)
        self.setLayout(hbox)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Window()
    ex.show()
    sys.exit(app.exec_())
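# Illustrative only: the supershape() method above implements the Gielis
# superformula r(theta) = (|cos(m*t/4)/a|**n2 + |sin(m*t/4)/b|**n3) ** (-1/n1).
# A standalone evaluation with the widget's default parameters, so the radial
# profile can be inspected without Qt:
import math

m, a, b, n1, n2, n3 = 5, 1, 1, 0.3, 0.3, 0.3

def r(theta):
    part = abs(math.cos(theta * m / 4) / a) ** n2 + abs(math.sin(theta * m / 4) / b) ** n3
    return 0.0 if part == 0 else part ** (-1 / n1)

print([round(r(t), 3) for t in (0.0, math.pi / 4, math.pi / 2)])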
nilq/baby-python
python
"""Convert all the old posts. Author: Alex Alemi Date: 2019-01-23 """ import os import logging CURRENT_DIR = os.path.dirname(__file__) POSTS_DIR = os.path.normpath(os.path.join(CURRENT_DIR, '../posts/old')) def fix_front(line): """Redo the front of the metadata lines for the nikola format.""" return '.. ' + line[0].lower() + line[1:] def has_math(lines): """Test if math appears anywhere in the post.""" for line in lines: if '$$' in line: return True elif '$' in line: return True return False def fix_preamble(lines): """Convert the preamble to the correct form.""" # get the first empty line first_empty_line = lines.index('\n') if first_empty_line == 0: raise Exception() preamble = [fix_front(line) for line in lines[:first_empty_line]] if has_math(lines): preamble.append('.. has_math: true\n') lines = ['<--\n'] + preamble + ['-->\n'] + lines[first_empty_line:] return lines def fix_static(lines): """Fix image links to handle new static path.""" def fix_static_line(line): return line.replace('/static/images', '/images') return [fix_static_line(line) for line in lines] def transform(filepath): """Transform a file.""" with open(filepath, 'r') as f: lines = f.readlines() try: lines = fix_preamble(lines) lines = fix_static(lines) except Exception: logging.exception(f'Error on {filepath}') raise return lines if __name__ == "__main__": if not os.path.exists(POSTS_DIR): os.makedirs(POSTS_DIR) for subdir, dirs, files in os.walk(os.path.join(CURRENT_DIR, "../content.bk/old")): for file in files: filepath = os.path.normpath(os.path.join(subdir, file)) if filepath.endswith(".md"): print(f"Processing {filepath}") transformed_lines = transform(filepath) new_filepath = os.path.join(POSTS_DIR, file) with open(new_filepath, 'w') as f: f.writelines(transformed_lines) print(f"Wrote {new_filepath}")
nilq/baby-python
python
# Generated by Django 2.2.24 on 2021-07-26 14:50

import django.core.validators
from django.db import migrations, models


def split_dates(apps, schema_editor):
    CompanyObjective = apps.get_model('exportplan', 'CompanyObjectives')
    for objective in CompanyObjective.objects.all():
        if objective.start_date:
            objective.start_month = objective.start_date.month
            objective.start_year = objective.start_date.year
        if objective.end_date:
            objective.end_month = objective.end_date.month
            objective.end_year = objective.end_date.year
        objective.save()


class Migration(migrations.Migration):

    dependencies = [
        ('exportplan', '0038_auto_20210614_1506'),
    ]

    operations = [
        migrations.AddField(
            model_name='companyobjectives',
            name='end_month',
            field=models.IntegerField(blank=True, null=True, validators=[
                django.core.validators.MinValueValidator(1),
                django.core.validators.MaxValueValidator(12)]),
        ),
        migrations.AddField(
            model_name='companyobjectives',
            name='end_year',
            field=models.IntegerField(blank=True, null=True, validators=[
                django.core.validators.MinValueValidator(0),
                django.core.validators.MaxValueValidator(9999)]),
        ),
        migrations.AddField(
            model_name='companyobjectives',
            name='start_month',
            field=models.IntegerField(blank=True, null=True, validators=[
                django.core.validators.MinValueValidator(1),
                django.core.validators.MaxValueValidator(12)]),
        ),
        migrations.AddField(
            model_name='companyobjectives',
            name='start_year',
            field=models.IntegerField(blank=True, null=True, validators=[
                django.core.validators.MinValueValidator(0),
                django.core.validators.MaxValueValidator(9999)]),
        ),
        migrations.RunPython(split_dates),
    ]
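# Note (illustrative, not generated by Django): as written, RunPython(split_dates)
# has no reverse operation, so this migration cannot be unapplied. A common
# pattern is to pass an explicit no-op reverse:
#
#     migrations.RunPython(split_dates, migrations.RunPython.noop)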
nilq/baby-python
python
"""https://gist.github.com/alopes/5358189""" stopwords = [ "de", "a", "o", "que", "e", "do", "da", "em", "um", "para", "é", "com", "não", "uma", "os", "no", "se", "na", "por", "mais", "as", "dos", "como", "mas", "foi", "ao", "ele", "das", "tem", "à", "seu", "sua", "ou", "ser", "quando", "muito", "há", "nos", "já", "está", "eu", "também", "só", "pelo", "pela", "até", "isso", "ela", "entre", "era", "depois", "sem", "mesmo", "aos", "ter", "seus", "quem", "nas", "me", "esse", "eles", "estão", "você", "tinha", "foram", "essa", "num", "nem", "suas", "meu", "às", "minha", "têm", "numa", "pelos", "elas", "havia", "seja", "qual", "será", "nós", "tenho", "lhe", "deles", "essas", "esses", "pelas", "este", "fosse", "dele", "tu", "te", "vocês", "vos", "lhes", "meus", "minhas", "teu", "tua", "teus", "tuas", "nosso", "nossa", "nossos", "nossas", "dela", "delas", "esta", "estes", "estas", "aquele", "aquela", "aqueles", "aquelas", "isto", "aquilo", "estou", "está", "estamos", "estão", "estive", "esteve", "estivemos", "estiveram", "estava", "estávamos", "estavam", "estivera", "estivéramos", "esteja", "estejamos", "estejam", "estivesse", "estivéssemos", "estivessem", "estiver", "estivermos", "estiverem", "hei", "há", "havemos", "hão", "houve", "houvemos", "houveram", "houvera", "houvéramos", "haja", "hajamos", "hajam", "houvesse", "houvéssemos", "houvessem", "houver", "houvermos", "houverem", "houverei", "houverá", "houveremos", "houverão", "houveria", "houveríamos", "houveriam", "sou", "somos", "são", "era", "éramos", "eram", "fui", "foi", "fomos", "foram", "fora", "fôramos", "seja", "sejamos", "sejam", "fosse", "fôssemos", "fossem", "for", "formos", "forem", "serei", "será", "seremos", "serão", "seria", "seríamos", "seriam", "tenho", "tem", "temos", "tém", "tinha", "tínhamos", "tinham", "tive", "teve", "tivemos", "tiveram", "tivera", "tivéramos", "tenha", "tenhamos", "tenham", "tivesse", "tivéssemos", "tivessem", "tiver", "tivermos", "tiverem", "terei", "terá", "teremos", "terão", "teria", "teríamos", "teriam", ]
nilq/baby-python
python
import pygame

from . import GameEnv, GameEnv_Simple, Ball, Robot, Goal
from typing import Tuple, List, Dict
import random


class AbstractPlayer:
    def __init__(self, env: GameEnv, robot: Robot):
        self.env = env
        self.robot = robot

    def get_action(self) -> Tuple[float, float]:
        # was `raise Exception(...)`; NotImplementedError is the idiomatic choice
        raise NotImplementedError("Override this in the child class.")


class OG_Twitchy(AbstractPlayer):
    def get_action(self) -> Tuple[float, float]:
        rando = random.random()
        # ~5% chance to turn left or right, 45% chance to go forward/back
        if rando <= 0.05:
            # turn left
            action = (-1, 1)
        elif rando <= 0.5:
            # go straight
            action = (1, 1)
        elif rando < 0.95:
            # go back
            action = (-1, -1)
        else:
            # turn right
            action = (1, -1)
        return action


class Human(AbstractPlayer):
    def __init__(self, env: GameEnv, robot: Robot,
                 key_left=pygame.K_a, key_right=pygame.K_d,
                 key_forwards=pygame.K_w, key_backwards=pygame.K_s):
        super(Human, self).__init__(env, robot)
        self.key_left = key_left
        self.key_right = key_right
        self.key_forwards = key_forwards
        self.key_backwards = key_backwards

    def get_action(self) -> Tuple[float, float]:
        pygame.event.get()  # If you don't call this first, doesn't work... worth investigating at some point
        # Process player input
        dctKeyDown = pygame.key.get_pressed()
        lngLThrust = 0
        lngRThrust = 0
        if dctKeyDown[self.key_forwards]:
            lngLThrust += 1
            lngRThrust += 1
        if dctKeyDown[self.key_backwards]:
            lngLThrust -= 1
            lngRThrust -= 1
        if dctKeyDown[self.key_left]:
            lngLThrust -= 1
            lngRThrust += 1
        if dctKeyDown[self.key_right]:
            lngLThrust += 1
            lngRThrust -= 1
        return (lngLThrust, lngRThrust)


class DistantHuman(Human):
    def __init__(self, env: GameEnv, robot: Robot):
        super(Human, self).__init__(env, robot)
        raise NotImplementedError("SOMEBODY SHOULD TOTALLY MAKE A CLIENT/SERVER PLAYER THO")
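# Illustrative only: OG_Twitchy never reads env/robot state, so its action
# distribution can be sampled with placeholder arguments (None stand-ins are
# an assumption of this sketch, not how the game wires players up):
twitchy = OG_Twitchy(env=None, robot=None)
print([twitchy.get_action() for _ in range(3)])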
nilq/baby-python
python
# Minimal stand-in for the ListNode class the judge normally provides, so the
# snippet runs standalone:
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def XXX(self, head: ListNode) -> ListNode:
        # Build a new list, copying a node only when its value differs from
        # the tail of the deduplicated list (removes consecutive duplicates).
        try:
            new_head = new_tail = ListNode(head.val)
            p = head.next
            while p:
                if new_tail.val != p.val:
                    node = ListNode(p.val)
                    new_tail.next = node
                    new_tail = node
                p = p.next
            return new_head
        except AttributeError:  # head is None (empty list); was a bare `except`
            return head
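# Quick illustrative check of the deduplication above (the helper name is ad hoc):
def build(vals):
    head = tail = ListNode(vals[0])
    for v in vals[1:]:
        tail.next = ListNode(v)
        tail = tail.next
    return head

node = Solution().XXX(build([1, 1, 2, 3, 3]))
out = []
while node:
    out.append(node.val)
    node = node.next
print(out)  # [1, 2, 3]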
nilq/baby-python
python
from PIL import Image
import numpy as np  # moved up from mid-file

# Load the image
img = Image.open('/home/popschool/Documents/GitHub/projet_recoplante/Images_test/bruyere_des_marais_NB.jpg')

# Display the loaded image
img.show()

# Get and print the image size (in pixels)
w, h = img.size
print("Width: {} px, height: {} px".format(w, h))

# Print its pixel quantization mode
print("Pixel format: {}".format(img.mode))

# Get and print the pixel value at a specific position
px_value = img.getpixel((20, 100))
print("Value of the pixel at (20,100): {}".format(px_value))

# Get the values of all pixels as a matrix
mat = np.array(img)
mat

# Print the size of the pixel matrix
print("Size of the pixel matrix: {}".format(mat.shape))
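# Illustrative follow-up (not in the original): the pixel matrix can be turned
# back into a PIL image, e.g. after a simple threshold; 128 is an arbitrary cutoff.
binary = (mat > 128) * 255
img_bin = Image.fromarray(binary.astype(np.uint8))
img_bin.show()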
nilq/baby-python
python
import unittest

from Config import Config
from MossResultsRetriever import MossResultsRetriever
from Result import Result


class MossURLsTests(unittest.TestCase):

    def setUp(self):
        self.config = Config()
        self.validUrl = self.config.getMagicsquare()
        self.retriever = MossResultsRetriever()
        self.results = Result(1, "f1", "f2", "url", 40, 50, 60)

    #
    # isValidUrl()
    #

    # Test a valid URL
    def test_validUrl(self):
        url = self.validUrl
        self.assertTrue(self.retriever.isValidUrl(url))

    # Test the same URL twice, which is considered a valid submission
    def test_validSameUrl(self):
        url = self.validUrl
        self.assertTrue(self.retriever.isValidUrl(url))
        self.assertTrue(self.retriever.isValidUrl(url))

    # Test an invalid String
    def test_invalidUrlString(self):
        url = "notURL"
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test an int
    def test_invalidUrlInt(self):
        url = 1
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test a double
    def test_invalidUrlDouble(self):
        url = 0.5
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test None
    def test_invalidUrlNone(self):
        url = None
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test empty list
    def test_invalidUrlListEmpty(self):
        url = []
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test a list with valid URLs as entries
    def test_invalidUrlListOfUrls(self):
        url = [self.validUrl, self.validUrl, self.validUrl]
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test an invalid URL shaped like MOSS's
    def test_invalidUrlLikeMoss(self):
        url = "http://moss.stanford.edu/results/12121212121212/"
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test a URL that's two valid URLs appended together
    def test_invalidUrlTwoAppended(self):
        url = self.validUrl + self.validUrl
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test a valid URL that isn't MOSS
    def test_validUrlNotMoss(self):
        url = "https://google.com"
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test a valid URL with space
    def test_validUrlWithSpace(self):
        url = " " + self.validUrl + " "
        self.assertFalse(self.retriever.isValidUrl(url))

    # Test a valid URL with new line
    def test_validUrlWithNewLine(self):
        url = "\n" + self.validUrl + "\n"
        self.assertFalse(self.retriever.isValidUrl(url))

    #
    # isValidUrlList()
    #

    # Test int
    def test_isValidUrlListInt(self):
        urls = 1
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, "argument " + str(urls) + " is not a valid list")

    # Test double
    def test_isValidUrlListDouble(self):
        urls = 0.5
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, "argument " + str(urls) + " is not a valid list")

    # Test empty string
    def test_isValidUrlListString(self):
        urls = " "
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, "argument " + str(urls) + " is not a valid list")

    # Test single, valid url string
    def test_isValidUrlListValidUrl(self):
        urls = self.validUrl
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, "argument " + str(urls) + " is not a valid list")

    # Test None
    def test_isValidUrlListNone(self):
        urls = None
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, "argument " + str(urls) + " is not a valid list")

    # Test empty list
    def test_isValidUrlListEmptyList(self):
        urls = []
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, "argument " + str(urls) + " is not a valid list")

    # Test list of ints
    def test_isValidUrlListIntList(self):
        urls = [1, 1, 1]
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, 1)

    # Test list of doubles
    def test_isValidUrlListDoublesList(self):
        urls = [0.5, 0.5, 0.5]
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, 0.5)

    # Test list of Nones
    def test_isValidUrlListNoneList(self):
        urls = [None, None, None]
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, None)

    # Test list of lists
    def test_isValidUrlListOfLists(self):
        urls = [[], [], []]
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, [])

    # Test mixed list
    def test_isValidUrlListMixed(self):
        urls = [" ", 1, None, 0.5]
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, " ")

    # Test mixed list with valid url
    def test_isValidUrlListMixedWithValid(self):
        urls = [self.validUrl, " ", 1, None, 0.5]
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertFalse(isValid)
        self.assertEqual(url, " ")

    # Test single valid
    def test_isValidUrlListSingleValid(self):
        urls = [self.validUrl]
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertTrue(isValid)
        self.assertEqual(url, "success")

    # Test multiple valid
    def test_isValidUrlListMultipleValid(self):
        urls = [self.config.getMagicsquare(), self.config.getTwentyone(), self.config.getTwentyone()]
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertTrue(isValid)
        self.assertEqual(url, "success")

    # Test multiple valid with duplicates
    def test_isValidUrlListMultipleValidDuplicates(self):
        urls = [self.config.getMagicsquare(), self.config.getTwentyone(), self.config.getTwentyone(),
                self.config.getMagicsquare(), self.config.getTwentyone(), self.config.getTwentyone()]
        isValid, url = self.retriever.isValidUrlList(urls)
        self.assertTrue(isValid)
        self.assertEqual(url, "success")

    #
    # appendUrl()
    #

    # Test a valid URL
    def test_appendValidUrl(self):
        url = self.validUrl
        self.retriever.appendUrl(url)
        self.assertTrue(url in self.retriever.urls)

    # Test the same URL twice, which is considered a valid submission
    def test_appendValidSameUrl(self):
        url = self.validUrl
        self.retriever.appendUrl(url)
        self.retriever.appendUrl(url)
        self.assertTrue(url in self.retriever.urls)
        self.assertEqual(self.retriever.urls.count(url), 1)
        self.assertNotEqual(self.retriever.urls.count(url), 2)

    # Test an invalid String
    def test_appendInvalidUrlString(self):
        url = "notURL"
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test an int
    def test_appendInvalidUrlInt(self):
        url = 1
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test a double
    def test_appendInvalidUrlDouble(self):
        url = 0.5
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test None
    def test_appendInvalidUrlNone(self):
        url = None
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test empty list
    def test_appendInvalidUrlEmptyList(self):
        url = []
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test a list with valid URLs as entries
    def test_appendInvalidUrlListOfUrls(self):
        url = [self.validUrl, self.validUrl, self.validUrl]
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test an invalid URL shaped like MOSS's
    def test_appendInvalidUrlLikeMoss(self):
        url = "http://moss.stanford.edu/results/12121212121212/"
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test a URL that's two valid URLs appended together
    def test_appendInvalidUrlTwoAppended(self):
        url = self.validUrl + self.validUrl
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test a valid URL that isn't MOSS
    def test_appendValidUrlNotMoss(self):
        url = "https://google.com"
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test a valid URL with space
    def test_appendValidUrlWithSpace(self):
        url = " " + self.validUrl + " "
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    # Test a valid URL with new line
    def test_appendValidUrlWithNewLine(self):
        url = "\n" + self.validUrl + "\n"
        self.retriever.appendUrl(url)
        self.assertFalse(url in self.retriever.urls)

    #
    # populateResults()
    #

    def test_populateResultsOneUrl(self):
        self.retriever.urls = [self.config.getTwentyone()]
        self.retriever.populateResults()
        self.assertNotEqual(len(self.retriever.results), 0)

    def test_populateResultsMultipleUrls(self):
        self.retriever.urls = [self.config.getTwentyone(), self.config.getMagicsquare(),
                               self.config.getPalindrome()]
        self.retriever.populateResults()
        self.assertGreater(len(self.retriever.results), 3)

    #
    # getDuplicateUrls()
    #

    # Test int
    def test_getDuplicateUrlsInt(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls(1)
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test double
    def test_getDuplicateUrlsDouble(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls(0.5)
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test empty string
    def test_getDuplicateUrlsString(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls(" ")
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test single, valid url string
    def test_getDuplicateUrlsValidUrl(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls(self.validUrl)
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test None
    def test_getDuplicateUrlsNone(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls(None)
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test empty list
    def test_getDuplicateUrlsEmptyList(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls([])
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test list of ints
    def test_getDuplicateUrlsIntList(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls([1, 1, 1])
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test list of doubles
    def test_getDuplicateUrlsDoubleList(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls([0.5, 0.5, 0.5])
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test list of Nones
    def test_getDuplicateUrlsNoneList(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls([None, None, None])
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test list of lists
    def test_getDuplicateUrlsListOfLists(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls([[], [], []])
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test mixed list
    def test_getDuplicateUrlsMixedList(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls([1, " ", 0.5, None])
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test mixed list with valid url
    def test_getDuplicateUrlsMixedListWithValidUrl(self):
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls([self.validUrl, " ", 1])
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [])

    # Test no duplicates
    def test_getDuplicateUrlsNoDuplicates(self):
        urls = [self.config.getMagicsquare(), self.config.getPalindrome(), self.config.getTwentyone()]
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls(urls)
        self.assertListEqual(duplicates, [])
        self.assertListEqual(nonDuplicates, [self.config.getMagicsquare(), self.config.getPalindrome(),
                                             self.config.getTwentyone()])

    # Test one duplicate
    def test_getDuplicateUrlsOneDuplicate(self):
        urls = [self.config.getMagicsquare(), self.config.getPalindrome(), self.config.getTwentyone(),
                self.config.getMagicsquare()]
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls(urls)
        self.assertListEqual(duplicates, [self.config.getMagicsquare()])
        self.assertListEqual(nonDuplicates, [self.config.getMagicsquare(), self.config.getPalindrome(),
                                             self.config.getTwentyone()])

    # Test all duplicates
    def test_getDuplicateUrlsAllDuplicate(self):
        urls = [self.config.getMagicsquare(), self.config.getPalindrome(), self.config.getTwentyone(),
                self.config.getMagicsquare(), self.config.getPalindrome(), self.config.getTwentyone()]
        duplicates, nonDuplicates = self.retriever.getDuplicateUrls(urls)
        self.assertListEqual(duplicates, [self.config.getMagicsquare(), self.config.getPalindrome(),
                                          self.config.getTwentyone()])
        self.assertListEqual(nonDuplicates, [self.config.getMagicsquare(), self.config.getPalindrome(),
                                             self.config.getTwentyone()])

    #
    # resultsAreValid()
    #

    # Tests all the correct types for Result object
    def test_validData(self):
        self.retriever.results = [self.results, self.results]
        self.assertTrue(self.retriever.resultsAreValid())

    # Tests all the incorrect types for Result object
    def test_invalidData(self):
        self.results.fileOne = 1
        self.results.fileTwo = 2
        self.results.fileOnePercent = "52"
        self.results.fileTwoPercent = "58"
        self.results.url = 51
        self.retriever.results = [self.results, self.results]
        self.assertFalse(self.retriever.resultsAreValid())

    def tearDown(self):
        self.retriever = None
        self.results = None


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
import io
import os.path
import shutil
import sys
import tempfile
import re
import unittest
from types import ModuleType
from typing import Any, List, Tuple, Optional

from mypy.test.helpers import (
    assert_equal, assert_string_arrays_equal, local_sys_path_set
)
from mypy.test.data import DataSuite, DataDrivenTestCase
from mypy.errors import CompileError
from mypy.stubgen import (
    generate_stubs, parse_options, Options, collect_build_targets,
    mypy_options, is_blacklisted_path, is_non_library_module
)
from mypy.stubutil import walk_packages, remove_misplaced_type_comments, common_dir_prefix
from mypy.stubgenc import generate_c_type_stub, infer_method_sig, generate_c_function_stub
from mypy.stubdoc import (
    parse_signature, parse_all_signatures, build_signature, find_unique_signatures,
    infer_sig_from_docstring, infer_prop_type_from_docstring, FunctionSig, ArgSig,
    infer_arg_sig_from_docstring, is_valid_type
)
from mypy.moduleinspect import ModuleInspect, InspectError


class StubgenCmdLineSuite(unittest.TestCase):
    """Test cases for processing command-line options and finding files."""

    @unittest.skipIf(sys.platform == 'win32', "clean up fails on Windows")
    def test_files_found(self) -> None:
        current = os.getcwd()
        with tempfile.TemporaryDirectory() as tmp:
            try:
                os.chdir(tmp)
                os.mkdir('subdir')
                self.make_file('subdir', 'a.py')
                self.make_file('subdir', 'b.py')
                os.mkdir(os.path.join('subdir', 'pack'))
                self.make_file('subdir', 'pack', '__init__.py')
                opts = parse_options(['subdir'])
                py_mods, c_mods = collect_build_targets(opts, mypy_options(opts))
                assert_equal(c_mods, [])
                files = {mod.path for mod in py_mods}
                assert_equal(files, {os.path.join('subdir', 'pack', '__init__.py'),
                                     os.path.join('subdir', 'a.py'),
                                     os.path.join('subdir', 'b.py')})
            finally:
                os.chdir(current)

    @unittest.skipIf(sys.platform == 'win32', "clean up fails on Windows")
    def test_packages_found(self) -> None:
        current = os.getcwd()
        with tempfile.TemporaryDirectory() as tmp:
            try:
                os.chdir(tmp)
                os.mkdir('pack')
                self.make_file('pack', '__init__.py', content='from . import a, b')
                self.make_file('pack', 'a.py')
                self.make_file('pack', 'b.py')
                opts = parse_options(['-p', 'pack'])
                py_mods, c_mods = collect_build_targets(opts, mypy_options(opts))
                assert_equal(c_mods, [])
                files = {os.path.relpath(mod.path or 'FAIL') for mod in py_mods}
                assert_equal(files, {os.path.join('pack', '__init__.py'),
                                     os.path.join('pack', 'a.py'),
                                     os.path.join('pack', 'b.py')})
            finally:
                os.chdir(current)

    @unittest.skipIf(sys.platform == 'win32', "clean up fails on Windows")
    def test_module_not_found(self) -> None:
        current = os.getcwd()
        captured_output = io.StringIO()
        sys.stdout = captured_output
        with tempfile.TemporaryDirectory() as tmp:
            try:
                os.chdir(tmp)
                self.make_file(tmp, 'mymodule.py', content='import a')
                opts = parse_options(['-m', 'mymodule'])
                py_mods, c_mods = collect_build_targets(opts, mypy_options(opts))
                assert captured_output.getvalue() == ''
            finally:
                sys.stdout = sys.__stdout__
                os.chdir(current)

    def make_file(self, *path: str, content: str = '') -> None:
        file = os.path.join(*path)
        with open(file, 'w') as f:
            f.write(content)

    def run(self, result: Optional[Any] = None) -> Optional[Any]:
        with local_sys_path_set():
            return super().run(result)


class StubgenCliParseSuite(unittest.TestCase):
    def test_walk_packages(self) -> None:
        with ModuleInspect() as m:
            assert_equal(
                set(walk_packages(m, ["mypy.errors"])),
                {"mypy.errors"})
            assert_equal(
                set(walk_packages(m, ["mypy.errors", "mypy.stubgen"])),
                {"mypy.errors", "mypy.stubgen"})
            all_mypy_packages = set(walk_packages(m, ["mypy"]))
            self.assertTrue(all_mypy_packages.issuperset({
                "mypy",
                "mypy.errors",
                "mypy.stubgen",
                "mypy.test",
                "mypy.test.helpers",
            }))


class StubgenUtilSuite(unittest.TestCase):
    """Unit tests for stubgen utility functions."""

    def test_parse_signature(self) -> None:
        self.assert_parse_signature('func()', ('func', [], []))

    def test_parse_signature_with_args(self) -> None:
        self.assert_parse_signature('func(arg)', ('func', ['arg'], []))
        self.assert_parse_signature('do(arg, arg2)', ('do', ['arg', 'arg2'], []))

    def test_parse_signature_with_optional_args(self) -> None:
        self.assert_parse_signature('func([arg])', ('func', [], ['arg']))
        self.assert_parse_signature('func(arg[, arg2])', ('func', ['arg'], ['arg2']))
        self.assert_parse_signature('func([arg[, arg2]])', ('func', [], ['arg', 'arg2']))

    def test_parse_signature_with_default_arg(self) -> None:
        self.assert_parse_signature('func(arg=None)', ('func', [], ['arg']))
        self.assert_parse_signature('func(arg, arg2=None)', ('func', ['arg'], ['arg2']))
        self.assert_parse_signature('func(arg=1, arg2="")', ('func', [], ['arg', 'arg2']))

    def test_parse_signature_with_qualified_function(self) -> None:
        self.assert_parse_signature('ClassName.func(arg)', ('func', ['arg'], []))

    def test_parse_signature_with_kw_only_arg(self) -> None:
        self.assert_parse_signature('ClassName.func(arg, *, arg2=1)',
                                    ('func', ['arg', '*'], ['arg2']))

    def test_parse_signature_with_star_arg(self) -> None:
        self.assert_parse_signature('ClassName.func(arg, *args)',
                                    ('func', ['arg', '*args'], []))

    def test_parse_signature_with_star_star_arg(self) -> None:
        self.assert_parse_signature('ClassName.func(arg, **args)',
                                    ('func', ['arg', '**args'], []))

    def assert_parse_signature(self, sig: str, result: Tuple[str, List[str], List[str]]) -> None:
        assert_equal(parse_signature(sig), result)

    def test_build_signature(self) -> None:
        assert_equal(build_signature([], []), '()')
        assert_equal(build_signature(['arg'], []), '(arg)')
        assert_equal(build_signature(['arg', 'arg2'], []), '(arg, arg2)')
        assert_equal(build_signature(['arg'], ['arg2']), '(arg, arg2=...)')
        assert_equal(build_signature(['arg'], ['arg2', '**x']), '(arg, arg2=..., **x)')

    def test_parse_all_signatures(self) -> None:
        assert_equal(parse_all_signatures(['random text',
                                           '.. function:: fn(arg',
                                           '.. function:: fn()',
                                           '  .. method:: fn2(arg)']),
                     ([('fn', '()'),
                       ('fn2', '(arg)')], []))

    def test_find_unique_signatures(self) -> None:
        assert_equal(find_unique_signatures(
            [('func', '()'),
             ('func', '()'),
             ('func2', '()'),
             ('func2', '(arg)'),
             ('func3', '(arg, arg2)')]),
            [('func', '()'),
             ('func3', '(arg, arg2)')])

    def test_infer_sig_from_docstring(self) -> None:
        assert_equal(infer_sig_from_docstring('\nfunc(x) - y', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x')], ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x, Y_a=None)', 'func'),
                     [FunctionSig(name='func',
                                  args=[ArgSig(name='x'), ArgSig(name='Y_a', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x, Y_a=3)', 'func'),
                     [FunctionSig(name='func',
                                  args=[ArgSig(name='x'), ArgSig(name='Y_a', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x, Y_a=[1, 2, 3])', 'func'),
                     [FunctionSig(name='func',
                                  args=[ArgSig(name='x'), ArgSig(name='Y_a', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nafunc(x) - y', 'func'), [])
        assert_equal(infer_sig_from_docstring('\nfunc(x, y', 'func'), [])
        assert_equal(infer_sig_from_docstring('\nfunc(x=z(y))', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc x', 'func'), [])
        # Try to infer signature from type annotation.
        assert_equal(infer_sig_from_docstring('\nfunc(x: int)', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='int')],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: int=3)', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='int', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: int=3) -> int', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='int', default=True)],
                                  ret_type='int')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: int=3) -> int   \n', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='int', default=True)],
                                  ret_type='int')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: Tuple[int, str]) -> str', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='Tuple[int,str]')],
                                  ret_type='str')])
        assert_equal(
            infer_sig_from_docstring('\nfunc(x: Tuple[int, Tuple[str, int], str], y: int) -> str',
                                     'func'),
            [FunctionSig(name='func',
                         args=[ArgSig(name='x', type='Tuple[int,Tuple[str,int],str]'),
                               ArgSig(name='y', type='int')],
                         ret_type='str')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: foo.bar)', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='foo.bar')],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: list=[1,2,[3,4]])', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='list', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: str="nasty[")', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='str', default=True)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc[(x: foo.bar, invalid]', 'func'), [])
        assert_equal(infer_sig_from_docstring('\nfunc(x: invalid::type<with_template>)', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type=None)],
                                  ret_type='Any')])
        assert_equal(infer_sig_from_docstring('\nfunc(x: str="")', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x', type='str', default=True)],
                                  ret_type='Any')])

    def test_infer_sig_from_docstring_duplicate_args(self) -> None:
        assert_equal(infer_sig_from_docstring('\nfunc(x, x) -> str\nfunc(x, y) -> int', 'func'),
                     [FunctionSig(name='func', args=[ArgSig(name='x'), ArgSig(name='y')],
                                  ret_type='int')])

    def test_infer_sig_from_docstring_bad_indentation(self) -> None:
        assert_equal(infer_sig_from_docstring("""
            x
             x
              x
            """, 'func'), None)

    def test_infer_arg_sig_from_docstring(self) -> None:
        assert_equal(infer_arg_sig_from_docstring("(*args, **kwargs)"),
                     [ArgSig(name='*args'), ArgSig(name='**kwargs')])
        assert_equal(
            infer_arg_sig_from_docstring(
                "(x: Tuple[int, Tuple[str, int], str]=(1, ('a', 2), 'y'), y: int=4)"),
            [ArgSig(name='x', type='Tuple[int,Tuple[str,int],str]', default=True),
             ArgSig(name='y', type='int', default=True)])

    def test_infer_prop_type_from_docstring(self) -> None:
        assert_equal(infer_prop_type_from_docstring('str: A string.'), 'str')
        assert_equal(infer_prop_type_from_docstring('Optional[int]: An int.'), 'Optional[int]')
        assert_equal(infer_prop_type_from_docstring('Tuple[int, int]: A tuple.'),
                     'Tuple[int, int]')
        assert_equal(infer_prop_type_from_docstring('\nstr: A string.'), None)

    def test_infer_sig_from_docstring_square_brackets(self) -> None:
        assert infer_sig_from_docstring(
            'fetch_row([maxrows, how]) -- Fetches stuff',
            'fetch_row',
        ) == []

    def test_remove_misplaced_type_comments_1(self) -> None:
        good = """
\u1234
def f(x):
    # type: (int) -> int

def g(x):
    # type: (int) -> int

def h():

    # type: () int

x = 1
# type: int
"""
        assert_equal(remove_misplaced_type_comments(good), good)

    def test_remove_misplaced_type_comments_2(self) -> None:
        bad = """
def f(x):
    # type: Callable[[int], int]
    pass

# type: "foo"
# type: 'bar'
x = 1
# type: int
"""
        bad_fixed = """
def f(x):
    pass

x = 1
"""
        assert_equal(remove_misplaced_type_comments(bad), bad_fixed)

    def test_remove_misplaced_type_comments_3(self) -> None:
        bad = '''
def f(x):
    """docstring"""
    # type: (int) -> int
    pass

def g(x):
    """docstring
    """
    # type: (int) -> int
    pass
'''
        bad_fixed = '''
def f(x):
    """docstring"""
    pass

def g(x):
    """docstring
    """
    pass
'''
        assert_equal(remove_misplaced_type_comments(bad), bad_fixed)

    def test_remove_misplaced_type_comments_4(self) -> None:
        bad = """
def f(x):
    '''docstring'''
    # type: (int) -> int
    pass

def g(x):
    '''docstring
    '''
    # type: (int) -> int
    pass
"""
        bad_fixed = """
def f(x):
    '''docstring'''
    pass

def g(x):
    '''docstring
    '''
    pass
"""
        assert_equal(remove_misplaced_type_comments(bad), bad_fixed)

    def test_remove_misplaced_type_comments_5(self) -> None:
        bad = """
def f(x):
    # type: (int, List[Any],
    #        float, bool) -> int
    pass

def g(x):
    # type: (int, List[Any])
    pass
"""
        bad_fixed = """
def f(x):
    #        float, bool) -> int
    pass

def g(x):
    pass
"""
        assert_equal(remove_misplaced_type_comments(bad), bad_fixed)

    def test_remove_misplaced_type_comments_bytes(self) -> None:
        original = b"""
\xbf
def f(x):
    # type: (int) -> int

def g(x):
    # type: (int) -> int
    pass

def h():
    # type: int
    pass

x = 1
# type: int
"""
        dest = b"""
\xbf
def f(x):
    # type: (int) -> int

def g(x):
    # type: (int) -> int
    pass

def h():
    pass

x = 1
# type: int
"""
        assert_equal(remove_misplaced_type_comments(original), dest)

    def test_common_dir_prefix(self) -> None:
        assert common_dir_prefix([]) == '.'
        assert common_dir_prefix(['x.pyi']) == '.'
        assert common_dir_prefix(['./x.pyi']) == '.'
        assert common_dir_prefix(['foo/bar/x.pyi']) == 'foo/bar'
        assert common_dir_prefix(['foo/bar/x.pyi',
                                  'foo/bar/y.pyi']) == 'foo/bar'
        assert common_dir_prefix(['foo/bar/x.pyi', 'foo/y.pyi']) == 'foo'
        assert common_dir_prefix(['foo/x.pyi', 'foo/bar/y.pyi']) == 'foo'
        assert common_dir_prefix(['foo/bar/zar/x.pyi', 'foo/y.pyi']) == 'foo'
        assert common_dir_prefix(['foo/x.pyi', 'foo/bar/zar/y.pyi']) == 'foo'
        assert common_dir_prefix(['foo/bar/zar/x.pyi', 'foo/bar/y.pyi']) == 'foo/bar'
        assert common_dir_prefix(['foo/bar/x.pyi', 'foo/bar/zar/y.pyi']) == 'foo/bar'


class StubgenHelpersSuite(unittest.TestCase):
    def test_is_blacklisted_path(self) -> None:
        assert not is_blacklisted_path('foo/bar.py')
        assert not is_blacklisted_path('foo.py')
        assert not is_blacklisted_path('foo/xvendor/bar.py')
        assert not is_blacklisted_path('foo/vendorx/bar.py')
        assert is_blacklisted_path('foo/vendor/bar.py')
        assert is_blacklisted_path('foo/vendored/bar.py')
        assert is_blacklisted_path('foo/vendored/bar/thing.py')
        assert is_blacklisted_path('foo/six.py')

    def test_is_non_library_module(self) -> None:
        assert not is_non_library_module('foo')
        assert not is_non_library_module('foo.bar')

        # The following could be test modules, but we are very conservative and
        # don't treat them as such since they could plausibly be real modules.
        assert not is_non_library_module('foo.bartest')
        assert not is_non_library_module('foo.bartests')
        assert not is_non_library_module('foo.testbar')

        assert is_non_library_module('foo.test')
        assert is_non_library_module('foo.test.foo')
        assert is_non_library_module('foo.tests')
        assert is_non_library_module('foo.tests.foo')
        assert is_non_library_module('foo.testing.foo')
        assert is_non_library_module('foo.SelfTest.foo')

        assert is_non_library_module('foo.test_bar')
        assert is_non_library_module('foo.bar_tests')
        assert is_non_library_module('foo.testing')
        assert is_non_library_module('foo.conftest')
        assert is_non_library_module('foo.bar_test_util')
        assert is_non_library_module('foo.bar_test_utils')
        assert is_non_library_module('foo.bar_test_base')
        assert is_non_library_module('foo.setup')
        assert is_non_library_module('foo.__main__')


class StubgenPythonSuite(DataSuite):
    """Data-driven end-to-end test cases that generate stub files.

    You can use these magic test case name suffixes:

    *_semanal
        Run semantic analysis (slow as this uses real stubs -- only use
        when necessary)
    *_import
        Import module and perform runtime introspection (in the current
        process!)

    You can use these magic comments:

    # flags: --some-stubgen-option ...
        Specify custom stubgen options

    # modules: module1 module2 ...
        Specify which modules to output (by default only 'main')
    """

    required_out_section = True
    base_path = '.'
    files = ['stubgen.test']

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        with local_sys_path_set():
            self.run_case_inner(testcase)

    def run_case_inner(self, testcase: DataDrivenTestCase) -> None:
        extra = []  # Extra command-line args
        mods = []  # Module names to process
        source = '\n'.join(testcase.input)
        for file, content in testcase.files + [('./main.py', source)]:
            # Strip ./ prefix and .py suffix.
            mod = file[2:-3].replace('/', '.')
            if mod.endswith('.__init__'):
                mod, _, _ = mod.rpartition('.')
            mods.append(mod)
            if '-p ' not in source:
                extra.extend(['-m', mod])
            with open(file, 'w') as f:
                f.write(content)

        options = self.parse_flags(source, extra)
        modules = self.parse_modules(source)
        out_dir = 'out'
        try:
            try:
                if not testcase.name.endswith('_import'):
                    options.no_import = True
                if not testcase.name.endswith('_semanal'):
                    options.parse_only = True
                generate_stubs(options)
                a = []  # type: List[str]
                for module in modules:
                    fnam = module_to_path(out_dir, module)
                    self.add_file(fnam, a, header=len(modules) > 1)
            except CompileError as e:
                a = e.messages
            assert_string_arrays_equal(testcase.output, a,
                                       'Invalid output ({}, line {})'.format(
                                           testcase.file, testcase.line))
        finally:
            for mod in mods:
                if mod in sys.modules:
                    del sys.modules[mod]
            shutil.rmtree(out_dir)

    def parse_flags(self, program_text: str, extra: List[str]) -> Options:
        flags = re.search('# flags: (.*)$', program_text, flags=re.MULTILINE)
        if flags:
            flag_list = flags.group(1).split()
        else:
            flag_list = []
        options = parse_options(flag_list + extra)
        if '--verbose' not in flag_list:
            options.quiet = True
        else:
            options.verbose = True
        return options

    def parse_modules(self, program_text: str) -> List[str]:
        modules = re.search('# modules: (.*)$', program_text, flags=re.MULTILINE)
        if modules:
            return modules.group(1).split()
        else:
            return ['main']

    def add_file(self, path: str, result: List[str], header: bool) -> None:
        if not os.path.exists(path):
            result.append('<%s was not generated>' % path.replace('\\', '/'))
            return
        if header:
            result.append('# {}'.format(path[4:]))
        with open(path, encoding='utf8') as file:
            result.extend(file.read().splitlines())


self_arg = ArgSig(name='self')


class StubgencSuite(unittest.TestCase):
    """Unit tests for stub generation from C modules using introspection.

    Note that these don't cover a lot!
    """

    def test_infer_hash_sig(self) -> None:
        assert_equal(infer_method_sig('__hash__'), [self_arg])

    def test_infer_getitem_sig(self) -> None:
        assert_equal(infer_method_sig('__getitem__'), [self_arg, ArgSig(name='index')])

    def test_infer_setitem_sig(self) -> None:
        assert_equal(infer_method_sig('__setitem__'),
                     [self_arg, ArgSig(name='index'), ArgSig(name='object')])

    def test_infer_binary_op_sig(self) -> None:
        for op in ('eq', 'ne', 'lt', 'le', 'gt', 'ge',
                   'add', 'radd', 'sub', 'rsub', 'mul', 'rmul'):
            assert_equal(infer_method_sig('__%s__' % op), [self_arg, ArgSig(name='other')])

    def test_infer_unary_op_sig(self) -> None:
        for op in ('neg', 'pos'):
            assert_equal(infer_method_sig('__%s__' % op), [self_arg])

    def test_generate_c_type_stub_no_crash_for_object(self) -> None:
        output = []  # type: List[str]
        mod = ModuleType('module', '')  # any module is fine
        imports = []  # type: List[str]
        generate_c_type_stub(mod, 'alias', object, output, imports)
        assert_equal(imports, [])
        assert_equal(output[0], 'class alias:')

    def test_generate_c_type_stub_variable_type_annotation(self) -> None:
        # This class mimics the stubgen unit test 'testClassVariable'
        class TestClassVariableCls:
            x = 1

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType('module', '')  # any module is fine
        generate_c_type_stub(mod, 'C', TestClassVariableCls, output, imports)
        assert_equal(imports, [])
        assert_equal(output, ['class C:', '    x: Any = ...'])

    def test_generate_c_type_inheritance(self) -> None:
        class TestClass(KeyError):
            pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType('module, ')
        generate_c_type_stub(mod, 'C', TestClass, output, imports)
        assert_equal(output, ['class C(KeyError): ...', ])
        assert_equal(imports, [])

    def test_generate_c_type_inheritance_same_module(self) -> None:
        class TestBaseClass:
            pass

        class TestClass(TestBaseClass):
            pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType(TestBaseClass.__module__, '')
        generate_c_type_stub(mod, 'C', TestClass, output, imports)
        assert_equal(output, ['class C(TestBaseClass): ...', ])
        assert_equal(imports, [])

    def test_generate_c_type_inheritance_other_module(self) -> None:
        import argparse

        class TestClass(argparse.Action):
            pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType('module', '')
        generate_c_type_stub(mod, 'C', TestClass, output, imports)
        assert_equal(output, ['class C(argparse.Action): ...', ])
        assert_equal(imports, ['import argparse'])

    def test_generate_c_type_with_docstring(self) -> None:
        class TestClass:
            def test(self, arg0: str) -> None:
                """
                test(self: TestClass, arg0: int)
                """
                pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType(TestClass.__module__, '')
        generate_c_function_stub(mod, 'test', TestClass.test, output, imports,
                                 self_var='self', class_name='TestClass')
        assert_equal(output, ['def test(self, arg0: int) -> Any: ...'])
        assert_equal(imports, [])

    def test_generate_c_type_with_docstring_empty_default(self) -> None:
        class TestClass:
            def test(self, arg0: str = "") -> None:
                """
                test(self: TestClass, arg0: str = "")
                """
                pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType(TestClass.__module__, '')
        generate_c_function_stub(mod, 'test', TestClass.test, output, imports,
                                 self_var='self', class_name='TestClass')
        assert_equal(output, ['def test(self, arg0: str = ...) -> Any: ...'])
        assert_equal(imports, [])

    def test_generate_c_function_other_module_arg(self) -> None:
        """Test that if argument references type from other module, module will be imported."""
        # Provide different type in python spec than in docstring to make sure, that docstring
        # information is used.
        def test(arg0: str) -> None:
            """
            test(arg0: argparse.Action)
            """
            pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType(self.__module__, '')
        generate_c_function_stub(mod, 'test', test, output, imports)
        assert_equal(output, ['def test(arg0: argparse.Action) -> Any: ...'])
        assert_equal(imports, ['import argparse'])

    def test_generate_c_function_same_module_arg(self) -> None:
        """Test that if argument references type from same module but using full path, no module
        will be imported, and type specification will be stripped to local reference.
        """
        # Provide different type in python spec than in docstring to make sure, that docstring
        # information is used.
        def test(arg0: str) -> None:
            """
            test(arg0: argparse.Action)
            """
            pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType('argparse', '')
        generate_c_function_stub(mod, 'test', test, output, imports)
        assert_equal(output, ['def test(arg0: Action) -> Any: ...'])
        assert_equal(imports, [])

    def test_generate_c_function_other_module_ret(self) -> None:
        """Test that if return type references type from other module, module will be imported."""
        def test(arg0: str) -> None:
            """
            test(arg0: str) -> argparse.Action
            """
            pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType(self.__module__, '')
        generate_c_function_stub(mod, 'test', test, output, imports)
        assert_equal(output, ['def test(arg0: str) -> argparse.Action: ...'])
        assert_equal(imports, ['import argparse'])

    def test_generate_c_function_same_module_ret(self) -> None:
        """Test that if return type references type from same module but using full path, no
        module will be imported, and type specification will be stripped to local reference.
        """
        def test(arg0: str) -> None:
            """
            test(arg0: str) -> argparse.Action
            """
            pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType('argparse', '')
        generate_c_function_stub(mod, 'test', test, output, imports)
        assert_equal(output, ['def test(arg0: str) -> Action: ...'])
        assert_equal(imports, [])

    def test_generate_c_type_with_overload_pybind11(self) -> None:
        class TestClass:
            def __init__(self, arg0: str) -> None:
                """
                __init__(*args, **kwargs)
                Overloaded function.

                1. __init__(self: TestClass, arg0: str) -> None

                2. __init__(self: TestClass, arg0: str, arg1: str) -> None
                """
                pass

        output = []  # type: List[str]
        imports = []  # type: List[str]
        mod = ModuleType(TestClass.__module__, '')
        generate_c_function_stub(mod, '__init__', TestClass.__init__, output, imports,
                                 self_var='self', class_name='TestClass')
        assert_equal(output, [
            '@overload',
            'def __init__(self, arg0: str) -> None: ...',
            '@overload',
            'def __init__(self, arg0: str, arg1: str) -> None: ...',
            '@overload',
            'def __init__(*args, **kwargs) -> Any: ...'])
        assert_equal(set(imports), {'from typing import overload'})


class ArgSigSuite(unittest.TestCase):
    def test_repr(self) -> None:
        assert_equal(repr(ArgSig(name='asd"dsa')),
                     "ArgSig(name='asd\"dsa', type=None, default=False)")
        assert_equal(repr(ArgSig(name="asd'dsa")),
                     'ArgSig(name="asd\'dsa", type=None, default=False)')
        assert_equal(repr(ArgSig("func", 'str')),
                     "ArgSig(name='func', type='str', default=False)")
        assert_equal(repr(ArgSig("func", 'str', default=True)),
                     "ArgSig(name='func', type='str', default=True)")


class IsValidTypeSuite(unittest.TestCase):
    def test_is_valid_type(self) -> None:
        assert is_valid_type('int')
        assert is_valid_type('str')
        assert is_valid_type('Foo_Bar234')
        assert is_valid_type('foo.bar')
        assert is_valid_type('List[int]')
        assert is_valid_type('Dict[str, int]')
        assert is_valid_type('None')
        assert not is_valid_type('foo-bar')
        assert not is_valid_type('x->y')
        assert not is_valid_type('True')
        assert not is_valid_type('False')
        assert not is_valid_type('x,y')
        assert not is_valid_type('x, y')


class ModuleInspectSuite(unittest.TestCase):
    def test_python_module(self) -> None:
        with ModuleInspect() as m:
            p = m.get_package_properties('inspect')
            assert p is not None
            assert p.name == 'inspect'
            assert p.file
            assert p.path is None
            assert p.is_c_module is False
            assert p.subpackages == []

    def test_python_package(self) -> None:
        with ModuleInspect() as m:
            p = m.get_package_properties('unittest')
            assert p is not None
            assert p.name == 'unittest'
            assert p.file
            assert p.path
            assert p.is_c_module is False
            assert p.subpackages
            assert all(sub.startswith('unittest.') for sub in p.subpackages)

    def test_c_module(self) -> None:
        with ModuleInspect() as m:
            p = m.get_package_properties('_socket')
            assert p is not None
            assert p.name == '_socket'
            assert p.file
            assert p.path is None
            assert p.is_c_module is True
            assert p.subpackages == []

    def test_non_existent(self) -> None:
        with ModuleInspect() as m:
            with self.assertRaises(InspectError) as e:
                m.get_package_properties('foobar-non-existent')
            assert str(e.exception) == "No module named 'foobar-non-existent'"


def module_to_path(out_dir: str, module: str) -> str:
    fnam = os.path.join(out_dir, '{}.pyi'.format(module.replace('.', '/')))
    if not os.path.exists(fnam):
        alt_fnam = fnam.replace('.pyi', '/__init__.pyi')
        if os.path.exists(alt_fnam):
            return alt_fnam
    return fnam
nilq/baby-python
python
# Generated by Django 3.2.12 on 2022-02-16 23:46

import django.core.validators
from django.db import migrations, models
import re


class Migration(migrations.Migration):

    dependencies = [
        ('customer', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='customer',
            name='user_id',
        ),
        migrations.AddField(
            model_name='customer',
            name='phone_number',
            field=models.CharField(
                default='n/a',
                help_text="Phone number must be entered in the format: '+27815742271'. Up to 11 digits allowed.",
                max_length=12,
                unique=True,
                validators=[django.core.validators.RegexValidator(
                    re.compile('^\\+?27?[6-8][0-9]{8}$'),
                    'Enter a valid phone number', 'Invalid phone number')],
                verbose_name='phone_number'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='customer',
            name='username',
            field=models.CharField(
                db_index=True,
                default='n/a',
                help_text='Required. 255 characters or fewer. Letters, numbers and @/./+/-/_ characters',
                max_length=255,
                unique=True,
                validators=[django.core.validators.RegexValidator(
                    re.compile('^[\\w.@+-]+$'),
                    'Enter a valid username.', 'invalid')],
                verbose_name='Username'),
            preserve_default=False,
        ),
    ]
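# Illustrative check (not part of the generated migration): the phone regex
# above accepts a South African mobile number such as '+27815742271' and
# rejects numbers outside the 6-8 prefix range.
import re
pattern = re.compile('^\\+?27?[6-8][0-9]{8}$')
print(bool(pattern.match('+27815742271')))  # True
print(bool(pattern.match('+27915742271')))  # False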
nilq/baby-python
python
######################################################################
#
# File: b2/download_dest.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################

from b2sdk.download_dest import *  # noqa

import b2._sdk_deprecation

b2._sdk_deprecation.deprecate_module('b2.download_dest')
# AUTOGENERATED BY NBDEV! DO NOT EDIT!

__all__ = ["index", "modules", "custom_doc_links", "git_url"]

index = {"index_flow": "00_core.ipynb",
         "query_flow": "00_core.ipynb",
         "slugify": "01_loader.ipynb",
         "get_image_files": "01_loader.ipynb",
         "verify_image": "01_loader.ipynb",
         "device": "03_encoder.ipynb",
         "archive_loader": "01_loader.ipynb",
         "db_loader": "01_loader.ipynb",
         "treemap_loader": "01_loader.ipynb",
         "make_dataset": "02_crafter.ipynb",
         "pil_loader": "02_crafter.ipynb",
         "DatasetImagePaths": "02_crafter.ipynb",
         "clip_transform": "02_crafter.ipynb",
         "crafter": "02_crafter.ipynb",
         "preproc": "02_crafter.ipynb",
         "model": "03_encoder.ipynb",
         "image_encoder": "03_encoder.ipynb",
         "text_encoder": "03_encoder.ipynb",
         "image_query_encoder": "03_encoder.ipynb",
         "join_all": "04_indexer.ipynb",
         "build_treemap": "04_indexer.ipynb",
         "save_archives": "04_indexer.ipynb",
         "ranker": "05_ranker.ipynb",
         "nns_to_files": "05_ranker.ipynb",
         "app": "07_cli.ipynb",
         "recall": "07_cli.ipynb",
         "serve": "07_cli.ipynb",
         "__main__": "07_cli.ipynb",
         "get_image": "08_jupyter_gui.ipynb",
         "get_grid": "08_jupyter_gui.ipynb",
         "update_tabs": "08_jupyter_gui.ipynb",
         "appPage": "08_jupyter_gui.ipynb",
         "st_redirect": "09_streamlit_app.ipynb",
         "st_stdout": "09_streamlit_app.ipynb",
         "st_stderr": "09_streamlit_app.ipynb",
         "send_image_query": "09_streamlit_app.ipynb",
         "send_text_query": "09_streamlit_app.ipynb",
         "path": "09_streamlit_app.ipynb",
         "text_query": "09_streamlit_app.ipynb",
         "image_query": "09_streamlit_app.ipynb",
         "im_display_zone": "09_streamlit_app.ipynb",
         "logbox": "09_streamlit_app.ipynb",
         "sizes": "09_streamlit_app.ipynb"}

modules = ["core.py",
           "loader.py",
           "crafter.py",
           "encoder.py",
           "indexer.py",
           "ranker.py",
           "cli.py",
           "gui.py",
           "streamlit_app.py"]

doc_url = "https://deepfates.github.io/memery/"

git_url = "https://github.com/deepfates/memery/tree/main/"

def custom_doc_links(name): return None
from typing import Any, List

from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck


class VMDisablePasswordAuthentication(BaseResourceNegativeValueCheck):
    def __init__(self):
        name = "Ensure that Virtual machine does not enable password authentication"
        id = "CKV_AZURE_149"
        supported_resources = ['azurerm_linux_virtual_machine_scale_set', 'azurerm_linux_virtual_machine']
        categories = [CheckCategories.ENCRYPTION]
        super().__init__(name=name, id=id, categories=categories,
                         supported_resources=supported_resources)

    def get_forbidden_values(self) -> List[Any]:
        return [False]

    def get_inspected_key(self) -> str:
        return "disable_password_authentication"


check = VMDisablePasswordAuthentication()
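# A minimal sketch of how this check evaluates a parsed resource block.
# Assumptions: checkov's base check exposes scan_resource_conf(), and parsed
# HCL attribute values arrive wrapped in lists; the configs are hypothetical.
from checkov.common.models.enums import CheckResult

failing_conf = {'disable_password_authentication': [False]}
passing_conf = {'disable_password_authentication': [True]}

assert check.scan_resource_conf(failing_conf) == CheckResult.FAILED
assert check.scan_resource_conf(passing_conf) == CheckResult.PASSED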
"""Tests for encoder routines to tf.train.Exammple.""" from absl.testing import parameterized import tensorflow as tf from tensorflow_gnn.graph import graph_constants as gc from tensorflow_gnn.graph import graph_tensor as gt from tensorflow_gnn.graph import graph_tensor_encode as ge from tensorflow_gnn.graph import graph_tensor_io as io from tensorflow_gnn.graph import graph_tensor_random as gr from tensorflow_gnn.graph import schema_utils as su import tensorflow_gnn.proto.graph_schema_pb2 as schema_pb2 from tensorflow_gnn.utils import test_utils # TODO(blais): Move this to graph_tensor_test_utils once ported. def _find_first_available_tensor(gtensor: gt.GraphTensor) -> gc.Field: for feature in gtensor.context.features.values(): return feature for node_set in gtensor.node_sets.values(): for feature in node_set.features.values(): return feature for edge_set in gtensor.edge_sets.values(): for feature in edge_set.features.values(): return feature TEST_SHAPES = [[4], [4, 3], [None, 4], [None, 4, 3], [None, None, 4], [None, None, 4, 3], [4, None], [4, 3, None], [4, None, None], [4, 3, None, None], [5, None, 4, None, 3], [None, 4, None, 3, None]] class TestWriteExample(tf.test.TestCase, parameterized.TestCase): # TODO(blais,aferludin): Replace this with graph_tensor_test_utils def _compare_graph_tensors(self, rfeatures: gc.Field, pfeatures: gc.Field): self.assertEqual(rfeatures.shape.as_list(), pfeatures.shape.as_list()) if isinstance(rfeatures, tf.RaggedTensor): self.assertAllEqual(rfeatures.flat_values, pfeatures.flat_values) rlist = rfeatures.nested_row_lengths() plist = pfeatures.nested_row_lengths() self.assertEqual(len(rlist), len(plist)) for rlengths, plengths in zip(rlist, plist): self.assertAllEqual(rlengths, plengths) else: self.assertAllEqual(rfeatures, pfeatures) @parameterized.parameters((None, True), (None, False), ('someprefix_', True)) def test_write_random_graph_tensors(self, prefix, validate): # Produce a stream of random graph tensors with a complex schema and verify # that they parse back. schema = test_utils.get_proto_resource( 'testdata/feature_repr.pbtxt', schema_pb2.GraphSchema()) spec = su.create_graph_spec_from_schema_pb(schema) # TODO(blais): Turn this into a utility. def random_graph_tensor_generator(spec) -> tf.data.Dataset: def generator(): while True: yield gr.random_graph_tensor(spec) return tf.data.Dataset.from_generator(generator, output_signature=spec) for rgraph in random_graph_tensor_generator(spec).take(16): example = ge.write_example(rgraph, prefix=prefix) serialized = tf.constant(example.SerializeToString()) pgraph = io.parse_single_example(spec, serialized, prefix=prefix, validate=validate) # TODO(blais): When graph_tensor_test_utils is ported, compare the entire # contents. rfeatures = _find_first_available_tensor(rgraph) pfeatures = _find_first_available_tensor(pgraph) self._compare_graph_tensors(rfeatures, pfeatures) def _roundtrip_test(self, shape, create_tensor): # Produce random tensors of various shapes, serialize them, and then run # them back through our parser and finally check that the shapes are # identical. dtype = tf.float32 tensor_spec = (tf.TensorSpec(shape, dtype) if tf.TensorShape(shape).is_fully_defined() else tf.RaggedTensorSpec(shape, dtype)) spec = create_tensor(tensor_spec) rgraph = gr.random_graph_tensor(spec, row_splits_dtype=tf.int64) example = ge.write_example(rgraph) serialized = tf.constant(example.SerializeToString()) pgraph = io.parse_single_example(spec, serialized, validate=True) # Find the available tensor. 
# TODO(blais): Replaced these with self.assertGraphTensorEq(rgraph, pgraph). rfeatures = _find_first_available_tensor(rgraph) pfeatures = _find_first_available_tensor(pgraph) self._compare_graph_tensors(rfeatures, pfeatures) @parameterized.parameters((shape,) for shape in TEST_SHAPES) def test_write_various_shapes_as_context(self, shape): def create_tensor(tensor_spec): return gt.GraphTensorSpec.from_piece_specs( context_spec=gt.ContextSpec.from_field_specs( features_spec={'wings': tensor_spec})) self._roundtrip_test(shape, create_tensor) @parameterized.parameters((shape,) for shape in TEST_SHAPES) def test_write_various_shapes_as_node_set(self, shape): def create_tensor(tensor_spec): return gt.GraphTensorSpec.from_piece_specs( node_sets_spec={'butterfly': gt.NodeSetSpec.from_field_specs( sizes_spec=tf.TensorSpec([1], tf.int64), features_spec={'wings': tensor_spec})}) self._roundtrip_test(shape, create_tensor) if __name__ == '__main__': tf.test.main()
from django import forms

from fir_nuggets.models import NuggetForm
from incidents import models as incident_models


class LandingForm(NuggetForm):
    new = forms.BooleanField(initial=True, required=False)
    event = forms.ModelChoiceField(queryset=incident_models.Incident.objects.exclude(status='C'), required=False)

    status = forms.CharField(required=True, widget=forms.HiddenInput, initial='O')
    subject = forms.CharField(required=False)
    concerned_business_lines = forms.ModelMultipleChoiceField(required=False,
                                                              queryset=incident_models.BusinessLine.objects.all())
    category = forms.ModelChoiceField(queryset=incident_models.IncidentCategory.objects.all(), required=False)
    detection = forms.ModelChoiceField(required=False,
                                       queryset=incident_models.Label.objects.filter(group__name='detection'))
    severity = forms.ChoiceField(required=False, choices=incident_models.SEVERITY_CHOICES)
    description = forms.CharField(required=False, widget=forms.Textarea)
    is_incident = forms.BooleanField(initial=False, required=False)
    confidentiality = forms.ChoiceField(required=False, choices=incident_models.CONFIDENTIALITY_LEVEL, initial='1')
    is_major = forms.BooleanField(initial=False, required=False)
    actor = forms.ModelChoiceField(required=False, queryset=incident_models.Label.objects.filter(group__name='actor'))
    plan = forms.ModelChoiceField(required=False, queryset=incident_models.Label.objects.filter(group__name='plan'))

    def __init__(self, *args, **kwargs):
        super(LandingForm, self).__init__(*args, **kwargs)
        self.fields['raw_data'].widget.attrs['readonly'] = True
#!/usr/bin/env python

#====================================================

import copy
import uuid
import numpy as np
import threading

from Utilities.decorators import thread

#====================================================

class CircuitCritic(object):

    def __init__(self, circuit_params):
        self.circuit_params = circuit_params
        self.CRITICIZED_CIRCUITS = []
        self.EXTRA_TASKS = []
        self.RECEIVED_EXTRA_EVALUATIONS = {}

        import CircuitQuantifier.critics as critics
        self.merit_functions = {}
        for merit in dir(critics):
            if merit.startswith('__'):
                continue
            self.merit_functions[merit.split('_')[-1]] = getattr(critics, merit)

    ##############################################################

    def report_reevaluations(self, circuits):
        for circuit in circuits:
            self.RECEIVED_EXTRA_EVALUATIONS[circuit['circuit']['circuit_id']] = circuit

    def run_merit_evaluation(self, merit_func, circuit_dict, merit_options, task):
        merit_eval_dict = merit_func(circuit_dict, merit_options,
                                     circuit_params=self.circuit_params)

        if len(merit_eval_dict['extra_tasks']) > 0:
            # check if the merit evaluation requests new tasks
            remaining_extra_circuit_ids = []
            received_extra_task_evaluations = {}

            for extra_task in merit_eval_dict['extra_tasks']:
                # we need to modify the circuit_id of the proposed circuit parameters
                new_circuit_id = str(uuid.uuid4())
                extra_task['circuit']['circuit_id'] = new_circuit_id
                self.EXTRA_TASKS.append(extra_task)
                remaining_extra_circuit_ids.append(new_circuit_id)

            while len(received_extra_task_evaluations) < len(remaining_extra_circuit_ids):
                # check if we have any new evaluated circuits
                extra_circuit_ids = list(self.RECEIVED_EXTRA_EVALUATIONS.keys())
                for extra_circuit_id in extra_circuit_ids:
                    # memorize received evaluations
                    if extra_circuit_id in remaining_extra_circuit_ids:
                        received_extra_task_evaluations[extra_circuit_id] = self.RECEIVED_EXTRA_EVALUATIONS[extra_circuit_id]
                        del self.RECEIVED_EXTRA_EVALUATIONS[extra_circuit_id]

            # call evaluator again
            merit_eval_dict = merit_func(circuit_dict, merit_options,
                                         circuit_params=self.circuit_params,
                                         context_circuits=received_extra_task_evaluations.values())
            circuit_dict['loss'] = merit_eval_dict['loss']
            circuit_dict['context_circuits'] = list(received_extra_task_evaluations.values())
        else:
            circuit_dict['loss'] = merit_eval_dict['loss']
            circuit_dict['context_circuits'] = None

        self.CRITICIZED_CIRCUITS.append([circuit_dict, task])

    ##############################################################

    @thread
    def criticize_circuit(self, circuit, task_set, task):
        # circuit: dict | information about circuit
        merit = task_set.settings['merit']
        merit_options = task_set.settings['merit_options']

        # check if simulation timed out
        if 'PLACEHOLDER' in circuit['measurements']:
            loss = np.nan

        # use specified merit function to calculate loss
        else:
            if not merit in self.merit_functions:
                print('# ERROR | ... could not find merit function: %s' % merit)
                return None

            # merit function needs to be put on a separate thread in case it likes to launch new tasks
            merit_func = self.merit_functions[merit]
            self.run_merit_evaluation(merit_func, circuit, merit_options, task)

    def get_requested_tasks(self):
        new_tasks = copy.deepcopy(self.EXTRA_TASKS)
        for new_task in new_tasks:
            self.EXTRA_TASKS.pop(0)
        return new_tasks

    def criticize_circuits(self, circuits, task_set, tasks):
        for circuit_index, circuit in enumerate(circuits):
            self.criticize_circuit(circuit, task_set, tasks[circuit_index])

    def get_criticized_circuits(self):
        circuits = copy.deepcopy(self.CRITICIZED_CIRCUITS)
        for circuit in circuits:
            self.CRITICIZED_CIRCUITS.pop(0)
        return circuits

    def get_extra_tasks(self):
        circuits = copy.deepcopy(self.EXTRA_TASKS)
        for circuit in circuits:
            self.EXTRA_TASKS.pop(0)
        return circuits
# -*- coding: utf-8 -*-

"""
easybimehlanding

This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""

import easybimehlanding.models.travel_insurance_policy_extend


class TravelInsurancePolicyExtendView(object):

    """Implementation of the 'TravelInsurancePolicyExtendView' model.

    TODO: type model description here.

    Attributes:
        travel_insurance_policy_extend (TravelInsurancePolicyExtend): TODO:
            type description here.
        travel_insurance_policy_extend_ages (list of string): TODO: type
            description here.
        travel_insurance_policy_extend_passengers (list of string): TODO: type
            description here.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "travel_insurance_policy_extend": 'travelInsurancePolicyExtend',
        "travel_insurance_policy_extend_ages": 'travelInsurancePolicyExtendAges',
        "travel_insurance_policy_extend_passengers": 'travelInsurancePolicyExtendPassengers'
    }

    def __init__(self,
                 travel_insurance_policy_extend=None,
                 travel_insurance_policy_extend_ages=None,
                 travel_insurance_policy_extend_passengers=None):
        """Constructor for the TravelInsurancePolicyExtendView class"""

        # Initialize members of the class
        self.travel_insurance_policy_extend = travel_insurance_policy_extend
        self.travel_insurance_policy_extend_ages = travel_insurance_policy_extend_ages
        self.travel_insurance_policy_extend_passengers = travel_insurance_policy_extend_passengers

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        travel_insurance_policy_extend = easybimehlanding.models.travel_insurance_policy_extend.TravelInsurancePolicyExtend.from_dictionary(dictionary.get('travelInsurancePolicyExtend')) if dictionary.get('travelInsurancePolicyExtend') else None
        travel_insurance_policy_extend_ages = dictionary.get('travelInsurancePolicyExtendAges')
        travel_insurance_policy_extend_passengers = dictionary.get('travelInsurancePolicyExtendPassengers')

        # Return an object of this model
        return cls(travel_insurance_policy_extend,
                   travel_insurance_policy_extend_ages,
                   travel_insurance_policy_extend_passengers)
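# A minimal round-trip sketch for the model above; the payload is illustrative
# and only assumes that keys follow the API names declared in `_names`:
example = {
    'travelInsurancePolicyExtend': None,
    'travelInsurancePolicyExtendAges': ['18-30'],
    'travelInsurancePolicyExtendPassengers': ['1'],
}
view = TravelInsurancePolicyExtendView.from_dictionary(example)
assert view.travel_insurance_policy_extend_ages == ['18-30']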
# Copyright (c) 2016-2020, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#  * Neither the name of The Bifrost Authors nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import absolute_import

from bifrost.blocks.copy import copy, CopyBlock
from bifrost.blocks.transpose import transpose, TransposeBlock
from bifrost.blocks.reverse import reverse, ReverseBlock
from bifrost.blocks.fft import fft, FftBlock
from bifrost.blocks.fftshift import fftshift, FftShiftBlock
from bifrost.blocks.fdmt import fdmt, FdmtBlock
from bifrost.blocks.detect import detect, DetectBlock
from bifrost.blocks.guppi_raw import read_guppi_raw, GuppiRawSourceBlock
from bifrost.blocks.print_header import print_header, PrintHeaderBlock
from bifrost.blocks.sigproc import read_sigproc, SigprocSourceBlock
from bifrost.blocks.sigproc import write_sigproc, SigprocSinkBlock
from bifrost.blocks.scrunch import scrunch, ScrunchBlock
from bifrost.blocks.accumulate import accumulate, AccumulateBlock
from bifrost.blocks.binary_io import BinaryFileReadBlock, BinaryFileWriteBlock
from bifrost.blocks.binary_io import binary_read, binary_write
from bifrost.blocks.unpack import unpack, UnpackBlock
from bifrost.blocks.quantize import quantize, QuantizeBlock
from bifrost.blocks.wav import read_wav, WavSourceBlock
from bifrost.blocks.wav import write_wav, WavSinkBlock
from bifrost.blocks.serialize import serialize, SerializeBlock, deserialize, DeserializeBlock
from bifrost.blocks.reduce import reduce, ReduceBlock
from bifrost.blocks.correlate import correlate, CorrelateBlock
from bifrost.blocks.convert_visibilities import convert_visibilities, ConvertVisibilitiesBlock

try:
    # Avoid error if portaudio library not installed
    from bifrost.blocks.audio import read_audio, AudioSourceBlock
except:
    pass

try:
    # Avoid error if psrdada library not installed
    from bifrost.blocks.psrdada import read_psrdada_buffer, PsrDadaSourceBlock
except:
    pass
from .clock import Clock
from .identity import Identity
from .license import License
from .note import Note
from .resource import Resource

__all__ = ["Clock", "Identity", "License", "Note", "Resource"]
from behave import *
from src.hamming import distance
from assertpy import assert_that

use_step_matcher("re")


@given("two strands")
def step_impl(context):
    context.distance = distance


@when("(?P<strand1>.+) and (?P<strand2>.+) are same length")
def step_impl(context, strand1, strand2):
    context.result = context.distance(strand1, strand2)


@then("result should be (?P<result>.+)")
def step_impl(context, result):
    assert_that(context.result).is_equal_to(int(result))
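# A hypothetical feature file these step definitions would match (the strand
# values and expected distance are illustrative, not from the original repo):
#
#   Feature: Hamming distance
#     Scenario: strands of equal length
#       Given two strands
#       When GGACTGA and GGACGGA are same length
#       Then result should be 1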
#
# This file is an example to set the environment.
# The configs will be used in dmrgci.py and chemps2.py
#

import os
from pyscf import lib

# To install Block as the FCI solver for CASSCF, see
#       http://sunqm.github.io/Block/build.html
#       https://github.com/sanshar/Block
BLOCKEXE = '/path/to/Block/block.spin_adapted'
BLOCKEXE_COMPRESS_NEVPT = '/path/to/serially/compiled/Block/block.spin_adapted'
#BLOCKSCRATCHDIR = os.path.join('./scratch', str(os.getpid()))
BLOCKSCRATCHDIR = os.path.join(lib.param.TMPDIR, str(os.getpid()))
#BLOCKRUNTIMEDIR = '.'
BLOCKRUNTIMEDIR = str(os.getpid())
MPIPREFIX = 'mpirun'  # change to srun for SLURM job system

# Use ChemPS2 as the FCI solver for CASSCF
# building PyChemPS2, a python module will be generated in
# /path/to/ChemPS2/build/PyChemPS2
# see more details in the ChemPS2 document
#       https://github.com/SebWouters/CheMPS2
PYCHEMPS2BIN = '/path/to/CheMPS2/build/PyCheMPS2/PyCheMPS2.so'
from nipype.interfaces.base import BaseInterface, \
    BaseInterfaceInputSpec, traits, File, TraitedSpec, InputMultiPath, Directory
from nipype.utils.filemanip import split_filename
import nibabel as nb
import numpy as np
import os


class ConsensusInputSpec(BaseInterfaceInputSpec):
    in_Files = traits.Either(InputMultiPath(File(exists=True)),
                             Directory(exists=True),
                             traits.Str(),
                             traits.List(),
                             mandatory=True)
    maskfile = File(exists=True, desc='total target mask', mandatory=True)


class ConsensusOutputSpec(TraitedSpec):
    #out_File = File(exists=True, desc="out_File")
    variation_mat = File(exists=True, desc="variation_mat")
    consensus_mat = File(exists=True, desc="consensus_mat")


class Consensus(BaseInterface):
    input_spec = ConsensusInputSpec
    output_spec = ConsensusOutputSpec

    def _get_filelist(self, trait_input):
        if os.path.isdir(trait_input[0]):
            filelist = []
            for directory in trait_input:
                for root, dirnames, fnames in os.walk(directory):
                    for f in fnames:
                        if f.endswith('.nii'):
                            filelist.append(os.path.join(root, f))
            return filelist

        if os.path.isfile(trait_input[0]):
            return trait_input

    def makeConsensus(self, eachFile, mask):
        clustermap = nb.load(eachFile).get_data()
        maskedmap = clustermap[np.where(np.squeeze(mask))]
        consensus = np.zeros((len(maskedmap), len(maskedmap)))
        for j in range(len(maskedmap)):
            consensus[j] = maskedmap == maskedmap[j]
        return consensus

    def _run_interface(self, runtime):
        mask = nb.load(self.inputs.maskfile).get_data()
        src_paths = self._get_filelist(self.inputs.in_Files)
        _, base, _ = split_filename(self.inputs.in_Files[0])

        cons_dim = len(nb.load(src_paths[0]).get_data())
        totalConsensus = np.zeros((cons_dim, cons_dim), dtype=np.float64)

        for src_path in src_paths:
            # makeConsensus requires the mask; the original call omitted it
            totalConsensus += self.makeConsensus(src_path, mask)

        ##average across all consensus instances and output##
        totalConsensus = totalConsensus / len(src_paths)
        cImg = nb.Nifti1Image(totalConsensus, None)
        nb.save(cImg, os.path.abspath(base + '_ConsensusMat.nii'))

        ##make consensus into stability measure## remove stability measure for now...
        #likeness = abs(totalConsensus-0.5)
        #stability = np.mean(likeness,axis=0)
        ##make into NiftiImage##
        #nImg = nb.Nifti1Image(stability, None)
        #nb.save(nImg, os.path.abspath(base+'_Stability.nii'))
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        _, base, _ = split_filename(self.inputs.in_Files[0])
        #outputs["out_File"] = os.path.abspath(base+'_Stability.nii')
        outputs["variation_mat"] = os.path.abspath(base + '_VariationMat.nii')
        outputs["consensus_mat"] = os.path.abspath(base + '_ConsensusMat.nii')
        return outputs
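# A minimal usage sketch (filenames are hypothetical; like any nipype
# interface, Consensus is configured through its inputs and run with .run()):
#
#   consensus = Consensus(in_Files=['clusters_a.nii', 'clusters_b.nii'],
#                         maskfile='target_mask.nii')
#   result = consensus.run()
#   result.outputs.consensus_mat   # path to the averaged consensus matrix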
#!/usr/bin/env python

## Program:   VMTK
## Module:    $RCSfile: vmtksurfacedistance.py,v $
## Language:  Python
## Date:      $Date: 2005/09/14 09:49:59 $
## Version:   $Revision: 1.6 $

## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.

## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.

from __future__ import absolute_import  # NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY

import vtk
from vmtk import vtkvmtk
import sys

from vmtk import pypes


class vmtkSurfaceFeatureEdges(pypes.pypeScript):

    def __init__(self):
        pypes.pypeScript.__init__(self)

        self.Surface = None
        self.BoundaryEdges = 1
        self.FeatureEdges = 1
        self.FeatureAngle = 30
        self.NonManifoldEdges = 0
        self.Coloring = 0

        self.SetScriptName('vmtksurfacefeatureedges')
        self.SetScriptDoc('extract feature edges from a surface')
        self.SetInputMembers([
            ['Surface', 'i', 'vtkPolyData', 1, '', 'the input surface', 'vmtksurfacereader'],
            ['BoundaryEdges', 'boundaryedges', 'bool', 1, '', ''],
            ['FeatureEdges', 'featureedges', 'bool', 1, '', ''],
            ['FeatureAngle', 'featureangle', 'float', 1, '(0,)', ''],
            ['NonManifoldEdges', 'nonmanifoldedges', 'bool', 1, '', ''],
            ['Coloring', 'coloring', 'bool', 1, '', ''],
        ])
        self.SetOutputMembers([
            ['Surface', 'o', 'vtkPolyData', 1, '', 'the output feature edges', 'vmtksurfacewriter']
        ])

    def Execute(self):
        if self.Surface == None:
            self.PrintError('Error: No Surface.')

        extractor = vtk.vtkFeatureEdges()
        extractor.SetInputData(self.Surface)
        extractor.SetBoundaryEdges(self.BoundaryEdges)
        extractor.SetFeatureEdges(self.FeatureEdges)
        extractor.SetFeatureAngle(self.FeatureAngle)
        extractor.SetNonManifoldEdges(self.NonManifoldEdges)
        extractor.SetColoring(self.Coloring)
        extractor.CreateDefaultLocator()
        extractor.Update()

        self.Surface = extractor.GetOutput()


if __name__ == '__main__':
    main = pypes.pypeMain()
    main.Arguments = sys.argv
    main.Execute()
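# A typical invocation from the shell (filenames are hypothetical; -ifile and
# -ofile are assumed here to be the usual vmtk pype file I/O options):
#
#   vmtksurfacefeatureedges -ifile surface.vtp -featureangle 45 -ofile edges.vtp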
# -*- coding: utf-8 -*-
#______________________________________________________________________________
#______________________________________________________________________________
#
#                       Coded by Daniel González Duque
#______________________________________________________________________________
#______________________________________________________________________________

'''
This package uses functions from Matlab to run models made in COMSOL; it is
necessary to have access to the main folder of COMSOL to run the algorithms
in Matlab.

This package can also open the information from exported files and use it to
generate new data.

Although this package is focused on flow through porous media in 2D right
now, it can be used widely in other applications.
____________________________________________________________________________

This class is of free use and can be modified; if you have some problem,
please contact the programmer at the following e-mails:

- daniel.gonzalez@vanderbilt.edu
- danielgondu@gmail.com
____________________________________________________________________________
'''

from setuptools import setup

setup(
    name="pyDGDutil",
    version="1.0.1",
    author="Daniel González Duque",
    description="Complementary scripts of other codes",
    license="MIT",
    packages=["pyDGDutil"],
    python_requires='>=3.6'
)