Dataset columns: content (string, lengths 0 to 1.05M), origin (string, 2 classes), type (string, 2 classes)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0004_review'),
    ]

    operations = [
        migrations.AddField(
            model_name='location',
            name='alcohol',
            field=models.IntegerField(blank=True, null=True, choices=[(0, b'No'), (1, b'Yes')]),
        ),
        migrations.AddField(
            model_name='location',
            name='bathrooms',
            field=models.IntegerField(blank=True, null=True, choices=[(0, b'No'), (1, b'Yes')]),
        ),
        migrations.AddField(
            model_name='location',
            name='coffee',
            field=models.IntegerField(blank=True, null=True, choices=[(0, b'None'), (1, b'Truck Stop'), (2, b'Good'), (3, b'Really Good'), (4, b'Great')]),
        ),
        migrations.AddField(
            model_name='location',
            name='food',
            field=models.IntegerField(blank=True, null=True, choices=[(0, b'No'), (1, b'Yes')]),
        ),
        migrations.AddField(
            model_name='location',
            name='outdoor',
            field=models.IntegerField(blank=True, null=True, choices=[(0, b'No'), (1, b'Yes')]),
        ),
        migrations.AddField(
            model_name='location',
            name='outlets',
            field=models.IntegerField(blank=True, null=True, choices=[(0, b'None'), (1, b'Minimal'), (2, b'Some'), (3, b'Ample')]),
        ),
        migrations.AddField(
            model_name='location',
            name='seating',
            field=models.IntegerField(blank=True, null=True, choices=[(0, b'None'), (1, b'Minimal'), (2, b'Some'), (3, b'Ample')]),
        ),
        migrations.AddField(
            model_name='location',
            name='wifi',
            field=models.IntegerField(blank=True, null=True, choices=[(0, b'None'), (1, b'Spotty'), (2, b'Strong')]),
        ),
    ]
nilq/baby-python
python
# Import the requests module
import requests
import urllib.parse


class Xiaoniu(object):
    def __init__(self):
        self.headers = {
            'Accept': 'application/json, text/plain, */*',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://niutrans.vip',
            'Referer': 'https://niutrans.vip/console/textTrans',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
        }
        self.url = 'https://test.niutrans.vip/NiuTransServer/testtrans'

    def translate(self, from_lan, to_lan, text):
        data = {
            'from': from_lan,
            'to': to_lan,
            'src_text': text
        }
        # Build the request URL, URL-encoding the source text
        url = self.url
        url += '?from={}&to={}&src_text='.format(from_lan, to_lan)
        url += urllib.parse.quote(data['src_text'])
        # print(url)
        result = requests.get(url=url, headers=self.headers)
        # print(result.text)
        if result is not None:
            return result.json()['tgt_text']


def xiaoniuTrans(word, from_language='zh', to_language='en'):
    niu = Xiaoniu()
    return niu.translate(from_language, to_language, word)


if __name__ == '__main__':
    print(xiaoniuTrans("hello", from_language='en', to_language='zh'))
nilq/baby-python
python
import warnings from pyro import params from pyro.distributions.distribution import Distribution from pyro.poutine.util import is_validation_enabled from .messenger import Messenger class LiftMessenger(Messenger): """ Messenger which "lifts" parameters to random samples. Given a stochastic function with param calls and a prior, creates a stochastic function where all param calls are replaced by sampling from prior. Prior should be a callable or a dict of names to callables. """ def __init__(self, prior): """ :param prior: prior used to lift parameters. Prior can be of type dict, pyro.distributions, or a python stochastic fn Constructor """ super(LiftMessenger, self).__init__() self.prior = prior self._samples_cache = {} def __enter__(self): self._samples_cache = {} if is_validation_enabled() and isinstance(self.prior, dict): self._param_hits = set() self._param_misses = set() return super(LiftMessenger, self).__enter__() def __exit__(self, *args, **kwargs): self._samples_cache = {} if is_validation_enabled() and isinstance(self.prior, dict): extra = set(self.prior) - self._param_hits if extra: warnings.warn( "pyro.module prior did not find params ['{}']. " "Did you instead mean one of ['{}']?" .format("', '".join(extra), "', '".join(self._param_misses))) return super(LiftMessenger, self).__exit__(*args, **kwargs) def _pyro_sample(self, msg): return None def _pyro_param(self, msg): """ Overrides the `pyro.param` call with samples sampled from the distribution specified in the prior. The prior can be a pyro.distributions object or a dict of distributions keyed on the param names. If the param name does not match the name the keys in the prior, that param name is unchanged. """ name = msg["name"] param_name = params.user_param_name(name) if isinstance(self.prior, dict): # prior is a dict of distributions if param_name in self.prior.keys(): msg["fn"] = self.prior[param_name] msg["args"] = msg["args"][1:] if isinstance(msg['fn'], Distribution): msg["args"] = () msg["kwargs"] = {} msg["infer"] = {} if is_validation_enabled(): self._param_hits.add(param_name) else: if is_validation_enabled(): self._param_misses.add(param_name) return None elif isinstance(self.prior, Distribution): # prior is a distribution msg["fn"] = self.prior msg["args"] = () msg["kwargs"] = {} msg["infer"] = {} elif callable(self.prior): if not isinstance(self.prior, Distribution): # prior is a stochastic fn. block sample msg["stop"] = True msg["fn"] = self.prior msg["args"] = msg["args"][1:] else: # otherwise leave as is return None msg["type"] = "sample" if name in self._samples_cache: # Multiple pyro.param statements with the same # name. Block the site and fix the value. msg['value'] = self._samples_cache[name]['value'] msg["is_observed"] = True msg["stop"] = True else: self._samples_cache[name] = msg msg["is_observed"] = False return self._pyro_sample(msg)
nilq/baby-python
python
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from app.dialogue.routers import dialogues_router
from app.message.middleware import WebSocketStateMiddleware
from app.message.routers import message_router
from app.notification.routers import notification_router
from config import PROJECT_NAME, API, VERSION, CLIENT_NAME
from db import engine, Base

app = FastAPI(
    title=PROJECT_NAME,
    version=VERSION,
    description='Messenger Service Anti-Freelancer by Counter',
    root_path=f'/{CLIENT_NAME}',
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=['*'],
    allow_headers=['*'],
)
app.add_middleware(WebSocketStateMiddleware)


@app.on_event('startup')
async def startup():
    """ Startup """
    async with engine.begin() as connection:
        await connection.run_sync(Base.metadata.create_all)


app.include_router(message_router, prefix=f'/{API}/messages')
app.include_router(notification_router, prefix=f'/{API}/notifications')
app.include_router(dialogues_router, prefix=f'/{API}/dialogues')
nilq/baby-python
python
import numpy as np from glob import glob as glob #This will probably change astrotable = '/Users/Arthur/Documents/School/MetaPak/GradPak_code/extras/gradpak_w_sky_astrometry_table.txt' basedir = '/Users/Arthur/Documents/School/891_paper/GP_data' #astrotable = '/usr/users/eigenbrot/research/Pak/gradpak_w_sky_astrometry_table.txt' def write_header(f): f.write(r"""\renewcommand{\thefootnote}{\alph{footnote}} \begin{center} \begin{longtable}{crrccccc} \caption{\GP Fiber Locations and LabData} \label{GPtesting:tab:GP_cal_full} \\ \hline \hline \\[-2ex] Fiber & $\Delta\alpha$\tablenotemark{a} & $\Delta\delta$\tablenotemark{a} & diameter & $T_\mathrm{tot}$ & $T_4$\tablenotemark{b} & $T_{4.4}$ & $T_5$ \\ number & ('') & ('') & ('') & & & & \\[0.5ex] \hline \\[-1.8ex] \endfirsthead \multicolumn{7}{c}{{\tablename} \thetable{} -- Continued} \\[0.5ex] \hline \hline \\[-2ex] Fiber & $\Delta\alpha$\tablenotemark{a} & $\Delta\delta$\tablenotemark{a} & diameter & $T_\mathrm{tot}$ & $T_4$\tablenotemark{b} & $T_{4.4}$ & $T_5$ \\ number & ('') & ('') & ('') & & & & \\[0.5ex] \hline \\[-1.8ex] \endhead \endfoot \\[-1.8ex] \hline \hline \endlastfoot """) return def write_end(f): f.write(r"""\footnotetext[1]{Distance from fiber 105.} \footnotetext[2]{An estimate of on-bench performance. See Equation \ref{GPtesting:eq:T_FRD}.} \end{longtable} \end{center} \renewcommand{\thefootnote}{\arabic{footnote}} """) def do_single(folder): print "Looking for " + '{}/*metrics.txt'.format(folder) mfile = glob('{}/*metrics.txt'.format(folder))[0] print "found ", mfile fibnum, tput, w4, w44, w5 = np.loadtxt(mfile, usecols=(1,5,6,7,8), unpack=True) return fibnum, tput, w44, w4, w5 def convert_arcsec(val): return float(val.split('"')[0]) def main(output='Appendix/gradpak_cal_table_long.tex'): conv = {2: convert_arcsec, 3: convert_arcsec, 4:convert_arcsec} fibnum, r_arc, z_arc, rad_arc = np.loadtxt(astrotable, delimiter=';',unpack=True, usecols=(0,2,3,4), converters=conv) sidx = np.argsort(fibnum) fibnum = fibnum[sidx] r_arc = r_arc[sidx] z_arc = z_arc[sidx] rad_arc = rad_arc[sidx] sublist = glob('{}/GradPak*micron'.format(basedir)) ttfibnum = np.array([]) tput = np.array([]) w44 = np.array([]) w4 = np.array([]) w5 = np.array([]) for sub in sublist: print sub tf, tt, t44, t4, t5 = do_single(sub) ttfibnum = np.r_[ttfibnum,tf] tput = np.r_[tput,tt] w44 = np.r_[w44,t44] w4 = np.r_[w4,t4] w5 = np.r_[w5,t5] tidx = np.where(ttfibnum > 58)[0] ttfibnum[tidx] -= 1 sidx = np.argsort(ttfibnum) ttfibnum = ttfibnum[sidx] tput = tput[sidx] w44 = w44[sidx] w4 = w4[sidx] w5 = w5[sidx] idx = np.where(fibnum > 58) fibnum[idx] -= 1 #We don't like to number the broken fiber #103 because fibernums start at 1 ref_r = r_arc[104] ref_z = z_arc[104] r_arc -= ref_r z_arc -= ref_z fmt = '{:n} & '+'{:5.2f} & '*6 + '{:5.2f}' with open(output,'w') as f: write_header(f) for i in range(fibnum.size): if fibnum[i] != ttfibnum[i]: print '!!!!!!WARNING!!!!!!!!' print fibnum[i], ttfibnum[i] f.write(fmt.format(fibnum[i],r_arc[i],z_arc[i],rad_arc[i]*2,tput[i],w4[i],w44[i],w5[i])) if i != fibnum.size - 1: f.write(r'\\') f.write('\n') write_end(f) return
nilq/baby-python
python
def bisection(fun, y, xl, xr, tol, maxiter):
    """
    Uses the bisection method to solve the equation f(x) - y = 0.

    input:
        fun: the function f(x)
        y: target value, y = f(x)
        xl: lower bound
        xr: upper bound
        tol: tolerance on the residual
        maxiter: maximum number of iterations
    return:
        x: solution
        f: residual at the solution
        num_iters: number of iterations performed
    """
    fl = fun(xl) - y  # residual for left bound
    fr = fun(xr) - y  # residual for right bound

    num_iters = 0
    for i in range(maxiter):
        num_iters += 1
        # get midpoint
        x = 0.5 * (xl + xr)
        # evaluate residual at midpoint
        f = fun(x) - y
        # check for convergence
        if abs(f) < tol:
            break
        # reset the bounds
        if f * fl < 0.0:
            # move right bound info to mid
            xr = x
            fr = f
        else:
            # move left bound info to mid
            xl = x
            fl = f

    return x, f, num_iters
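A minimal usage sketch for the bisection routine above; the test function, bounds, and tolerances below are illustrative assumptions, not part of the original module.

import math

# Solve x**3 = 10 on [0, 3], i.e. compute the cube root of 10.
x, residual, iters = bisection(lambda v: v ** 3, 10.0, 0.0, 3.0, 1e-8, 100)
print(x, residual, iters)  # x is approximately 2.15443
assert math.isclose(x, 10.0 ** (1.0 / 3.0), rel_tol=1e-6)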
nilq/baby-python
python
import sqlite3

conn = sqlite3.connect('northwind_small.sqlite3')
cur = conn.cursor()

top_products = cur.execute('SELECT ProductName, UnitPrice FROM Product \
    ORDER BY UnitPrice DESC LIMIT 10').fetchall()
print(top_products)
"""Product names returned (each row also carries its UnitPrice):
[('Côte de Blaye',), ('Thüringer Rostbratwurst',), ('Mishi Kobe Niku',),
 ("Sir Rodney's Marmalade",), ('Carnarvon Tigers',), ('Raclette Courdavault',),
 ('Manjimup Dried Apples',), ('Tarte au sucre',), ('Ipoh Coffee',),
 ('Rössle Sauerkraut',)]
"""

avg_age = cur.execute("SELECT avg(HireDate - BirthDate) \
    FROM Employee").fetchall()
print(avg_age[0][0])
"""37.22222222222222"""

supply = cur.execute("SELECT ProductName, UnitPrice, CompanyName \
    FROM Product \
    INNER JOIN Supplier on Supplier.Id = Product.SupplierID \
    ORDER BY UnitPrice DESC LIMIT 10").fetchall()
print(supply)

cat = cur.execute("SELECT CategoryName, COUNT(DISTINCT Product.Id) \
    FROM Product \
    INNER JOIN Category on Category.Id = Product.CategoryID \
    GROUP BY CategoryName \
    ORDER BY COUNT(DISTINCT Product.Id) DESC \
    LIMIT 1 \
    ").fetchall()
print(cat[0][0])
"""Confections"""

conn.close()  # No changes, so no need to commit
nilq/baby-python
python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext

ext_modules = [
    Extension("NVEnc", ["NVEnc.py"]),
    Extension("QSVEnc", ["QSVEnc.py"]),
    Extension("StaxRip", ["StaxRip.py"]),
]

install_requires = [
    'requests',
    'tqdm',
    'beautifulsoup4',
    'cython',
    'win32api',
    'psutil',
]

setup(
    name='Update',
    version='0.2',
    cmdclass={'build_ext': build_ext},
    ext_modules=ext_modules,
    platforms='Windows_x86_x64',
    requires=install_requires,
)
nilq/baby-python
python
# coding=utf-8
import logging
import os

import scrapy
from scrapy.exceptions import DropItem
from scrapy.pipelines.files import FilesPipeline

import settings as project_settings
from items import AppDetail
from utils import cal_file_hash
from database import Database
from pipelines.folder_path import get_app_folder
from .folder_path import get_file_size


class ApkDownloadPipeline(FilesPipeline):
    logger = logging.getLogger("ApkDownloadPipeline")

    def __init__(self, store_uri, download_func=None, settings=None):
        super(ApkDownloadPipeline, self).__init__(store_uri, download_func, settings)
        self.db_handler = Database()

    def get_media_requests(self, item: AppDetail, info):
        app_folder = get_app_folder(item)
        download_link = item['download_link']
        apk_name = item['apk_name']
        file_path = os.path.join(app_folder, apk_name)
        if item['market'] == "github_opensource":
            file_path += ".zip"
        elif not file_path.endswith('.apk'):
            file_path += '.apk'
        file_path = os.path.relpath(file_path, project_settings.FILES_STORE)
        if not self.db_handler.get_update_status(item['update_id']):
            yield scrapy.Request(download_link, meta={'file_path': file_path})
        else:
            raise DropItem("Apk File {} exists.".format(download_link))

    def file_path(self, request, response=None, info=None, *, item=None):
        return request.meta['file_path']

    def item_completed(self, results, item: AppDetail, info):
        if results[0][0]:
            # download succeeded
            self.logger.info("Download app '{}' version '{}' from market '{}' successfully.".format(
                item['app_title'], item['version'], item['market']))
            apk_path = results[0][1]['path']
            apk_path = os.path.join(project_settings.FILES_STORE, apk_path)
            apk_size = get_file_size(apk_path)
            apk_hash = cal_file_hash(apk_path)
            self.db_handler.set_update_available(item['update_id'], apk_size, apk_hash)
            return item
        else:
            # download failed
            self.logger.error("Fail to Download app '{}' version '{}' from market '{}'.".format(
                item['app_title'], item['version'], item['market']))
            return item
nilq/baby-python
python
from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, MaxPool2D from keras.layers import Activation, Dropout, Flatten, Dense from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint, Callback from keras import optimizers from skimage import exposure import numpy as np from matplotlib import pyplot as plt from IPython.display import clear_output #CONFIG======= batch_size= 10 image_width = 213 image_height = 180 #============= class PlotLearning(Callback): def on_train_begin(self, logs={}): self.i = 0 self.x = [] self.losses = [] self.val_losses = [] self.acc = [] self.val_acc = [] self.fig = plt.figure() self.logs = [] def on_epoch_end(self, epoch, logs={}): self.logs.append(logs) self.x.append(self.i) self.losses.append(logs.get('loss')) self.val_losses.append(logs.get('val_loss')) self.acc.append(logs.get('acc')) self.val_acc.append(logs.get('val_acc')) self.i += 1 if (epoch % 10 == 0): f, (ax1, ax2) = plt.subplots(1, 2, sharex=True) clear_output(wait=True) ax1.set_yscale('log') ax1.plot(self.x, self.losses, label="loss") ax1.plot(self.x, self.val_losses, label="val_loss") ax1.legend() ax2.plot(self.x, self.acc, label="accuracy") ax2.plot(self.x, self.val_acc, label="validation accuracy") ax2.legend() plt.show(); plot = PlotLearning() checkpoint = ModelCheckpoint('./model.h5', monitor='val_acc', verbose=1, save_best_only=False, mode='max') callbacks_list = [checkpoint, plot] train_data_dir = './data/train/' val_data_dir = './data/val/' model = Sequential() model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', activation ='relu', input_shape = (image_width,image_height,1))) model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(1, activation = "sigmoid")) adam = optimizers.Adam(lr=0.00009, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy']) #Train and val data augmentors train_datagen = ImageDataGenerator ( rescale=1./255, fill_mode='nearest' ) val_datagen = ImageDataGenerator( rescale=1./255, fill_mode='nearest' ) #Generators for TRAIN and val train_generator = train_datagen.flow_from_directory( train_data_dir, target_size=(image_width, image_height), batch_size=batch_size, shuffle=True, color_mode='grayscale', class_mode='binary' ) val_generator = val_datagen.flow_from_directory( val_data_dir, target_size=(image_width, image_height), batch_size=batch_size, shuffle=True, color_mode='grayscale', class_mode='binary' ) model.fit_generator( train_generator, epochs=150, shuffle=True, callbacks=callbacks_list, validation_data=val_generator, )
nilq/baby-python
python
'''
Created on Mar 26, 2014

@author: Simon
'''
from datahandler.abstract_statistics import AbstractStatistics


class ImageStats(AbstractStatistics):
    '''
    Image statistics
    '''

    def __init__(self):
        pass

    def encode(self):
        return []

    def decode(self, encoded_stats):
        return []
nilq/baby-python
python
# This is open-source software licensed under a BSD license. # Please see the file LICENSE.txt for details. """ Save images to output files. **Plugin Type: Global** ``SaveImage`` is a global plugin. Only one instance can be opened. **Usage** This global plugin is used to save any changes made in Ginga back to output images. For example, a mosaic image that was created by the ``Mosaic`` plugin. Currently, only FITS images (single or multiple extensions) are supported. Given the output directory (e.g., ``/mypath/outputs/``), a suffix (e.g., ``ginga``), an image channel (``Image``), and a selected image (e.g., ``image1.fits``), the output file will be ``/mypath/outputs/image1_ginga_Image.fits``. Inclusion of the channel name is optional and can be omitted using plugin configuration file, ``plugin_SaveImage.cfg``. The modified extension(s) will have new header or data extracted from Ginga, while those not modified will remain untouched. Relevant change log entries from the ``ChangeHistory`` global plugin will be inserted into the history of its ``PRIMARY`` header. .. note:: This plugin uses the module ``astropy.io.fits`` to write the output images, regardless of what is chosen for ``FITSpkg`` in the ``general.cfg`` configuration file. """ # STDLIB import os import shutil # THIRD-PARTY import astropy from astropy.io import fits from astropy.utils.introspection import minversion # GINGA from ginga.GingaPlugin import GlobalPlugin from ginga.gw import Widgets from ginga.misc import Bunch from ginga.util.iohelper import shorten_name try: from ginga.gw.GwHelp import DirectorySelection except ImportError: # This is needed for RTD to build pass __all__ = ['SaveImage'] class SaveImage(GlobalPlugin): def __init__(self, fv): # superclass defines some variables for us, like logger super(SaveImage, self).__init__(fv) # Image listing self.columns = [('Image', 'IMAGE'), ('Mod. Ext.', 'MODEXT')] # User preferences. Some are just default values and can also be # changed by GUI. 
prefs = self.fv.get_preferences() self.settings = prefs.create_category('plugin_SaveImage') self.settings.add_defaults(output_directory='.', output_suffix='ginga', include_chname=True, clobber=False, modified_only=True, max_mosaic_size=1e8, max_rows_for_col_resize=5000) self.settings.load(onError='silent') self.outdir = os.path.abspath( self.settings.get('output_directory', '.')) self.suffix = self.settings.get('output_suffix', 'ginga') self.fv.add_callback('add-image', lambda *args: self.redo()) self.fv.add_callback('remove-image', lambda *args: self.redo()) self.fv.add_callback('add-channel', lambda *args: self.update_channels()) self.fv.add_callback('delete-channel', lambda *args: self.update_channels()) self.chnames = [] self.chname = None self.gui_up = False def build_gui(self, container): """Build GUI such that image list area is maximized.""" vbox, sw, orientation = Widgets.get_oriented_box(container) captions = (('Channel:', 'label', 'Channel Name', 'combobox', 'Modified only', 'checkbutton'), ) w, b = Widgets.build_info(captions, orientation=orientation) self.w.update(b) b.channel_name.set_tooltip('Channel for locating images to save') b.channel_name.add_callback('activated', self.select_channel_cb) mod_only = self.settings.get('modified_only', True) b.modified_only.set_state(mod_only) b.modified_only.add_callback('activated', lambda *args: self.redo()) b.modified_only.set_tooltip("Show only locally modified images") container.add_widget(w, stretch=0) captions = (('Path:', 'llabel', 'OutDir', 'entry', 'Browse', 'button'), ('Suffix:', 'llabel', 'Suffix', 'entry')) w, b = Widgets.build_info(captions, orientation=orientation) self.w.update(b) b.outdir.set_text(self.outdir) b.outdir.set_tooltip('Output directory') b.outdir.add_callback('activated', lambda w: self.set_outdir()) b.browse.set_tooltip('Browse for output directory') b.browse.add_callback('activated', lambda w: self.browse_outdir()) b.suffix.set_text(self.suffix) b.suffix.set_tooltip('Suffix to append to filename') b.suffix.add_callback('activated', lambda w: self.set_suffix()) container.add_widget(w, stretch=0) self.treeview = Widgets.TreeView(auto_expand=True, sortable=True, selection='multiple', use_alt_row_color=True) self.treeview.setup_table(self.columns, 1, 'IMAGE') self.treeview.add_callback('selected', self.toggle_save_cb) container.add_widget(self.treeview, stretch=1) captions = (('Status', 'llabel'), ) w, b = Widgets.build_info(captions, orientation=orientation) self.w.update(b) b.status.set_text('') b.status.set_tooltip('Status message') container.add_widget(w, stretch=0) btns = Widgets.HBox() btns.set_border_width(4) btns.set_spacing(3) btn = Widgets.Button('Save') btn.set_tooltip('Save selected image(s)') btn.add_callback('activated', lambda w: self.save_images()) btn.set_enabled(False) btns.add_widget(btn, stretch=0) self.w.save = btn btn = Widgets.Button('Close') btn.add_callback('activated', lambda w: self.close()) btns.add_widget(btn, stretch=0) btn = Widgets.Button("Help") btn.add_callback('activated', lambda w: self.help()) btns.add_widget(btn, stretch=0) btns.add_widget(Widgets.Label(''), stretch=1) container.add_widget(btns, stretch=0) self.gui_up = True # Initialize directory selection dialog self.dirsel = DirectorySelection(self.fv.w.root.get_widget()) # Generate initial listing self.update_channels() def instructions(self): self.tw.set_text("""Enter output directory and suffix, if different than default. Left click to select image name to save. 
Multiple images can be selected using click with Shift or CTRL key. Click Save to save the selected image(s). Output image will have the filename of <inputname>_<suffix>.fits.""") def redo(self, *args): """Generate listing of images that user can save.""" if not self.gui_up: return mod_only = self.w.modified_only.get_state() treedict = Bunch.caselessDict() self.treeview.clear() self.w.status.set_text('') channel = self.fv.get_channel(self.chname) if channel is None: return # Only list modified images for saving. Scanning Datasrc is enough. if mod_only: all_keys = channel.datasrc.keys(sort='alpha') # List all images in the channel. else: all_keys = channel.get_image_names() # Extract info for listing and saving for key in all_keys: iminfo = channel.get_image_info(key) path = iminfo.get('path') idx = iminfo.get('idx') t = iminfo.get('time_modified') if path is None: # Special handling for generated buffer, eg mosaic infile = key is_fits = True else: infile = os.path.basename(path) infile_ext = os.path.splitext(path)[1] infile_ext = infile_ext.lower() is_fits = False if 'fit' in infile_ext: is_fits = True # Only list FITS files unless it is Ginga generated buffer if not is_fits: continue # Only list modified buffers if mod_only and t is None: continue # More than one ext modified, append to existing entry if infile in treedict: if t is not None: treedict[infile].extlist.add(idx) elist = sorted(treedict[infile].extlist) treedict[infile].MODEXT = ';'.join( map(self._format_extname, elist)) # Add new entry else: if t is None: s = '' extlist = set() else: s = self._format_extname(idx) extlist = set([idx]) treedict[infile] = Bunch.Bunch( IMAGE=infile, MODEXT=s, extlist=extlist, path=path) self.treeview.set_tree(treedict) # Resize column widths n_rows = len(treedict) if n_rows == 0: self.w.status.set_text('Nothing available for saving') elif n_rows < self.settings.get('max_rows_for_col_resize', 5000): self.treeview.set_optimal_column_widths() self.logger.debug('Resized columns for {0} row(s)'.format(n_rows)) def update_channels(self): """Update the GUI to reflect channels and image listing. 
""" if not self.gui_up: return self.logger.debug("channel configuration has changed--updating gui") try: channel = self.fv.get_channel(self.chname) except KeyError: channel = self.fv.get_channel_info() if channel is None: raise ValueError('No channel available') self.chname = channel.name w = self.w.channel_name w.clear() self.chnames = list(self.fv.get_channel_names()) #self.chnames.sort() for chname in self.chnames: w.append_text(chname) # select the channel that is the current one try: i = self.chnames.index(channel.name) except IndexError: i = 0 self.w.channel_name.set_index(i) # update the image listing self.redo() def select_channel_cb(self, w, idx): self.chname = self.chnames[idx] self.logger.debug("channel name changed to '%s'" % (self.chname)) self.redo() def _format_extname(self, ext): """Pretty print given extension name and number tuple.""" if ext is None: outs = ext else: outs = '{0},{1}'.format(ext[0], ext[1]) return outs def browse_outdir(self): """Browse for output directory.""" self.dirsel.popup( 'Select directory', self.w.outdir.set_text, initialdir=self.outdir) self.set_outdir() def set_outdir(self): """Set output directory.""" dirname = self.w.outdir.get_text() if os.path.isdir(dirname): self.outdir = dirname self.logger.debug('Output directory set to {0}'.format(self.outdir)) else: self.w.outdir.set_text(self.outdir) self.logger.error('{0} is not a directory'.format(dirname)) def set_suffix(self): """Set output suffix.""" self.suffix = self.w.suffix.get_text() self.logger.debug('Output suffix set to {0}'.format(self.suffix)) def _write_history(self, pfx, hdu, linechar=60, indentchar=2): """Write change history to given HDU header. Limit each HISTORY line to given number of characters. Subsequent lines of the same history will be indented. """ channel = self.fv.get_channel(self.chname) if channel is None: return history_plgname = 'ChangeHistory' try: history_obj = self.fv.gpmon.getPlugin(history_plgname) except Exception: self.logger.error( '{0} plugin is not loaded. No HISTORY will be written to ' '{1}.'.format(history_plgname, pfx)) return if channel.name not in history_obj.name_dict: self.logger.error( '{0} channel not found in {1}. No HISTORY will be written to ' '{2}.'.format(channel.name, history_plgname, pfx)) return file_dict = history_obj.name_dict[channel.name] chistory = [] ind = ' ' * indentchar # NOTE: List comprehension too slow! for key in file_dict: if not key.startswith(pfx): continue for bnch in file_dict[key].values(): chistory.append('{0} {1}'.format(bnch.MODIFIED, bnch.DESCRIP)) # Add each HISTORY prettily into header, sorted by timestamp for s in sorted(chistory): for i in range(0, len(s), linechar): subs = s[i:i + linechar] if i > 0: subs = ind + subs.lstrip() hdu.header.add_history(subs) def _write_header(self, image, hdu): """Write header from image object to given HDU.""" hduhdr = hdu.header # Ginga image header object for the given extension only. # Cannot use get_header() because that might also return PRI hdr. ghdr = image.metadata['header'] for key in ghdr: # Need this to avoid duplication because COMMENT is a weird field if key.upper() == 'COMMENT': continue bnch = ghdr.get_card(key) # Insert new keyword if key not in hduhdr: hduhdr[key] = (bnch.value, bnch.comment) # Update existing keyword elif hduhdr[key] != bnch.value: hduhdr[key] = bnch.value def _write_mosaic(self, key, outfile): """Write out mosaic data (or any new data generated within Ginga) to single-extension FITS. 
""" maxsize = self.settings.get('max_mosaic_size', 1e8) # Default 10k x 10k channel = self.fv.get_channel(self.chname) image = channel.datasrc[key] # Prevent writing very large mosaic if (image.width * image.height) > maxsize: s = 'Mosaic too large to be written {0}'.format(image.shape) self.w.status.set_text(s) self.logger.error(s) return # Insert mosaic data and header into output HDU hdu = fits.PrimaryHDU(image.get_data()) self._write_header(image, hdu) # Write history to PRIMARY self._write_history(key, hdu) # Write to file if minversion(astropy, '1.3'): hdu.writeto(outfile, overwrite=True) else: hdu.writeto(outfile, clobber=True) def _write_mef(self, key, extlist, outfile): """Write out regular multi-extension FITS data.""" channel = self.fv.get_channel(self.chname) with fits.open(outfile, mode='update') as pf: # Process each modified data extension for idx in extlist: k = '{0}[{1}]'.format(key, self._format_extname(idx)) image = channel.datasrc[k] # Insert data and header into output HDU pf[idx].data = image.get_data() self._write_header(image, pf[idx]) # Write history to PRIMARY self._write_history(key, pf['PRIMARY']) def toggle_save_cb(self, w, res_dict): """Only enable saving if something is selected.""" if len(res_dict) > 0: self.w.save.set_enabled(True) else: self.w.save.set_enabled(False) def save_images(self): """Save selected images. This uses Astropy FITS package to save the outputs no matter what user chose to load the images. """ res_dict = self.treeview.get_selected() clobber = self.settings.get('clobber', False) self.treeview.clear_selection() # Automatically disables Save button # If user gives empty string, no suffix. if self.suffix: sfx = '_' + self.suffix else: sfx = '' # Also include channel name in suffix. This is useful if user likes to # open the same image in multiple channels. if self.settings.get('include_chname', True): sfx += '_' + self.chname # Process each selected file. Each can have multiple edited extensions. for infile in res_dict: f_pfx = os.path.splitext(infile)[0] # prefix f_ext = '.fits' # Only FITS supported oname = f_pfx + sfx + f_ext outfile = os.path.join(self.outdir, oname) self.w.status.set_text( 'Writing out {0} to {1} ...'.format(shorten_name(infile, 10), shorten_name(oname, 10))) self.logger.debug( 'Writing out {0} to {1} ...'.format(infile, oname)) if os.path.exists(outfile) and not clobber: self.logger.error('{0} already exists'.format(outfile)) continue bnch = res_dict[infile] if bnch.path is None or not os.path.isfile(bnch.path): self._write_mosaic(f_pfx, outfile) else: shutil.copyfile(bnch.path, outfile) self._write_mef(f_pfx, bnch.extlist, outfile) self.logger.info('{0} written'.format(outfile)) self.w.status.set_text('Saving done, see log') def close(self): self.fv.stop_global_plugin(str(self)) def start(self): self.resume() def resume(self): # turn off any mode user may be in try: self.modes_off() except AttributeError: pass self.fv.show_status('Press "Help" for instructions') def stop(self): self.gui_up = False self.fv.show_status('') def __str__(self): """ This method should be provided and should return the lower case name of the plugin. """ return 'saveimage' # Append module docstring with config doc for auto insert by Sphinx. from ginga.util.toolbox import generate_cfg_example # noqa if __doc__ is not None: __doc__ += generate_cfg_example('plugin_SaveImage', package='ginga') # END
nilq/baby-python
python
from math import inf,nan from ursina import * from numpy import dot,cross from hit_info import HitInfo #fix bug where ray starts right from face boundary class voxelcaster(): def __init__(self,chunks,size=16): self.chunks=chunks self.size=size self.cubeTemplate=[[[0,0,0],[0,0,1],[1,0,0]],[[0,0,0],[1,0,0],[1,1,0]],[[0,0,0],[0,0,1],[0,1,1]],[[1,0,0],[1,0,1],[1,1,1]],[[0,0,1],[0,1,1],[1,1,1]],[[0,1,0],[1,1,0],[1,1,1]]] self.faceNormals=[[0,-1,0],[0,0,-1],[-1,0,0],[1,0,0],[0,0,1],[0,1,0]] def voxelcast(self,origin,direction,maxDistance=inf,debug=False): origin=Vec3(*origin) direction=Vec3(*direction) #position=Vec3(*origin) point=origin normal=Vec3(0,1,0) oldNormal=None currentDistance=0 currentWorldCube=Vec3(origin[0]//1,origin[1]//1,origin[2]//1) #print(direction) while currentDistance < maxDistance: cubeType,currentChunk,currentCube=self.getCube(currentWorldCube) #print(cubeType) if cubeType != "a" and cubeType != None: return self.createHitInfo(hit=True,point=point,normal=-normal,currentChunk=currentChunk,currentCube=currentCube,cubeType=cubeType,distance=currentDistance)### else: error=True for i in range(6): start=Vec3(self.cubeTemplate[i][0][0],self.cubeTemplate[i][0][1],self.cubeTemplate[i][0][2])+currentWorldCube normal=Vec3(self.faceNormals[i][0],self.faceNormals[i][1],self.faceNormals[i][2]) divider=dot(direction,self.faceNormals[i]) if divider != 0: scalar=(dot(start,self.faceNormals[i])-dot(origin,self.faceNormals[i]))/divider #print(scalar) if scalar != nan and scalar != inf and scalar >=0: point=Vec3(origin+scalar*direction) if debug: e=Entity(model="cube", scale=0.1,position=point) destroy(e,delay=1) e.fade_out(duration=1) relPoint=point-currentWorldCube #print(relPoint) ##print(oldPoint,point) ######switch to basing it off face rather than old point/new point to reduce issues with floating point arithmetic if relPoint[0] >=0 and relPoint[0] <=1 and relPoint[1] >=0 and relPoint[1] <=1 and relPoint[2] >=0 and relPoint[2] <=1 and oldNormal != -normal and scalar >=0: ##print(oldPoint,point) oldNormal=normal currentWorldCube=currentWorldCube+normal currentDistance=distance(origin,point) ##print(currentDistance) error=False break if error: print("breaking") #print(0/0) break return self.createHitInfo()### def createHitInfo(self,hit=False,point=None,normal=None,currentChunk=None,currentCube=None,cubeType=None,distance=None): hit=HitInfo(hit=hit) hit.point=point hit.normal=normal hit.currentChunk=currentChunk hit.currentCube=currentCube hit.cubeType=cubeType hit.distance=distance return hit def getCube(self,position): currentChunk=Vec3(0,0,0) currentCube=Vec3(0,0,0) for i in range(3): currentChunk[i]=round(position[i]//self.size * self.size) currentCube[i]=round(position[i] % self.size) try: chunkArray=self.getChunkArray(currentChunk) return chunkArray[round(currentCube[0])][round(currentCube[1])][round(currentCube[2])],currentChunk,currentCube except Exception as e: ## #print(e) return "b",None,None def getChunkArray(self,chunk):## return self.chunks[str(round(chunk[0]))+":"+str(round(chunk[1]))+":"+str(round(chunk[2]))].chunkArray if __name__ == "__main__": from worldGeneration import chunkGenerator from chunks import voxelChunk import random app=Ursina() Texture.default_filtering = None Sky() generator=chunkGenerator(seed=21) count=0 chunksDict={} caster=voxelcaster(chunks=chunksDict) for i in range(1): for j in range(4): for k in range(1): count+=1 print("\n"*10+"▓"*round(count/2)+"░"*round((256-count)/2)) x,y,z=i*16,j*16,k*16 
chunk=generator.generateChunkArrayNew(position=Vec3(x,y,z)) chunk=voxelChunk(position=Vec3(x,y,z),chunkArray=chunk) chunk.buildChunk() chunksDict[str(x)+":"+str(y)+":"+str(z)]=chunk #print("hit start") for i in range(100): ##print(i) hitTest=caster.voxelcast(origin=Vec3(random.randint(0,1599)/100,50,random.randint(0,1599)/100),direction=Vec3(0,-1,0),maxDistance=50) #print(hitTest.currentChunk) #print(hitTest.currentCube) #print(hitTest.normal) Entity(model="cube",scale=0.1,position=hitTest.point,color=color.black) #print("hit end") EditorCamera() """ pivot=Entity(rotation_z=0,rotation_x=30,rotation_y=0,y=32) s=DirectionalLight(scale=-30, shadows=False) s._light.show_frustum() """ sun = DirectionalLight(y=10, rotation=(90+40,45,0)) #sun._light.show_frustum() sun._light.set_shadow_caster(True, 4096, 4096) #sun._light.show_frustum() # sun._light.set_shadow_caster(True, 4096, 4096) #bmin, bmax = scene.get_tight_bounds(chunk) lens = sun._light.get_lens() lens.set_near_far(0, 10) # lens.set_film_offset((bmin.xy + bmax.xy) * .5) lens.set_film_size(0) window.fullscreen=True app.run()
nilq/baby-python
python
# # Copyright (C) 2015, Stanislaw Adaszewski # s.adaszewski@gmail.com # http://algoholic.eu # # License: 2-clause BSD # from markdown import Extension from markdown.blockprocessors import BlockProcessor from markdown.treeprocessors import Treeprocessor from markdown.util import etree, AtomicString import numpy as np from collections import defaultdict import re from markdown.inlinepatterns import Pattern from markdown.preprocessors import Preprocessor _captions = {} class FigureExtension(Extension): def extendMarkdown(self, md, md_globals): md.inlinePatterns.add('figref', FigRefPattern(r'\[([A-Za-z]+ [0-9]+)\]', md), '<emphasis') # md.inlinePatterns.add('fig', FigPattern(r'^((Figure|Table|Listing) ([0-9]+))\. (.+)', md), '<emphasis') md.parser.blockprocessors.add('figure', FigureProcessor(md.parser), '<hashheader') md.treeprocessors.add('figure', FigureTreeProcessor(md), '<prettify') # raise ValueError(md.preprocessors) # md.preprocessors.add('figure', FigPreproc(md), '<html_block') def makeExtension(configs={}): return FigureExtension(configs=configs) class FigPreproc(Preprocessor): def run(self, lines): new_lines = [] in_caption = False for line in lines: m = re.match(r'((Table|Figure|Listing) ([0-9]+))\.', line) if m is not None: new_lines.append(u'<div class="figcaption">') new_lines.append(u'') in_caption = True if line == '' and in_caption: # raise ValueError('Here') new_lines.append('') new_lines.append(u'</div>') in_caption = False new_lines.append(line) # raise ValueError(new_lines) return new_lines class FigRefPattern(Pattern): def handleMatch(self, m): hash = m.group(2).lower().replace(' ', '_') a = etree.Element('a') a.set('href', '#figref_%s' % hash) a.text = AtomicString('[%s]' % m.group(2)) return a class FigPattern(Pattern): def handleMatch(self, m): caption = m.group(5).strip() # block[m.span()[1]:].strip() # raise ValueError(caption) # p = etree.Element('p') a = etree.Element('a') # raise ValueError(m.group(0)) hash = m.group(2).lower().replace('.','').replace(' ', '_') a.set('name', 'figref_%s' % hash) a.set('class', 'figcaption') a.text = '%s. 
%s' % (m.group(2), caption) # raise ValueError(a.text) return a class FigureProcessor(BlockProcessor): def test(self, parent, block): return re.match(r'^[A-Za-z]+ [0-9]+\.', block) is not None def run(self, parent, blocks): block = blocks.pop(0) m = re.match(r'[A-Za-z]+ [0-9]+\.', block) caption = block[m.span()[1]:].strip() p = etree.SubElement(parent, 'p') a = etree.SubElement(p, 'a') hash = m.group(0).lower().replace('.','').replace(' ', '_') a.set('name', 'figref_%s' % hash) # a.set('class', 'figcaption') # bold = etree.SubElement(a, 'b') # bold.text = m.group(0) # regular = etree.SubElement(a, 'span') # regular.text = caption a.text = '%s %s' % (m.group(0), caption) _captions[a.get('name')] = caption # import sys def stringify(el): Q = [el] ret = '' # raise ValueError(el[0][0].text) while len(Q) > 0: el = Q.pop(0) for ch in el: Q.append(ch) if el.text is not None: ret += el.text if el.tail is not None: ret += el.tail return ret class FigureTreeProcessor(Treeprocessor): def run(self, root, M={}): # print 'Running...', dir(self) hdrtags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6'] cnt = defaultdict(lambda: 0) # M = {} '''Q = [root] test = u'' while len(Q) > 0: el = Q.pop(0) for ch in el: Q.append(ch) #if el.tail is not None: # dummy = etree.Element('dummy') # dummy.text = el.tail # Q.append(dummy) if el.text is not None: test += el.text if el.tail is not None: test += el.tail # print (el) print test.encode('utf-8') # raise ValueError(test[:50])''' Q = [root] hdrcnt = [] active = False nmbrs = '' L = defaultdict(lambda: []) while len(Q) > 0: el = Q.pop(0) for ch in el: Q.append(ch) if el.tag == 'p' and el.text == 'CONTENT-START': active = True elif el.tag == 'p' and el.text == 'CONTENT-END': active = False nmbrs = '' elif active and el.tag in hdrtags: lvl = int(el.tag[1]) # lvl = min(lvl, 3) if lvl <= 3: hdrcnt = hdrcnt[0:lvl] if len(hdrcnt) == lvl: hdrcnt[-1] += 1 else: hdrcnt += [1] nmbrs = '.'.join(map(str, hdrcnt)) + '.' # name = el.get('name') name = None if el.tag == 'p' and len(el)>0 and el[0].tag == 'a': name = el[0].get('name') # raise ValueError(name) if name is not None and name.startswith('figref_'): # raise ValueError(dir(el)) # raise ValueError(name) type_ = name.split('_')[1] if type_ == 'figure': el.set('class', 'figcaption_img') elif type_ == 'algorithm': el.set('class', 'figcaption_algo') else: el.set('class', 'figcaption') title = '.'.join(el[0].text.split('.')[1:]) # raise ValueError(title) if name not in M: cnt[nmbrs + type_] += 1 M[name] = nmbrs + str(cnt[nmbrs + type_]) L[type_].append({'href': '#' + name, 'el': el, 'text': type_[0].upper() + type_[1:] + ' ' + M[name] + '.' + ''.join(stringify(el).split('.')[1])}) # + _captions[name]}) # raise ValueError(len(type_)) span = etree.Element('span') el[0].insert(0, span) span2 = etree.SubElement(span, 'span') span2.text = type_[0].upper() + type_[1:] + ' ' + M[name] + '. 
' span2.tail = title el[0].text = '' md = self.markdown this = self def rewrite_self_references(txt): fr = md.inlinePatterns['figref'] rx = fr.getCompiledRegExp() # matches = rx.findall(txt) while True: match = rx.match(txt) if match is not None: pos = match.start(2) endpos = match.end(2) # raise ValueError(match.end(2)) a = fr.handleMatch(match) this.run(a, M) txt = txt[0:pos] + a.text[1:-1] + txt[endpos:] else: break return txt Q = [(-1, None, root)] insert_cnt = defaultdict(lambda : 0) while len(Q) > 0: (idx, parent, el) = Q.pop(0) # print 'Here', el cnt = 0 for ch in el: Q.append((cnt, el, ch)) cnt += 1 href = el.get('href') if el.tag == 'a' and href is not None and href.startswith('#figref_'): type_ = href.split('_')[1] #if href in M: # pass #else: # cnt[type_] += 1 # M[href] = str(cnt[type_]) el.text = '%s %s' % (type_[0].upper() + type_[1:], M[href[1:]]) elif el.tag == 'p' and el.text is not None and el.text.startswith('LIST-OF-'): type_ = el.text[8:-1].lower() # raise ValueError(L['figure']) for fig in L[type_]: p = etree.Element('p') p.set('style', 'text-align: left; width: 75%;') # p.text = fig['text'] a = etree.SubElement(p, 'a') a.set('href', fig['href']) # a.set('style', 'color: white; font-size: 1px; height: 1el; display: block-inline;') a.text = rewrite_self_references(fig['text']) # ' + fig['text'][:20] self.markdown.treeprocessors['myreferences'].run(p) parent.insert(insert_cnt[parent] + idx, p) insert_cnt[parent] += 1 # Q.append((-1, parent, p)) parent.remove(el)
nilq/baby-python
python
""" zoom.snippets """ import zoom import zoom.html as h class SystemSnippet(zoom.utils.Record): """SystemSnippet A chunk of text (usually HTML) that can be rendered by placing the {{snippet}} tag in a document or template. >>> db = zoom.database.setup_test() >>> snippets = get_snippets(db) >>> snippets.delete(name='test') >>> snippets.find(name='test') [] >>> t = snippets.put(Snippet(name='test', body='some text')) >>> snippets.find(name='test') [<SystemSnippet {'key': 'test', 'name': 'test', 'url': '/content/snippets/test', 'body': 'some text', 'link': '<a href="/content/snippets/test">test</a>'}>] """ @property def link(self): """Return a link""" return h.a(self.name, href=self.url) @property def url(self): return '/content/snippets/' + self.key @property def key(self): return zoom.utils.id_for(self.name) def allows(self, user, action): """Item level policy""" return True Snippet = SystemSnippet def snippet(name, default='', variant=None): snippets = get_snippets() snippet = snippets.first(name=name, variant=variant) if snippet: snippet['impressions'] = snippet.get('impressions', 0) + 1 snippets.put(snippet) result = snippet.body else: result = default return result def get_snippets(db=None): return zoom.store_of(Snippet, db=db)
nilq/baby-python
python
# -*- coding: utf-8 -*-

from flask import Flask, Blueprint, make_response, jsonify, request
from flask_bcrypt import check_password_hash

from app import db, app, return_response

# Import module models (i.e. User)
from app.mod_user.models import User

# Define the blueprint: 'index', set its url prefix: app.url/
mod_index = Blueprint('index', __name__, url_prefix='/')


@mod_index.route("login/", methods=['POST'])
def login():
    print(request.json)
    username = request.json.get("username")
    password = request.json.get("password")

    user = User.query.filter_by(username=username).first()
    if user is None or not check_password_hash(user.password, password):
        return return_response(400, "Wrong input")

    # Return token key to user
    return return_response(200, "OK", {'token': user.generate_token_key()})


# Register new user
@mod_index.route("register/", methods=['POST'])
def new_user():
    username = request.json.get('username')
    email = request.json.get('email')
    password = request.json.get('password')
    print(username, email, password)

    if username == "" or email == "" or password == "":
        return return_response(400, "Missing properties")
    if username is None or email is None or password is None:
        return return_response(400, "Missing properties")

    already_registered = User.query.filter_by(username=username).first()
    if already_registered is not None:
        return return_response(400, "User exist")

    newUser = User(username, email, password)
    db.session.add(newUser)
    db.session.commit()

    # Return that the user was created
    return return_response(201, "OK", {'result': 'User created'})
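A minimal client-side sketch of exercising these endpoints with the requests library; the host, port, and credentials below are assumptions for illustration and are not part of the original module.

import requests

BASE = 'http://localhost:5000'  # assumed development host/port

# Register a new user, then log in to obtain a token.
r = requests.post(BASE + '/register/', json={
    'username': 'alice', 'email': 'alice@example.com', 'password': 'secret'})
print(r.status_code, r.json())

r = requests.post(BASE + '/login/', json={'username': 'alice', 'password': 'secret'})
print(r.status_code, r.json())  # expected to include a 'token' on success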
nilq/baby-python
python
import os from unittest import mock import pytest import requests_mock from ewtwitterbot.imagery import get_quote_image from ewtwitterbot.mastodon_bot import ( MastodonConfigurationError, MastodonMediaError, get_credentials_from_environ, get_last_toot_id, respond_to_toots, save_last_toot_id, upload_image_and_description, ) @pytest.fixture def save_a_toot_id(): with open("test_last_toot.txt", "w") as f: f.write(str(14)) def test_retrieve_last_toot_id_saved(save_a_toot_id): assert get_last_toot_id("test_last_toot.txt") == 14 def test_save_toot_id(): if os.path.exists("test_last_toot.txt"): os.remove("test_last_toot.txt") save_last_toot_id(40, "test_last_toot.txt") assert os.path.exists("test_last_toot.txt") assert get_last_toot_id("test_last_toot.txt") == 40 def test_retrieve_nonexistent_tweet_id(): if os.path.exists("test_last_toot.txt"): os.remove("test_last_toot.txt") assert get_last_toot_id("test_last_toot.txt") == 1 def test_mastodon_configuration_checks(): names_to_remove = [ "MASTODON_CLIENT_SECRET_FILE", "MASTODON_USER_SECRET_FILE", "MASTODON_API_BASE_URL", ] modified_environ = {k: v for k, v in os.environ.items() if k not in names_to_remove} with mock.patch.dict(os.environ, modified_environ, clear=True): with pytest.raises(MastodonConfigurationError): get_credentials_from_environ() @pytest.fixture def mastodon_environ_patch(): return { "MASTODON_API_BASE_URL": "https://botsin.space", "MASTODON_CLIENT_SECRET_FILE": "test_ewbot_clientcred.secret", "MASTODON_USER_SECRET_FILE": "test_ewbot_usercred.secret", } def test_mastodon_media_upload_success(mastodon_environ_patch): with mock.patch.dict(os.environ, mastodon_environ_patch, clear=False): with requests_mock.Mocker() as m: m.post( "https://botsin.space/api/v1/media", status_code=200, json={ "id": "234567", "type": "image", "url": "https://files.botsin.space/media_attachments/files/022/033/641/original/quote_image.png", "preview_url": "https://files.botsin.space/media_attachments/files/022/033/641/small/quote_image.png", # noqa: E501 "remote_url": None, "text_url": "https://botsin.space/media/4Zj6ewxzzzDi0g8JnZQ", "meta": { "focus": {"x": -0.69, "y": 0.42}, "original": { "width": 640, "height": 480, "size": "640x480", "aspect": 1.3333333333333333, }, "small": { "width": 461, "height": 346, "size": "461x346", "aspect": 1.3323699421965318, }, }, "description": "test uploaded via api", "blurhash": "UFBWY:8_0Jxv4mx]t8t64.%M-:IUWGWAt6M}", }, ) get_quote_image("Hi There") assert ( upload_image_and_description( get_credentials_from_environ(), "quote_image.png", alt_text="Hi there", ) == 234567 ) def test_media_upload_error(mastodon_environ_patch): with mock.patch.dict(os.environ, mastodon_environ_patch, clear=False): with requests_mock.Mocker() as m: m.post( "https://botsin.space/api/v1/media", status_code=200, json={ "id": "234567", "type": "unknown", "url": "https://files.botsin.space/media_attachments/files/022/033/641/original/quote_image.png", "preview_url": "https://files.botsin.space/media_attachments/files/022/033/641/small/quote_image.png", # noqa: E501 "remote_url": None, "text_url": "https://botsin.space/media/4Zj6ewxzzzDi0g8JnZQ", "meta": { "focus": {"x": -0.69, "y": 0.42}, "original": { "width": 640, "height": 480, "size": "640x480", "aspect": 1.3333333333333333, }, "small": { "width": 461, "height": 346, "size": "461x346", "aspect": 1.3323699421965318, }, }, "description": "test uploaded via api", "blurhash": "UFBWY:8_0Jxv4mx]t8t64.%M-:IUWGWAt6M}", }, ) get_quote_image("Hi There") with pytest.raises(MastodonMediaError): 
upload_image_and_description( get_credentials_from_environ(), "quote_image.png", alt_text="Hi there", ) def test_mastodon_mention_cycle(mastodon_environ_patch): with mock.patch.dict(os.environ, mastodon_environ_patch, clear=False): with requests_mock.Mocker() as m: m.post( "https://botsin.space/api/v1/media", status_code=200, json={ "id": "234567", "type": "image", "url": "https://files.botsin.space/media_attachments/files/022/033/641/original/quote_image.png", "preview_url": "https://files.botsin.space/media_attachments/files/022/033/641/small/quote_image.png", # noqa: E501 # noqa: E501 "remote_url": None, "text_url": "https://botsin.space/media/4Zj6ewxzzzDi0g8JnZQ", "meta": { "focus": {"x": -0.69, "y": 0.42}, "original": { "width": 640, "height": 480, "size": "640x480", "aspect": 1.3333333333333333, }, "small": { "width": 461, "height": 346, "size": "461x346", "aspect": 1.3323699421965318, }, }, "description": "test uploaded via api", "blurhash": "UFBWY:8_0Jxv4mx]t8t64.%M-:IUWGWAt6M}", }, ) m.get( "https://botsin.space/api/v1/notifications", status_code=200, json=[ { "id": 4772149, "type": "mention", "created_at": "2019-11-23T07:29:18.903Z", "account": { "id": 18639, "username": "andrlik", "acct": "andrlik@wandering.shop", "display_name": "Daniel Andrlik", "locked": True, "bot": False, "discoverable": True, "group": False, "created_at": "2019-11-23T07:29:18.903Z", "note": '<p>Product exec, SFF Writer, Producer and GM of the Explorers Wanted actual play podcast. </p><p><a href="https://wandering.shop/tags/ActuallyAutistic" class="mention hashtag" rel="nofollow noopener noreferrer" target="_blank">#<span>ActuallyAutistic</span></a>/ADHD, with a dash of GAD for spice.</p><p>He/him</p><p>Your mom loves me.</p><p>Location: secluded in a blanket fort</p>', # noqa: E501 "url": "https://wandering.shop/@andrlik", "avatar": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501 "avatar_static": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501 "header": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501 "header_static": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501 "followers_count": 81, "following_count": 148, "statuses_count": 869, "last_status_at": "2019-11-23T07:29:18.903Z", "emojis": [], "fields": [ { "name": "Website", "value": '<a href="https://www.andrlik.org" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">andrlik.org</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": "2022-04-29T14:58:32.014+00:00", }, { "name": "Twitter", "value": '<a href="https://twitter.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">twitter.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": None, }, { "name": "Github", "value": '<a href="https://github.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">github.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": None, }, { "name": "Podcast", "value": '<a href="https://www.explorerswanted.fm" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">explorerswanted.fm</span><span class="invisible"></span></a>', 
# noqa: E501 "verified_at": None, }, ], }, "status": { "id": 108216032166128570, "created_at": "2019-11-23T07:29:18.903Z", "in_reply_to_id": None, "in_reply_to_account_id": None, "sensitive": False, "spoiler_text": "", "visibility": "public", "language": "en", "uri": "https://wandering.shop/users/andrlik/statuses/108216031335496737", "url": "https://wandering.shop/@andrlik/108216031335496737", "replies_count": 0, "reblogs_count": 0, "favourites_count": 0, "favourited": False, "reblogged": False, "muted": False, "bookmarked": False, "content": '<p><span class="h-card"><a href="https://botsin.space/@ewbot" class="u-url mention" rel="nofollow noopener noreferrer" target="_blank">@<span>ewbot</span></a></span> Quote please</p>', # noqa: E501 "reblog": None, "account": { "id": 18639, "username": "andrlik", "acct": "andrlik@wandering.shop", "display_name": "Daniel Andrlik", "locked": True, "bot": False, "discoverable": True, "group": False, "created_at": "2019-11-23T07:29:18.903Z", "note": '<p>Product exec, SFF Writer, Producer and GM of the Explorers Wanted actual play podcast. </p><p><a href="https://wandering.shop/tags/ActuallyAutistic" class="mention hashtag" rel="nofollow noopener noreferrer" target="_blank">#<span>ActuallyAutistic</span></a>/ADHD, with a dash of GAD for spice.</p><p>He/him</p><p>Your mom loves me.</p><p>Location: secluded in a blanket fort</p>', # noqa: E501 "url": "https://wandering.shop/@andrlik", "avatar": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501 "avatar_static": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501 "header": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501 "header_static": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501 "followers_count": 81, "following_count": 148, "statuses_count": 869, "last_status_at": "2019-11-23T07:29:18.903Z", "emojis": [], "fields": [ { "name": "Website", "value": '<a href="https://www.andrlik.org" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">andrlik.org</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": "2022-04-29T14:58:32.014+00:00", }, { "name": "Twitter", "value": '<a href="https://twitter.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">twitter.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": None, }, { "name": "Github", "value": '<a href="https://github.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">github.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": None, }, { "name": "Podcast", "value": '<a href="https://www.explorerswanted.fm" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">explorerswanted.fm</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": None, }, ], }, "media_attachments": [], "mentions": [ { "id": 108215876835523723, "username": "ewbot", "url": "https://botsin.space/@ewbot", "acct": "ewbot", } ], "tags": [], "emojis": [], "card": None, "poll": None, }, } ], ) m.post( "https://botsin.space/api/v1/statuses", status_code=200, json={ "id": 108216032166128570, "created_at": "2019-11-23T07:29:18.903Z", 
"in_reply_to_id": None, "in_reply_to_account_id": None, "sensitive": False, "spoiler_text": "", "visibility": "public", "language": "en", "uri": "https://wandering.shop/users/andrlik/statuses/108216031335496737", "url": "https://wandering.shop/@andrlik/108216031335496737", "replies_count": 0, "reblogs_count": 0, "favourites_count": 0, "favourited": False, "reblogged": False, "muted": False, "bookmarked": False, "content": '<p><span class="h-card"><a href="https://botsin.space/@ewbot" class="u-url mention" rel="nofollow noopener noreferrer" target="_blank">@<span>ewbot</span></a></span> Quote please</p>', # noqa: E501 "reblog": None, "account": { "id": 18639, "username": "andrlik", "acct": "andrlik@wandering.shop", "display_name": "Daniel Andrlik", "locked": True, "bot": False, "discoverable": True, "group": False, "created_at": "2019-11-23T07:29:18.903Z", "note": '<p>Product exec, SFF Writer, Producer and GM of the Explorers Wanted actual play podcast. </p><p><a href="https://wandering.shop/tags/ActuallyAutistic" class="mention hashtag" rel="nofollow noopener noreferrer" target="_blank">#<span>ActuallyAutistic</span></a>/ADHD, with a dash of GAD for spice.</p><p>He/him</p><p>Your mom loves me.</p><p>Location: secluded in a blanket fort</p>', # noqa: E501 "url": "https://wandering.shop/@andrlik", "avatar": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501 "avatar_static": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501 "header": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501 "header_static": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501 "followers_count": 81, "following_count": 148, "statuses_count": 869, "last_status_at": "2019-11-23T07:29:18.903Z", "emojis": [], "fields": [ { "name": "Website", "value": '<a href="https://www.andrlik.org" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">andrlik.org</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": "2022-04-29T14:58:32.014+00:00", }, { "name": "Twitter", "value": '<a href="https://twitter.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">twitter.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": None, }, { "name": "Github", "value": '<a href="https://github.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">github.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": None, }, { "name": "Podcast", "value": '<a href="https://www.explorerswanted.fm" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">explorerswanted.fm</span><span class="invisible"></span></a>', # noqa: E501 "verified_at": None, }, ], }, "media_attachments": [], "mentions": [ { "id": 108215876835523723, "username": "ewbot", "url": "https://botsin.space/@ewbot", "acct": "ewbot", } ], "tags": [], "emojis": [], "card": None, "poll": None, }, ) m.get( "https://quoteservice.andrlik.org/api/groups/ew/get_random_quote/", json={ "quote": "We always go right.", "quote_rendered": "<p>We always go right.</p>", "citation": "Episode 3", "citation_url": "https://www.explorerswanted.fm/3", "source": { "name": "Nix", 
"slug": "ew-nix", "description": "Glaive", "description_rendered": "<p>Glaive</p>", }, }, ) m.get( "https://quoteservice.andrlik.org/api/sources/", json=[{"name": "Nix", "slug": "ew-nix"}], ) m.get( "https://quoteservice.andrlik.org/api/sources/ew-nix/generate_sentence/", json={"sentence": "fear the snek"}, ) respond_to_toots("test_last_toot.txt") assert get_last_toot_id("test_last_toot.txt") == 4772149
nilq/baby-python
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Nov 24 19:04:13 2018 @author: kyungdoehan """ import numpy as np #%% Making square arrays of x, y, z of the overall topography class XYZ_data: def __init__(self, a, x, y, z): self.X = np.zeros((a, a)) self.Y = np.zeros((a, a)) self.Z = np.zeros((a, a)) for i in range(a): for j in range(a): self.X[j, i] = x[i + j * a] self.Y[j, i] = y[i + j * a] self.Z[j, i] = z[i + j * a] def XYZ(grid, x, y, z): return XYZ_data(grid, x, y, z) #%% class delz_ratio: def __init__(self, i): self.dzratio = np.exp(np.arange(1, i + 1) / 10) self.dzratio = self.dzratio / np.sum(self.dzratio) def dzratio(i): return delz_ratio(i) #%% class bottom: def __init__(self, inz, ifixed, j, top, dat_var, dat_new, dzratio): self.tot_b = top - dat_var self.bot = np.zeros((inz, j, j)) for irow in range(j): for icol in range(j): self.bot[:, irow, icol] = top[irow, icol] - \ np.cumsum(self.tot_b[irow, icol] * dzratio) self.bot_fixed = np.zeros((ifixed, j, j)) self.bot_fixed[0, :, :] = self.bot[inz - 1, :, :] + dat_new / ifixed for i in range(ifixed - 1): self.bot_fixed[i+1, :, :] = self.bot_fixed[i, :, :]+dat_new/ifixed self.bot = np.vstack((self.bot, self.bot_fixed)) def bot(inz, ifixed, j, top, dat_var, dat_new, dzratio): return bottom(inz, ifixed, j, top, dat_var, dat_new, dzratio) #%% class delz: def __init__(self, top, bot, nz, ny, nx): self.dzs = np.zeros((nz, ny, nx), dtype=np.float32) self.dzs[0, :, :] = top - bot[0, :, :] for ilay in range(nz-1): self.dzs[ilay+1, :, :] = bot[ilay, :, :] - bot[ilay+1, :, :] def dzs(top, bot, nz, ny, nx): return delz(top, bot, nz, ny, nx) #%% class nodes: def __init__(self, bot, dzs, nz, ny, nx): self.node = np.zeros((nz, ny, nx), dtype=np.float32) for irow in range(ny): for icol in range(nx): self.node[:, irow, icol] = bot[:, irow, icol] + 0.5 * dzs[:, irow, icol] def node(bot, dzs, nz, ny, nx): return nodes(bot, dzs, nz, ny, nx)
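
# Hypothetical usage sketch (not part of the original script); x, y, z, dat_var
# and dat_new are placeholder inputs and the layer counts are arbitrary.
#
#   grid = 100                                    # 100 x 100 horizontal nodes
#   topo = XYZ(grid, x, y, z)                     # square X/Y/Z arrays from flat columns
#   ratio = dzratio(20).dzratio                   # normalised thickness ratios for 20 layers
#   bots = bot(20, 5, grid, topo.Z, dat_var, dat_new, ratio).bot   # 20 + 5 layer bottoms
#   thick = dzs(topo.Z, bots, 25, grid, grid).dzs                  # per-cell layer thickness
#   centres = node(bots, thick, 25, grid, grid).node               # layer mid-point elevations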
nilq/baby-python
python
# -*- coding: utf-8 -*- import itertools import os import plistlib import unicodedata import sys from xml.etree.ElementTree import Element, SubElement, tostring """ You should run your script via /bin/bash with all escape options ticked. The command line should be python yourscript.py "{query}" arg2 arg3 ... """ UNESCAPE_CHARACTERS = u""" ;()""" _MAX_RESULTS_DEFAULT = 9 preferences = plistlib.readPlist('info.plist') bundleid = preferences['bundleid'] class Item(object): @classmethod def unicode(cls, value): try: items = value.iteritems() except AttributeError: return unicode(value) else: return dict(map(unicode, item) for item in items) def __init__(self, attributes, title, subtitle, icon=None): self.attributes = attributes self.title = title self.subtitle = subtitle self.icon = icon def __str__(self): return tostring(self.xml(), encoding='utf-8') def xml(self): item = Element(u'item', self.unicode(self.attributes)) for attribute in (u'title', u'subtitle', u'icon'): value = getattr(self, attribute) if value is None: continue try: (value, attributes) = value except: attributes = {} elem = SubElement(item, attribute, self.unicode(attributes)) elem.text = unicode(value) return item def args(characters=None): return tuple(unescape(decode(arg), characters) for arg in sys.argv[1:]) def config(): return _create('config') def decode(s): return unicodedata.normalize('NFC', s.decode('utf-8')) def get_uid(uid): return u'-'.join(map(unicode, (bundleid, uid))) def unescape(query, characters=None): if not characters: characters = UNESCAPE_CHARACTERS for character in characters: query = query.replace('\\%s' % character, character) return query def write(text): sys.stdout.write(text) def xml(items, maxresults=_MAX_RESULTS_DEFAULT): root = Element('items') for item in itertools.islice(items, maxresults): root.append(item.xml()) return tostring(root, encoding='utf-8') def _create(path): if not os.path.isdir(path): os.mkdir(path) if not os.access(path, os.W_OK): raise IOError('No write access: %s' % path) return path def work(volatile): path = { True: '~/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data', False: '~/Library/Application Support/Alfred 2/Workflow Data' }[bool(volatile)] return _create(os.path.join(os.path.expanduser(path), bundleid)) def config_set(key, value, volatile=True): filepath = os.path.join(work(volatile), 'config.plist') try: conf = plistlib.readPlist(filepath) except IOError: conf = {} conf[key] = value plistlib.writePlist(conf, filepath) def config_get(key, default=None, volatile=True): filepath = os.path.join(work(volatile), 'config.plist') try: conf = plistlib.readPlist(filepath) except IOError: conf = {} if key in conf: return conf[key] return default class AlfredWorkflow(object): _reserved_words = [] def write_text(self, text): print(text) def write_item(self, item): return self.write_items([item]) def write_items(self, items): return write(xml(items, maxresults=self.max_results)) def message_item(self, title, message, icon=None, uid=0): return Item({u'uid': get_uid(uid), u'arg': '', u'ignore': 'yes'}, title, message, icon) def warning_item(self, title, message, uid=0): return self.message_item(title=title, message=message, uid=uid, icon='warning.png') def error_item(self, title, message, uid=0): return self.message_item(title=title, message=message, uid=uid, icon='error.png') def exception_item(self, title, exception, uid=0): message = str(exception).replace('\n', ' ') return self.error_item(title=title, message=message, uid=uid) def route_action(self, action, 
query=None): method_name = 'do_{}'.format(action) if not hasattr(self, method_name): raise RuntimeError('Unknown action {}'.format(action)) method = getattr(self, method_name) return method(query) def is_command(self, query): try: command, rest = query.split(' ', 1) except ValueError: command = query command = command.strip() return command in self._reserved_words or \ hasattr(self, 'do_{}'.format(command))
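
# Hypothetical usage sketch (not part of the original helper module): a
# workflow script built on top of it would subclass AlfredWorkflow, implement
# do_* handlers, and feed in the query Alfred passes on the command line.
#
#   class MyWorkflow(AlfredWorkflow):
#       max_results = _MAX_RESULTS_DEFAULT
#
#       def do_search(self, query):
#           self.write_item(self.message_item('You searched for', query, uid=1))
#
#   if __name__ == '__main__':
#       query = args()[0] if args() else ''
#       MyWorkflow().route_action('search', query)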
nilq/baby-python
python
"""This module serves as a container to hold the global :class:`~.ShowBase.ShowBase` instance, as an alternative to using the builtin scope. Note that you cannot directly import `base` from this module since ShowBase may not have been created yet; instead, ShowBase dynamically adds itself to this module's scope when instantiated.""" __all__ = [] from .ShowBase import ShowBase, WindowControls from direct.directnotify.DirectNotifyGlobal import directNotify, giveNotify from panda3d.core import VirtualFileSystem, Notify, ClockObject, PandaSystem from panda3d.core import ConfigPageManager, ConfigVariableManager from panda3d.core import NodePath, PGTop from . import DConfig as config __dev__ = config.GetBool('want-dev', __debug__) #: The global instance of the :class:`panda3d.core.VirtualFileSystem`. vfs = VirtualFileSystem.getGlobalPtr() ostream = Notify.out() globalClock = ClockObject.getGlobalClock() cpMgr = ConfigPageManager.getGlobalPtr() cvMgr = ConfigVariableManager.getGlobalPtr() pandaSystem = PandaSystem.getGlobalPtr() # This is defined here so GUI elements can be instantiated before ShowBase. render2d = NodePath("render2d") aspect2d = render2d.attachNewNode(PGTop("aspect2d")) hidden = NodePath("hidden") # Set direct notify categories now that we have config directNotify.setDconfigLevels() def run(): """Deprecated alias for :meth:`base.run() <.ShowBase.run>`.""" assert ShowBase.notify.warning("run() is deprecated, use base.run() instead") base.run() def inspect(anObject): """Opens up a :mod:`direct.tkpanels.Inspector` GUI panel for inspecting an object.""" # Don't use a regular import, to prevent ModuleFinder from picking # it up as a dependency when building a .p3d package. import importlib Inspector = importlib.import_module('direct.tkpanels.Inspector') return Inspector.inspect(anObject) import sys if sys.version_info >= (3, 0): import builtins else: import __builtin__ as builtins builtins.inspect = inspect del sys # this also appears in AIBaseGlobal if (not __debug__) and __dev__: ShowBase.notify.error("You must set 'want-dev' to false in non-debug mode.")
nilq/baby-python
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:26:47 2019

@author: sercangul
"""

a, b = map(float, input().split())
x = float(input())
# Geometric distribution: with success probability p = a / b, the probability
# that the first success occurs within the first 5 trials is
#   sum over k = 1..5 of (1 - p)**(k - 1) * p
# (the exponents below run 4..0, which gives the same sum).
print(round(sum([(1 - (a / b))**(5 - k) * (a / b) for k in range(1, 6)]), 3))
nilq/baby-python
python
# -*- coding: utf-8 -*- from pygraph.fillpolygon_edge import fillPolygonEdge from pygraph.util import mkGraph, saveG, enLarge g = mkGraph((80, 60)) points = [ (10, 40), (20, 10), (30, 10), (40, 5), (60, 10), (75, 25), (30, 50) ] fillPolygonEdge(g, points) saveG("polygon_edge.png", g) saveG("polygon_edge_large.png", enLarge(g, 10))
nilq/baby-python
python
from .fasta import is_fasta from .fasta import read_fasta
nilq/baby-python
python
import streamlit as st

st.title('Streamlit custom theme tutorial')
st.subheader('Powered by @dataprojectswithMJ')

st.multiselect('Choose your favourite coding language(s)',
               options=['Python', 'Java', 'Golang', 'C++'])

st.radio('Choose your favourite operating system:',
         ['Windows', 'Linux', 'MacOS'])

st.date_input('Enter your date of birth')

st.text_area('About you:')
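
# The custom theme itself would normally live in .streamlit/config.toml rather
# than in this script. A minimal sketch (the colour values are placeholders,
# not taken from the original tutorial):
#
#   [theme]
#   primaryColor = "#f63366"
#   backgroundColor = "#0e1117"
#   secondaryBackgroundColor = "#262730"
#   textColor = "#fafafa"
#   font = "sans serif"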
nilq/baby-python
python
import json import requests class Answer: def __init__(self, client, input): self.__client = client self.id = input['id'] self.answer = input['answer'] self.likes_count= input['likesCount'] self.created_at = input['createdAt'] self.tell = input['tell'] self.sender_status = input['senderStatus'] self.sender = input['sender'] self.recipient_id = input['userId'] self.is_current_user_tell_sender = input['isCurrentUserTellSender'] self.likes = input['likes'] # to-do: put this in a seperate class (?) def is_anonymous_tell(self): """ Checks wether or not the tell was received by an anonymous person 0: Anonymous 1: Unknown 2: Public Sender Returns: True: The tell was received by an anonymous person False: The tell was received by a public sender """ if self.sender_status == 0: return True return False def like(self): """ Likes the answer on the user's profile Returns: True (bool): Answers has been liked UnknownError (exception): UnknownError has occurred """ body = { "answerId": self.id, "userId": self.recipient_id, "limit": 13 } r = requests.post(self.__client.create_like_url, json=body, headers=self.__client.auth_header) if r.status_code == 200: return True raise UnknownError def delete(self): """ Deletes the answer on the user's profile """ body = { 'answerId': self.id, 'userId': self.recipient_id, 'limit': 13 } r = requests.post(self.__client.delete_answer_url, json=body, headers=self.__client.auth_header) if r.status_code == 200: return True raise UnknownError
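

# The like()/delete() methods above raise UnknownError, but the original file
# never defines or imports it; this minimal definition (an assumption -- the
# real project may declare it elsewhere) keeps those raises from turning into
# a NameError.
class UnknownError(Exception):
    """Raised when the API returns an unexpected status code."""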
nilq/baby-python
python
import mysql.connector
from mysql.connector import errorcode

try:
    con = mysql.connector.connect(user='niminimda', password='123456',
                                  host='127.0.0.1', database='test')
except mysql.connector.Error as err:
    if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
        print("something is wrong with user or password")
    elif err.errno == errorcode.ER_BAD_DB_ERROR:
        print("database doesn't exist")
    else:
        print(err)
else:
    query = "SELECT * FROM employee; "
    cursor = con.cursor()
    cursor.execute(query)
    myData = cursor.fetchall()

    # Sort rows by the third column (salary), ascending.
    myData.sort(key=lambda x: x[2])

    # One pass over adjacent pairs: when two rows share a salary, swap them so
    # the row with the greater second column (name) comes first.
    for item in range(0, len(myData) - 1):
        if myData[item][2] == myData[item + 1][2]:
            if myData[item][1] < myData[item + 1][1]:
                x = myData[item]
                myData[item] = myData[item + 1]
                myData[item + 1] = x

    # Print in reverse order, i.e. highest salary first.
    for y in range(len(myData) - 1, -1, -1):
        q = myData[y]
        print(q[0], q[1], q[2])

    cursor.close()
    con.close()
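
# The index-based logic above assumes an `employee` table whose first three
# columns are roughly (id, name, salary). A matching schema sketch, purely
# illustrative and not taken from the original project:
#
#   CREATE TABLE employee (
#       id     INT PRIMARY KEY,
#       name   VARCHAR(64),
#       salary DECIMAL(10, 2)
#   );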
nilq/baby-python
python
import os, sys import json, requests # TODO: NEED TO UPDATE TO HAVE FILES RIGHT OUT AS THE TEAM ID NUMBER # TODO: NOT THE TEAM NAME. TEAM_ID = { 'fuel' : 4523, 'fusion' : 4524, 'outlaws' : 4525, 'uprising' : 4402, 'excelsior' : 4403, 'shock' : 4404, 'valiant' : 4405, 'gladiators': 4406, 'mayhem' : 4407, 'dragons' : 4408, 'dynasty' : 4409, 'spitfire' : 4410 } OWLURL = 'https://api.overwatchleague.com' STANDINGS = '/standings' RANKING = '/ranking' SCHEDULE = '/schedule' save_path = './data/' standings_file = open(save_path+'standings.json', 'w+') ranking_file = open(save_path+'ranking.json', 'w+') schedule_file = open(save_path+'schedule.json', 'w+') standings_request = requests.get(OWLURL+STANDINGS) standings_json_data = standings_request.json() standings_data_str = json.dump(standings_json_data, standings_file) ranking_request = requests.get(OWLURL+RANKING) ranking_json_data = ranking_request.json() standings_data_str = json.dump(ranking_json_data, ranking_file) schedule_request = requests.get(OWLURL+SCHEDULE) schedule_json_data = schedule_request.json() schedule_data_str = json.dump(schedule_json_data, schedule_file) save_path = './data/teams/' for team, id in TEAM_ID.iteritems(): file = open(save_path+'{:s}'.format(team)+'.json', 'w+') request = requests.get(OWLURL+'/teams/{:d}'.format(id)) json_data = request.json() data_str = json.dump(json_data,file)
nilq/baby-python
python
from ._Session import Session from ._User import User from ._UserAffiliation import UserAffiliation from ._UserEntityPermission import UserEntityPermission from ._UserRoles import UserRoles
nilq/baby-python
python
""" Get reaction forces at the support nodes of a form diagram. """ from ghpythonlib.componentbase import executingcomponent as component import rhinoscriptsyntax as rs class SupportNodeResultsComponent(component): def RunScript(self, form, support_node_keys): if form: support_node_keys = support_node_keys or list(form.support_nodes()) reaction_forces = [rs.AddPoint(*form.reaction_force(nd)) for nd in support_node_keys] return reaction_forces
nilq/baby-python
python
from carts.models import Cart
from products.models import Product
from products.api.serializers import ProductSerializer
from rest_framework import serializers


class CartSerializer(serializers.ModelSerializer):
    products = serializers.SerializerMethodField()

    class Meta:
        model = Cart
        fields = (
            'user',
            'products',
            'subtotal',
            'total',
            'updated',
            'timestamp',
        )

    def get_products(self, obj):
        # Serialize the products of the requesting user's first cart; return
        # an empty list when the user has no cart yet.
        user = self.context.get('request').user
        results = Cart.objects.filter(user=user)
        if results.exists():
            product_list = [cart.products.all() for cart in results][0]
            response = ProductSerializer(product_list, many=True).data
        else:
            response = []
        return response
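
# Hypothetical usage sketch (not in the original module): exposing the
# serializer through a simple DRF view. URL wiring and permissions are
# omitted, the names are illustrative, and it assumes the user already has a
# cart.
#
#   from rest_framework import generics
#
#   class CartAPIView(generics.RetrieveAPIView):
#       serializer_class = CartSerializer
#
#       def get_object(self):
#           return Cart.objects.filter(user=self.request.user).first()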
nilq/baby-python
python
import os import re import json from setuptools import setup with open('Setup.lock') as f: c = json.loads(f.read()) with open(os.path.join(c['name'], '__init__.py')) as f: version = re.findall("^__version__ = '(.*)'", f.read())[0] with open('Pipfile.lock') as f: p = json.loads(f.read()) def _install_requires(): for k, v in p['default'].items(): if isinstance(v, str): yield k + v else: yield k + v['version'] install_requires = list(_install_requires()) kwargs = { 'name': c['name'], 'version': version, 'description': c['description'], 'url': c['url'], 'author': c['author'], 'author_email': c['author_email'], 'license': c['license'], 'packages': c.get('packages', []), 'zip_safe': False, 'scripts': c.get('scripts',[]), 'package_data': c.get('package_data',{}), 'install_requires': install_requires, 'classifiers': c.get('classifiers', []) } setup(**kwargs)
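
# For reference, a minimal Setup.lock that would satisfy the code above might
# look like the following (field values are illustrative, not from the real
# project; packages, scripts, package_data and classifiers are optional):
#
#   {
#     "name": "mypackage",
#     "description": "Example package",
#     "url": "https://example.com/mypackage",
#     "author": "Jane Doe",
#     "author_email": "jane@example.com",
#     "license": "MIT",
#     "packages": ["mypackage"]
#   }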
nilq/baby-python
python
""" openconfig_local_routing This module describes configuration and operational state data for routes that are locally generated, i.e., not created by dynamic routing protocols. These include static routes, locally created aggregate routes for reducing the number of constituent routes that must be advertised, summary routes for IGPs, etc. This model expresses locally generated routes as generically as possible, avoiding configuration of protocol\-specific attributes at the time of route creation. This is primarily to avoid assumptions about how underlying router implementations handle route attributes in various routing table data structures they maintain. Hence, the definition of locally generated routes essentially creates 'bare' routes that do not have any protocol\- specific attributes. When protocol\-specific attributes must be attached to a route (e.g., communities on a locally defined route meant to be advertised via BGP), the attributes should be attached via a protocol\-specific policy after importing the route into the protocol for distribution (again via routing policy). """ import sys from collections import OrderedDict from ydk.types import Entity as _Entity_ from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class LOCALDEFINEDNEXTHOP(Identity): """ A base identity type of local defined next\-hops """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self, ns="http://openconfig.net/yang/local-routing", pref="openconfig-local-routing", tag="openconfig-local-routing:LOCAL_DEFINED_NEXT_HOP"): if sys.version_info > (3,): super().__init__(ns, pref, tag) else: super(LOCALDEFINEDNEXTHOP, self).__init__(ns, pref, tag) class LocalRoutes(_Entity_): """ Top\-level container for local routes .. attribute:: config Configuration data for locally defined routes **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.Config>` .. attribute:: state Operational state data for locally defined routes **type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.State>` **config**\: False .. attribute:: static_routes Enclosing container for the list of static routes **type**\: :py:class:`StaticRoutes <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes>` .. 
attribute:: local_aggregates Enclosing container for locally\-defined aggregate routes **type**\: :py:class:`LocalAggregates <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates>` """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes, self).__init__() self._top_entity = None self.yang_name = "local-routes" self.yang_parent_name = "openconfig-local-routing" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("config", ("config", LocalRoutes.Config)), ("state", ("state", LocalRoutes.State)), ("static-routes", ("static_routes", LocalRoutes.StaticRoutes)), ("local-aggregates", ("local_aggregates", LocalRoutes.LocalAggregates))]) self._leafs = OrderedDict() self.config = LocalRoutes.Config() self.config.parent = self self._children_name_map["config"] = "config" self.state = LocalRoutes.State() self.state.parent = self self._children_name_map["state"] = "state" self.static_routes = LocalRoutes.StaticRoutes() self.static_routes.parent = self self._children_name_map["static_routes"] = "static-routes" self.local_aggregates = LocalRoutes.LocalAggregates() self.local_aggregates.parent = self self._children_name_map["local_aggregates"] = "local-aggregates" self._segment_path = lambda: "openconfig-local-routing:local-routes" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes, [], name, value) class Config(_Entity_): """ Configuration data for locally defined routes """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.Config, self).__init__() self.yang_name = "config" self.yang_parent_name = "local-routes" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict() self._segment_path = lambda: "config" self._absolute_path = lambda: "openconfig-local-routing:local-routes/%s" % self._segment_path() self._is_frozen = True class State(_Entity_): """ Operational state data for locally defined routes """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.State, self).__init__() self.yang_name = "state" self.yang_parent_name = "local-routes" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict() self._segment_path = lambda: "state" self._absolute_path = lambda: "openconfig-local-routing:local-routes/%s" % self._segment_path() self._is_frozen = True class StaticRoutes(_Entity_): """ Enclosing container for the list of static routes .. 
attribute:: static List of locally configured static routes **type**\: list of :py:class:`Static <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static>` """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes, self).__init__() self.yang_name = "static-routes" self.yang_parent_name = "local-routes" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("static", ("static", LocalRoutes.StaticRoutes.Static))]) self._leafs = OrderedDict() self.static = YList(self) self._segment_path = lambda: "static-routes" self._absolute_path = lambda: "openconfig-local-routing:local-routes/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes, [], name, value) class Static(_Entity_): """ List of locally configured static routes .. attribute:: prefix (key) Reference to the destination prefix list key **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$ **refers to**\: :py:class:`prefix <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.Config>` .. attribute:: config Configuration data for static routes **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.Config>` .. attribute:: state Operational state data for static routes **type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.State>` **config**\: False .. 
attribute:: next_hops Configuration and state parameters relating to the next\-hops that are to be utilised for the static route being specified **type**\: :py:class:`NextHops <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops>` """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static, self).__init__() self.yang_name = "static" self.yang_parent_name = "static-routes" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['prefix'] self._child_classes = OrderedDict([("config", ("config", LocalRoutes.StaticRoutes.Static.Config)), ("state", ("state", LocalRoutes.StaticRoutes.Static.State)), ("next-hops", ("next_hops", LocalRoutes.StaticRoutes.Static.NextHops))]) self._leafs = OrderedDict([ ('prefix', (YLeaf(YType.str, 'prefix'), ['str'])), ]) self.prefix = None self.config = LocalRoutes.StaticRoutes.Static.Config() self.config.parent = self self._children_name_map["config"] = "config" self.state = LocalRoutes.StaticRoutes.Static.State() self.state.parent = self self._children_name_map["state"] = "state" self.next_hops = LocalRoutes.StaticRoutes.Static.NextHops() self.next_hops.parent = self self._children_name_map["next_hops"] = "next-hops" self._segment_path = lambda: "static" + "[prefix='" + str(self.prefix) + "']" self._absolute_path = lambda: "openconfig-local-routing:local-routes/static-routes/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static, ['prefix'], name, value) class Config(_Entity_): """ Configuration data for static routes .. attribute:: prefix Destination prefix for the static route, either IPv4 or IPv6 **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$ .. attribute:: set_tag Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols **type**\: union of the below types: **type**\: int **range:** 0..4294967295 **type**\: str **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)? 
""" _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static.Config, self).__init__() self.yang_name = "config" self.yang_parent_name = "static" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('prefix', (YLeaf(YType.str, 'prefix'), ['str','str'])), ('set_tag', (YLeaf(YType.str, 'set-tag'), ['int','str'])), ]) self.prefix = None self.set_tag = None self._segment_path = lambda: "config" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static.Config, ['prefix', 'set_tag'], name, value) class State(_Entity_): """ Operational state data for static routes .. attribute:: prefix Destination prefix for the static route, either IPv4 or IPv6 **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$ **config**\: False .. attribute:: set_tag Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols **type**\: union of the below types: **type**\: int **range:** 0..4294967295 **type**\: str **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)? **config**\: False """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static.State, self).__init__() self.yang_name = "state" self.yang_parent_name = "static" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('prefix', (YLeaf(YType.str, 'prefix'), ['str','str'])), ('set_tag', (YLeaf(YType.str, 'set-tag'), ['int','str'])), ]) self.prefix = None self.set_tag = None self._segment_path = lambda: "state" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static.State, ['prefix', 'set_tag'], name, value) class NextHops(_Entity_): """ Configuration and state parameters relating to the next\-hops that are to be utilised for the static route being specified .. 
attribute:: next_hop A list of next\-hops to be utilised for the static route being specified **type**\: list of :py:class:`NextHop <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop>` """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static.NextHops, self).__init__() self.yang_name = "next-hops" self.yang_parent_name = "static" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([("next-hop", ("next_hop", LocalRoutes.StaticRoutes.Static.NextHops.NextHop))]) self._leafs = OrderedDict() self.next_hop = YList(self) self._segment_path = lambda: "next-hops" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops, [], name, value) class NextHop(_Entity_): """ A list of next\-hops to be utilised for the static route being specified. .. attribute:: index (key) A reference to the index of the current next\-hop. The index is intended to be a user\-specified value which can be used to reference the next\-hop in question, without any other semantics being assigned to it **type**\: str **refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config>` .. attribute:: config Configuration parameters relating to the next\-hop entry **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config>` .. attribute:: state Operational state parameters relating to the next\-hop entry **type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State>` **config**\: False .. 
attribute:: interface_ref Reference to an interface or subinterface **type**\: :py:class:`InterfaceRef <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef>` """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop, self).__init__() self.yang_name = "next-hop" self.yang_parent_name = "next-hops" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = ['index'] self._child_classes = OrderedDict([("config", ("config", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config)), ("state", ("state", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State)), ("interface-ref", ("interface_ref", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef))]) self._leafs = OrderedDict([ ('index', (YLeaf(YType.str, 'index'), ['str'])), ]) self.index = None self.config = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config() self.config.parent = self self._children_name_map["config"] = "config" self.state = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State() self.state.parent = self self._children_name_map["state"] = "state" self.interface_ref = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef() self.interface_ref.parent = self self._children_name_map["interface_ref"] = "interface-ref" self._segment_path = lambda: "next-hop" + "[index='" + str(self.index) + "']" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop, ['index'], name, value) class Config(_Entity_): """ Configuration parameters relating to the next\-hop entry .. attribute:: index An user\-specified identifier utilised to uniquely reference the next\-hop entry in the next\-hop list. The value of this index has no semantic meaning other than for referencing the entry **type**\: str .. attribute:: next_hop The next\-hop that is to be used for the static route \- this may be specified as an IP address, an interface or a pre\-defined next\-hop type \- for instance, DROP or LOCAL\_LINK. When this leaf is not set, and the interface\-ref value is specified for the next\-hop, then the system should treat the prefix as though it is directly connected to the interface **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$ **type**\: :py:class:`LOCALDEFINEDNEXTHOP <ydk.models.openconfig.openconfig_local_routing.LOCALDEFINEDNEXTHOP>` .. attribute:: metric A metric which is utilised to specify the preference of the next\-hop entry when it is injected into the RIB. The lower the metric, the more preferable the prefix is. When this value is not specified the metric is inherited from the default metric utilised for static routes within the network instance that the static routes are being instantiated. 
When multiple next\-hops are specified for a static route, the metric is utilised to determine which of the next\-hops is to be installed in the RIB. When multiple next\-hops have the same metric (be it specified, or simply the default) then these next\-hops should all be installed in the RIB **type**\: int **range:** 0..4294967295 .. attribute:: recurse Determines whether the next\-hop should be allowed to be looked up recursively \- i.e., via a RIB entry which has been installed by a routing protocol, or another static route \- rather than needing to be connected directly to an interface of the local system within the current network instance. When the interface reference specified within the next\-hop entry is set (i.e., is not null) then forwarding is restricted to being via the interface specified \- and recursion is hence disabled **type**\: bool **default value**\: false """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config, self).__init__() self.yang_name = "config" self.yang_parent_name = "next-hop" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('index', (YLeaf(YType.str, 'index'), ['str'])), ('next_hop', (YLeaf(YType.str, 'next-hop'), ['str','str',('ydk.models.openconfig.openconfig_local_routing', 'LOCALDEFINEDNEXTHOP')])), ('metric', (YLeaf(YType.uint32, 'metric'), ['int'])), ('recurse', (YLeaf(YType.boolean, 'recurse'), ['bool'])), ]) self.index = None self.next_hop = None self.metric = None self.recurse = None self._segment_path = lambda: "config" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config, ['index', 'next_hop', 'metric', 'recurse'], name, value) class State(_Entity_): """ Operational state parameters relating to the next\-hop entry .. attribute:: index An user\-specified identifier utilised to uniquely reference the next\-hop entry in the next\-hop list. The value of this index has no semantic meaning other than for referencing the entry **type**\: str **config**\: False .. attribute:: next_hop The next\-hop that is to be used for the static route \- this may be specified as an IP address, an interface or a pre\-defined next\-hop type \- for instance, DROP or LOCAL\_LINK. When this leaf is not set, and the interface\-ref value is specified for the next\-hop, then the system should treat the prefix as though it is directly connected to the interface **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$ **type**\: :py:class:`LOCALDEFINEDNEXTHOP <ydk.models.openconfig.openconfig_local_routing.LOCALDEFINEDNEXTHOP>` **config**\: False .. 
attribute:: metric A metric which is utilised to specify the preference of the next\-hop entry when it is injected into the RIB. The lower the metric, the more preferable the prefix is. When this value is not specified the metric is inherited from the default metric utilised for static routes within the network instance that the static routes are being instantiated. When multiple next\-hops are specified for a static route, the metric is utilised to determine which of the next\-hops is to be installed in the RIB. When multiple next\-hops have the same metric (be it specified, or simply the default) then these next\-hops should all be installed in the RIB **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: recurse Determines whether the next\-hop should be allowed to be looked up recursively \- i.e., via a RIB entry which has been installed by a routing protocol, or another static route \- rather than needing to be connected directly to an interface of the local system within the current network instance. When the interface reference specified within the next\-hop entry is set (i.e., is not null) then forwarding is restricted to being via the interface specified \- and recursion is hence disabled **type**\: bool **config**\: False **default value**\: false """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State, self).__init__() self.yang_name = "state" self.yang_parent_name = "next-hop" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('index', (YLeaf(YType.str, 'index'), ['str'])), ('next_hop', (YLeaf(YType.str, 'next-hop'), ['str','str',('ydk.models.openconfig.openconfig_local_routing', 'LOCALDEFINEDNEXTHOP')])), ('metric', (YLeaf(YType.uint32, 'metric'), ['int'])), ('recurse', (YLeaf(YType.boolean, 'recurse'), ['bool'])), ]) self.index = None self.next_hop = None self.metric = None self.recurse = None self._segment_path = lambda: "state" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State, ['index', 'next_hop', 'metric', 'recurse'], name, value) class InterfaceRef(_Entity_): """ Reference to an interface or subinterface .. attribute:: config Configured reference to interface / subinterface **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config>` .. 
attribute:: state Operational state for interface\-ref **type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State>` **config**\: False """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef, self).__init__() self.yang_name = "interface-ref" self.yang_parent_name = "next-hop" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([("config", ("config", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config)), ("state", ("state", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State))]) self._leafs = OrderedDict() self.config = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config() self.config.parent = self self._children_name_map["config"] = "config" self.state = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State() self.state.parent = self self._children_name_map["state"] = "state" self._segment_path = lambda: "interface-ref" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef, [], name, value) class Config(_Entity_): """ Configured reference to interface / subinterface .. attribute:: interface Reference to a base interface. If a reference to a subinterface is required, this leaf must be specified to indicate the base interface **type**\: str **refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>` .. attribute:: subinterface Reference to a subinterface \-\- this requires the base interface to be specified using the interface leaf in this container. If only a reference to a base interface is requuired, this leaf should not be set **type**\: int **range:** 0..4294967295 **refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>` """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config, self).__init__() self.yang_name = "config" self.yang_parent_name = "interface-ref" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('interface', (YLeaf(YType.str, 'interface'), ['str'])), ('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])), ]) self.interface = None self.subinterface = None self._segment_path = lambda: "config" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config, ['interface', 'subinterface'], name, value) class State(_Entity_): """ Operational state for interface\-ref .. attribute:: interface Reference to a base interface. If a reference to a subinterface is required, this leaf must be specified to indicate the base interface **type**\: str **refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>` **config**\: False .. attribute:: subinterface Reference to a subinterface \-\- this requires the base interface to be specified using the interface leaf in this container. 
If only a reference to a base interface is requuired, this leaf should not be set **type**\: int **range:** 0..4294967295 **refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>` **config**\: False """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State, self).__init__() self.yang_name = "state" self.yang_parent_name = "interface-ref" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('interface', (YLeaf(YType.str, 'interface'), ['str'])), ('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])), ]) self.interface = None self.subinterface = None self._segment_path = lambda: "state" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State, ['interface', 'subinterface'], name, value) class LocalAggregates(_Entity_): """ Enclosing container for locally\-defined aggregate routes .. attribute:: aggregate List of aggregates **type**\: list of :py:class:`Aggregate <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates.Aggregate>` """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.LocalAggregates, self).__init__() self.yang_name = "local-aggregates" self.yang_parent_name = "local-routes" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("aggregate", ("aggregate", LocalRoutes.LocalAggregates.Aggregate))]) self._leafs = OrderedDict() self.aggregate = YList(self) self._segment_path = lambda: "local-aggregates" self._absolute_path = lambda: "openconfig-local-routing:local-routes/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.LocalAggregates, [], name, value) class Aggregate(_Entity_): """ List of aggregates .. attribute:: prefix (key) Reference to the configured prefix for this aggregate **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$ **refers to**\: :py:class:`prefix <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates.Aggregate.Config>` .. attribute:: config Configuration data for aggregate advertisements **type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates.Aggregate.Config>` .. 
attribute:: state Operational state data for aggregate advertisements **type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates.Aggregate.State>` **config**\: False """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.LocalAggregates.Aggregate, self).__init__() self.yang_name = "aggregate" self.yang_parent_name = "local-aggregates" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['prefix'] self._child_classes = OrderedDict([("config", ("config", LocalRoutes.LocalAggregates.Aggregate.Config)), ("state", ("state", LocalRoutes.LocalAggregates.Aggregate.State))]) self._leafs = OrderedDict([ ('prefix', (YLeaf(YType.str, 'prefix'), ['str'])), ]) self.prefix = None self.config = LocalRoutes.LocalAggregates.Aggregate.Config() self.config.parent = self self._children_name_map["config"] = "config" self.state = LocalRoutes.LocalAggregates.Aggregate.State() self.state.parent = self self._children_name_map["state"] = "state" self._segment_path = lambda: "aggregate" + "[prefix='" + str(self.prefix) + "']" self._absolute_path = lambda: "openconfig-local-routing:local-routes/local-aggregates/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.LocalAggregates.Aggregate, ['prefix'], name, value) class Config(_Entity_): """ Configuration data for aggregate advertisements .. attribute:: prefix Aggregate prefix to be advertised **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$ .. attribute:: discard When true, install the aggregate route with a discard next\-hop \-\- traffic destined to the aggregate will be discarded with no ICMP message generated. When false, traffic destined to an aggregate address when no constituent routes are present will generate an ICMP unreachable message **type**\: bool **default value**\: false .. attribute:: set_tag Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols **type**\: union of the below types: **type**\: int **range:** 0..4294967295 **type**\: str **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)? 
""" _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.LocalAggregates.Aggregate.Config, self).__init__() self.yang_name = "config" self.yang_parent_name = "aggregate" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('prefix', (YLeaf(YType.str, 'prefix'), ['str','str'])), ('discard', (YLeaf(YType.boolean, 'discard'), ['bool'])), ('set_tag', (YLeaf(YType.str, 'set-tag'), ['int','str'])), ]) self.prefix = None self.discard = None self.set_tag = None self._segment_path = lambda: "config" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.LocalAggregates.Aggregate.Config, ['prefix', 'discard', 'set_tag'], name, value) class State(_Entity_): """ Operational state data for aggregate advertisements .. attribute:: prefix Aggregate prefix to be advertised **type**\: union of the below types: **type**\: str **pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$ **type**\: str **pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$ **config**\: False .. attribute:: discard When true, install the aggregate route with a discard next\-hop \-\- traffic destined to the aggregate will be discarded with no ICMP message generated. When false, traffic destined to an aggregate address when no constituent routes are present will generate an ICMP unreachable message **type**\: bool **config**\: False **default value**\: false .. attribute:: set_tag Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols **type**\: union of the below types: **type**\: int **range:** 0..4294967295 **type**\: str **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)? 
**config**\: False """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(LocalRoutes.LocalAggregates.Aggregate.State, self).__init__() self.yang_name = "state" self.yang_parent_name = "aggregate" self.is_top_level_class = False self.has_list_ancestor = True self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('prefix', (YLeaf(YType.str, 'prefix'), ['str','str'])), ('discard', (YLeaf(YType.boolean, 'discard'), ['bool'])), ('set_tag', (YLeaf(YType.str, 'set-tag'), ['int','str'])), ]) self.prefix = None self.discard = None self.set_tag = None self._segment_path = lambda: "state" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(LocalRoutes.LocalAggregates.Aggregate.State, ['prefix', 'discard', 'set_tag'], name, value) def clone_ptr(self): self._top_entity = LocalRoutes() return self._top_entity class DROP(LOCALDEFINEDNEXTHOP): """ Discard traffic for the corresponding destination """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self, ns="http://openconfig.net/yang/local-routing", pref="openconfig-local-routing", tag="openconfig-local-routing:DROP"): if sys.version_info > (3,): super().__init__(ns, pref, tag) else: super(DROP, self).__init__(ns, pref, tag) class LOCALLINK(LOCALDEFINEDNEXTHOP): """ Treat traffic towards addresses within the specified next\-hop prefix as though they are connected to a local link. When the LOCAL\_LINK next\-hop type is specified, an interface must also be specified such that the local system can determine which link to trigger link\-layer address discovery against """ _prefix = 'oc-loc-rt' _revision = '2017-05-15' def __init__(self, ns="http://openconfig.net/yang/local-routing", pref="openconfig-local-routing", tag="openconfig-local-routing:LOCAL_LINK"): if sys.version_info > (3,): super().__init__(ns, pref, tag) else: super(LOCALLINK, self).__init__(ns, pref, tag)
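
# Hypothetical usage sketch (not part of the generated bindings): configuring
# one static route with a single next-hop through ydk's CRUD service. The
# device address, credentials and prefixes are placeholders.
#
#   from ydk.services import CRUDService
#   from ydk.providers import NetconfServiceProvider
#
#   local_routes = LocalRoutes()
#   static = LocalRoutes.StaticRoutes.Static()
#   static.prefix = '10.0.0.0/24'
#   static.config.prefix = '10.0.0.0/24'
#   nh = LocalRoutes.StaticRoutes.Static.NextHops.NextHop()
#   nh.index = '1'
#   nh.config.index = '1'
#   nh.config.next_hop = '192.0.2.1'
#   static.next_hops.next_hop.append(nh)
#   local_routes.static_routes.static.append(static)
#
#   provider = NetconfServiceProvider(address='router.example', username='admin', password='admin')
#   CRUDService().create(provider, local_routes)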
nilq/baby-python
python
import time import json from pathlib import Path import torch import torch.nn as nn from torch.nn.parallel import DistributedDataParallel from torch.utils.tensorboard import SummaryWriter from radam import RAdam from model import GPT, GPTLMHead, GPTClsHead def timeit(method): def timed(*args, **kw): _args = args[0].args ts = time.time() result = method(*args, **kw) te = time.time() if _args.distributed: if _args.local_rank == 0: print('Function Time: {}\t>\t{:.0f} min {:.0f} sec'.format(method.__name__, (te-ts)//60, (te-ts)%60)) else: print('Function Time: {}\t>\t{:.0f} min {:.0f} sec'.format(method.__name__, (te-ts)//60, (te-ts)%60)) return result return timed class Trainer: def __init__(self, args, train_loader, test_loader, tokenizer): self.args = args self.train_loader = train_loader self.test_loader = test_loader self.tokenizer = tokenizer self.vocab_size = tokenizer.vocab_size self.pad_id = tokenizer.pad_token_id self.eos_id = tokenizer.eos_token_id self.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu', args.local_rank) self.writer = SummaryWriter() if args.local_rank in [-1, 0] else None self.n_gpus = torch.distributed.get_world_size() if args.distributed else torch.cuda.device_count() assert args.pretrain != args.finetune # Do not set both finetune and pretrain arguments to the same (True, False) if args.pretrained_model: self.gpt = torch.load(args.pretrained_model) else: self.gpt = GPT(vocab_size=self.vocab_size, seq_len=args.max_seq_len, d_model=args.hidden, n_layers=args.n_layers, n_heads=args.n_attn_heads, d_ff=args.ffn_hidden, embd_pdrop=args.embd_dropout, attn_pdrop=args.attn_dropout, resid_pdrop=args.resid_dropout, pad_id=self.pad_id) if args.pretrain: self.model = GPTLMHead(self.gpt) self.model.to(self.device) if args.finetune: with open(args.cached_label_dict, 'r') as file: label_dict = json.load(file) self.model = GPTClsHead(self.gpt, n_class=len(label_dict), cls_token_id=self.eos_id) self.model.to(self.device) if args.distributed: self.model = DistributedDataParallel(self.model, device_ids=[args.local_rank], output_device=args.local_rank) self.optimizer = RAdam(self.model.parameters(), args.lr) self.criterion = nn.CrossEntropyLoss(ignore_index = self.pad_id).to(self.device) self.cls_criterion = nn.CrossEntropyLoss().to(self.device) @timeit def train(self, epoch): if self.args.pretrain: self.pretrain(epoch) if self.args.finetune: self.finetune(epoch) def pretrain(self, epoch): losses = 0 n_batches, n_samples = len(self.train_loader), len(self.train_loader.dataset) self.model.train() for i, batch in enumerate(self.train_loader): inputs = batch[0].to(self.device) targets = inputs[:, 1:].contiguous() # |inputs| : (batch_size, seq_len), |targets| : (batch_size, seq_len-1) lm_logits = self.model(inputs) lm_logits = lm_logits[:, :-1].contiguous() # |lm_logits| : (batch_size, seq_len-1, vocab_size) loss = self.criterion(lm_logits.view(-1, self.vocab_size), targets.view(-1)) losses += loss.item() self.optimizer.zero_grad() loss.backward() self.optimizer.step() if self.args.local_rank in [-1, 0]: self.writer.add_scalar('Loss/pre-train', loss.item(), ((epoch-1)*n_batches)+i) if i % (n_batches//5) == 0 and i != 0: print('Iteration {} ({}/{})\tLoss: {:.4f}'.format(i, i, n_batches, losses/i)) print('Train Epoch {} [rank: {}]\t>\tLoss: {:.4f}'.format(epoch, self.args.local_rank, losses/n_batches)) def finetune(self, epoch): losses, accs = 0, 0 n_batches, n_samples = len(self.train_loader), len(self.train_loader.dataset) # n_batches = batch size 
per GPU self.model.train() for i, batch in enumerate(self.train_loader): inputs, labels = map(lambda x: x.to(self.device), batch) # |inputs| : (batch_size, seq_len), |labels| : (batch_size) lm_logits, cls_logits = self.model(inputs) lm_logits = lm_logits[:, :-1].contiguous() # |lm_logits| : (batch_size, seq_len-1, vocab_size), |cls_logits| : (batch_size, n_class) lm_loss = self.criterion(lm_logits.view(-1, self.vocab_size), inputs[:, 1:].contiguous().view(-1)) cls_loss = self.cls_criterion(cls_logits, labels) loss = cls_loss + (self.args.auxiliary_ratio * lm_loss) losses += loss.item() acc = (cls_logits.argmax(dim=-1) == labels).to(dtype=cls_logits.dtype).mean() accs += acc self.optimizer.zero_grad() loss.backward() self.optimizer.step() if self.args.local_rank in [-1, 0]: self.writer.add_scalar('Loss/fine-tune', loss.item(), ((epoch-1)*n_batches)+i) self.writer.add_scalar('Accuracy/fine-tune', acc, ((epoch-1)*n_batches)+i) if i % (n_batches//5) == 0 and i != 0: print('Iteration {} ({}/{})\tLoss: {:.4f} Acc: {:.1f}%'.format(i, i, n_batches, losses/i, accs/i*100.)) print('Train Epoch {} [rank: {}]\t>\tLoss: {:.4f} / Acc: {:.1f}%'.format(epoch, self.args.local_rank, losses/n_batches, accs/n_batches*100.)) def evaluate(self, epoch): losses, accs = 0, 0 n_batches, n_samples = len(self.test_loader), len(self.test_loader.dataset) self.model.eval() with torch.no_grad(): for i, batch in enumerate(self.test_loader): if self.args.pretrain: inputs = batch.to(self.device) targets = inputs[:, 1:].contiguous() lm_logits = self.model(inputs) lm_logits = lm_logits[:, :-1].contiguous() loss = self.criterion(lm_logits.view(-1, self.vocab_size), targets.view(-1)) losses += loss.item() if self.args.local_rank in [-1, 0]: self.writer.add_scalar('Loss/pre-train(eval)', loss.item(), ((epoch-1)*n_batches)+i) elif self.args.finetune: inputs, labels = map(lambda x: x.to(self.device), batch) lm_logits, cls_logits = self.model(inputs) lm_logits = lm_logits[:, :-1].contiguous() lm_loss = self.criterion(lm_logits.view(-1, self.vocab_size), inputs[:, 1:].contiguous().view(-1)) cls_loss = self.cls_criterion(cls_logits, labels) loss = cls_loss + (self.args.auxiliary_ratio * lm_loss) losses += loss.item() acc = (cls_logits.argmax(dim=-1) == labels).to(dtype=cls_logits.dtype).mean() accs += acc if self.args.local_rank in [-1, 0]: self.writer.add_scalar('Loss/fine-tune(eval)', loss.item(), ((epoch-1)*n_batches)+i) self.writer.add_scalar('Accuracy/fine-tune(eval)', acc, ((epoch-1)*n_batches)+i) print('Eval Epoch {} [rank: {}]\t>\tLoss: {:.4f} / Acc: {:.1f}%'.format(epoch, self.args.local_rank, losses/n_batches, accs/n_batches*100.)) def save(self, epoch, model_prefix='model', root='.model'): path = Path(root) / (model_prefix + '.ep%d' % epoch) if not path.parent.exists(): path.parent.mkdir() if self.args.distributed: if self.args.local_rank == 0: torch.save(self.gpt, path) else: torch.save(self.gpt, path)
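
# Hedged illustration (not part of the original file): a minimal, self-contained sketch of the
# logit/target alignment used in Trainer.pretrain(). Targets are the inputs shifted left by one
# token and the final logit is dropped, so position t predicts token t+1. The sizes and pad id
# below are made-up values for demonstration only.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    batch_size, seq_len, vocab_size, pad_id = 2, 8, 100, 0
    inputs = torch.randint(1, vocab_size, (batch_size, seq_len))   # |inputs| : (batch_size, seq_len)
    lm_logits = torch.randn(batch_size, seq_len, vocab_size)       # stand-in for model(inputs)

    targets = inputs[:, 1:].contiguous()                           # (batch_size, seq_len-1)
    shifted_logits = lm_logits[:, :-1].contiguous()                # (batch_size, seq_len-1, vocab_size)
    criterion = nn.CrossEntropyLoss(ignore_index=pad_id)
    loss = criterion(shifted_logits.view(-1, vocab_size), targets.view(-1))
    print(loss.item())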
nilq/baby-python
python
import os import subprocess files = [ "001", "001a", "001b", "002", "002a", "002b", "003", "003a", "003b", "004", "004a", "004b", "005", "005a", "005b", "006", "006a", "006b", "007", "007a", "007b", "008", "008a", "008b", "009", "009a", "009b", "010", "010a", "010b", "011", "011a", "011b", "012", "012a", "012b", "013", "013a", "013b", "014", "014a", "014b", "015", "015a", "015b", "016", "016a", "016b", "017", "017a", "017b", "017c", "018", "018a", "018b", "018c", "018d", "018e", "018f", "018g", "019", "019a", "019b", "019c", "019d", "019e", "020", "021", "021a", "021b", "021c", "021d", "021e", "022", "022a", "022b", "022c", "022d", "022e", "023", "023a", "023b", "023c", "023d", "023e", "024", "024a", "024b", "024c", "024d", "024e", "025", "025a", "025b", "025c", "025d", "025e", "026", "026a", "026b", "026c", "026d", "026e", "027", "027a", "027b", "027c", "027d", "027e", "028", "028a", "028b", "028c", "028d", "028e", "029", "029a", "029b", "029c", "029d", "029e", "030", "030a", "030b", "030c", "030d", "030e", ] for n in files: in_path = os.path.join("public", "img", "map", "pipo-charachip" + n + ".png") for i, direction in enumerate(["down", "left", "right", "up"]): out_path = os.path.join("public", "img", "avatar", n + "-" + direction + ".png") offset = i * 32 subprocess.call( ["magick", "convert", in_path, "-crop", "32x32+0+" + str(offset), out_path] )
nilq/baby-python
python
#! /usr/bin/env python import io import os from setuptools import setup mydir = os.path.dirname(__file__) def read_project_version(): # Version-trick to have version-info in a single place. # http://stackoverflow.com/questions/2058802/how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package fglobals = {} with io.open(os.path.join(mydir, '_version.py')) as fd: exec(fd.read(), fglobals) # To read __version__ return fglobals['__version__'] setup(name='doit-graphx', description="doit command plugin to generate task dependency-graphs using networkx", version=read_project_version(), license='MIT', author='Kostis Anagnostopoulos', author_email='ankostis@gmail.com', url='https://github.com/pydoit/doit-graphx', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Operating System :: POSIX', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Intended Audience :: Developers', 'Intended Audience :: Information Technology', 'Intended Audience :: Science/Research', 'Intended Audience :: System Administrators', 'Topic :: Software Development :: Build Tools', 'Topic :: Software Development :: Testing', 'Topic :: Software Development :: Quality Assurance', 'Topic :: Scientific/Engineering', ], py_modules=['cmd_graphx', '_version'], # TODO: Fatcor-out matplotlib in an extra-requires. install_requires=['networkx', 'matplotlib'], # doit>=0.28.0] # doit 0.28 unreleased long_description="", )
nilq/baby-python
python
import os from unittest.mock import patch from util.job import get_job_id def test_create_job_id(): assert get_job_id() == os.getenv('JOB_ID'), 'job id is created' @patch.dict('os.environ', {'JOB_ID': 'job_123'}) def test_retrieve_job_id(): assert get_job_id() == 'job_123', 'job id is retrieved'
nilq/baby-python
python
from kelvin.tests.test_cc_utils import * from kelvin.tests.test_ccsd import * from kelvin.tests.test_ft_cc_2rdm import * from kelvin.tests.test_ft_cc_ampl import * from kelvin.tests.test_ft_cc_relden import * from kelvin.tests.test_ft_ccsd import * from kelvin.tests.test_ft_ccsd_rdm import * from kelvin.tests.test_ft_deriv import * from kelvin.tests.test_ft_lambda import * from kelvin.tests.test_ft_lambda_equations import * from kelvin.tests.test_ft_mp2 import * from kelvin.tests.test_hubbard import * from kelvin.tests.test_hubbard_field import * from kelvin.tests.test_kel_ccsd import * from kelvin.tests.test_lambda import * from kelvin.tests.test_mp2 import * from kelvin.tests.test_neq_ccsd import * from kelvin.tests.test_neq_density import * from kelvin.tests.test_neq_lambda import * from kelvin.tests.test_neq_lambda_equation import * from kelvin.tests.test_neq_prop import * from kelvin.tests.test_quadrature import * from kelvin.tests.test_td_ccsd import * from kelvin.tests.test_td_ccsd_ESN import * from kelvin.tests.test_td_ccsd_lambda import * from kelvin.tests.test_td_ccsd_1rdm import * from kelvin.tests.test_td_ccsd_2rdm import * from kelvin.tests.test_td_ccsd_relden import * from kelvin.tests.test_scf import * from kelvin.tests.test_test import * from kelvin.tests.test_ueg import * from kelvin.tests.test_ueg_utils import *
nilq/baby-python
python
from person import Person from bounding_box import BoundingBox from typing import List from video_frame import VideoFrame from sort import Sort import numpy as np class Tracker: """ Trackes detected person and groups people with close trajectories. Attributes ---------- minDist: float People are considered to be in the same group if they are less the minDist meters from each other for enough video frames """ def __init__(self, analyzer, minDist = 100) -> None: self._sort=Sort(max_age=10) self._analyzer=analyzer self._minDist = minDist pass def addBoundingBoxForPerson(self, person:Person, box:BoundingBox): """ Append the boundig box to the bounding boxes of the person. It also calculates and append the coordinate of the person to his/her coordinates. Parameters ---------- person : Person The owner of the bounding box box : BoundingBox The bounding box to append """ person.bounding_boxes.append(box) if box==None: person.coordinates.append(None) else: x, y = self._analyzer.transformation.transformPoint(box.left+box.width/2, box.top+box.height) person.addCoordinates(x, y) def updateTrajectories(self,current:VideoFrame,bounding_boxes:List[BoundingBox],scores:List[float])->None: """ Identifies new people on the videoFrame, tracks already identified people. Deletes people, if they are missing for at least 10 video frames. Parameters ---------- current : VideoFrame New video frame last : VideoFrame, optional The video frame before, by default None bounding_boxes: BoundingBox[] Detected boundingboxes on current frame scores: float[] Certanity score of boundingboxes """ lenBB=len(bounding_boxes); if (lenBB != 0): npbb=np.array([[bb.left, bb.top, bb.left+bb.width,bb.top+bb.height] for bb in bounding_boxes]) npscores=np.array(scores) npscores=np.resize(npscores,(lenBB,1)) bbs=np.hstack((npbb,npscores)) objs=self._sort.update(bbs) activePeople:List[Person]=self._analyzer.activePeople to_delete=[] for person in activePeople: found=False for obj in objs: if obj[4]==person.id: self.addBoundingBoxForPerson(person, BoundingBox(int(obj[0]),int(obj[1]),int(obj[2]-obj[0]),int(obj[3]-obj[1]))) found=True obj[4]=-1 break if not found: countNone=0 for bbid in range(1,min(len(person.bounding_boxes),6)): if person.bounding_boxes[-bbid] is None: countNone+=1 if(countNone==5): to_delete.append(person) self.addBoundingBoxForPerson(person,None) for obj in objs: if obj[4]!=-1: newPerson=Person() newPerson.id=obj[4] self.addBoundingBoxForPerson(newPerson, BoundingBox(int(obj[0]),int(obj[1]),int(obj[2]-obj[0]),int(obj[3]-obj[1]))) self._analyzer.activePeople.append(newPerson) for d in to_delete: self._analyzer.activePeople.remove(d) def groupTrajectories(self, dt = 100)->None:#4 * 30)->None: """ Considers two individuals as being in the same group if they are less then d meters apart for at least dt seconds. Parameters ---------- dt : int minimum seconds (sec * fps) """ for i, p1 in enumerate(self._analyzer.activePeople): for j, p2 in enumerate(self._analyzer.activePeople): if (i > j) and (p1 not in p2.inGroupWith): if ((len(p1.coordinates) >= dt) and (len(p2.coordinates) >= dt)): in_group = True for k in range(dt): if ((p1.coordinates[-k] != None) and (p2.coordinates[-k] != None) and (p1.coordinates[-k].DistanceFrom(p2.coordinates[-k]) > self._minDist)): in_group = False if in_group: p1.inGroupWith.append(p2) p2.inGroupWith.append(p1)
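
# Hedged illustration (not part of the original file): a small numpy-only sketch of the
# detection packing done in updateTrajectories(), where (left, top, width, height) boxes and
# their confidence scores are stacked into the N x 5 [x1, y1, x2, y2, score] array expected by
# Sort.update(). The box values and scores below are invented for demonstration.
if __name__ == "__main__":
    import numpy as np

    boxes = [(10, 20, 50, 100), (200, 40, 60, 120)]    # (left, top, width, height)
    scores = [0.9, 0.75]

    xyxy = np.array([[l, t, l + w, t + h] for (l, t, w, h) in boxes], dtype=float)
    conf = np.array(scores, dtype=float).reshape(len(boxes), 1)
    detections = np.hstack((xyxy, conf))                # shape (N, 5), one row per detection
    print(detections)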
nilq/baby-python
python
__version__ = "0.2.8" from . import utils from . import common from . import manager from .common import Module from .common import Sequential from .common import Linear from .common import Identity from .common import ModuleList from .common import MultiModule from .common import Parameter from .manager import register_packages from .manager import get_module_dict from .manager import get_module_classes from .manager import get_module_names def create_model_cls(package=None, model_path=None, name=None, modargs=None): """ Create a model-initializing function that accepts positional arguments. :param package: the package to search for the model. If none given, all known packages will be searched. :param model_path: yaml file path that contains keyword-only arguments. :param name: model name to search for. If no model path is specified, this option will be used. :param modargs: keyword-only module arguments to initialize the function. :return: function """ if model_path is None: if name is None: classes = manager.get_module_classes(package) assert len(classes) > 0, \ f"no modules found in package " \ f"'{package if package is not None else 'all'}" name = classes[0].name modargs = get_optarg_template(classes[0]) if modargs is None: modargs = dict() else: opts = utils.load_yaml(model_path) name, modargs = opts.get("type"), opts.get("vargs") namemap = manager.get_module_dict(package) assert name in namemap, \ f"module name '{name}' does not exist. available names: " \ f"{list(namemap.keys())}" model_cls = namemap[name] caster = common.get_caster(model_cls) return caster({ "type": model_cls.name, "vargs": modargs }) def get_optarg_template(cls: common.Module): def get_value_template(optarg: common.OptionalArgument): if optarg.islist: sample = optarg.default[0] else: sample = optarg.default if common.is_module_cls(sample): pkg = sample.get_package() classes = manager.get_module_classes(pkg) assert classes, \ f"no available modules found for package '{pkg}'" cls = classes[0] val = {"type": cls.name} args = get_optarg_template(cls) if args: val["vargs"] = args else: val = sample if optarg.islist: val = [val] return val return { name: get_value_template(optarg) for name, optarg in cls.get_optargs().items() }
nilq/baby-python
python
from src.abstract_load_balancer import AbstractLoadBalancer, LoadBalancerQueue class UtilisationAwareLoadBalancer(AbstractLoadBalancer): def __init__(self, APISERVER, DEPLOYMENT): self.apiServer = APISERVER self.deployment = DEPLOYMENT self.internalQueue = [] def UpdatePodList(self): self.internalQueue.clear() endPoints = self.apiServer.GetEndPointsByLabel(self.deployment.deploymentLabel) for endPoint in endPoints: if endPoint.pod and endPoint.pod.isRunning(): queueItem = LoadBalancerQueue(endPoint.pod, len(endPoint.pod.requests)) self.internalQueue.append(queueItem) def FindPriorityQueueItem(self): priorityQueueItem = self.internalQueue[0] for queueItem in self.internalQueue: if queueItem.priority < priorityQueueItem.priority: priorityQueueItem = queueItem return priorityQueueItem def FindPod(self): self.UpdatePodList() if len(self.internalQueue) > 0: queueItem = self.FindPriorityQueueItem() if queueItem is not None: return queueItem.pod return None
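
# Hedged illustration (not part of the original file): a sketch with minimal stand-in objects
# showing how UtilisationAwareLoadBalancer.FindPod() picks the running pod with the fewest
# queued requests. The fake API server, deployment and pods below are invented purely for
# demonstration; it assumes the project's LoadBalancerQueue import above resolves.
if __name__ == "__main__":
    class _FakePod:
        def __init__(self, name, n_requests):
            self.name = name
            self.requests = [object()] * n_requests   # pending requests on this pod
        def isRunning(self):
            return True

    class _FakeEndPoint:
        def __init__(self, pod):
            self.pod = pod

    class _FakeApiServer:
        def __init__(self, end_points):
            self._end_points = end_points
        def GetEndPointsByLabel(self, label):
            return self._end_points

    class _FakeDeployment:
        deploymentLabel = "demo"

    pods = [_FakePod("pod-a", 5), _FakePod("pod-b", 1), _FakePod("pod-c", 3)]
    balancer = UtilisationAwareLoadBalancer(
        _FakeApiServer([_FakeEndPoint(p) for p in pods]), _FakeDeployment())
    print(balancer.FindPod().name)   # expected: "pod-b" (fewest pending requests)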
nilq/baby-python
python
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Metric that tests models against snow variations.""" import numpy as np from tqdm import tqdm from collections import Iterable from .base import Metric from .base import call_decorator from PIL import Image import warnings from perceptron.benchmarks.motion_blur import MotionBlurMetric import pdb class SnowMetric(Metric): """Metric that tests models against snow variations.""" @call_decorator def __call__(self, adv, angle=45, annotation=None, unpack=True, abort_early=True, verify=False, epsilons=1000): """Change the snow of the image until it is misclassified. Parameters ---------- adv : `numpy.ndarray` The original, unperturbed input as a `numpy.ndarray`. angle : float Angle of snowfall. annotation : int The reference label of the original input. Must be passed if `a` is a `numpy.ndarray`. unpack : bool If true, returns the adversarial input, otherwise returns the Adversarial object. abort_early : bool If true, returns when got first adversarial, otherwise returns when all the iterations are finished. verify : bool If True, return verifiable bound. epsilons : int or Iterable[float] Either Iterable of contrast levels or number of brightness factors between 1 and 0 that should be tried. Epsilons are one minus the brightness factor. Epsilons are not used if verify = True. 
""" import cv2 if verify is True: warnings.warn('epsilon is not used in verification mode ' 'and abort_early is set to True.') a = adv del adv del annotation del unpack image = a.original_image min_, max_ = a.bounds() axis = a.channel_axis(batch=False) hw = [image.shape[i] for i in range(image.ndim) if i != axis] img_height, img_width = hw if not isinstance(epsilons, Iterable): epsilons = np.linspace(0, 1, num=epsilons)[1:] else: epsilons = epsilons snow_mask_np = np.zeros((img_height // 10, img_height // 10, 3)) ch = snow_mask_np.shape[0] // 2 cw = snow_mask_np.shape[1] // 2 cr = min(img_height, img_width) * 0.1 for i in range(snow_mask_np.shape[0]): for j in range(snow_mask_np.shape[1]): if (i - ch) ** 2 + (j - cw) ** 2 <= cr: snow_mask_np[i, j] = np.ones(3) kernel = MotionBlurMetric.motion_Kernel((int(ch * 0.9), int(cw * 0.9)), angle) blured = cv2.filter2D(snow_mask_np, -1, kernel) blured = np.clip(blured, min_, max_).astype(np.float32) blured = blured * max_ blured_h, blured_w = blured.shape[:2] if axis == 0: blured = np.transpose(blured, (2, 0, 1)) cc0 = [1, 100] for _, epsilon in enumerate(tqdm(epsilons)): p0 = int(cc0[0] + epsilon * (cc0[1] - cc0[0])) positions_h = np.random.randint(img_height - blured_h, size=p0) positions_w = np.random.randint(img_width - blured_w, size=p0) perturbed = np.copy(image) for temp_h, temp_w in zip(positions_h, positions_w): if axis == 0: perturbed[:, temp_h: temp_h + blured_h, temp_w: temp_w + blured_w] += blured else: perturbed[temp_h: temp_h + blured_h, temp_w: temp_w + blured_w, :] += blured perturbed = np.clip(perturbed, min_, max_) _, is_adversarial = a.predictions(perturbed) if is_adversarial: if abort_early or verify: break else: bound = epsilon a.verifiable_bounds = (bound, None) return
nilq/baby-python
python
"""Utilities for reading configuration from settings.""" from collections import namedtuple from functools import partial from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.utils.text import slugify import six import logging logger = logging.getLogger(__name__) # Decorators that can be composed. PIPES = [] # Placeholder decorators PIPELINES = [] class Pipe: """Configuration class.""" def __init__(self, function, name, slug, meta, enabled): """Initialize Pipe.""" self.function = function self.name = name self.slug = slug self.meta = meta self.enabled = enabled # Decorators used in codebase. Pipeline = namedtuple('Pipeline', ['slug', 'name', 'meta']) def conf_to_pipe(conf): """Create Pipe object out of configuration.""" # if conf is a string type, convert it to if isinstance(conf, six.string_types): conf = {'function': conf} if not isinstance(conf, dict): raise ImproperlyConfigured( 'Dynamicdecorator configuration should be string or dictionay:' '%s' % conf) # Default enabled value. conf['enabled'] = False # Only mandatory field is function: if 'function' not in conf: raise ImproperlyConfigured( 'Configuration do not have function item: %s' % conf) # If name is not defined use function name as name if 'name' not in conf: conf['name'] = conf['function'] if 'slug' not in conf: conf['slug'] = conf['name'] # Ensure that slug is slugified conf['slug'] = slugify(conf['slug']) # Group will be used in interface if 'meta' not in conf: conf['meta'] = {} return Pipe(**conf) def get_pipes(): """Get pipes from settings.""" # TODO: If settings does not have PROVIDED_DECORATORS assign it. # we should return default decorators in this case. # TODO: PROVIDED_DECORATORS seems to be not used right now. if PIPES: return PIPES for c in settings.DYNAMIC_DECORATORS: # Set Default vaues. p = conf_to_pipe(c) if any(e for e in PIPES if p.slug == e.slug): raise ImproperlyConfigured( 'Duplicate name in decorator configuration: %s' % p) PIPES.append(p) return PIPES def get_pipelines(): """Get pipelines.""" return PIPELINES def register_pipeline(slug, name, meta): """Register given pipeline.""" if not isinstance(meta, dict): raise ImproperlyConfigured( 'Meta value of a decorator must be a dictionay:' '%s' % meta) pipeline = Pipeline(slug, name, meta) if not any(p.slug == slug for p in PIPELINES): PIPELINES.append(pipeline) return pipeline else: logger.info('[DYNAMIC_DECORATORS] %s is already registered. Ignoring.' % slug) return next(p for p in PIPELINES if p.slug == slug) def get_pipeline_by_slug(slug): """Search pipeline by slug value.""" return next(p for p in PIPELINES if p.slug == slug) def is_match(pipeline, pipe): """Check pipe against pipeline. Check if there is any meta property on pipeline that matches with pipe. """ # if pipe does not have any meta attribute it automatically matches. # if pipe has meta attributes it only matches if all meta attributes # that exists on both pipe and pipeline has same values. # This relationship is not surjective. return not pipe.meta or all(pipe.meta[k] == v for k, v in pipeline.meta.iteritems() if k in pipe.meta) def filter_pipes(pipeline, pipes): """Filter given pipes by meta values of current pipeline.""" return filter(partial(is_match, pipeline), pipes)
nilq/baby-python
python
import pytest from karp.domain.models.resource import create_resource from karp.domain.models.entry import EntryRepository, create_entry from karp.infrastructure.sql import sql_entry_repository from karp.infrastructure.unit_of_work import unit_of_work @pytest.fixture def resource_blam(): resource = create_resource( { "resource_id": "blam", "resource_name": "Blam", "sort": ["baseform"], "fields": {"baseform": {"type": "string", "required": True}}, "id": "baseform", } ) yield resource resource.entry_repository.teardown() def test_resource_has_entry_respository(resource_blam): assert isinstance(resource_blam.entry_repository, EntryRepository) with unit_of_work(using=resource_blam.entry_repository) as uw: assert len(uw.entry_ids()) == 0 def test_resource_put_entry(resource_blam): assert isinstance(resource_blam.entry_repository, EntryRepository) with unit_of_work(using=resource_blam.entry_repository) as uw: entry = create_entry("hubba", {}) uw.put(entry) uw.commit() entry_ids = uw.entry_ids() assert len(entry_ids) == 1 assert "hubba" in entry_ids
nilq/baby-python
python
# coding=utf-8 from __future__ import unicode_literals, print_function import re import datetime from ..models import RawLog, DummyLogger, MacAddress, UserAction CODE_WLAN_JOIN = "WLAN-Gerät angemeldet" CODE_WLAN_LEAVE = "WLAN-Gerät hat sich abgemeldet" CODE_WLAN_REMOVED = "WLAN-Gerät wurde abgemeldet" def parse_logs(log=None): if log is None: log = DummyLogger() report = { "scanned": 0, "new_macs": 0, "failed": 0, "new_actions": 0, } for rawlog in RawLog.objects.all(): # replace comma in things like '(2,4 Ghz)' def _repl(match): return "(%s.%s)" % match.groups() text = re.sub(r"\(([^\).]+),([^\).]+)\)", _repl, rawlog.text) if text.endswith("."): text = text[:-1] info = [x.strip() for x in text.split(",")] print(info) if len(info) < 1: continue def _parse_address(info, report): name, ip, mac = info[0:3] if not ip.startswith("IP") or not mac.startswith("MAC"): log.error("Could not parse IP/MAC for log entry pk=%s" % rawlog.pk) report["failed"] += 1 return None ip = ip.split()[1] mac = mac.split()[1][:17] obj, created = MacAddress.objects.get_or_create(mac=mac[:20], name=name[:100]) if created: report["new_macs"] += 1 return { "name": name, "ip": ip, "mac": mac } def _add_action(address, action): obj, created = UserAction.objects.get_or_create( date=rawlog.date, mac=address["mac"], ip=address["ip"], action=action, ) if created: report["new_actions"] += 1 code = info[0] if code.startswith(CODE_WLAN_JOIN): if len(info) >= 4: address = _parse_address(info[2:5], report) if address is None: continue _add_action(address, UserAction.ACT_WLAN_CONNECT) elif code.startswith(CODE_WLAN_LEAVE) or code.startswith(CODE_WLAN_REMOVED): if len(info) >= 4: address = _parse_address(info[1:4], report) if address is None: continue _add_action(address, UserAction.ACT_WLAN_DISCONNECT) log.log("%(new_macs)s new MACs, %(new_actions)s new user-actions" % report)
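
# Hedged illustration (not part of the original file): a standalone check of the comma fix-up
# applied by parse_logs() before splitting on ",". Fragments such as "(2,4 GHz)" would
# otherwise be split into two fields; the substitution turns them into "(2.4 GHz)" first.
# The sample log line below is invented for demonstration.
if __name__ == "__main__":
    sample = "WLAN-Gerät angemeldet (2,4 GHz), Gerätename, IP 192.168.178.20, MAC 00:11:22:33:44:55"

    def _repl(match):
        return "(%s.%s)" % match.groups()

    fixed = re.sub(r"\(([^\).]+),([^\).]+)\)", _repl, sample)
    print(fixed)                                     # "... (2.4 GHz), Gerätename, ..."
    print([x.strip() for x in fixed.split(",")])     # four fields instead of five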
nilq/baby-python
python
# Load library import numpy as np # Create matrix matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) # View number of rows and columns matrix.shape # (3, 4) # View number of elements (rows * columns) matrix.size # 12 # View number of dimensions matrix.ndim # 2
nilq/baby-python
python
from django.core.cache import cache from django.test import TestCase, override_settings from django.urls import reverse from posts.models import User, Post, Group, Follow class TestPostCreation(TestCase): """Test for proper post creation and protection from anons""" def setUp(self): self.text = 'test_text' self.user = User.objects.create_user(username='testuser', password=12345) def test_auth_user_post_creation(self): # Login into our user and check for redirection. self.client.login(username=self.user.username, password=12345) response = self.client.post(reverse('new_post'), {'text': self.text}) self.assertEqual(response.status_code, 302) # Test that the text is equal post = Post.objects.first() self.assertEqual(post.text, self.text) def test_anon_post_creation_redirect(self): # Test, if anon is able to retrieve the new_post page response = self.client.get(reverse('new_post')) self.assertRedirects(response=response, expected_url='/auth/login?next=/new/', target_status_code=301) def test_anon_post_creation_post_request(self): # Test, if anon is able to create a post through a POST request. self.client.post(reverse('new_post'), {'text': self.text}) post_count = Post.objects.filter(text=self.text).count() self.assertEqual(post_count, 0) class TestPostRender(TestCase): """Test for proper post's rendering.""" def setUp(self): self.user = User.objects.create_user(username='testuser', password=12345) self.text = 'test_text' self.post = Post.objects.create(text=self.text, author=self.user) def test_profile(self): # Profile test response = self.client.get( reverse('profile', kwargs={'username': self.user.username})) self.assertContains(response, self.text) def test_index(self): cache.clear() # Index page test response = self.client.get(reverse('index')) self.assertContains(response, self.text) def test_direct_post_view(self): # Direct post's page test response = self.client.get( reverse('post_view', kwargs={'username': 'testuser', 'post_id': self.post.pk})) self.assertContains(response, self.text) class TestPostEdit(TestCase): """Test for proper post editing.""" def setUp(self): self.user = User.objects.create_user(username='testuser', password=12345) self.text = 'test_text' self.post = Post.objects.create(text=self.text, author=self.user) self.text_edited = 'test_text_edit' def test_post_edit(self): self.client.login(username=self.user.username, password=12345) # Post editing self.client.post(reverse('post_edit', kwargs={'username': self.user.username, 'post_id': self.post.pk}), {'text': self.text_edited}) # Test that no unwanted entities got created and contents are ok post_edited = Post.objects.first() post_count = Post.objects.all().count() self.assertEqual(self.post, post_edited) self.assertEqual(post_edited.text, self.text_edited) self.assertEqual(post_count, 1) @override_settings(CACHES={ 'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}}) class TestEditedPostRender(TestCase): """Test for rendering edited posts.""" def setUp(self): self.user = User.objects.create_user(username='testuser', password=12345) self.text = 'test_text' self.post = Post.objects.create(text=self.text, author=self.user) self.text_edited = 'test_text_edit' def test_post_render_all_pages(self): # Post editing self.client.login(username=self.user.username, password=12345) self.client.post(reverse('post_edit', kwargs={'username': self.user.username, 'post_id': self.post.pk}), {'text': self.text_edited}) # Test for rendering response = self.client.get( reverse('profile', kwargs={'username': 
self.user.username})) self.assertContains(response, self.text_edited) response = self.client.get(reverse('index')) self.assertContains(response, self.text_edited) response = self.client.get(reverse( 'post_view', kwargs={ 'username': self.user.username, 'post_id': Post.objects.first().pk}) ) self.assertContains(response, self.text_edited) class TestHandlers(TestCase): """Test for custom error handlers""" def test_404(self): response = self.client.get('/test_non_existing_url_qweqwe/') self.assertEqual(response.status_code, 404) class TestImageRender(TestCase): """Test for image handling, and rendering looking for <img tag in a response.""" def setUp(self): self.tag = '<img' self.user = User.objects.create_user(username='testuser', password=12345) self.text = 'test_text' self.post = Post.objects.create( text=self.text, author=self.user, image='posts/test_image/Test_image.jpg' ) def test_direct_post_image_render(self): response = self.client.get( reverse('post_view', kwargs={'username': self.user.username, 'post_id': self.post.pk})) self.assertContains(response, self.tag) def test_profile_post_image_render(self): response = self.client.get( reverse('profile', kwargs={'username': self.user.username})) self.assertContains(response, self.tag) def test_group_post_Image_Render(self): # Creating a new group and assigning it to the existing test post self.group = Group.objects.create(title='Test group', slug='test-group', description='Test group description') self.post.group_id = self.group.pk self.post.save() response = self.client.get( reverse('group_posts', kwargs={'slug': self.group.slug})) self.assertContains(response, self.tag) class TestImageFormProtection(TestCase): """Test for image form protection""" def setUp(self): self.user = User.objects.create_user(username='testuser', password=12345) self.client.force_login(self.user) self.post = Post.objects.create(text='test_text', author=self.user) self.image_path = 'media/posts/test_image/Test_image.jpg' self.non_image_path = 'posts/tests.py' self.error_message = f'Загрузите правильное изображение. Фа\ йл, который вы загрузили, поврежден или не является изображением.' def test_correct_image_form_protection(self): with open(self.image_path, 'rb') as img: self.client.post(reverse('post_edit', kwargs={ 'username': self.user.username, 'post_id': self.post.pk}), {'image': img, 'text': 'edited text with an image'}) post = Post.objects.first() self.assertIsNotNone(post.image) def test_incorrect_image_form_protection(self): with open(self.non_image_path, 'rb') as non_img: response = self.client.post(reverse( 'post_edit', kwargs={ 'username': self.user.username, 'post_id': self.post.pk}), {'image': non_img, 'text': 'edited text with wrong file '} ) self.assertFormError(response, 'form', 'image', self.error_message) class TestCache(TestCase): """Test for caching""" def setUp(self): self.user = User.objects.create_user(username='testuser', password=12345) self.client.force_login(self.user) self.text = 'test_text' def test_index_cache(self): # Create a cached page and check that there's no new post yet. self.client.get(reverse('index')) self.client.post(reverse('new_post'), {'text': self.text}) response = self.client.get(reverse('index')) self.assertNotContains(response, self.text) class TestFollowerSystem(TestCase): """Test for follower system. 
Test for follow and unfollow, and proper construction of follower-index page""" def setUp(self): self.user = User.objects.create_user(username='testuser', password=12345) self.user_to_follow = User.objects.create_user( username='test_user_to_follow', password=12345) self.client.force_login(self.user) self.text = 'test_text' self.post = Post.objects.create( text=self.text, author=self.user_to_follow) def test_auth_user_follow_follow(self): response = self.client.get( reverse('profile_follow', kwargs={'username': self.user_to_follow.username})) self.assertIsNotNone(Follow.objects.first()) def test_auth_user_follow_unfollow(self): response = self.client.get( reverse('profile_unfollow', kwargs={'username': self.user_to_follow.username})) self.assertIsNone(Follow.objects.first()) def test_follower_index(self): self.client.get(reverse('profile_follow', kwargs={ 'username': self.user_to_follow.username})) response = self.client.get(reverse('follow_index')) self.assertContains(response, self.text) @override_settings(CACHES={ 'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}}) def test_not_follower_index(self): response = self.client.get(reverse('follow_index')) self.assertNotContains(response, self.text) class TestCommentSystem(TestCase): """Test for proper commenting. Test if anon and non-anon can or cannot comment""" def setUp(self): self.user = User.objects.create_user(username='testuser', password=12345) self.text = 'test_text' self.post = Post.objects.create( text=self.text, author=self.user) self.commenting_user = User.objects.create_user( username='commenting_user', password=12345) self.comment_text = 'test_comment' def test_auth_user_commenting(self): self.client.force_login(self.commenting_user) response = self.client.post( reverse('add_comment', kwargs={'username': self.user.username, 'post_id': self.post.pk}), {'text': self.comment_text}, follow=True) self.assertContains(response, self.comment_text) def test_anon_user_commenting(self): """Anons should not be able to comment, make a POST request without logging""" response = self.client.post( reverse('add_comment', kwargs={'username': self.user.username, 'post_id': self.post.pk}), {'text': self.comment_text}, follow=True) self.assertNotContains(response, self.comment_text)
nilq/baby-python
python
import unittest from entity_embeddings.util import processor_utils class TestProcessorUtils(unittest.TestCase): def test_get_invalid_target_processor(self): self.assertRaises(ValueError, processor_utils.get_target_processor, 1000)
nilq/baby-python
python
import torch import numpy as np import os from datasets.base_dataset import BaseDataset from models.base_model import Model from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from utils.metrics import compute_chamfer_l1 from utils.util import quantize, downsample class AutoencoderDataset(BaseDataset): def __init__(self, config: dict, mode: str): BaseDataset.__init__(self, config, mode) self.z_dim = config['z_dim'] self.implicit_rep = config['implicit_rep'] self.voxel_size = config['voxel_size'] self.implicit_input_cnt = config['implicit_input_cnt'] self.query_cnt = config['query_cnt'] self.max_dist = config['max_dist'] def convert_rep(self, signed_rep: torch.Tensor): """ :param signed_rep: torch.tensor of N Signed representation of the implicit field :return: rep: torch.tensor of N Converted representation """ if self.implicit_rep == 'sdf': return signed_rep elif self.implicit_rep == 'udf': return torch.abs(signed_rep) elif self.implicit_rep == 'occ': return (signed_rep > 0.).float() else: raise ValueError('representation {} not allowed'.format(self.implicit_rep)) class AutoencoderShapenetDataset(AutoencoderDataset): name = 'cgca_autoencoder_shapenet' def __init__(self, config: dict, mode: str): AutoencoderDataset.__init__(self, config, mode) self.obj_class = config['obj_class'] self.summary_name = self.obj_class self.surface_cnt = config['surface_cnt'] self.query_dist_filter = config['query_dist_filter_rate'] * self.max_dist if mode == 'train': self.data_root = os.path.join( config['data_root'], self.obj_class, 'train' ) data_list_file_name = 'train.txt' elif mode == 'val' or mode == 'test': self.data_root = os.path.join( config['data_root'], self.obj_class, 'test' ) data_list_file_name = 'test.txt' else: raise ValueError() data_list_file_path = os.path.join( config['data_root'], self.obj_class, data_list_file_name ) with open(data_list_file_path, 'r') as f: self.data_list = f.read().splitlines() self.data_list = sorted([ x[:-1] if x[-1] == '\n' else x for x in self.data_list ]) if (mode == 'val') and (config['eval_size'] is not None): # fix vis_indices eval_size = config['eval_size'] if isinstance(eval_size, int): val_indices = torch.linspace(0, len(self.data_list) - 1, eval_size).int().tolist() self.data_list = [self.data_list[i] for i in val_indices] def __getitem__(self, idx): if self.config['overfit_one_ex'] is not None: idx = self.config['overfit_one_ex'] data_name = self.data_list[idx] data_path = os.path.join(self.data_root, data_name + '.npz') data = np.load(data_path) surface = downsample(torch.tensor(data['surface']), self.surface_cnt) sdf_pos = data['sdf_pos'] sdf_pos = torch.tensor(sdf_pos[~np.isnan(sdf_pos).any(axis=1)]) sdf_neg = data['sdf_neg'] sdf_neg = torch.tensor(sdf_neg[~np.isnan(sdf_neg).any(axis=1)]) sdf = torch.cat([sdf_pos, sdf_neg], dim=0) sdf = sdf[torch.randperm(sdf.shape[0]), :] implicit_field = sdf[torch.abs(sdf[:, 3]) < self.voxel_size] implicit_field = downsample(implicit_field, self.implicit_input_cnt) query = sdf[torch.abs(sdf[:, 3]) < self.query_dist_filter] query = downsample(query, self.query_cnt) # translate if self.mode == 'train': translation = 4 * torch.rand([1, 4]) * self.voxel_size translation[0, 3] = 0. 
else: translation = torch.zeros([1, 4]) surface = surface + translation[:, :3] query = query + translation implicit_field = implicit_field + translation # normalize surface = quantize(surface, self.voxel_size) query = query / self.voxel_size query_coord, query_val = query.split(3, 1) implicit_field = implicit_field / self.voxel_size query_val = query_val.view(-1) query_val = self.convert_rep(query_val) implicit_field[:, 3] = self.convert_rep(implicit_field[:, 3]) return { 'surface_voxel': surface, # torch tensor of N1 x 3 'implicit_field': implicit_field, # torch tensor of N2 x 4 'query_coord': query_coord, # torch tensor of N3 x 3 'query_val': query_val, # torch tensor of N3 'translation': translation, # torch tensor of 1 x 4 'file_name': data_name, 'path': data_path, } def __len__(self): return len(self.data_list) def test(self, model: Model, writer: SummaryWriter, step): training = model.training model.eval() # collect testset test_sample_num = self.config['test_sample_num'] surfaces = {} for file_name in self.data_list: data_path = os.path.join(self.data_root, file_name + '.npz') data = np.load(data_path) surfaces[file_name] = torch.tensor( data['surface'][:test_sample_num] ).float() print('Collected {} complete shapes'.format(len(surfaces))) data_loader = DataLoader( self, batch_size=self.config['test_batch_size'], num_workers=self.config['num_workers'], collate_fn=self.collate_fn, drop_last=False, shuffle=False ) test_chamfer_l1 = [] for test_step, data in tqdm(enumerate(data_loader)): file_names = data['file_name'] gts = [surfaces[file_name].to(self.device) for file_name in file_names] pred_pcs = model.get_pointcloud(data, step) for batch_idx, pred_pc in enumerate(pred_pcs): pred_coords_down = torch.stack(pred_pc, dim=0).to(self.device) chamfer_l1s = compute_chamfer_l1(pred_coords_down, gts[batch_idx]) test_chamfer_l1.append(chamfer_l1s[0]) chamfer_l1 = np.array(test_chamfer_l1).mean() print('chamfer_l1: {}'.format(chamfer_l1)) # write to tensorboard model.scalar_summaries['metrics/chamfer_l1'] += [chamfer_l1] model.write_dict_summaries(step) model.train(training)
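
# Hedged illustration (not part of the original file): a tiny check of the three implicit
# representations handled by AutoencoderDataset.convert_rep(): 'sdf' keeps the signed distance,
# 'udf' takes its absolute value, and 'occ' thresholds the sign into a 0/1 occupancy value.
# The sample distances below are made up for demonstration.
if __name__ == "__main__":
    signed = torch.tensor([-0.8, -0.1, 0.0, 0.2, 0.9])
    sdf = signed                      # 'sdf': unchanged
    udf = torch.abs(signed)           # 'udf': unsigned distance
    occ = (signed > 0.).float()       # 'occ': occupancy indicator
    print(sdf, udf, occ, sep="\n")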
nilq/baby-python
python
""" factoidbot.py - A plugin for remembering facts. Copyright (C) 2007 Kevin Smith SleekBot is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. SleekBot is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this software; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ import logging import pickle from sleekbot.commandbot import botcmd from sleekbot.plugbot import BotPlugin class FactStore(object): """ Storage for facts """ def __init__(self): self.null = None self.data = {} self.loaddefault() def list_terms(self): return self.data.keys() def add(self, term, fact): self.data[term.lower()] = fact self.savedefault() def get(self, term): if term.lower() in self.data: return self.data[term.lower()] return "No facts known about " + term def delete(self, term): if term.lower() in self.data: del self.data[term.lower()] self.savedefault() def loaddefault(self): self.load("factoids.dat") def savedefault(self): self.save("factoids.dat") def load(self, filename): try: f = open(filename, 'rb') except: logging.warning("Error loading factoids. Cannot open fact file: %s", filename) return self.data = pickle.load(f) f.close() def save(self, filename): try: f = open(filename, 'wb') except IOError: logging.warning("Error saving factoids. Cannot open fact file: %s", filename) return pickle.dump(self.data, f) f.close() class Factoid(BotPlugin): """A plugin to remember facts.""" def _on_register(self): self.factstore = FactStore() @botcmd(name='fact', usage='fact [topic]') def handle_fact(self, command, args, msg): """Returns a fact""" subcommand = None term = None fact = None if args.count(" ") > 1: [subcommand, term, fact] = args.split(" ", 2) elif args.count(" ") > 0: [subcommand, term] = args.split(" ", 1) else: subcommand = args admin_commands = ['list', 'add', 'delete'] #non-admin commands if subcommand not in admin_commands: response = "facts for " + args + "\n" + args + ": " + \ self.factstore.get(args) return response #admin commands if "list" == subcommand: if not self.bot.msg_from_admin(msg): return "You do not have access to this function" terms = self.factstore.list_terms() response = "I know about the following topics:\n" for term in terms: response = response + "\t" + term response = response + "." elif "add" == subcommand: if not self.bot.msg_from_admin(msg): response = "You do not have access to this function" elif term != None and fact != None: self.factstore.add(term, fact) response = "Fact added" else: response = "To add a fact, both a topic and " + \ "description are needed." elif "delete" == subcommand: if not self.bot.msg_from_admin(msg): response = "You do not have access to this function" else: self.factstore.delete(term) response = "Deleted (if found)" logging.debug("handle_fact done: %s" % response) return response
nilq/baby-python
python
from django.apps import AppConfig from django.utils.translation import gettext_lazy as _ class FilesConfig(AppConfig): """Application config for files.""" name = "apps.files" verbose_name = _("Files") label = "files"
nilq/baby-python
python
import numpy as np import cv2 import matplotlib.pyplot as plt import copy import argparse import os def histogram(image): # determine the normalized histogram m, n = image.shape hist = [0.0] * 256 for i in range(m): for j in range(n): #for every intensity add the count hist[image[i, j]] += 1 return np.array(hist)/(m*n) def cumulativeSum(hist): # calculate the cumulative sum return [sum(hist[:i+1]) for i in range(len(hist))] def histogramEqualization(image): #calculate Histogram hist = histogram(image) #find the cdf function cdf = np.array(cumulativeSum(hist)) #multiply cdf with 255 transfer = np.uint8(255 * cdf) k, l = image.shape final = np.zeros_like(image) # construct the final histogram equalization image for i in range(0, k): for j in range(0, l): final[i, j] = transfer[image[i, j]] return final def gamma_correction(img,gamma): gamma = 1/gamma lT =[] for i in np.arange(0,256).astype(np.uint8): lT.append(np.uint8(((i/255)**gamma)*255)) lookup = np.array(lT) #Creating the lookup table to find values corrected = cv2.LUT(img,lookup) return corrected def main(args): video = cv2.VideoWriter('Night_Drive_Correction.avi',cv2.VideoWriter_fourcc(*'XVID'), 20,(1024,600)) cap = cv2.VideoCapture(args['file']) method = args['method'] while (cap.isOpened()): ret, frame = cap.read() if not ret: break frame = cv2.resize(frame, (1024,600)) #split in b,g,r b,g,r= cv2.split(frame) if (method == 'histogram'): #compute histogram equalization for each channel b1 = histogramEqualization(b) g1 = histogramEqualization(g) r1 = histogramEqualization(r) #merge the channels final = cv2.merge((b1,g1,r1)) elif (method == 'gamma'): final = gamma_correction(frame, 1.8) else: print('invalid method ; exit') return cv2.imshow('Final', final) video.write(final) if cv2.waitKey(25) & 0XFF == ord('q'): break cv2.destroyAllWindows() video.release() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-method", "--method", required=True, help="Input: histogram or gamma", type=str) parser.add_argument("-path", "--file", required=False, help="video path", default='Night Drive - 2689.mp4', type=str) args = vars(parser.parse_args()) if (not os.path.exists(args['file'])): print('File does not exist. Re run with correct path or place file in current directory and run') exit() main(args)
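
# Hedged illustration (not part of the original file): a numpy-only sketch of the lookup table
# built in gamma_correction() above. Each 8-bit intensity i is remapped to
# 255 * (i / 255) ** (1 / gamma), so a gamma above 1 brightens dark regions, which is the point
# of correcting the night-drive footage. Values shown are examples; 1.8 matches main().
#
#   gamma = 1.8
#   intensities = np.arange(256, dtype=np.float64)
#   lookup_table = np.uint8(((intensities / 255.0) ** (1.0 / gamma)) * 255)
#   # a dark pixel of value 30 maps to roughly 77, i.e. noticeably brighter
#   print(30, "->", int(lookup_table[30]))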
nilq/baby-python
python
from json import JSONDecodeError from typing import Dict import pytest from common.serializers.serialization import node_status_db_serializer from plenum.common.constants import LAST_SENT_PRE_PREPARE from plenum.common.util import getNoInstances from plenum.test.test_node import ensureElectionsDone, getPrimaryReplica from plenum.test.view_change.helper import ensure_view_change nodeCount = 7 def pack_pp_key(value: Dict) -> bytes: return node_status_db_serializer.serialize(value) def unpack_pp_key(value: bytes) -> Dict: return node_status_db_serializer.deserialize(value) @pytest.fixture(scope="module") def view_no_set(looper, txnPoolNodeSet): for _ in range(2): ensure_view_change(looper, txnPoolNodeSet) ensureElectionsDone(looper, txnPoolNodeSet) assert txnPoolNodeSet[0].viewNo == 2 @pytest.fixture(scope="function") def setup(txnPoolNodeSet): for node in txnPoolNodeSet: if LAST_SENT_PRE_PREPARE in node.nodeStatusDB: node.nodeStatusDB.remove(LAST_SENT_PRE_PREPARE) for replica in node.replicas.values(): replica.h = 0 replica._lastPrePrepareSeqNo = 0 replica.last_ordered_3pc = (replica.viewNo, 0) @pytest.fixture(scope="function") def replica_with_unknown_primary_status(txnPoolNodeSet, setup): replica = txnPoolNodeSet[0].replicas[1] old_primary_name = replica._primaryName replica._primaryName = None yield replica replica._primaryName = old_primary_name def test_store_last_sent_pp_seq_no_if_some_stored( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, pack_pp_key({1: [2, 5]})) node.last_sent_pp_store_helper.store_last_sent_pp_seq_no(inst_id=1, pp_seq_no=6) assert unpack_pp_key(node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE)) == \ {'1': [2, 6]} def test_store_last_sent_pp_seq_no_if_none_stored( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.last_sent_pp_store_helper.store_last_sent_pp_seq_no(inst_id=1, pp_seq_no=6) assert unpack_pp_key(node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE)) == \ {'1': [2, 6]} def test_erase_last_sent_pp_seq_no_if_some_stored( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, pack_pp_key({'1': [2, 5]})) node.last_sent_pp_store_helper.erase_last_sent_pp_seq_no() assert LAST_SENT_PRE_PREPARE not in node.nodeStatusDB def test_erase_last_sent_pp_seq_no_if_none_stored( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.last_sent_pp_store_helper.erase_last_sent_pp_seq_no() assert LAST_SENT_PRE_PREPARE not in node.nodeStatusDB def test_try_restore_last_sent_pp_seq_no_if_relevant_stored( tconf, txnPoolNodeSet, view_no_set, setup): replica = getPrimaryReplica(txnPoolNodeSet, instId=1) node = replica.node assert node.viewNo == 2 node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, pack_pp_key({1: [2, 5]})) node.last_sent_pp_store_helper.try_restore_last_sent_pp_seq_no() assert replica.lastPrePrepareSeqNo == 5 assert replica.last_ordered_3pc == (2, 5) assert replica.h == 5 assert replica.H == 5 + tconf.LOG_SIZE def test_try_restore_last_sent_pp_seq_no_if_irrelevant_stored( tconf, txnPoolNodeSet, view_no_set, setup): replica = getPrimaryReplica(txnPoolNodeSet, instId=1) node = replica.node assert node.viewNo == 2 node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, pack_pp_key({2: [1, 9]})) node.last_sent_pp_store_helper.try_restore_last_sent_pp_seq_no() assert replica.lastPrePrepareSeqNo == 0 assert replica.last_ordered_3pc == (2, 0) assert replica.h == 0 assert replica.H == 0 + tconf.LOG_SIZE def test_try_restore_last_sent_pp_seq_no_if_none_stored( 
tconf, txnPoolNodeSet, view_no_set, setup): replica = getPrimaryReplica(txnPoolNodeSet, instId=1) node = replica.node assert node.viewNo == 2 node.last_sent_pp_store_helper.try_restore_last_sent_pp_seq_no() assert replica.lastPrePrepareSeqNo == 0 assert replica.last_ordered_3pc == (2, 0) assert replica.h == 0 assert replica.H == 0 + tconf.LOG_SIZE def test_try_restore_last_sent_pp_seq_no_if_invalid_stored( tconf, txnPoolNodeSet, view_no_set, setup): replica = getPrimaryReplica(txnPoolNodeSet, instId=1) node = replica.node assert node.viewNo == 2 node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, pack_pp_key({1: [2, 5]})[:-1]) node.last_sent_pp_store_helper.try_restore_last_sent_pp_seq_no() assert replica.lastPrePrepareSeqNo == 0 assert replica.last_ordered_3pc == (2, 0) assert replica.h == 0 assert replica.H == 0 + tconf.LOG_SIZE def test_cannot_restore_last_sent_pp_seq_no_if_another_view( txnPoolNodeSet, view_no_set, setup): replica = getPrimaryReplica(txnPoolNodeSet, instId=1) node = replica.node assert node.viewNo == 2 can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no( 1, [1, 5]) assert can is False def test_cannot_restore_last_sent_pp_seq_no_if_replica_absent( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] assert node.viewNo == 2 absent_replica_index = getNoInstances(nodeCount) can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no( absent_replica_index, [2, 5]) assert can is False def test_cannot_restore_last_sent_pp_seq_no_if_replica_status_unknown( view_no_set, setup, replica_with_unknown_primary_status): replica = replica_with_unknown_primary_status assert replica.instId == 1 node = replica.node assert node.viewNo == 2 can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no( 1, [2, 5]) assert can is False def test_cannot_restore_last_sent_pp_seq_no_if_replica_is_master( txnPoolNodeSet, view_no_set, setup): replica = getPrimaryReplica(txnPoolNodeSet, instId=0) node = replica.node assert node.viewNo == 2 can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no( 0, [2, 5]) assert can is False def test_can_restore_last_sent_pp_seq_no_if_relevant( txnPoolNodeSet, view_no_set, setup): replica = getPrimaryReplica(txnPoolNodeSet, instId=1) node = replica.node assert node.viewNo == 2 can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no( 1, [2, 5]) assert can is True def test_restore_last_sent_pp_seq_no( tconf, txnPoolNodeSet, view_no_set, setup): replica = getPrimaryReplica(txnPoolNodeSet, instId=1) node = replica.node assert node.viewNo == 2 node.last_sent_pp_store_helper._restore_last_stored( 1, [2, 5]) for replica in node.replicas.values(): if replica.instId == 1: assert replica.lastPrePrepareSeqNo == 5 assert replica.last_ordered_3pc == (2, 5) assert replica.h == 5 assert replica.H == 5 + tconf.LOG_SIZE else: assert replica.lastPrePrepareSeqNo == 0 assert replica.last_ordered_3pc == (2, 0) assert replica.h == 0 assert replica.H == tconf.LOG_SIZE def test_can_load_absent_last_sent_pre_preapre_key( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() assert pp_key is None def test_cannot_load_last_sent_pre_preapre_key_if_empty_value( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, b'') with pytest.raises(JSONDecodeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_not_valid_dict( txnPoolNodeSet, 
view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize({1: [2, 5]})[:-1]) with pytest.raises(JSONDecodeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_none( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize(None)) with pytest.raises(TypeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_dict_has_no_entries( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize({})) with pytest.raises(TypeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_inst_id_missed( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize([2, 5])) with pytest.raises(TypeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_view_no_missed( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize([1, 5])) with pytest.raises(TypeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_pp_seq_no_missed( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize([1, 2])) with pytest.raises(TypeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_json_has_extra_fields( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize({'1': [2, 5, 1]})) with pytest.raises(TypeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_inst_id_is_not_int( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize({None: [2, 5]})) with pytest.raises(TypeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_view_no_is_not_int( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize({1: ['', 5]})) with pytest.raises(TypeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_cannot_load_last_sent_pre_preapre_key_if_pp_seq_not_int( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize({'1': [2, 5.0]})) with pytest.raises(TypeError): pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() def test_can_load_valid_last_sent_pre_preapre_key_if_valid( txnPoolNodeSet, view_no_set, setup): node = txnPoolNodeSet[0] node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, node_status_db_serializer.serialize({'1': [2, 5]})) pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key() assert pp_key == {'1': [2, 5]}
nilq/baby-python
python
__author__ = 'etuka' __date__ = '22 March 2019' import os import csv import ntpath import pandas as pd from django.conf import settings from dal.copo_da import Sample, Description from django.core.files.storage import FileSystemStorage from web.apps.web_copo.lookup.copo_enums import Loglvl, Logtype lg = settings.LOGGER """ class handles the ingestion of csv data to supply metadata for description """ class IngestData: def __init__(self, description_token=str(), profile_id=str()): self.description_token = description_token self.profile_id = self.set_profile_id(profile_id) self.schema = Sample().get_schema().get("schema_dict") def set_profile_id(self, profile_id): p_id = profile_id if not p_id and self.description_token: description = Description().GET(self.description_token) p_id = description.get("profile_id", str()) return p_id def get_object_path(self): """ function returns directory to description data :return: """ object_path = os.path.join(settings.MEDIA_ROOT, 'description_data', self.description_token) return object_path def get_object_file_path(self): """ function returns file path to description data :return: """ file_path = os.path.join(self.get_object_path(), 'uploaded.csv') return file_path def save_uploaded_csv(self, csv_file): """ function saves the passed file to the file system :param csv_file: :return: boolean - indicating success or otherwise of file save """ result = dict(status='success', message='') if csv_file: csv_file.name = ntpath.basename(self.get_object_file_path()) # removed previous file if os.path.exists(self.get_object_file_path()): os.remove(self.get_object_file_path()) fs = FileSystemStorage(location=self.get_object_path()) try: fs.save(csv_file.name, csv_file) except Exception as e: message = 'Error Ingesting data: ' + str(e) print(message) lg.log(message, level=Loglvl.ERROR, type=Logtype.FILE) raise return result def align_columns(self): """ function compares ingested columns to generated columns - they should align :return: """ result = dict(status='success', message='') if not os.path.exists(self.get_object_file_path()): result["status"] = "error" result["message"] = "Couldn't locate uploaded CSV. Try re-uploading." return result with open(self.get_object_file_path(), 'r') as fobject: ingested_columns = (next(csv.reader(fobject))) description = Description().GET(self.description_token) stored_columns = description.get("meta", dict()).get("generated_columns", list()) ingested_columns = [x.strip().lower() for x in ingested_columns if x.strip()] stored_columns = [x['title'].strip().lower() for x in stored_columns if x['title'].strip()] if not ingested_columns == stored_columns: result["status"] = "error" result["message"] = "Headers from uploaded CSV do not match displayed columns." return result return result def align_rows(self): """ function compares ingested sample names to generated names - they should align :return: """ result = dict(status='success', message='') ingested_df = pd.read_csv(self.get_object_file_path()) ingested_df.columns = [x.lower() for x in list(ingested_df.columns)] ingested_names = list(ingested_df.name) description = Description().GET(self.description_token) stored_names = description.get("meta", dict()).get("generated_names", str()).split(",") ingested_names.sort() stored_names.sort() if not ingested_names == stored_names: result["status"] = "error" result["message"] = "Sample names from uploaded CSV do not match displayed names." 
            return result

        return result

    def manage_process(self, csv_file):
        """
        function orchestrates the ingestion of metadata to description metadata
        :param csv_file: metadata file to be ingested
        :return: returns updated dataset
        """

        # save uploaded csv
        result = self.save_uploaded_csv(csv_file=csv_file)
        if result["status"] == "error":
            return result

        # match ingested columns to rendered columns
        result = self.align_columns()
        if result["status"] == "error":
            return result

        # match ingested sample names to rendered names
        result = self.align_rows()
        if result["status"] == "error":
            return result

        # process the ingested data (process_data is left as a stub below)
        self.process_data()

        return result

    def process_data(self):
        """
        having passed preliminary tests, function processes ingested data
        :return:
        """
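# Minimal usage sketch (assumption: this runs inside the COPO Django app with a valid
# description token and an uploaded CSV file object from the request; the variable
# names below are illustrative only, not part of this module).
#
# ingest = IngestData(description_token=token, profile_id=profile_id)
# outcome = ingest.manage_process(csv_file=request.FILES["file"])
# if outcome["status"] == "error":
#     ...  # report outcome["message"] back to the user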
nilq/baby-python
python
from FeatureModel import pointPillarFeatureNet
from ModelBackbone import pointPillarModel
from ModelBackbone import model


class TrainingPipeline:
    def __init__(self, trainPillars, trainLabels, testPillars, testLabels):
        self.trainPillars = trainPillars
        self.trainLabels = trainLabels
        self.testPillars = testPillars
        self.testLabels = testLabels

    def trainModel(self):
        '''ppFeatureNet = pointPillarFeatureNet.PointPillarFeatureNet()
        ppFeatures, input_pillars, input_indices = ppFeatureNet.feedForward()
        ppModel = pointPillarModel.PointPillarModel("./myModel.h5py")
        ppModel.createModelBackbone(ppFeatures, self.trainPillars, self.trainLabels,
                                    self.testPillars, self.testLabels,
                                    input_pillars, input_indices)
        '''
        mod = model.Model()
        return mod.train(self.trainPillars, self.trainLabels, self.testPillars, self.testLabels)
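# Hedged usage sketch (assumption: the pillar/label arrays are produced by the
# project's own preprocessing step; the variable names below are illustrative only).
#
# pipeline = TrainingPipeline(train_pillars, train_labels, test_pillars, test_labels)
# trained_model = pipeline.trainModel()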
nilq/baby-python
python
import markdown

from atomicpress.app import app
from atomicpress.models import Post, PostStatus, PostType
from flask import send_from_directory
from sqlalchemy import desc
from werkzeug.contrib.atom import AtomFeed
from flask import request


@app.route("/uploads/<filename>")
def uploaded_file(filename):
    return send_from_directory(app.config["UPLOADS_PATH"], filename)


@app.route("/feed/atom/")
def feed_latest_posts():
    feed_url = request.url
    url_root = request.url_root.strip("/")

    if "SITE_URL" in app.config:
        url_root = app.config["SITE_URL"]
        feed_url = "%s%s" % (url_root, request.path)

    feed = AtomFeed("Recent posts", feed_url=feed_url, url=url_root)

    posts = Post.query.order_by(desc(Post.date)).\
        filter(Post.status == PostStatus.PUBLISH).\
        filter(Post.type == PostType.POST)

    for post in posts:
        content = post.content

        if post.markdown:
            content = markdown.markdown(content)

        if post.author:
            author_name = post.author.nicename
        else:
            author_name = "Empty"

        feed.add(post.title, unicode(content),
                 content_type='html',
                 author=author_name,
                 url="%s/%s" % (url_root, post.name),
                 updated=post.date,
                 published=post.modified)

    return feed.get_response()
nilq/baby-python
python
#!/usr/bin/python import csv import os.path from collections import namedtuple import sn import os import sys,string import numpy as np import math import vcf import fnmatch #try: # file_map = sys.argv[1];dir_files_phenotype1 = sys.argv[2];dir_files_phenotype2 = sys.argv[3];outfilename = sys.argv[4] #except: # print "Usage:",sys.argv[0], "file.map dir_files_phenotype1 dir_files_phenotype2 outfile";sys.exit(1) file_map="/home/cristovao/Desktop/AUS_project/public_datas/opensnp_datadump.201303070733/phenotypes_201303070733.csv" folder="/home/cristovao/Desktop/AUS_project/public_datas/opensnp_datadump.201303070733" def get_dataset(): """return """ handle = csv.DictReader(open(file_map, "r"), #fieldnames=["user_id","date_of_birth","chrom_sex","Jewish Ancestry","Subjective dream intensity","Webbed toes","Dyslexia","Artistic ability","lips size","ethnicity","Acrophobia","Myers-Briggs Type Indicator","Irritable Bowel Syndrome","Diego Blood Group","Cholesterol","Moles raised","Autism","Interest in Spirituality and Mysticism","Physician-diagnosed celiac/coeliac disease","Hypertriglyceridemia","SAT Writing","Panic Disorder","Bone Mineral Density","Sexual Preferences","Energy Level","Faktor 5 Leiden (F5)","Age learned to read","ear proximity to head ","Atheism","Earwax type","ring finger longer than index finger","Eye with Blue Halo ","Beard Color","Birth year","Migraine frequency","Serotonin transporter","Sport interest","Number of toes","Number of wisdom teeth","Widow's Peak","natural skinny","Wake up preference","Lisp","Do you like the taste of hops?","Wanting to be immortal","Purposefulness ","Ambition","Do hops taste like soap?","ABH Blood Group (antigens) ","Fish Preference","Smell of coffee in urine","hair on fingers","Neanderthal","Are You The Advertising Phenotype?","(male) penis releases pre-cum when sexually aroused.","Morton's Toe","Sports interest","Does cilantro taste like soap to you?","Tongue roller","Enjoy watching TV","Aspirin Allergy","libido ","Blood type","First word","Enjoy using the Internet","mtDNA Haplogroup (PhyloTree)","Like the taste of Stevia","Negative reaction to fluoroquinolone antibiotics","white skin","Fat-pad knee syndrome","Ability to Tan","Strabismus","Amblyopia","Autoimmune disorder","Y-DNA Haplogroup (ISOGG)","Asthma","Freckling","form of the nose","Ancestry","Metabolic Syndrome [MetS]","Enjoy riding a motorbike","Hair Color","Tea consumption","Height","Sex","Motion sickness","Cystic Fibrosis Like Disease","mouth size","Peanut butter preference","Sneezing induced by sexual ideation or orgasm?","Woolnerian Tip (Darwin's Tubercle)","SAT Math","prognathism","Taste of broccoli","Jogger","Phobia","Kell Blood Group (K/k antigens) ","Desmoid Tumor","SAT Verbal","Astigmatism","excessive daytime sleepiness","Enjoy driving a car","ABO Rh ","Kidd Blood Group","Sense of smell","apthous in mouth tendency","Allergic/bad reaction to fish oil supplements","Interested in news from real newspaper / news from the Internet","erectil disfunction ","Index Toe Longer than Big Toe","Hair Type","Penis Circumference at Glans","Penis Length","Intolerance: gluten, casein, soy","Weight","Short-sightedness (Myopia)","brown hair colour","SAT - when taken","Anorgasmia","Nicotine dependence","CMV serostatus","Musical Perfect Pitch","Rheumatoid Arthritis","(Male) Nipple's size","ADHD","Insect bites and stings","Colour Blindness","Lactose intolerance","Have ME/CFS","Atypical Sulfonomide Antibiotic Reaction","Cramps","Political Ideology","Handedness","cluster headache","Eye color","Social 
Level","Earlobe: Free or attached","Photic Sneeze Reflex (Photoptarmis)","Coffee consumption","Penicillin reaction","Do you have a parent who was diagnosed with Alzheimer's disease?","R1b1a2a1a1b","Good / poor eater as child","Abnormal Blood Pressure","Type II Diabetes","Migraine","Colon cancer ONLY FOR (rs3219489 GG)!","Ability to find a bug in openSNP","Eurogenes","head form","Cleverness","ENTP","Can you smell cut-grass?","Asparagus Metabolite Detection"], delimiter=";") return handle def get_user(pheno, variation): """Return list of the user with a specific variation """ dataset = get_dataset() user_list = [] for i in dataset: if i[pheno] == variation: user_list.append(i["user_id"]) dataset=[] return user_list def create_dir(user_list,variation): """Create a folder from a list of the user""" user_list=list(set(user_list)) print "total of the user", len(user_list), user_list files= os.listdir(folder) #variation="_".join(variation.split()) os.system("mkdir "+variation) n=0 for j in user_list: for i in files: if fnmatch.fnmatch(i, '*.txt'): u="user"+j+"_" if u in i: print i os.system("cp "+folder+"/"+i +" " +variation+"/") n=1+n print "total of the files copied", n #------------------ execution ------------------- "Eye color" fieldnames=open(file_map).readline().split(';') fieldnames.sort() print "\n\n--------------------------- fieldnames (Phenotypes)\n" for i in fieldnames: print i p=raw_input("\n--------------------------- Phenotype: ") variations_list=[] for i in get_dataset(): if not i[p] in variations_list: variations_list.append(i[p]) print i[p] v=raw_input("\n--------------------------- Variations: ") v=v.split(";") print "\n" os.system("mkdir "+"_".join(p.split())) for i in v: print "Variations: ", i l=get_user( p, i) variation="_".join(i.split()) create_dir(l,variation) os.system("mv "+ variation+" "+"_".join(p.split())) print "\n"
nilq/baby-python
python
""" This is a utility script for updating the spacy meta.json Sample call python --meta meta.json --augment metrics/dane_augmented_best_dacy_small_trf-0.1.0.json -- """ import json def main(meta_json, meta_augment_json, size, decimals=3): with open(meta_json) as f: meta = json.load(f) with open(meta_augment_json) as f: meta_augment = json.load(f) meta["email"] = "Kenneth.enevoldsen@cas.au.dk" meta["author"] = "Centre for Humanities Computing Aarhus" meta["url"] = "https://chcaa.io/#/" meta["license"] = "Apache-2.0 License" mdl_used = { "small": { "name": "Maltehb/-l-ctra-danish-electra-small-cased", "author": "Malte Højmark-Bertelsen", "url": "https://huggingface.co/Maltehb/-l-ctra-danish-electra-small-cased", "license": "CC BY 4.0", }, "medium": { "name": "Maltehb/danish-bert-botxo", "author": "BotXO.ai", "url": "https://huggingface.co/Maltehb/danish-bert-botxo", "license": "CC BY 4.0", }, "large": { "name": "xlm-roberta-large", "author": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, Veselin Stoyanov", "url": "https://huggingface.co/xlm-roberta-large", "license": "CC BY 4.0", }, } model = mdl_used[size] meta["sources"] = [ { "name": "UD Danish DDT v2.5", "url": "https://github.com/UniversalDependencies/UD_Danish-DDT", "license": "CC BY-SA 4.0", "author": "Johannsen, Anders; Mart\u00ednez Alonso, H\u00e9ctor; Plank, Barbara", }, { "name": "DaNE", "url": "https://github.com/alexandrainst/danlp/blob/master/docs/datasets.md#danish-dependency-treebank-dane", "license": "CC BY-SA 4.0", "author": "Rasmus Hvingelby, Amalie B. Pauli, Maria Barrett, Christina Rosted, Lasse M. Lidegaard, Anders S\u00f8gaard", }, model, ] meta["requirements"] = ["spacy-transformers>=1.0.3,<1.1.0"] meta[ "description" ] = f""" <a href="https://github.com/centre-for-humanities-computing/Dacy"><img src="https://centre-for-humanities-computing.github.io/DaCy/_static/icon.png" width="175" height="175" align="right" /></a> # DaCy {size} transformer DaCy is a Danish language processing framework with state-of-the-art pipelines as well as functionality for analysing Danish pipelines. DaCy's largest pipeline has achieved State-of-the-Art performance on Named entity recognition, part-of-speech tagging and dependency parsing for Danish on the DaNE dataset. Check out the [DaCy repository](https://github.com/centre-for-humanities-computing/DaCy) for material on how to use DaCy and reproduce the results. DaCy also contains guides on usage of the package as well as behavioural test for biases and robustness of Danish NLP pipelines. """ meta[ "notes" ] = """ ## Bias and Robustness Besides the validation done by SpaCy on the DaNE testset, DaCy also provides a series of augmentations to the DaNE test set to see how well the models deal with these types of augmentations. The can be seen as behavioural probes akinn to the NLP checklist. ### Deterministic Augmentations Deterministic augmentations are augmentation which always yield the same result. 
| Augmentation | Part-of-speech tagging (Accuracy) | Morphological tagging (Accuracy) | Dependency Parsing (UAS) | Dependency Parsing (LAS) | Sentence segmentation (F1) | Lemmatization (Accuracy) | Named entity recognition (F1) | | --- | --- | --- | --- | --- | --- | --- | --- | """ for aug, metrics in meta_augment.items(): if metrics["k"] == 1: pos = f'{round(metrics["mean"]["pos_acc"], decimals)}' morph = f'{round(metrics["mean"]["morph_acc"], decimals)}' dep_uas = f'{round(metrics["mean"]["dep_uas"], decimals)}' dep_las = f'{round(metrics["mean"]["dep_las"], decimals)}' sent_f = f'{round(metrics["mean"]["sents_f"], decimals)}' lemma = f'{round(metrics["mean"]["lemma_acc"], decimals)}' ents_f = f'{round(metrics["mean"]["ents_f"], decimals)}' meta[ "notes" ] += f"| {aug} | {pos} | {morph} | {dep_uas} | {dep_las} | {sent_f} | {lemma} | {ents_f} |\n" meta[ "notes" ] += """ ### Stochastic Augmentations Stochastic augmentations are augmentation which are repeated mulitple times to estimate the effect of the augmentation. | Augmentation | Part-of-speech tagging (Accuracy) | Morphological tagging (Accuracy) | Dependency Parsing (UAS) | Dependency Parsing (LAS) | Sentence segmentation (F1) | Lemmatization (Accuracy) | Named entity recognition (F1) | | --- | --- | --- | --- | --- | --- | --- | --- | """ for aug, metrics in meta_augment.items(): if metrics["k"] > 1: pos = f'{round(metrics["mean"]["pos_acc"], decimals)} ({round(metrics["std"]["pos_acc"], decimals)})' morph = f'{round(metrics["mean"]["morph_acc"], decimals)} ({round(metrics["std"]["pos_acc"], decimals)})' dep_uas = f'{round(metrics["mean"]["dep_uas"], decimals)} ({round(metrics["std"]["pos_acc"], decimals)})' dep_las = f'{round(metrics["mean"]["dep_las"], decimals)} ({round(metrics["std"]["pos_acc"], decimals)})' sent_f = f'{round(metrics["mean"]["sents_f"], decimals)} ({round(metrics["std"]["pos_acc"], decimals)})' lemma = f'{round(metrics["mean"]["lemma_acc"], decimals)} ({round(metrics["std"]["pos_acc"], decimals)})' ents_f = f'{round(metrics["mean"]["ents_f"], decimals)} ({round(metrics["std"]["pos_acc"], decimals)})' meta[ "notes" ] += f"| {aug} | {pos} | {morph} | {dep_uas} | {dep_las} | {sent_f} | {lemma} | {ents_f} |\n" meta["notes"] += create_description() meta[ "notes" ] += "\n\n### Hardware\nThis was run an trained on a Quadro RTX 8000 GPU." with open(f"template_meta_{size}.json", "w") as f: json.dump(meta, f) def create_description(): from augment import augmenters describtion = """ <details> <summary> Description of Augmenters </summary> """ describtion for aug, nam, k, desc in augmenters: describtion += f"\n\n**{nam}:**\n{desc}" describtion += "\n </details> \n <br /> \n" return describtion if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument( "--meta", type=str, help="the meta file you wish to update", required=True ) parser.add_argument( "--augment", type=str, help="the json file of the augmented resutls", required=True, ) parser.add_argument("--size", type=str, help="the model size", required=True) args = parser.parse_args() main(args.meta, args.augment, args.size)
nilq/baby-python
python
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import kaiming_init, normal_init from mmdet.ops import ConvModule from ..builder import build_loss from ..registry import HEADS @HEADS.register_module class GridHead(nn.Module): def __init__(self, grid_points=9, num_convs=8, roi_feat_size=14, in_channels=256, conv_kernel_size=3, point_feat_channels=64, deconv_kernel_size=4, class_agnostic=False, loss_grid=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15), conv_cfg=None, norm_cfg=dict(type='GN', num_groups=36)): super(GridHead, self).__init__() self.grid_points = grid_points self.num_convs = num_convs self.roi_feat_size = roi_feat_size self.in_channels = in_channels self.conv_kernel_size = conv_kernel_size self.point_feat_channels = point_feat_channels self.conv_out_channels = self.point_feat_channels * self.grid_points self.class_agnostic = class_agnostic self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN': assert self.conv_out_channels % norm_cfg['num_groups'] == 0 assert self.grid_points >= 4 self.grid_size = int(np.sqrt(self.grid_points)) if self.grid_size * self.grid_size != self.grid_points: raise ValueError('grid_points must be a square number') # the predicted heatmap is half of whole_map_size if not isinstance(self.roi_feat_size, int): raise ValueError('Only square RoIs are supporeted in Grid R-CNN') self.whole_map_size = self.roi_feat_size * 4 # compute point-wise sub-regions self.sub_regions = self.calc_sub_regions() self.convs = [] for i in range(self.num_convs): in_channels = ( self.in_channels if i == 0 else self.conv_out_channels) stride = 2 if i == 0 else 1 padding = (self.conv_kernel_size - 1) // 2 self.convs.append( ConvModule( in_channels, self.conv_out_channels, self.conv_kernel_size, stride=stride, padding=padding, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=True)) self.convs = nn.Sequential(*self.convs) self.deconv1 = nn.ConvTranspose2d( self.conv_out_channels, self.conv_out_channels, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points) self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels) self.deconv2 = nn.ConvTranspose2d( self.conv_out_channels, grid_points, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points) # find the 4-neighbor of each grid point self.neighbor_points = [] grid_size = self.grid_size for i in range(grid_size): # i-th column for j in range(grid_size): # j-th row neighbors = [] if i > 0: # left: (i - 1, j) neighbors.append((i - 1) * grid_size + j) if j > 0: # up: (i, j - 1) neighbors.append(i * grid_size + j - 1) if j < grid_size - 1: # down: (i, j + 1) neighbors.append(i * grid_size + j + 1) if i < grid_size - 1: # right: (i + 1, j) neighbors.append((i + 1) * grid_size + j) self.neighbor_points.append(tuple(neighbors)) # total edges in the grid self.num_edges = sum([len(p) for p in self.neighbor_points]) self.forder_trans = nn.ModuleList() # first-order feature transition self.sorder_trans = nn.ModuleList() # second-order feature transition for neighbors in self.neighbor_points: fo_trans = nn.ModuleList() so_trans = nn.ModuleList() for _ in range(len(neighbors)): # each transition module consists of a 5x5 depth-wise conv and # 1x1 conv. 
fo_trans.append( nn.Sequential( nn.Conv2d( self.point_feat_channels, self.point_feat_channels, 5, stride=1, padding=2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1))) so_trans.append( nn.Sequential( nn.Conv2d( self.point_feat_channels, self.point_feat_channels, 5, 1, 2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1))) self.forder_trans.append(fo_trans) self.sorder_trans.append(so_trans) self.loss_grid = build_loss(loss_grid) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): # TODO: compare mode = "fan_in" or "fan_out" kaiming_init(m) for m in self.modules(): if isinstance(m, nn.ConvTranspose2d): normal_init(m, std=0.001) nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01)) def forward(self, x): assert x.shape[-1] == x.shape[-2] == self.roi_feat_size # RoI feature transformation, downsample 2x x = self.convs(x) c = self.point_feat_channels # first-order fusion x_fo = [None for _ in range(self.grid_points)] for i, points in enumerate(self.neighbor_points): x_fo[i] = x[:, i * c:(i + 1) * c] for j, point_idx in enumerate(points): x_fo[i] = x_fo[i] + self.forder_trans[i][j]( x[:, point_idx * c:(point_idx + 1) * c]) # second-order fusion x_so = [None for _ in range(self.grid_points)] for i, points in enumerate(self.neighbor_points): x_so[i] = x[:, i * c:(i + 1) * c] for j, point_idx in enumerate(points): x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx]) # predicted heatmap with fused features x2 = torch.cat(x_so, dim=1) x2 = self.deconv1(x2) x2 = F.relu(self.norm1(x2), inplace=True) heatmap = self.deconv2(x2) # predicted heatmap with original features (applicable during training) if self.training: x1 = x x1 = self.deconv1(x1) x1 = F.relu(self.norm1(x1), inplace=True) heatmap_unfused = self.deconv2(x1) else: heatmap_unfused = heatmap return dict(fused=heatmap, unfused=heatmap_unfused) def calc_sub_regions(self): """Compute point specific representation regions. See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details. """ # to make it consistent with the original implementation, half_size # is computed as 2 * quarter_size, which is smaller half_size = self.whole_map_size // 4 * 2 sub_regions = [] for i in range(self.grid_points): x_idx = i // self.grid_size y_idx = i % self.grid_size if x_idx == 0: sub_x1 = 0 elif x_idx == self.grid_size - 1: sub_x1 = half_size else: ratio = x_idx / (self.grid_size - 1) - 0.25 sub_x1 = max(int(ratio * self.whole_map_size), 0) if y_idx == 0: sub_y1 = 0 elif y_idx == self.grid_size - 1: sub_y1 = half_size else: ratio = y_idx / (self.grid_size - 1) - 0.25 sub_y1 = max(int(ratio * self.whole_map_size), 0) sub_regions.append( (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size)) return sub_regions def get_target(self, sampling_results, rcnn_train_cfg): # mix all samples (across images) together. 
pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results], dim=0).cpu() pos_gt_bboxes = torch.cat( [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu() assert pos_bboxes.shape == pos_gt_bboxes.shape # expand pos_bboxes to 2x of original size x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1) pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1) num_rois = pos_bboxes.shape[0] map_size = self.whole_map_size # this is not the final target shape targets = torch.zeros((num_rois, self.grid_points, map_size, map_size), dtype=torch.float) # pre-compute interpolation factors for all grid points. # the first item is the factor of x-dim, and the second is y-dim. # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1) factors = [] for j in range(self.grid_points): x_idx = j // self.grid_size y_idx = j % self.grid_size factors.append((1 - x_idx / (self.grid_size - 1), 1 - y_idx / (self.grid_size - 1))) radius = rcnn_train_cfg.pos_radius radius2 = radius**2 for i in range(num_rois): # ignore small bboxes if (pos_bbox_ws[i] <= self.grid_size or pos_bbox_hs[i] <= self.grid_size): continue # for each grid point, mark a small circle as positive for j in range(self.grid_points): factor_x, factor_y = factors[j] gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + ( 1 - factor_x) * pos_gt_bboxes[i, 2] gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + ( 1 - factor_y) * pos_gt_bboxes[i, 3] cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] * map_size) cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] * map_size) for x in range(cx - radius, cx + radius + 1): for y in range(cy - radius, cy + radius + 1): if x >= 0 and x < map_size and y >= 0 and y < map_size: if (x - cx)**2 + (y - cy)**2 <= radius2: targets[i, j, y, x] = 1 # reduce the target heatmap size by a half # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688). 
sub_targets = [] for i in range(self.grid_points): sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i] sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2]) sub_targets = torch.cat(sub_targets, dim=1) sub_targets = sub_targets.cuda() return sub_targets def loss(self, grid_pred, grid_targets): loss_fused = self.loss_grid(grid_pred['fused'], grid_targets) loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets) loss_grid = loss_fused + loss_unfused return dict(loss_grid=loss_grid) def get_bboxes(self, det_bboxes, grid_pred, img_meta): # TODO: refactoring assert det_bboxes.shape[0] == grid_pred.shape[0] det_bboxes = det_bboxes.cpu() cls_scores = det_bboxes[:, [4]] det_bboxes = det_bboxes[:, :4] grid_pred = grid_pred.sigmoid().cpu() R, c, h, w = grid_pred.shape half_size = self.whole_map_size // 4 * 2 assert h == w == half_size assert c == self.grid_points # find the point with max scores in the half-sized heatmap grid_pred = grid_pred.view(R * c, h * w) pred_scores, pred_position = grid_pred.max(dim=1) xs = pred_position % w ys = pred_position // w # get the position in the whole heatmap instead of half-sized heatmap for i in range(self.grid_points): xs[i::self.grid_points] += self.sub_regions[i][0] ys[i::self.grid_points] += self.sub_regions[i][1] # reshape to (num_rois, grid_points) pred_scores, xs, ys = tuple( map(lambda x: x.view(R, c), [pred_scores, xs, ys])) # get expanded pos_bboxes widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1) heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1) x1 = (det_bboxes[:, 0, None] - widths / 2) y1 = (det_bboxes[:, 1, None] - heights / 2) # map the grid point to the absolute coordinates abs_xs = (xs.float() + 0.5) / w * widths + x1 abs_ys = (ys.float() + 0.5) / h * heights + y1 # get the grid points indices that fall on the bbox boundaries x1_inds = [i for i in range(self.grid_size)] y1_inds = [i * self.grid_size for i in range(self.grid_size)] x2_inds = [ self.grid_points - self.grid_size + i for i in range(self.grid_size) ] y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)] # voting of all grid points on some boundary bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, x1_inds].sum(dim=1, keepdim=True)) bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, y1_inds].sum(dim=1, keepdim=True)) bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, x2_inds].sum(dim=1, keepdim=True)) bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, y2_inds].sum(dim=1, keepdim=True)) bbox_res = torch.cat( [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1) bbox_res[:, [0, 2]].clamp_(min=0, max=img_meta[0]['img_shape'][1] - 1) bbox_res[:, [1, 3]].clamp_(min=0, max=img_meta[0]['img_shape'][0] - 1) return bbox_res
nilq/baby-python
python
#!/usr/bin/env python

import os
import shutil
import subprocess
import difflib
import filecmp
import sys

rootdir = "."

# walk the tree and delete every file whose path contains "RLBin"
for subdir, dirs, files in os.walk(rootdir):
    for file in files:
        if "RLBin" in (os.path.join(subdir, file)):
            os.remove(os.path.join(subdir, file))
            print(os.path.join(subdir, file) + " removed")
nilq/baby-python
python
#!/usr/bin/env python2
# Copyright (c) 2019 Erik Schilling
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from emuvim.api.osm.pre_configured_osm import PreConfiguredOSM
from mininet.log import setLogLevel

setLogLevel('debug')

with PreConfiguredOSM() as osm:
    osm.onboard_vnfd('vnfs/ping_vnf')
    osm.onboard_vnfd('vnfs/pong_vnf')
    nsd_id = osm.onboard_nsd('services/pingpong_ns')

    ns_id = osm.ns_create('pingpong-test', nsd_id)

    osm.ns_wait_until_all_in_status('running')

    osm.ns_delete(ns_id)
    osm.ns_wait_until_all_in_status('terminated')
nilq/baby-python
python
from .db_api import DbApi from .meta import Db from .schema import * class Impl(DbApi): def __init__(self, db): assert isinstance(db, Db) DbApi.__init__(self) self.__db = db def __del__(self): self.close() def close(self): if self.__db is not None: self.__db.close() self.__db = None def add_source_file(self, filename): """ Returns the ID for the source file. Raises exception if it already exists. """ return self.__db.table('SOURCE_FILE').insert(filename) def get_source_file_id(self, filename): """ Returns the ID for the source file, or None if not present. """ c = self.__db.query( "SELECT source_file_id FROM SOURCE_FILE WHERE source_location = ?", filename ) ret = None for r in c: ret = r[0] c.close() break return ret def get_source_file_for_id(self, source_file_id): c = self.__db.query( "SELECT source_location FROM SOURCE_FILE WHERE source_file_id = ?", source_file_id ) ret = None for r in c: ret = r[0] c.close() break return ret def add_tag(self, file_id, tag_name, tag_value): """ Returns the ID of the tag. """ return self.__db.table('TAG').insert( file_id, tag_name, tag_value ) def add_keyword(self, file_id, keyword): """ Returns the ID of the keyword. """ return self.__db.table('FILE_KEYWORD').insert( file_id, keyword ) def delete_keywords_for_source_id(self, file_id): return self.__db.table('FILE_KEYWORD').delete_where( 'source_file_id = ?', file_id ) def get_keywords_for_id(self, file_id): ret = set() c = self.__db.query( 'SELECT keyword FROM FILE_KEYWORD WHERE source_file_id = ?', file_id ) for r in c: ret.add(r[0]) return ret def get_tags_for_id(self, file_id): ret = {} c = self.__db.query( 'SELECT tag_name, tag_value FROM TAG WHERE source_file_id = ?', file_id ) for r in c: ret[r[0]] = r[1] return ret def add_target_file(self, source_file_id, target_filename): """ Returns the ID of the target file. """ return self.__db.table('TARGET_FILE').insert( source_file_id, target_filename ) def get_target_file(self, source_file_id): ret = None c = self.__db.query( 'SELECT target_location FROM TARGET_FILE WHERE source_file_id = ?', source_file_id ) for r in c: ret = r[0] c.close() break return ret def get_source_id_for_target_file(self, target_filename): ret = None c = self.__db.query( 'SELECT source_file_id FROM TARGET_FILE WHERE target_location = ?', target_filename ) for r in c: ret = r[0] c.close() break return ret def get_source_file_for_target_file(self, target_filename): ret = None c = self.__db.query( """ SELECT source_location FROM SOURCE_FILE sf INNER JOIN TARGET_FILE tf ON sf.source_file_id = tf.source_file_id WHERE target_location = ? """, target_filename ) for r in c: ret = r[0] c.close() break return ret def find_target_files(self, target_match=None): ret = set() if target_match is None: c = self.__db.query('SELECT target_location FROM TARGET_FILE') else: c = self.__db.query( 'SELECT target_location FROM TARGET_FILE WHERE target_location LIKE ?', target_match ) for r in c: ret.add(r[0]) return ret def get_source_files_with_tags(self, tags, exact=True): """ Returns the source file names that has the matching tag keys to tag values. If none are found, then an empty list is returned. """ # This is a messy query that really doesn't work with sqlite. # So instead we'll do multiple queries and shrink the result # down in code. tag_keys = [] tag_values = [] for k, v in tags.items(): tag_keys.append(k) tag_values.append(v) if len(tag_keys) <= 0: return [] matching_file_ids = set() if exact: value_match_sql = "tag_value = ?" else: value_match_sql = "tag_value LIKE ?" 
c = self.__db.query( 'SELECT source_file_id FROM TAG WHERE tag_name = ? and {0}'.format( value_match_sql), tag_keys[0], tag_values[0] ) for r in c: matching_file_ids.add(str(r[0])) if len(matching_file_ids) <= 0: return [] for i in range(1, len(tag_keys)): c = self.__db.query( 'SELECT source_file_id FROM TAG WHERE tag_name = ? AND {0} AND source_file_id in ({1})'.format( value_match_sql, ','.join('?' * len(matching_file_ids))), tag_keys[i], tag_values[i], *matching_file_ids ) matching_file_ids = set() for r in c: matching_file_ids.add(str(r[0])) c.close() if len(matching_file_ids) <= 0: return [] c = self.__db.query( 'SELECT source_location FROM SOURCE_FILE WHERE source_file_id in ({0})'.format( ','.join('?' * len(matching_file_ids))), *matching_file_ids ) ret = [] for r in c: ret.append(r[0]) return ret def get_source_files_with_matching_keywords(self, keywords): """ Returns a list of [source file name, keyword], possibily with duplicate source files, for any keyword. """ ksql = [] for k in keywords: ksql.append('?') c = self.__db.query( '''SELECT source_location, keyword FROM FILE_KEYWORD fk INNER JOIN SOURCE_FILE sf ON fk.source_file_id = sf.source_file_id WHERE keyword IN ({0})'''.format(','.join(ksql)), *keywords ) ret = [] for r in c: ret.append((r[0], r[1])) return ret def add_duplicate(self, source_id, duplicate_of_id): return self.__db.table('DUPLICATE_FILE').insert( source_id, duplicate_of_id ) def get_duplicate_of_id(self, source_id): """ Returns the source file ID of the file marked as a duplicate of the source file. """ c = self.__db.query( 'SELECT duplicate_of_source_file_id FROM DUPLICATE_FILE WHERE source_file_id = ?', source_id ) ret = None for r in c: ret = r[0] c.close() break return ret def get_duplicate_ids_for_id(self, duplicate_of_id): """ Get the source id for the duplicate_of_id. """ ret = set() c = self.__db.query( 'SELECT source_file_id FROM DUPLICATE_FILE WHERE duplicate_of_source_file_id = ?', duplicate_of_id ) for r in c: ret.add(r[0]) return ret def get_duplicate_filenames_for_id(self, source_id): """ Get the filenames for any duplicate of the source id. Does not look for duplicates of duplicates. """ ret = [] for d in self.get_duplicate_data_for_id(source_id): ret.append(d['location']) return ret def get_duplicate_data_for_id(self, source_id): """ Returns any duplicate of the source id as get_duplicate_filenames_for_id. Each value in the returned collection is a dictionary. Does not look for duplicates of duplicates. """ dup_ids = set() ret = [] c = self.__db.query( """SELECT sf.source_file_id, sf.source_location, d.duplicate_id, d.duplicate_of_source_file_id FROM SOURCE_FILE sf INNER JOIN DUPLICATE_FILE d ON sf.source_file_id = d.source_file_id WHERE d.duplicate_of_source_file_id = ? """, source_id ) for r in c: if r[0] not in dup_ids and r[0] != source_id: dup_ids.add(r[0]) ret.append({ 'source_file_id': r[0], 'source_location': r[1], 'duplicate_id': r[2], 'duplicate_of_source_file_id': r[3], # User meaningful data 'filename': r[1] }) c = self.__db.query( """SELECT sf.source_file_id, sf.source_location, d.duplicate_id, d.duplicate_of_source_file_id FROM SOURCE_FILE sf INNER JOIN DUPLICATE_FILE d ON sf.source_file_id = d.duplicate_of_source_file_id WHERE d.source_file_id = ? 
""", source_id ) for r in c: if r[2] not in dup_ids and r[2] != source_id: dup_ids.add(r[0]) ret.append({ 'source_file_id': r[0], 'source_location': r[1], 'duplicate_id': r[2], 'duplicate_of_source_file_id': r[3], # User meaningful data 'filename': r[1] }) return ret def delete_duplicate_id(self, duplicate_id): return self.__db.table('DUPLICATE_FILE').delete_by_id(duplicate_id) def get_source_files_like(self, name_like=None): ret = set() if name_like is None: c = self.__db.query('SELECT source_location FROM SOURCE_FILE') else: c = self.__db.query( 'SELECT source_location FROM SOURCE_FILE WHERE source_location LIKE ?', name_like ) for r in c: ret.add(r[0]) return ret def remove_tags_for_source_id(self, source_id): return self.__db.table('TAG').delete_where( "source_file_id = ?", source_id ) def delete_source_graph(self, source_id): self.__db.table('DUPLICATE_FILE').delete_where( "duplicate_of_source_file_id = ? OR source_file_id = ?", source_id, source_id ) self.__db.table('FILE_KEYWORD').delete_where( "source_file_id = ?", source_id ) self.__db.table('TAG').delete_where( "source_file_id = ?", source_id ) self.__db.table('TARGET_FILE').delete_where( "source_file_id = ?", source_id ) return self.__db.table('SOURCE_FILE').delete_by_id(source_id) def delete_transcoded_file_for_source_id(self, source_id): return self.__db.table('TARGET_FILE').delete_where( "source_file_id = ?", source_id ) def get_source_files_without_tag_names(self, tag_names): ret = set() # Need to perform the query for every tag name, individually. for tag_name in tag_names: c = self.__db.query(""" SELECT source_location FROM SOURCE_FILE WHERE source_file_id NOT IN ( SELECT source_file_id FROM TAG WHERE tag_name = ? ) """, tag_name) for r in c: ret.add(r[0]) return ret # TODO temporary to get past a weird encoding. def get_source_file_ids_like(self, like): c = self.__db.query(""" SELECT source_file_id FROM SOURCE_FILE WHERE source_location LIKE ? """, like) for r in c: yield r[0]
nilq/baby-python
python
# vpe6080 Analog Input Thermistor Module 8 Channel
# Demo Program reads 8 channels
# Thermistor 10K Ohm 3380 Beta installed in Channel 1 to read room temperature

import asyncio
from pywlmio import *

NodeID = 7  # NodeID location is the Backplane ID (Jumpers) and Power Supply Slot location


async def main():
    init()
    th = VPE6080(NodeID)

    try:
        await asyncio.gather(
            th.ch1.configure(1),  # Channel Enabled, default 3380 Beta, 25°C Room Value
            th.ch2.configure(0),  # Channel Disabled
            th.ch3.configure(0),  # Channel Disabled
            th.ch4.configure(0),  # Channel Disabled
            th.ch5.configure(0),  # Channel Disabled
            th.ch6.configure(0),  # Channel Disabled
            th.ch7.configure(0),  # Channel Disabled
            th.ch8.configure(0)   # Channel Disabled
        )
    except WlmioWrongNodeError:
        print("Error NodeID = %d Wrong module installed" % NodeID)  # Error Check if wrong type of module installed
    except WlmioInternalError:
        print("Error NodeID = %d Timed out" % NodeID)  # Error Check - Typically module not installed

    while True:
        try:
            a = await asyncio.gather(
                th.ch1.read(),  # Read Channel 1
                th.ch2.read(),  # Read Channel 2
                th.ch3.read(),  # Read Channel 3
                th.ch4.read(),  # Read Channel 4
                th.ch5.read(),  # Read Channel 5
                th.ch6.read(),  # Read Channel 6
                th.ch7.read(),  # Read Channel 7
                th.ch8.read()   # Read Channel 8
            )
            print("Module VPE6080 NodeID = %d" % NodeID)
            print("Reading Array = ", a)  # Array holds all input channel readings
            # Readings are scaled x10 and are in kelvin: divide by 10, then subtract 273.15 to convert to °C
            print("Channel 1 Thermistor = %0.1f Deg C" % (a[0] / 10 - 273.15))  # Print channel 1
            print("")
        except WlmioWrongNodeError:
            print("Error NodeID = %d Wrong module installed" % NodeID)  # Error Check if wrong type of module installed
        except WlmioInternalError:
            print("Error NodeID = %d Timed out" % NodeID)  # Error Check - Typically module not installed

        await asyncio.sleep(1)


asyncio.run(main(), debug=True)
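# Hedged sketch: the raw channel value is kelvin scaled by 10, so a small helper keeps
# the conversion in one place. The helper name is illustrative and not part of pywlmio.
def deci_kelvin_to_celsius(raw):
    """Convert a VPE6080 raw reading (kelvin x 10) to degrees Celsius."""
    return raw / 10 - 273.15

# Example: deci_kelvin_to_celsius(2957) is roughly 22.55 °C (i.e. 295.7 K).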
nilq/baby-python
python
#!/usr/bin/python3 -OO # Copyright 2007-2021 The SABnzbd-Team <team@sabnzbd.org> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ tests.sabnews - Fake newsserver to use in end-to-end testing Run sabnews.py -h for parameters! """ import argparse import asyncio import logging import os import re import time from random import randint import sabyenc3 logging.getLogger().setLevel(logging.INFO) # Expecting the following message-id: # ARTICLE <file=folder/filename.mkv|part=4|start=5000|size=5000>\r\n ARTICLE_INFO = re.compile( b"^(ARTICLE|BODY) (?P<message_id><file=(?P<file>.*)\\|part=(?P<part>\\d+)\\|start=(?P<start>\\d+)\\|size=(?P<size>\\d+)>)\\r\\n$", re.MULTILINE, ) YENC_ESCAPE = [0x00, 0x0A, 0x0D, ord("="), ord(".")] class NewsServerProtocol(asyncio.Protocol): def __init__(self): self.transport = None self.connected = False self.in_article = False super().__init__() def connection_made(self, transport): logging.info("Connection from %s", transport.get_extra_info("peername")) self.transport = transport self.connected = True self.transport.write(b"200 Welcome (SABNews)\r\n") def data_received(self, message): logging.debug("Data received: %s", message.strip()) # Handle basic commands if message.startswith(b"QUIT"): self.close_connection() elif message.startswith((b"ARTICLE", b"BODY")): parsed_message = ARTICLE_INFO.search(message) self.serve_article(parsed_message) # self.transport.write(data) def serve_article(self, parsed_message): # Check if we parsed everything try: message_id = parsed_message.group("message_id") file = parsed_message.group("file").decode("utf-8") file_base = os.path.basename(file) part = int(parsed_message.group("part")) start = int(parsed_message.group("start")) size = int(parsed_message.group("size")) except (AttributeError, ValueError): logging.warning("Can't parse article information") self.transport.write(b"430 No Such Article Found (bad message-id)\r\n") return # Check if file exists if not os.path.exists(file): logging.warning("File not found: %s", file) self.transport.write(b"430 No Such Article Found (no file on disk)\r\n") return # Check if sizes are valid file_size = os.path.getsize(file) if start + size > file_size: logging.warning("Invalid start/size attributes") self.transport.write(b"430 No Such Article Found (invalid start/size attributes)\r\n") return logging.debug("Serving %s" % message_id) # File is found, send headers self.transport.write(b"222 0 %s\r\n" % message_id) self.transport.write(b"Message-ID: %s\r\n" % message_id) self.transport.write(b'Subject: "%s"\r\n\r\n' % file_base.encode("utf-8")) # Write yEnc headers self.transport.write( b"=ybegin part=%d line=128 size=%d name=%s\r\n" % (part, file_size, file_base.encode("utf-8")) ) self.transport.write(b"=ypart begin=%d end=%d\r\n" % (start + 1, start + size)) with open(file, "rb") as inp_file: inp_file.seek(start) inp_buffer = inp_file.read(size) # 
Encode data output_string, crc = sabyenc3.encode(inp_buffer) self.transport.write(output_string) # Write footer self.transport.write(b"\r\n=yend size=%d part=%d pcrc32=%08x\r\n" % (size, part, crc)) self.transport.write(b".\r\n") def close_connection(self): logging.debug("Closing connection") self.transport.write(b"205 Connection closing\r\n") self.transport.close() async def serve_sabnews(hostname, port): # Start server logging.info("Starting SABNews on %s:%d", hostname, port) # Needed for Python 3.5 support! loop = asyncio.get_event_loop() server = await loop.create_server(lambda: NewsServerProtocol(), hostname, port) return server def create_nzb(nzb_file=None, nzb_dir=None, metadata=None): article_size = 500000 files_for_nzb = [] output_file = "" # Either use directory or single file if nzb_dir: if not os.path.exists(nzb_dir) or not os.path.isdir(nzb_dir): raise NotADirectoryError("%s is not a valid directory" % nzb_dir) # List all files files_for_nzb = [os.path.join(nzb_dir, fl) for fl in os.listdir(nzb_dir)] files_for_nzb = [fl for fl in files_for_nzb if os.path.isfile(fl)] output_file = os.path.join(nzb_dir, os.path.basename(os.path.normpath(nzb_dir)) + ".nzb") if nzb_file: if not os.path.exists(nzb_file) or not os.path.isfile(nzb_file): raise FileNotFoundError("Cannot find %s or it is not a file" % nzb_file) files_for_nzb = [nzb_file] output_file = os.path.splitext(nzb_file)[0] + ".nzb" if not files_for_nzb: raise RuntimeError("No files found to include in NZB") # Let's write a file! with open(output_file, "w", encoding="utf-8") as nzb: nzb.write('<?xml version="1.0" encoding="UTF-8"?>\n') nzb.write('<!DOCTYPE nzb PUBLIC "-//newzBin//DTD NZB 1.0//EN" "http://www.newzbin.com/DTD/nzb/nzb-1.0.dtd">\n') nzb.write('<nzb xmlns="http://www.newzbin.com/DTD/2003/nzb">\n') if metadata: nzb.write("<head>\n") for meta_name, meta_value in metadata.items(): nzb.write('<meta type="%s">%s</meta>\n' % (meta_name, meta_value)) nzb.write("</head>\n") nzb_time = time.time() - randint(0, int(time.time() - 746863566)) for fl in files_for_nzb: nzb.write('<file poster="SABNews" date="%d" subject="&quot;%s&quot;">\n' % (nzb_time, os.path.basename(fl))) nzb.write("<groups><group>alt.binaries.test</group></groups>\n") nzb.write("<segments>\n") # Create segments file_size = os.path.getsize(fl) for seg_nr, seg_start in enumerate(range(0, file_size, article_size), 1): segement_size = min(article_size, file_size - seg_start) nzb.write( '<segment number="%d" bytes="%d">file=%s|part=%s|start=%d|size=%d</segment>\n' % (seg_nr, segement_size, fl, seg_nr, seg_start, segement_size) ) nzb.write("</segments>\n") nzb.write("</file>\n") nzb.write("</nzb>\n") logging.info("NZB saved to %s" % output_file) return output_file def main(): parser = argparse.ArgumentParser() parser.add_argument("-s", help="Hostname", dest="hostname", default="127.0.0.1") parser.add_argument("-p", help="Port", dest="port", type=int, default=8888) parser.add_argument("--nzbfile", help="Create NZB of specified file", dest="nzb_file", metavar="FILE") parser.add_argument("--nzbdir", help="Create NZB for files in specified directory", dest="nzb_dir", metavar="DIR") args = parser.parse_args() # Serve if we are not creating NZB's if not args.nzb_file and not args.nzb_dir: loop = asyncio.get_event_loop() loop.run_until_complete(serve_sabnews(args.hostname, args.port)) loop.run_forever() else: create_nzb(args.nzb_file, args.nzb_dir) if __name__ == "__main__": main()
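# Hedged usage note (paths are illustrative): per the argparse options defined above,
# an NZB can be generated for a directory of test files and the fake server started
# separately, e.g.
#   python sabnews.py --nzbdir /tmp/testfiles
#   python sabnews.py -s 127.0.0.1 -p 8888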
nilq/baby-python
python
from setuptools import find_packages, setup

setup(
    name='upbit_wrapper',
    version='0.0.9',
    description='Python wrapper for upbit',
    long_description=open('README.md', 'rt').read(),
    long_description_content_type='text/markdown',
    author='BS LEE',
    author_email='beomsu317@gmail.com',
    url='https://github.com/beomsu317/upbit_wrapper',
    install_requires=['websocket', 'websocket-client', 'requests'],
    keywords=['upbit'],
    python_requires='>=3',
    license='MIT',
    packages=find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3.8'
    ],
    zip_safe=False
)
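# Hedged usage note (not part of the package itself): a typical editable install for
# local development would be run from the project root, e.g.
#   pip install -e .
# and a source distribution can be built with
#   python setup.py sdist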
nilq/baby-python
python
from fqf_iqn_qrdqn.agent.base_agent import BaseAgent from DMoGDiscrete.DMoGQ import DMoGQ from fqf_iqn_qrdqn.utils import disable_gradients, update_params from torch.optim import Adam import torch from DMoGDiscrete.utils import calculate_dmog_loss, evaluate_mog_at_action class DMoGQAgent(BaseAgent): def __init__(self, env, test_env, log_dir, num_steps=5 * (10 ** 7), batch_size=32, num_gaussians=5, eta=0.5, beta=3, delta=10, lr=5e-5, memory_size=10 ** 6, gamma=0.99, multi_step=1, update_interval=4, target_update_interval=10000, start_steps=50000, epsilon_train=0.01, epsilon_eval=0.001, epsilon_decay_steps=250000, double_q_learning=False, dueling_net=False, noisy_net=False, use_per=False, log_interval=100, eval_interval=250000, num_eval_steps=125000, max_episode_steps=27000, grad_cliping=None, cuda=True, seed=0): super(DMoGQAgent, self).__init__(env, test_env, log_dir, num_steps, batch_size, memory_size, gamma, multi_step, update_interval, target_update_interval, start_steps, epsilon_train, epsilon_eval, epsilon_decay_steps, double_q_learning, dueling_net, noisy_net, use_per, log_interval, eval_interval, num_eval_steps, max_episode_steps, grad_cliping, cuda, seed) self.num_gaussians = num_gaussians self.eta = eta self.beta = beta self.delta = delta # Online network. self.online_net = DMoGQ( num_channels=env.observation_space.shape[0], num_actions=self.num_actions, num_gaussians=num_gaussians, dueling_net=dueling_net, noisy_net=noisy_net).to(self.device) # Target network. self.target_net = DMoGQ( num_channels=env.observation_space.shape[0], num_actions=self.num_actions, num_gaussians=num_gaussians, dueling_net=dueling_net, noisy_net=noisy_net).to(self.device).to(self.device) # Copy parameters of the learning network to the target network. self.update_target() # Disable calculations of gradients of the target network. disable_gradients(self.target_net) self.optim = Adam( self.online_net.parameters(), lr=lr, eps=1e-2 / batch_size) def learn(self): self.learning_steps += 1 self.online_net.sample_noise() self.target_net.sample_noise() if self.use_per: (states, actions, rewards, next_states, dones), weights = \ self.memory.sample(self.batch_size) else: states, actions, rewards, next_states, dones = \ self.memory.sample(self.batch_size) weights = None dmog_loss = self.calculate_loss( states, actions, rewards, next_states, dones, weights) update_params( self.optim, dmog_loss, networks=[self.online_net], retain_graph=False, grad_cliping=self.grad_cliping) def calculate_loss(self, states, actions, rewards, next_states, dones, weights): mog_pi, mog_mu, mog_sigma = self.online_net(states=states) mog_pi_sa, mog_mu_sa, mog_sigma_sa = evaluate_mog_at_action(mog_pi=mog_pi, mog_mu=mog_mu, mog_sigma=mog_sigma, actions=actions) assert mog_pi_sa.shape == (self.batch_size, self.num_gaussians, 1) with torch.no_grad(): next_mog_pi, next_mog_mu, next_mog_sigma = self.target_net(states=next_states) mog_q_value = torch.sum(next_mog_pi * next_mog_mu, dim=1) next_actions = torch.argmax(mog_q_value, dim=1, keepdim=True) assert next_actions.shape == (self.batch_size, 1) next_mog_pi_sa, next_mog_mu_sa, next_mog_sigma_sa = \ evaluate_mog_at_action(mog_pi=next_mog_pi, mog_mu=next_mog_mu, mog_sigma=next_mog_sigma, actions=next_actions) assert next_mog_pi_sa.shape == (self.batch_size, 1, self.num_gaussians) # Calculate target mog values. 
            target_mog_mu_sa = rewards[..., None] + (1.0 - dones[..., None]) * self.gamma_n * next_mog_mu_sa
            target_mog_pi_sa = torch.tensor(1.0 / self.num_gaussians) * dones[..., None] + (
                1.0 - dones[..., None]) * next_mog_pi_sa
            target_mog_sigma_sa = torch.tensor(1.0) * dones[..., None] + (
                1.0 - dones[..., None]) * self.gamma_n * next_mog_sigma_sa

            assert target_mog_mu_sa.shape == (self.batch_size, self.num_gaussians, 1)

        dmog_loss = calculate_dmog_loss(mog_pi_sa, mog_mu_sa, mog_sigma_sa,
                                        target_mog_mu_sa, target_mog_pi_sa, target_mog_sigma_sa,
                                        eta=self.eta, beta=self.beta, delta=self.delta,
                                        weight=weights)

        return dmog_loss
nilq/baby-python
python
# Use random.randint(1, 10) to generate a list with 100 elements, then sort the
# elements by how often they occur, from most to least frequent, and print the result
import random

numbers = [random.randint(1, 10) for i in range(100)]
numbers_info = {}


def sorted_by_freq(numbers):
    for number in numbers:  # iterate over the list of random numbers
        if number not in numbers_info:  # if this element has not been counted yet
            numbers_info[number] = numbers.count(number)  # add it to the dict: element as key, occurrence count as value
    return sorted(numbers_info.items(), key=lambda item: item[1], reverse=True)  # sort by count in descending order and return


ans = sorted_by_freq(numbers)
print(ans)
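# A shorter equivalent, offered as a sketch: collections.Counter already counts
# occurrences, and most_common() returns (element, count) pairs sorted from most to
# least frequent, matching the output format of sorted_by_freq above.
from collections import Counter

print(Counter(numbers).most_common())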
nilq/baby-python
python
# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Reformulation of function statements. Consult the developer manual for information. TODO: Add ability to sync source code comments with developer manual sections. """ from nuitka.nodes.AssignNodes import ( StatementAssignmentVariable, StatementAssignmentVariableName, StatementReleaseVariable ) from nuitka.nodes.AsyncgenNodes import ( ExpressionAsyncgenObjectBody, ExpressionMakeAsyncgenObject ) from nuitka.nodes.BuiltinIteratorNodes import ( ExpressionBuiltinIter1, StatementSpecialUnpackCheck ) from nuitka.nodes.BuiltinNextNodes import ExpressionSpecialUnpack from nuitka.nodes.BuiltinRefNodes import makeExpressionBuiltinRef from nuitka.nodes.CodeObjectSpecs import CodeObjectSpec from nuitka.nodes.CoroutineNodes import ( ExpressionCoroutineObjectBody, ExpressionMakeCoroutineObject ) from nuitka.nodes.FunctionNodes import ( ExpressionFunctionBody, ExpressionFunctionCreation, ExpressionFunctionRef ) from nuitka.nodes.GeneratorNodes import ( ExpressionGeneratorObjectBody, ExpressionMakeGeneratorObject, StatementGeneratorReturnNone ) from nuitka.nodes.LocalsDictNodes import StatementSetLocalsDictionary from nuitka.nodes.OutlineNodes import ExpressionOutlineFunction from nuitka.nodes.ReturnNodes import StatementReturn, StatementReturnNone from nuitka.nodes.VariableRefNodes import ( ExpressionTempVariableRef, ExpressionVariableNameRef, ExpressionVariableRef ) from nuitka.PythonVersions import python_version from nuitka.specs.ParameterSpecs import ParameterSpec from .ReformulationTryFinallyStatements import makeTryFinallyStatement from .SyntaxErrors import raiseSyntaxError from .TreeHelpers import ( buildAnnotationNode, buildFrameNode, buildNode, buildNodeList, detectFunctionBodyKind, extractDocFromBody, getKind, makeCallNode, makeDictCreationOrConstant2, makeStatementsSequenceFromStatement, mangleName ) def _insertFinalReturnStatement(function_statements_body, return_statement): if function_statements_body is None: function_statements_body = makeStatementsSequenceFromStatement( statement = return_statement ) elif not function_statements_body.isStatementAborting(): function_statements_body.setStatements( function_statements_body.getStatements() + ( return_statement, ) ) return function_statements_body def _insertInitialSetLocalsDictStatement(function_body, function_statements_body): locals_statement = StatementSetLocalsDictionary( locals_scope = function_body.getFunctionLocalsScope(), source_ref = function_body.source_ref ) if function_statements_body is None: function_statements_body = makeStatementsSequenceFromStatement( statement = locals_statement ) else: function_statements_body.setStatements( ( locals_statement, ) + function_statements_body.getStatements() ) return function_statements_body def _injectDecorator(decorators, inject, acceptable, source_ref): assert type(inject) 
is str assert type(acceptable) is tuple for decorator in decorators: if decorator.isExpressionVariableNameRef() and \ decorator.getVariableName() in acceptable: break else: decorators.append( makeExpressionBuiltinRef( builtin_name = inject, source_ref = source_ref ) ) def buildFunctionNode(provider, node, source_ref): # Functions have way too many details, pylint: disable=too-many-locals assert getKind(node) == "FunctionDef" function_statement_nodes, function_doc = extractDocFromBody(node) function_kind, flags = detectFunctionBodyKind( nodes = function_statement_nodes ) function_body, code_body, code_object = buildFunctionWithParsing( provider = provider, function_kind = function_kind, name = node.name, function_doc = function_doc, flags = flags, node = node, source_ref = source_ref ) if function_kind in ("Generator", "Coroutine"): if function_kind == "Coroutine": code_body = ExpressionCoroutineObjectBody( provider = function_body, name = node.name, code_object = code_object, flags = flags, source_ref = source_ref ) maker_class = ExpressionMakeCoroutineObject else: code_body = ExpressionGeneratorObjectBody( provider = function_body, name = node.name, code_object = code_object, flags = flags, source_ref = source_ref ) maker_class = ExpressionMakeGeneratorObject code_body.qualname_provider = provider for variable in function_body.getVariables(): code_body.getVariableForReference(variable.getName()) function_body.setBody( makeStatementsSequenceFromStatement( statement = StatementReturn( expression = maker_class( ExpressionFunctionRef( function_body = code_body, source_ref = source_ref ), source_ref = source_ref ), source_ref = source_ref ) ) ) decorators = buildNodeList( provider = provider, nodes = reversed(node.decorator_list), source_ref = source_ref ) defaults = buildNodeList( provider = provider, nodes = node.args.defaults, source_ref = source_ref ) kw_defaults = buildParameterKwDefaults( provider = provider, node = node, function_body = function_body, source_ref = source_ref ) function_statements_body = buildFrameNode( provider = code_body, nodes = function_statement_nodes, code_object = code_object, source_ref = source_ref ) if function_kind == "Function": # TODO: Generators might have to raise GeneratorExit instead. function_statements_body = _insertFinalReturnStatement( function_statements_body = function_statements_body, return_statement = StatementReturnNone( source_ref = source_ref ) ) if "has_exec" in flags: function_statements_body = _insertInitialSetLocalsDictStatement( function_body = code_body, function_statements_body = function_statements_body, ) if function_statements_body.isStatementsFrame(): function_statements_body = makeStatementsSequenceFromStatement( statement = function_statements_body ) code_body.setBody( function_statements_body ) annotations = buildParameterAnnotations(provider, node, source_ref) function_creation = ExpressionFunctionCreation( function_ref = ExpressionFunctionRef( function_body = function_body, source_ref = source_ref ), defaults = defaults, kw_defaults = kw_defaults, annotations = annotations, source_ref = source_ref ) # Add the "staticmethod" decorator to __new__ methods if not provided. # CPython made these optional, but secretly applies them when it does # "class __new__". We add them earlier, so our optimization will see it. 
if python_version < 300 and \ node.name == "__new__" and \ provider.isExpressionClassBody(): _injectDecorator(decorators, "staticmethod", ("staticmethod", "classmethod"), source_ref) # Add the "classmethod" decorator to __init_subclass__ methods if not provided. if python_version >= 360 and \ node.name == "__init_subclass__" and \ provider.isExpressionClassBody(): _injectDecorator(decorators, "classmethod", ("classmethod",), source_ref) if python_version >= 370 and \ node.name == "__class_getitem__" and \ provider.isExpressionClassBody(): _injectDecorator(decorators, "classmethod", ("classmethod",), source_ref) decorated_function = function_creation for decorator in decorators: decorated_function = makeCallNode( decorator, decorated_function, decorator.getSourceReference() ) result = StatementAssignmentVariableName( provider = provider, variable_name = mangleName(node.name, provider), source = decorated_function, source_ref = source_ref ) if python_version >= 340: function_body.qualname_setup = result.getVariableName() return result def buildAsyncFunctionNode(provider, node, source_ref): # We are creating a function here that creates coroutine objects, with # many details each, pylint: disable=too-many-locals assert getKind(node) == "AsyncFunctionDef" function_statement_nodes, function_doc = extractDocFromBody(node) function_kind, flags = detectFunctionBodyKind( nodes = function_statement_nodes, start_value = "Coroutine" ) creator_function_body, _, code_object = buildFunctionWithParsing( provider = provider, function_kind = function_kind, name = node.name, function_doc = function_doc, flags = (), node = node, source_ref = source_ref ) if function_kind == "Coroutine": function_body = ExpressionCoroutineObjectBody( provider = creator_function_body, name = node.name, code_object = code_object, flags = flags, source_ref = source_ref ) else: function_body = ExpressionAsyncgenObjectBody( provider = creator_function_body, name = node.name, code_object = code_object, flags = flags, source_ref = source_ref ) function_body.qualname_provider = provider for variable in creator_function_body.getVariables(): function_body.getVariableForReference(variable.getName()) decorators = buildNodeList( provider = provider, nodes = reversed(node.decorator_list), source_ref = source_ref ) defaults = buildNodeList( provider = provider, nodes = node.args.defaults, source_ref = source_ref ) function_statements_body = buildFrameNode( provider = function_body, nodes = function_statement_nodes, code_object = code_object, source_ref = source_ref ) function_statements_body = _insertFinalReturnStatement( function_statements_body = function_statements_body, return_statement = StatementGeneratorReturnNone( source_ref = source_ref ) ) if function_statements_body.isStatementsFrame(): function_statements_body = makeStatementsSequenceFromStatement( statement = function_statements_body ) function_body.setBody( function_statements_body ) annotations = buildParameterAnnotations(provider, node, source_ref) kw_defaults = buildParameterKwDefaults( provider = provider, node = node, function_body = creator_function_body, source_ref = source_ref ) if function_kind == "Coroutine": creation_node = ExpressionMakeCoroutineObject( coroutine_ref = ExpressionFunctionRef( function_body = function_body, source_ref = source_ref ), source_ref = source_ref ) else: creation_node = ExpressionMakeAsyncgenObject( asyncgen_ref = ExpressionFunctionRef( function_body = function_body, source_ref = source_ref ), source_ref = source_ref ) 
creator_function_body.setBody( makeStatementsSequenceFromStatement( statement = StatementReturn( expression = creation_node, source_ref = source_ref ) ) ) function_creation = ExpressionFunctionCreation( function_ref = ExpressionFunctionRef( function_body = creator_function_body, source_ref = source_ref ), defaults = defaults, kw_defaults = kw_defaults, annotations = annotations, source_ref = source_ref ) decorated_function = function_creation for decorator in decorators: decorated_function = makeCallNode( decorator, decorated_function, decorator.getSourceReference() ) result = StatementAssignmentVariableName( provider = provider, variable_name = mangleName(node.name, provider), source = decorated_function, source_ref = source_ref ) function_body.qualname_setup = result.getVariableName() # Share the non-local declarations. TODO: This may also apply to generators # and async generators. creator_function_body.non_local_declarations = function_body.non_local_declarations return result def buildParameterKwDefaults(provider, node, function_body, source_ref): # Build keyword only arguments default values. We are hiding here, that it # is a Python3 only feature. if python_version >= 300: kw_only_names = function_body.getParameters().getKwOnlyParameterNames() if kw_only_names: keys = [] values = [] for kw_only_name, kw_default in \ zip(kw_only_names, node.args.kw_defaults): if kw_default is not None: keys.append(kw_only_name) values.append( buildNode(provider, kw_default, source_ref) ) kw_defaults = makeDictCreationOrConstant2( keys = keys, values = values, source_ref = source_ref ) else: kw_defaults = None else: kw_defaults = None return kw_defaults def buildParameterAnnotations(provider, node, source_ref): # Too many branches, because there is too many cases, pylint: disable=too-many-branches # Build annotations. We are hiding here, that it is a Python3 only feature. if python_version < 300: return None # Starting with Python 3.4, the names of parameters are mangled in # annotations as well. 
if python_version < 340: mangle = lambda variable_name: variable_name else: mangle = lambda variable_name: mangleName(variable_name, provider) keys = [] values = [] def addAnnotation(key, value): keys.append(mangle(key)) values.append(value) def extractArg(arg): if getKind(arg) == "Name": assert arg.annotation is None elif getKind(arg) == "arg": if arg.annotation is not None: addAnnotation( key = arg.arg, value = buildAnnotationNode(provider, arg.annotation, source_ref) ) elif getKind(arg) == "Tuple": for sub_arg in arg.elts: extractArg(sub_arg) else: assert False, getKind(arg) for arg in node.args.args: extractArg(arg) for arg in node.args.kwonlyargs: extractArg(arg) if python_version < 340: if node.args.varargannotation is not None: addAnnotation( key = node.args.vararg, value = buildNode( provider, node.args.varargannotation, source_ref ) ) if node.args.kwargannotation is not None: addAnnotation( key = node.args.kwarg, value = buildNode( provider, node.args.kwargannotation, source_ref ) ) else: if node.args.vararg is not None: extractArg(node.args.vararg) if node.args.kwarg is not None: extractArg(node.args.kwarg) # Return value annotation (not there for lambdas) if hasattr(node, "returns") and node.returns is not None: addAnnotation( key = "return", value = buildAnnotationNode( provider, node.returns, source_ref ) ) if keys: return makeDictCreationOrConstant2( keys = keys, values = values, source_ref = source_ref ) else: return None def _wrapFunctionWithSpecialNestedArgs(name, outer_body, parameters, special_args, source_ref): inner_name = name.strip("<>") + "$inner" iter_vars = [] values = [] statements = [] def unpackFrom(source, arg_names): accesses = [] sub_special_index = 0 iter_var = outer_body.allocateTempVariable(None, "arg_iter_%d" % len(iter_vars)) iter_vars.append(iter_var) statements.append( StatementAssignmentVariable( variable = iter_var, source = ExpressionBuiltinIter1( value = source, source_ref = source_ref ), source_ref = source_ref ) ) for element_index, arg_name in enumerate(arg_names): if getKind(arg_name) == "Name": arg_var = outer_body.createProvidedVariable(arg_name.id) outer_body.registerProvidedVariable(arg_var) statements.append( StatementAssignmentVariable( variable = arg_var, source = ExpressionSpecialUnpack( value = ExpressionTempVariableRef( variable = iter_var, source_ref = source_ref ), count = element_index + 1, expected = len(arg_names), starred = False, source_ref = source_ref ), source_ref = source_ref ) ) accesses.append( ExpressionVariableRef( variable = arg_var, source_ref = source_ref ) ) elif getKind(arg_name) == "Tuple": accesses.extend( unpackFrom( source = ExpressionSpecialUnpack( value = ExpressionTempVariableRef( variable = iter_var, source_ref = source_ref ), count = element_index + 1, expected = len(arg_names), starred = False, source_ref = source_ref ), arg_names = arg_name.elts ) ) sub_special_index += 1 else: assert False, arg_name statements.append( StatementSpecialUnpackCheck( iterator = ExpressionTempVariableRef( variable = iter_var, source_ref = source_ref ), count = len(arg_names), source_ref = source_ref ) ) return accesses for arg_name in parameters.getParameterNames(): if arg_name.startswith('.'): source = ExpressionVariableNameRef( provider = outer_body, variable_name = arg_name, source_ref = source_ref ) values.extend( unpackFrom(source, special_args[arg_name]) ) else: values.append( ExpressionVariableNameRef( provider = outer_body, variable_name = arg_name, source_ref = source_ref ) ) code_body = ExpressionOutlineFunction( 
provider = outer_body, name = inner_name, source_ref = source_ref ) statements.append( StatementReturn( expression = code_body, source_ref = source_ref ) ) outer_body.setBody( makeStatementsSequenceFromStatement( statement = makeTryFinallyStatement( provider = outer_body, tried = statements, final = [ StatementReleaseVariable( variable = variable, source_ref = source_ref ) for variable in sorted( outer_body.getTempVariables(), key = lambda variable: variable.getName() ) ], source_ref = source_ref, public_exc = False ) ) ) return code_body def buildFunctionWithParsing(provider, function_kind, name, function_doc, flags, node, source_ref): # This contains a complex re-formulation for nested parameter functions. # pylint: disable=too-many-locals kind = getKind(node) assert kind in ("FunctionDef", "Lambda", "AsyncFunctionDef"), "unsupported for kind " + kind def extractArg(arg): if arg is None: return None elif type(arg) is str: return mangleName(arg, provider) elif getKind(arg) == "Name": return mangleName(arg.id, provider) elif getKind(arg) == "arg": return mangleName(arg.arg, provider) elif getKind(arg) == "Tuple": # These are to be re-formulated on the outside. assert False else: assert False, getKind(arg) special_args = {} def extractNormalArgs(args): normal_args = [] for arg in args: if type(arg) is not str and getKind(arg) == "Tuple": special_arg_name = ".%d" % (len(special_args) + 1) special_args[special_arg_name] = arg.elts normal_args.append(special_arg_name) else: normal_args.append(extractArg(arg)) return normal_args normal_args = extractNormalArgs(node.args.args) parameters = ParameterSpec( ps_name = name, ps_normal_args = normal_args, ps_kw_only_args = [ extractArg(arg) for arg in node.args.kwonlyargs ] if python_version >= 300 else [], ps_list_star_arg = extractArg(node.args.vararg), ps_dict_star_arg = extractArg(node.args.kwarg), ps_default_count = len(node.args.defaults) ) message = parameters.checkParametersValid() if message is not None: raiseSyntaxError( message, source_ref.atColumnNumber(node.col_offset), ) parent_module = provider.getParentModule() code_object = CodeObjectSpec( co_name = name, co_kind = function_kind, co_varnames = parameters.getParameterNames(), co_argcount = parameters.getArgumentCount(), co_kwonlyargcount = parameters.getKwOnlyParameterCount(), co_has_starlist = parameters.getStarListArgumentName() is not None, co_has_stardict = parameters.getStarDictArgumentName() is not None, co_filename = parent_module.getRunTimeFilename(), co_lineno = source_ref.getLineNumber(), future_spec = parent_module.getFutureSpec() ) outer_body = ExpressionFunctionBody( provider = provider, name = name, code_object = code_object, flags = flags, doc = function_doc, parameters = parameters, source_ref = source_ref ) # Wrap if necessary for special nested arguments. if special_args: code_body = _wrapFunctionWithSpecialNestedArgs( name = name, outer_body = outer_body, parameters = parameters, special_args = special_args, source_ref = source_ref ) else: code_body = outer_body return outer_body, code_body, code_object def addFunctionVariableReleases(function): assert function.isExpressionFunctionBodyBase() releases = [] # We attach everything to the function definition source location. source_ref = function.getSourceReference() for variable in function.getLocalVariables(): # Shared variables are freed by function object attachment. 
if variable.getOwner() is not function: continue releases.append( StatementReleaseVariable( variable = variable, source_ref = source_ref ) ) if releases: body = function.getBody() if body.isStatementsFrame(): body = makeStatementsSequenceFromStatement( statement = body ) body = makeTryFinallyStatement( provider = function, tried = body, final = releases, source_ref = source_ref ) function.setBody( makeStatementsSequenceFromStatement( statement = body ) ) # assert body.isStatementAborting(), body.asXmlText()
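# Illustration (not part of Nuitka itself): the reason buildFunctionNode above
# injects a "staticmethod" decorator for "__new__" is that CPython silently
# treats a plain "__new__" defined in a class body as a static method. The
# hypothetical class below shows that behavior; injecting the decorator early
# simply makes the implicit rule visible to later optimization passes.
#
#   class _Example(object):
#       def __new__(cls, value):          # no @staticmethod written here
#           instance = object.__new__(cls)
#           instance.value = value
#           return instance
#
#   _Example(1)  # works although __new__ was never explicitly decorated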
nilq/baby-python
python
# 5 Write a program that converts meters to centimeters.

distancia = float(input('Enter a distance in meters: '))

converção = distancia * 100

print(f'For the given distance of {distancia} m, the conversion to centimeters is: {converção} cm')
nilq/baby-python
python
#!/usr/bin/env python3 # -*- coding: utf8 -*- from io import StringIO import os import subprocess import sys import types from .exceptions import (DeliveryTransportError, DeliveryPackingError) from .pickle import pickle, unpickle, ModulePickle class DeliveryBox(object): """Container for data exchange""" # OUTPUT VALUES stdout = None stderr = None return_value = None exception = None # INPUT VALUES instance = None func = None args = None kwargs = None modules = set() pickled_modules = set() def __str__(self): return "\n".join(["{:15s}: {}".format(key, value) for (key, value) in self.__dict__.items()]) def __eq__(self, other): return self.__dict__ == other.__dict__ class DeliveryBoy(object): """Operator for call the new process and handle input/output When called the decorated function and non-standard modules stored in its `__globals__` attribute are pickled and passed via the transport command to the newly started python process. If an exception is raised during execution of the decorated function, this exception is pickled and reraised. If `async` is `False`, STDOUT, STDERR and the return value of the decorated function are returned upon calling the decorated function. Otherwise only the process ID is returned; if a transport is defined, it is the process ID of the transport, otherwise the process ID of the interpreter. After execution STDOUT and STDERR writing during execution of the callable are written to STDOUT and STDERR of the main process. This applies only to synchronous execution! :param func: Function object that is called in the new process :type func: callable :param transport: Transport command :type transport: str :param transport_params: Additional arguments for the transport command. :type transport_params: list :param executable: The python executable to be called. Default: `sys.executable`. :type executable: Absolute path of python interpreter :param async: If set to `True`, this process will not wait for the process called via the transport command to finish. Default: `False` :type async: bool :param discard_excess: If set to `False`, all output written to STDOUT by the new process that is not redirected gets pre- or appended accordingly to the delivery box. Default: `True` :type discard_excess: bool :return: Return value of the decorated callable :raises deliveryboy.exceptions.DeliveryPackingError: if decorated callable is not supported, if a module cannot be added to the delivery box :raises deliveryboy.exceptions.DeliveryTransportError: if calling the transport or executable fail (e.g. command not found, exit code not equal zero. 
""" def __init__(self, func, transport=None, transport_params=[], executable=sys.executable, async=False, discard_excess=True, **params): self.func = func self.params = params self.async = async self.discard_excess= discard_excess self.executable = executable self.transport = transport self.transport_params = transport_params self.inbox = DeliveryBox() self.outbox = None def __call__(self, *args, **kwargs): self._pack_box(args, kwargs) response = self._run_delivery() if self.transport: self.outbox, prefix, suffix = unpickle(response[0], self.discard_excess) if prefix or suffix: self.outbox.stdout = prefix + self.outbox.stdout + suffix self._pipe_stdout_err() self._reraise() return self.outbox.return_value def __get__(self, obj, classobj=None): if obj is not None: self.inbox.instance = obj return self def _pack_box(self, args, kwargs): """Pack callable, arguments and modules :param args: Arguments to be passed to the callable :type args: list :param kwargs: Arguments to be passed to the callable :type kwargs: dict """ self.inbox.args = args self.inbox.kwargs = kwargs if isinstance(self.func, types.FunctionType): self.inbox.func = self.func.__code__ self._pack_box_modules() # myglobals = self.func.__globals__ else: raise DeliveryPackingError( "This type of callable is not supported" ) def _pack_box_modules(self): """Add modules to box for pickling""" allmodules = [(k, v) for (k, v) in self.func.__globals__.items() if isinstance(v, types.ModuleType) and not k.startswith("__")] venv = os.environ.get("VIRTUAL_ENV", None) path = sys.path[1:] if venv: path = [p for p in path if p and not p.startswith(venv)] path.append(venv) try: # Handle builtins and modules from virtual env # Start with those that have no __file__ attribute self.inbox.modules |= set([k for (k, v) in allmodules if getattr(v, '__file__', None) is None]) # Then add those from the system paths for sitepath in path: self.inbox.modules |= { k for (k, v) in allmodules if getattr(v, '__file__', '').startswith(sitepath) } except Exception as error: raise DeliveryPackingError( "Cannot pack built-in/venv modules", real_exception=error ) # TODO: This breaks availability of imported submodules mod_pickle = ModulePickle(modules=[v for (k, v) in allmodules if k not in self.inbox.modules]) self.inbox.pickled_modules = mod_pickle.pickle() self.inbox.modules |= set([k for (k, v) in allmodules if k not in self.inbox.modules]) def _run_delivery(self): """Executes the actual transport/executable If `transport` is `None`, it and `transport_params` will be omitted from the command line. In this case the callable is run directly. Also, in this case the `async` option is ignored. 
""" if self.transport: cmd = [self.transport, ] + self.transport_params + [ self.executable, "-m", "deliveryboy", pickle(self.inbox) ] try: child_process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) except Exception as error: raise DeliveryTransportError(real_exception=error) if not self.async: response = child_process.communicate() self._handle_call_error(response, child_process.returncode) return response else: return child_process.pid else: self.outbox = execute(self.inbox) def _handle_call_error(self, response, returncode): if returncode: raise DeliveryTransportError( "Child process exited with {}: {}".format( returncode, response[1].decode("utf8") )) def _pipe_stdout_err(self): """Redirect STDOUT and STDERR from delivered callable""" for stream in ["stdout", "stderr"]: if isinstance(self.outbox, DeliveryBox) \ and getattr(self.outbox, stream, None): print( getattr(self.outbox, stream), file=getattr(sys, stream) ) def _reraise(self): """Re-raises an exception originating from the callable""" if self.outbox and isinstance(self.outbox.exception, Exception): raise self.outbox.exception class DeliveryBoyDecorator(object): """Decorator for functions Decorated functions are pickled and passed to a newly started python process that is called via a transport command (e.g. sudo) :param transport: Transport command :type transport: str :param executable: The python executable to be called. Default: `sys.executable`. :type executable: Absolute path of python interpreter :param async: If set to `True`, this process will not wait for the process called via the transport command to finish. Default: `False` :type async: bool """ def __init__(self, **params): self.params = params def __call__(self, func, *args, **kwargs): return DeliveryBoy(func, **self.params) def execute(inbox): """Setup the environment and execute the decorated callable :param inbox: Pickled :py:obj:`DeliveryBox` instance :return: :py:obj:`DeliveryBox` :raises deliveryboy.exception.DeliveryPackingError: If callable is missing """ # Load pickled modules mod_pickle = ModulePickle(pickled=inbox.pickled_modules) mod_pickle.unpickle() # Import modules globals().update({x: __import__(x) for x in inbox.modules}) orig_stdout = sys.stdout orig_stderr = sys.stderr sys.stdout = StringIO() sys.stderr = StringIO() if inbox.func is not None and isinstance(inbox.func, types.CodeType): func = types.FunctionType(inbox.func, globals()) else: del mod_pickle raise DeliveryPackingError("No callable to run in delivery box") box = DeliveryBox() try: if inbox.instance is not None: box.return_value = func(inbox.instance, *inbox.args, **inbox.kwargs) else: box.return_value = func(*inbox.args, **inbox.kwargs) except Exception as error: box.exception = error box.stdout = sys.stdout.getvalue() box.stderr = sys.stderr.getvalue() sys.stdout = orig_stdout sys.stderr = orig_stderr del mod_pickle return box def main(): """Entry function for new process This method unpickles data from the command line, redirects STDOUT + STDERR and pickles the return value and exception Input and output of this function are base64 encoded strings representing pickled :py:obj:`deliveryboy.core.DeliveryBox` objects. """ try: inbox = unpickle(bytes(sys.argv[1], "utf8"))[0] except Exception as error: box = DeliveryBox() box.exception = error else: box = execute(inbox) print(pickle(box))
nilq/baby-python
python
# Kirillov Alexey, IU7-22

from math import sqrt
from tkinter import *

root = Tk()
draw_pole = Canvas(root, width = 800, height = 600, bg = "white")


def dist(x, y, x1, y1, x2, y2):
    # distance from point (x, y) to the line through (x1, y1) and (x2, y2)
    lenth = abs((x-x1) * (y2-y1) - (y-y1) * (x2-x1)) /\
            sqrt((x2-x1)**2 + (y2-y1)**2)
    #print(lenth)
    return lenth


def uline(a1, b1, a2, b2, b):
    # linear interpolation of the a-coordinate as a function of the b-coordinate
    if b1 == b2:
        return 0
    else:
        return (a2 - a1)*(b - b1)/(b2 - b1) + a1


R = float(input("Enter the radius of the circles: "))

points = []
print("\nEnter a point's x and y separated by a space; an empty line ends input:")
s = input()
while s != "":
    points.append(list(map(float, s.split())))
    s = input()

circles = []
print("Enter a circle center's x and y separated by a space; an empty line ends input:")
s = input()
while s != "":
    circles.append(list(map(float, s.split())))
    s = input()

max_k = 0
point_a = 0
point_b = 0
for i in range(len(points) - 1):
    for j in range(i + 1, len(points)):
        k = 0
        for circle in circles:
            if dist(circle[0], circle[1], points[i][0], points[i][1],
                    points[j][0], points[j][1]) < R:
                k += 1
        if k > max_k:
            point_a = i
            point_b = j
            max_k = k

print("{:} intersections with circles for the line passing\n"
      "through points ({:}; {:}) and ({:}; {:})".format(
          max_k, points[point_a][0], points[point_a][1],
          points[point_b][0], points[point_b][1]))

min_x = max_x = points[0][0]
min_y = max_y = points[0][1]
for point in points:
    max_x = max(max_x, point[0])
    min_x = min(min_x, point[0])
    max_y = max(max_y, point[1])
    min_y = min(min_y, point[1])
for circle in circles:
    max_x = max(max_x, circle[0])
    min_x = min(min_x, circle[0])
    max_y = max(max_y, circle[1])
    min_y = min(min_y, circle[1])

scale = min(500/(max_y - min_y), 700/(max_x - min_x))
disp_x = 50 + round((700 - (max_x - min_x)*scale)/2)
disp_y = 550 - round((500 - (max_y - min_y)*scale)/2)

x = round(disp_x - min_x*scale)
y = round(disp_y + min_y*scale)
draw_pole.create_line(0, y, 800, y, width=2, fill="grey", arrow=LAST)
draw_pole.create_line(x, 600, x, 0, width=2, fill="grey", arrow=LAST)
draw_pole.create_text(x + 8, 9, text = "y",
                      font="Arial 8", justify=CENTER, fill="green")
draw_pole.create_text(790, y - 9, text = "x",
                      font="Arial 8", justify=CENTER, fill="green")

# extend the chosen line far beyond the drawing area: x as a function of y...
x1 = uline(points[point_a][0], points[point_a][1], points[point_b][0],
           points[point_b][1], (max_y-min_y)*2)
x2 = uline(points[point_a][0], points[point_a][1], points[point_b][0],
           points[point_b][1], -(max_y-min_y)*2)
# ...and y as a function of x on the same line
y1 = uline(points[point_a][1], points[point_a][0], points[point_b][1],
           points[point_b][0], x1)
y2 = uline(points[point_a][1], points[point_a][0], points[point_b][1],
           points[point_b][0], x2)
x1 = round(disp_x + (x1 - min_x)*scale)
y1 = round(disp_y - (y1 - min_y)*scale)
x2 = round(disp_x + (x2 - min_x)*scale)
y2 = round(disp_y - (y2 - min_y)*scale)
draw_pole.create_line(x1, y1, x2, y2, width=2, fill="magenta")

R = round(R * scale)
for point in points:
    x = round(disp_x + (point[0] - min_x)*scale)
    y = round(disp_y - (point[1] - min_y)*scale)
    draw_pole.create_oval(x-2, y-2, x+2, y+2, fill="black")
    draw_pole.create_text(x, y - 13, text="({:};{:})".format(point[0], point[1]),
                          font="Arial 8", justify=CENTER, fill="blue")
for circle in circles:
    x = round(disp_x + (circle[0] - min_x)*scale)
    y = round(disp_y - (circle[1] - min_y)*scale)
    draw_pole.create_oval(x - R, y - R, x + R, y + R, outline = "red")
    draw_pole.create_oval(x - 1, y - 1, x + 1, y + 1, fill = "red")
    draw_pole.create_text(x, y - 13, text="({:};{:})".format(circle[0], circle[1]),
                          font="Arial 8", justify=CENTER, fill="green")

draw_pole.pack()
root.mainloop()
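# Note on dist() above (illustrative, not part of the assignment text): it is the
# standard distance from a point P = (x, y) to the line through A = (x1, y1) and
# B = (x2, y2),
#
#     d = |(x - x1)(y2 - y1) - (y - y1)(x2 - x1)| / sqrt((x2 - x1)**2 + (y2 - y1)**2)
#
# i.e. |cross(B - A, P - A)| / |B - A|. Worked example: P = (0, 1), A = (0, 0),
# B = (1, 1) gives |0*1 - 1*1| / sqrt(2) = 1/sqrt(2) ≈ 0.707, so a circle of
# radius R > 0.707 centered at P counts as crossed by the line AB.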
nilq/baby-python
python
# --------------------------------------
#! /usr/bin/python
# File: 7. Reverse Integer.py
# Author: Kimberly Gao

# My solution: (Run time: 28ms)
# Memory Usage: 14.4 MB

class Solution:
    def __init__(self, name=None):
        # optional label for the solver instance; not used by the methods below
        self.name = name

    def reverse1(self, x: int) -> int:
        string = str(x)
        list1 = list(string)
        if list1[0] == '-':
            list_no_sign = list1[1:]  # remove the sign
            list_reverse = list_no_sign[::-1]  # reverse the numbers
            list_reverse.insert(0, '-')
        else:
            list_reverse = list1[::-1]

        num_reverse_str = ''.join(list_reverse)  # ['3','2','1'] -> ['321']
        num_reverse = int(num_reverse_str)

        # keep the result inside the signed 32-bit range [-2**31, 2**31 - 1]
        if num_reverse <= pow(2, 31) - 1 and num_reverse >= -pow(2, 31):
            return num_reverse
        else:
            return 0

    # Best solution: (Run time: 20ms)
    def reverse2(self, x: int):
        rev, flg = 0, 1
        if x < 0:
            flg = -1
            x = abs(x)
        while x:
            unit = x % 10
            rev = rev * 10 + unit
            x = x // 10
        if rev > 2 ** 31 - 1 or rev < -2 ** 31:
            return 0
        return rev * flg


if __name__ == '__main__':
    x = 1534236469
    # x = 15346
    my_solution = Solution().reverse1(x)
    print(my_solution)
    best_solution = Solution().reverse2(x)
    print(best_solution)
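# Worked example (illustrative): the test value above, x = 1534236469, reverses
# digit-wise to 9646324351, which exceeds 2**31 - 1 = 2147483647, so both
# reverse1 and reverse2 return 0 for it; the commented x = 15346 stays in range
# and reverses to 64351.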
nilq/baby-python
python
from abaqusConstants import * from .Section import Section from ..Connector.ConnectorBehaviorOptionArray import ConnectorBehaviorOptionArray class ConnectorSection(Section): """A ConnectorSection object describes the connection type and the behavior of a connector. The ConnectorSection object is derived from the Section object. Notes ----- This object can be accessed by: .. code-block:: python import section mdb.models[name].sections[name] import odbSection session.odbs[name].sections[name] The corresponding analysis keywords are: - CONNECTOR SECTION - CONNECTOR BEHAVIOR - CONNECTOR CONSTITUTIVE REFERENCE """ def __init__(self, name: str, assembledType: SymbolicConstant = NONE, rotationalType: SymbolicConstant = NONE, translationalType: SymbolicConstant = NONE, integration: SymbolicConstant = UNSPECIFIED, u1ReferenceLength: float = None, u2ReferenceLength: float = None, u3ReferenceLength: float = None, ur1ReferenceAngle: float = None, ur2ReferenceAngle: float = None, ur3ReferenceAngle: float = None, massPerLength: float = None, contactAngle: float = None, materialFlowFactor: float = 1, regularize: Boolean = ON, defaultTolerance: Boolean = ON, regularization: float = 0, extrapolation: SymbolicConstant = CONSTANT, behaviorOptions: ConnectorBehaviorOptionArray = None): """This method creates a ConnectorSection object. Notes ----- This function can be accessed by: .. code-block:: python mdb.models[name].ConnectorSection session.odbs[name].ConnectorSection Parameters ---------- name A String specifying the repository key. assembledType A SymbolicConstant specifying the assembled connection type. Possible values are:NONEBEAMBUSHINGCVJOINTCYLINDRICALHINGEPLANARRETRACTORSLIPRINGTRANSLATORUJOINTWELDThe default value is NONE.You cannot include the *assembledType* argument if *translationalType* or *rotationalType* are given a value other than NONE. At least one of the arguments *assembledType*, *translationalType*, or *rotationalType* must be given a value other than NONE. rotationalType A SymbolicConstant specifying the basic rotational connection type. Possible values are:NONEALIGNCARDANCONSTANT_VELOCITYEULERFLEXION_TORSIONFLOW_CONVERTERPROJECTION_FLEXION_TORSIONREVOLUTEROTATIONROTATION_ACCELEROMETERUNIVERSALThe default value is NONE.You cannot include the *rotationalType* argument if *assembledType* is given a value other than NONE. At least one of the arguments *assembledType*, *translationalType*, or *rotationalType* must be given an value other than NONE. translationalType A SymbolicConstant specifying the basic translational connection type. Possible values are:NONEACCELEROMETERAXIALCARTESIANJOINLINKPROJECTION_CARTESIANRADIAL_THRUSTSLIDE_PLANESLOTThe default value is NONE.You cannot include the *translationalType* argument if *assembledType* is given a value other than NONE. At least one of the arguments *assembledType*, *translationalType*, or *rotationalType* must be given an value other than NONE. integration A SymbolicConstant specifying the time integration scheme to use for analysis. This argument is applicable only to an Abaqus/Explicit analysis. Possible values are UNSPECIFIED, IMPLICIT, and EXPLICIT. The default value is UNSPECIFIED. u1ReferenceLength None or a Float specifying the reference length associated with constitutive response for the first component of relative motion. The default value is None. u2ReferenceLength None or a Float specifying the reference length associated with constitutive response for the second component of relative motion. The default value is None. 
u3ReferenceLength None or a Float specifying the reference length associated with constitutive response for the third component of relative motion. The default value is None. ur1ReferenceAngle None or a Float specifying the reference angle in degrees associated with constitutive response for the fourth component of relative motion. The default value is None. ur2ReferenceAngle None or a Float specifying the reference angle in degrees associated with constitutive response for the fifth component of relative motion. The default value is None. ur3ReferenceAngle None or a Float specifying the reference angle in degrees associated with constitutive response for the sixth component of relative motion. The default value is None. massPerLength None or a Float specifying the mass per unit reference length of belt material. This argument is applicable only when *assembledType*=SLIPRING, and must be specified in that case. The default value is None. contactAngle None or a Float specifying the contact angle made by the belt wrapping around node b. This argument is applicable only to an Abaqus/Explicit analysis, and only when *assembledType*=SLIPRING. The default value is None. materialFlowFactor A Float specifying the scaling factor for material flow at node b. This argument is applicable only when *assembledType*=RETRACTOR or *rotationalType*=FLOW_CONVERTER. The default value is 1.0. regularize A Boolean specifying whether or not all tabular data associated with the *behaviorOptions* will be regularized. This argument is applicable only for an Abaqus/Explicit analysis. The default value is ON. defaultTolerance A Boolean specifying whether or not the default regularization tolerance will be used for all tabular data associated with the *behaviorOptions*. This argument is applicable only for an Abaqus/Explicit analysis and only if *regularize*=ON. The default value is ON. regularization A Float specifying the regularization increment to be used for all tabular data associated with the *behaviorOptions*. This argument is applicable only for an Abaqus/Explicit analysis and only if *regularize*=ON and *defaultTolerance*=OFF. The default value is 0.03. extrapolation A SymbolicConstant specifying the extrapolation technique to be used for all tabular data associated with the *behaviorOptions*. Possible values are CONSTANT and LINEAR. The default value is CONSTANT. behaviorOptions A ConnectorBehaviorOptionArray object. Returns ------- A ConnectorSection object. Raises ------ InvalidNameError RangeError """ super().__init__() pass def setValues(self, assembledType: SymbolicConstant = NONE, rotationalType: SymbolicConstant = NONE, translationalType: SymbolicConstant = NONE, integration: SymbolicConstant = UNSPECIFIED, u1ReferenceLength: float = None, u2ReferenceLength: float = None, u3ReferenceLength: float = None, ur1ReferenceAngle: float = None, ur2ReferenceAngle: float = None, ur3ReferenceAngle: float = None, massPerLength: float = None, contactAngle: float = None, materialFlowFactor: float = 1, regularize: Boolean = ON, defaultTolerance: Boolean = ON, regularization: float = 0, extrapolation: SymbolicConstant = CONSTANT, behaviorOptions: ConnectorBehaviorOptionArray = None): """This method modifies the ConnectorSection object. Parameters ---------- assembledType A SymbolicConstant specifying the assembled connection type. 
Possible values are:NONEBEAMBUSHINGCVJOINTCYLINDRICALHINGEPLANARRETRACTORSLIPRINGTRANSLATORUJOINTWELDThe default value is NONE.You cannot include the *assembledType* argument if *translationalType* or *rotationalType* are given a value other than NONE. At least one of the arguments *assembledType*, *translationalType*, or *rotationalType* must be given a value other than NONE. rotationalType A SymbolicConstant specifying the basic rotational connection type. Possible values are:NONEALIGNCARDANCONSTANT_VELOCITYEULERFLEXION_TORSIONFLOW_CONVERTERPROJECTION_FLEXION_TORSIONREVOLUTEROTATIONROTATION_ACCELEROMETERUNIVERSALThe default value is NONE.You cannot include the *rotationalType* argument if *assembledType* is given a value other than NONE. At least one of the arguments *assembledType*, *translationalType*, or *rotationalType* must be given an value other than NONE. translationalType A SymbolicConstant specifying the basic translational connection type. Possible values are:NONEACCELEROMETERAXIALCARTESIANJOINLINKPROJECTION_CARTESIANRADIAL_THRUSTSLIDE_PLANESLOTThe default value is NONE.You cannot include the *translationalType* argument if *assembledType* is given a value other than NONE. At least one of the arguments *assembledType*, *translationalType*, or *rotationalType* must be given an value other than NONE. integration A SymbolicConstant specifying the time integration scheme to use for analysis. This argument is applicable only to an Abaqus/Explicit analysis. Possible values are UNSPECIFIED, IMPLICIT, and EXPLICIT. The default value is UNSPECIFIED. u1ReferenceLength None or a Float specifying the reference length associated with constitutive response for the first component of relative motion. The default value is None. u2ReferenceLength None or a Float specifying the reference length associated with constitutive response for the second component of relative motion. The default value is None. u3ReferenceLength None or a Float specifying the reference length associated with constitutive response for the third component of relative motion. The default value is None. ur1ReferenceAngle None or a Float specifying the reference angle in degrees associated with constitutive response for the fourth component of relative motion. The default value is None. ur2ReferenceAngle None or a Float specifying the reference angle in degrees associated with constitutive response for the fifth component of relative motion. The default value is None. ur3ReferenceAngle None or a Float specifying the reference angle in degrees associated with constitutive response for the sixth component of relative motion. The default value is None. massPerLength None or a Float specifying the mass per unit reference length of belt material. This argument is applicable only when *assembledType*=SLIPRING, and must be specified in that case. The default value is None. contactAngle None or a Float specifying the contact angle made by the belt wrapping around node b. This argument is applicable only to an Abaqus/Explicit analysis, and only when *assembledType*=SLIPRING. The default value is None. materialFlowFactor A Float specifying the scaling factor for material flow at node b. This argument is applicable only when *assembledType*=RETRACTOR or *rotationalType*=FLOW_CONVERTER. The default value is 1.0. regularize A Boolean specifying whether or not all tabular data associated with the *behaviorOptions* will be regularized. This argument is applicable only for an Abaqus/Explicit analysis. The default value is ON. 
defaultTolerance A Boolean specifying whether or not the default regularization tolerance will be used for all tabular data associated with the *behaviorOptions*. This argument is applicable only for an Abaqus/Explicit analysis and only if *regularize*=ON. The default value is ON. regularization A Float specifying the regularization increment to be used for all tabular data associated with the *behaviorOptions*. This argument is applicable only for an Abaqus/Explicit analysis and only if *regularize*=ON and *defaultTolerance*=OFF. The default value is 0.03. extrapolation A SymbolicConstant specifying the extrapolation technique to be used for all tabular data associated with the *behaviorOptions*. Possible values are CONSTANT and LINEAR. The default value is CONSTANT. behaviorOptions A ConnectorBehaviorOptionArray object. Raises ------ RangeError """ pass
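# Usage sketch (illustrative only; the model name, section name and constants
# below are assumptions, not taken from the documentation above). Inside an
# Abaqus/CAE session a hinge-type connector section could be created roughly
# like this:
#
#   from abaqusConstants import HINGE, CONSTANT
#
#   hinge_section = mdb.models['Model-1'].ConnectorSection(
#       name='HingeConn',
#       assembledType=HINGE,        # rotationalType/translationalType stay NONE
#       extrapolation=CONSTANT,
#   )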
nilq/baby-python
python
#!/usr/bin/env python # encoding: utf-8 #use nc -u 127.0.0.1 8888 to communicate with the server 1-way """A non-blocking, single-threaded TCP server.""" from __future__ import absolute_import, division, print_function, with_statement import errno import os import socket import ssl import stat import sys from tornado.log import app_log from tornado.ioloop import IOLoop from tornado.iostream import IOStream, SSLIOStream from tornado.netutil import ssl_wrap_socket from tornado import process #from tornado.netutil import set_close_exec #web socket support import tornado.web import tornado.httpserver import tornado.ioloop import tornado.websocket import tornado.options PIPE = None class UDPServer(object): def __init__(self, io_loop=None): self.io_loop = io_loop self._sockets = {} # fd -> socket object self._pending_sockets = [] self._started = False def add_sockets(self, sockets): if self.io_loop is None: self.io_loop = IOLoop.instance() for sock in sockets: self._sockets[sock.fileno()] = sock add_accept_handler(sock, self._on_recive, io_loop=self.io_loop) def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=25): sockets = bind_sockets(port, address=address, family=family, backlog=backlog) if self._started: self.add_sockets(sockets) else: self._pending_sockets.extend(sockets) def start(self, num_processes=1): assert not self._started self._started = True if num_processes != 1: process.fork_processes(num_processes) sockets = self._pending_sockets self._pending_sockets = [] self.add_sockets(sockets) def stop(self): for fd, sock in self._sockets.iteritems(): self.io_loop.remove_handler(fd) sock.close() def _on_recive(self, data, address): print(data) host = address[0] port = address[1] print(host) print(port) if(PIPE): PIPE.write_message(data) #sock = socket.socket( #socket.AF_INET, socket.SOCK_STREAM) #sock.connect((host, port)) #sock.send("abcde\r\n\r\n") def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=25): sockets = [] if address == "": address = None flags = socket.AI_PASSIVE if hasattr(socket, "AI_ADDRCONFIG"): flags |= socket.AI_ADDRCONFIG for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_DGRAM, 0, flags)): af, socktype, proto, canonname, sockaddr = res sock = socket.socket(af, socktype, proto) #set_close_exec(sock.fileno()) if os.name != 'nt': sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if af == socket.AF_INET6: if hasattr(socket, "IPPROTO_IPV6"): sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) #sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) sock.setblocking(0) sock.bind(sockaddr) sockets.append(sock) return sockets if hasattr(socket, 'AF_UNIX'): def bind_unix_socket(file, mode=0o0600, backlog=128): sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) #set_close_exec(sock.fileno()) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setblocking(0) try: st = os.stat(file) except (OSError) as err: if err.errno != errno.ENOENT: raise else: if st.S_ISSOCK(st.st_mode): os.remove(file) else: raise ValueError("File %s exists and is not a socket", file) sock.bind(file) os.chmod(file, mode) sock.listen(backlog) return sock def add_accept_handler(sock, callback, io_loop=None): if io_loop is None: io_loop = IOLoop.instance() def accept_handler(fd, events): while True: try: data, address = sock.recvfrom(2500) except (socket.error) as e: if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN): return raise callback(data, address) io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ) 
LISTEN_PORT = 8000 LISTEN_ADDRESS = '127.0.0.1' class EchoWebSocket(tornado.websocket.WebSocketHandler): def open(self): print("WebSocket opened") global PIPE PIPE = self def on_message(self, message): self.write_message(u"You said: " + message) def on_close(self): print("WebSocket closed") global PIPE PIPE = None def check_origin(self, origin): """ Override the origin check if needed """ return True class ChannelHandler(tornado.websocket.WebSocketHandler): """ Handler that handles a websocket channel """ @classmethod def urls(cls): return [ (r'/web-socket/', cls, {}), # Route/Handler/kwargs ] def initialize(self): self.channel = None def open(self, channel): """ Client opens a websocket """ self.channel = channel def on_message(self, message): """ Message received on channel """ print("Received",message) def on_close(self): """ Channel is closed """ def check_origin(self, origin): """ Override the origin check if needed """ return True server = UDPServer() server.bind(8888) server.start(1) print("Start UDP Server on Port:8888") app = tornado.web.Application([ (r'/web-socket/', EchoWebSocket, {}), # Route/Handler/kwargs ])#ChannelHandler.urls()) # Setup HTTP Server http_server = tornado.httpserver.HTTPServer(app) http_server.listen(8013) print("Start websocket server on port 8013") IOLoop.instance().start()
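# Client sketch (illustrative; addresses match the defaults wired in above).
# Equivalent to the "nc -u 127.0.0.1 8888" hint at the top of this file: any UDP
# datagram sent to port 8888 is forwarded by _on_recive() to the currently open
# websocket (PIPE) served on port 8013.
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto(b"hello over UDP\n", ("127.0.0.1", 8888))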
nilq/baby-python
python
from django.contrib.gis import admin
from leaflet.admin import LeafletGeoAdmin

from world.models import Border, School, Facility, Busstop


class BorderAdmin(LeafletGeoAdmin):
    search_fields = ['n03_001', 'n03_003', 'n03_004']
    list_filter = ('n03_003',)


admin.site.register(Border, BorderAdmin)
admin.site.register(School, LeafletGeoAdmin)
admin.site.register(Facility, LeafletGeoAdmin)
admin.site.register(Busstop, LeafletGeoAdmin)

admin.site.site_title = 'GeoDjango Login'
admin.site.site_header = 'GeoDjango Hands-on'
admin.site.index_title = 'GeoDjango Menu'
nilq/baby-python
python
DATA = b'\x00\x00\x01X\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\r\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x15\x00\x00\x00\x00\x00\x00\x00\x16\x00\x00\x00\x00\x00\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x1b\x00\x00\x00\x00\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00!\x00\x00\x00\x00\x00\x00\x00"\x00\x00\x00\x00\x00\x00\x00#\x00\x00\x00\x00\x00\x00\x00$\x00\x00\x00\x00\x00\x00\x00%\x00\x00\x00\x00\x00\x00\x00&\x00\x00\x00\x00\x00\x00\x00\'\x00\x00\x00\x00\x00\x00\x00(\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00\x00\x00\x00\x00*' def test_report_luns_command(): from infi.os_info import get_platform_string from infi.asi.unix import UnixFile import os if 'ubuntu' not in get_platform_string() or not os.path.exists("/dev/sg1"): # on some of our other environments, sg0 is the cdrom and sg1 is the local disk, and on others it's the # other way around. just test this on Ubuntu only. return from infi.asi.coroutines.sync_adapter import sync_wait from infi.asi.cdb.report_luns import ReportLunsCommand from infi.asi import create_platform_command_executer handle = UnixFile(os.open("/dev/sg1", os.O_RDWR)) executer = create_platform_command_executer(handle) cdb = ReportLunsCommand(select_report=0) result = sync_wait(cdb.execute(executer)) assert result.lun_list != [] assert 0 in result.lun_list def test_report_luns_data(): from infi.asi.cdb.report_luns import ReportLunsData data = ReportLunsData.create_from_string(DATA) assert data.lun_list == [i for i in range(0,43)]
nilq/baby-python
python
# --- # jupyter: # jupytext: # cell_metadata_filter: all,-execution,-papermill,-trusted # formats: ipynb,py//py:percent # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.7.1 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% [markdown] tags=[] # # Description # %% [markdown] tags=[] # It analyzes how consensus partitions generated before agree with the ensemble, and selects the best ones for downstream analyses. # %% [markdown] tags=[] # # Modules loading # %% tags=[] # %load_ext autoreload # %autoreload 2 # %% tags=[] from pathlib import Path from IPython.display import display import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import conf # %% [markdown] tags=[] # # Load consensus clustering results # %% tags=[] CONSENSUS_CLUSTERING_DIR = Path( conf.RESULTS["CLUSTERING_DIR"], "consensus_clustering" ).resolve() display(CONSENSUS_CLUSTERING_DIR) # %% tags=[] input_file = Path(CONSENSUS_CLUSTERING_DIR, "consensus_clustering_runs.pkl").resolve() display(input_file) # %% tags=[] consensus_clustering_results = pd.read_pickle(input_file) # %% tags=[] consensus_clustering_results.shape # %% tags=[] consensus_clustering_results.head() # %% [markdown] tags=[] # # Explore clustering indexes # %% tags=[] _col = "ari_mean" _best_parts_by_ari = ( consensus_clustering_results.groupby("k") .apply(lambda x: x.sort_values(_col, ascending=False).head(1)) .sort_values(_col, ascending=False)[["method", "k", _col]] .rename(columns={_col: "index_value"}) ) # %% tags=[] _col = "ami_mean" _best_parts_by_ami = ( consensus_clustering_results.groupby("k") .apply(lambda x: x.sort_values(_col, ascending=False).head(1)) .sort_values(_col, ascending=False)[["method", "k", _col]] .rename(columns={_col: "index_value"}) ) # %% tags=[] _col = "nmi_mean" _best_parts_by_nmi = ( consensus_clustering_results.groupby("k") .apply(lambda x: x.sort_values(_col, ascending=False).head(1)) .sort_values(_col, ascending=False)[["method", "k", _col]] .rename(columns={_col: "index_value"}) ) # %% tags=[] _indexes_colors = sns.color_palette("colorblind", 3) display(_indexes_colors) # %% tags=[] with sns.plotting_context("talk", font_scale=0.75), sns.axes_style( "whitegrid", {"grid.linestyle": "--"} ): fig, ax = plt.subplots(figsize=(12, 6)) ax = sns.pointplot( data=_best_parts_by_ari, x="k", y="index_value", color=_indexes_colors[0], ci=None, ) ax = sns.pointplot( data=_best_parts_by_ami, x="k", y="index_value", color=_indexes_colors[1], ci=None, ) ax = sns.pointplot( data=_best_parts_by_nmi, x="k", y="index_value", color=_indexes_colors[2], ci=None, ) ax.set_ylabel(f"Agreement with ensemble") ax.set_xlabel("Number of clusters ($k$)") ax.set_xticklabels(ax.get_xticklabels(), rotation=45) plt.legend(labels=["ARI", "AMI", "NMI"]) plt.grid(True) plt.tight_layout() # %% [markdown] tags=[] # AMI and NMI show the same trend for higher `k`. That's surprising. I would have expected that AMI has the same pattern as ARI, since both are adjusted-for-chance, and should not show higher values for higher `k` as it is expected for a not adjusted-for-chance index as NMI. # # **CONCLUSION:** I will pick ARI for the follow up analysis. 
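# %% [markdown] tags=[]
# The cell below is an illustration only (the two label vectors are made up, not
# taken from the clustering results): it shows how the three agreement indexes
# compared above are computed with scikit-learn for a candidate partition versus
# an ensemble reference.

# %% tags=[]
from sklearn.metrics import (
    adjusted_mutual_info_score,
    adjusted_rand_score,
    normalized_mutual_info_score,
)

_example_part = [0, 0, 1, 1, 2, 2]  # hypothetical candidate partition
_example_ens = [0, 0, 1, 1, 1, 2]  # hypothetical ensemble reference

display(
    {
        "ari": adjusted_rand_score(_example_ens, _example_part),
        "ami": adjusted_mutual_info_score(_example_ens, _example_part),
        "nmi": normalized_mutual_info_score(_example_ens, _example_part),
    }
)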
# %% [markdown] tags=[] # # Explore best partition per k # %% tags=[] _selected_measure = "ARI" _mean_column, _median_column = "ari_mean", "ari_median" # %% tags=[] best_parts_by_mean = ( consensus_clustering_results.groupby("k") .apply(lambda x: x.sort_values(_mean_column, ascending=False).head(1)) .sort_values(_mean_column, ascending=False)[["method", "k", _mean_column]] ) display(best_parts_by_mean.head(10)) # %% tags=[] best_parts_by_median = ( consensus_clustering_results.groupby("k") .apply(lambda x: x.sort_values(_median_column, ascending=False).head(1)) .sort_values(_median_column, ascending=False)[["method", "k", _median_column]] ) display(best_parts_by_median.head(10)) # %% tags=[] with sns.plotting_context("talk", font_scale=0.75), sns.axes_style( "whitegrid", {"grid.linestyle": "--"} ): fig, ax = plt.subplots(figsize=(12, 6)) ax = sns.pointplot( data=best_parts_by_mean, x="k", y=_mean_column, ci=None, color=_indexes_colors[0], label="Mean", ) ax = sns.pointplot( data=best_parts_by_median, x="k", y=_median_column, ci=None, color=_indexes_colors[1], label="Median", ax=ax, ) ax.set_ylabel(f"Agreement with ensemble ({_selected_measure})") ax.set_xlabel("Number of clusters ($k$)") ax.set_xticklabels(ax.get_xticklabels(), rotation=45) plt.legend(labels=["Mean", "Median"]) plt.grid(True) plt.tight_layout() # %% [markdown] tags=[] # Both central tendency measures (the mean and the median) have the same behevior: higher agreement on lower/medium k values, and lower agreement on higher k values. # %% [markdown] tags=[] # # Which consensus method performs better? # %% [markdown] tags=[] # For this comparison, I take the partitions with an agreement higher than the 75th percentile. From this set, I count how many times each method won. # %% [markdown] tags=[] # ## Using best by mean # %% tags=[] _stats_data = best_parts_by_mean[_mean_column].describe() display(_stats_data) # %% tags=[] best_parts_by_mean[best_parts_by_mean[_mean_column] > _stats_data["75%"]][ "method" ].value_counts() # %% [markdown] tags=[] # SCC picked the "best partition" 14 times, whereas EAC (hierarhical clustering) did it only once. # %% [markdown] tags=[] # ## Using best by median # %% tags=[] _stats_data = best_parts_by_median[_median_column].describe() display(_stats_data) # %% tags=[] best_parts_by_median[best_parts_by_median[_median_column] > _stats_data["75%"]][ "method" ].value_counts() # %% [markdown] tags=[] # If we use the "best partitions by median", EAC (HC) picked the best one 5 times, whereas SCC did it 10 times. # %% [markdown] tags=[] # **CONCLUSION:** we select SCC as the method for follow up analysis. # %% [markdown] tags=[] # # Select best partition per k # %% tags=[] _selected_stat = "Median" _measure_col = _median_column # %% tags=[] best_parts = ( consensus_clustering_results[ consensus_clustering_results["method"].str.startswith("scc_") ] .groupby("k") .apply(lambda x: x.sort_values(_measure_col, ascending=False).head(1)) .sort_values(_measure_col, ascending=False)[ ["method", "k", "partition", _measure_col] ] ) # %% tags=[] best_parts = best_parts.set_index("k") # %% tags=[] best_parts.shape # %% tags=[] # show partitions with top values best_parts.head(10) # %% tags=[] best_parts.sort_values("k") # %% [markdown] tags=[] # ## Select partitions with highest agreement # %% [markdown] tags=[] # We do not expect all partitions with different `k` to be good ones. Thus, here I select the partitions with an ensemble agreement that pass a relative high threshold (75th percentile). 
# %% tags=[] best_parts_stats = best_parts[_measure_col].describe() display(best_parts_stats) # %% tags=[] best_threshold = best_parts_stats["75%"] best_threshold_description = "75th percentile" display(best_threshold) best_parts = best_parts.assign( selected=best_parts[_measure_col].apply(lambda x: x >= best_threshold) ) # %% tags=[] best_parts.shape # %% tags=[] best_parts.head() # %% [markdown] tags=[] # ## Save best partitions per k # %% tags=[] output_file = Path(CONSENSUS_CLUSTERING_DIR, "best_partitions_by_k.pkl").resolve() display(output_file) # %% tags=[] best_parts.to_pickle(output_file) # %% [markdown] tags=[] # # Plot of selected best partitions # %% tags=[] plot_data = best_parts.reset_index() display(plot_data.head(5)) # %% tags=[] with sns.plotting_context("talk", font_scale=0.75), sns.axes_style( "whitegrid", {"grid.linestyle": "--"} ), sns.color_palette("muted"): current_palette = iter(sns.color_palette()) fig, ax = plt.subplots(figsize=(12, 6)) ax = sns.pointplot( data=plot_data, x="k", y=_measure_col, color=next(current_palette) ) ax.axhline( best_threshold, ls="--", color=next(current_palette), label=best_threshold_description, ) ax.set_ylabel(f"Agreement with ensemble\n({_selected_stat} {_selected_measure})") ax.set_xlabel("Number of clusters ($k$)") ax.set_xticklabels(ax.get_xticklabels(), rotation=45) plt.legend() plt.grid(True) plt.tight_layout() # %% [markdown] tags=[] # The horizontal line in the plot is the median of the average agreement value; partitions above that line are marked as selected for downstream analysis # %% tags=[] # this list shows the selected final partitions, and which methods achieved the highest agreement plot_data[plot_data["selected"]].sort_values("k") # %% [markdown] tags=[] # From the two evidence accumulation approaches (EAC) we are using, the spectral clustering based one does it better for almost all `k` values, whereas the hierarchical clustering based approach seems to do a little bit better for lower `k`. # %% tags=[]
nilq/baby-python
python
# # inputs outputs # single sin # simple sim # solution so # a a # class Node: def __init__(self, val): self.val = val self.children = [0] * 26 self.is_end = False self.word_count = 1 def get_unique_prefixes(words): root = Node(0) root.word_count += 1 cur = root for word in words: cur = root for ch in word: index = ord(ch) - 97 if cur.children[index] == 0: n = Node(ch) cur.children[index] = n cur = n else: cur.word_count += 1 cur = cur.children[index] cur.is_end = True # print root.children[ord('s')-97].word_count output = [] for word in words: prefix = '' cur = root for ch in word: prefix += ch if cur.word_count <= 1: break cur = cur.children[ord(ch) - 97] output.append(prefix) return output words = ['single', 'simple', 'solution', 'a'] print get_unique_prefixes(words) words = ['single', 'simple'] print get_unique_prefixes(words) words = ['abcd', 'geft', 'aaaa'] print get_unique_prefixes(words) words = ['abcd', 'abcx'] print get_unique_prefixes(words) # /usr/bin/python /Users/harsh/giths634/algorithms/python/test.py # ['si', 'si', 'so', 'a'] # ['si', 'si'] # ['a', 'g', 'a'] # ['abc', 'abc']
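# The run log above shows that the prefixes this version produces ('si', 'si' and 'abc', 'abc') are
# not actually unique when words share a prefix. Below is a small self-contained Python 3 sketch of
# the standard shortest-unique-prefix approach (my own variant, not the original author's fix): count
# how many words pass through each trie node, then cut each word at the first node that only one
# word reaches.

class _PrefixNode(object):
    def __init__(self):
        self.children = {}
        self.count = 0  # number of inserted words passing through this node


def shortest_unique_prefixes(words):
    root = _PrefixNode()
    for word in words:
        node = root
        for ch in word:
            node = node.children.setdefault(ch, _PrefixNode())
            node.count += 1

    result = []
    for word in words:
        node = root
        prefix = []
        for ch in word:
            node = node.children[ch]
            prefix.append(ch)
            if node.count == 1:  # only this word reaches this node
                break
        result.append(''.join(prefix))
    return result


# shortest_unique_prefixes(['single', 'simple', 'solution', 'a'])  -> ['sin', 'sim', 'so', 'a']
# shortest_unique_prefixes(['abcd', 'abcx'])                       -> ['abcd', 'abcx']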
nilq/baby-python
python
#!/usr/bin/python3 import json import os from ws_sdk import WS, ws_constants, ws_utilities import logging import sys SCANNER_ID = "ws-gl-int" LICENSE_SCHEMA_V = "2.1" DEPENDENCY_SCHEMA_V = "14.0.2" DEPENDENCY = "dependency" DEPENDENCY_ALERTS_BASED = "dependency_alert_based" LICENSE = "license" VUL_DB_URL = "https://www.whitesourcesoftware.com/vulnerability-database" IS_DEBUG = True if os.environ.get("DEBUG") else False CONCAT_SCOPE_NAME = False LOG_LEVEL = logging.DEBUG if IS_DEBUG else logging.INFO logging.basicConfig(level=LOG_LEVEL, stream=sys.stdout) args = None def parse_args(): import argparse parser = argparse.ArgumentParser(description='WS to GitLab convertor') parser.add_argument('-u', '--userKey', help="WS User Key", dest='ws_user_key', required=True) parser.add_argument('-k', '--token', help="WS Project Token", dest='ws_token', required=True) parser.add_argument('-a', '--wsUrl', help="WS URL", dest='ws_url', default="saas") parser.add_argument('-t', '--conversionType', help="Conversion Type", choices=[LICENSE, DEPENDENCY, DEPENDENCY_ALERTS_BASED], dest='conv_type', required=True) parser.add_argument('-o', '--outputDir', help="Output Dir", dest='output_dir', default=".") return parser.parse_args() def validate_json(json_to_validate: dict): from jsonschema import validate, exceptions as json_exceptions import requests import json if args.conv_type == LICENSE: url = 'https://gitlab.com/gitlab-org/security-products/analyzers/license-finder/-/raw/main/spec/fixtures/schema/v2.1.json' elif args.conv_type.startswith(DEPENDENCY): url = 'https://gitlab.com/gitlab-org/security-products/security-report-schemas/-/raw/master/dist/dependency-scanning-report-format.json' resp = requests.get(url=url) json_schema = json.loads(resp.text) try: validate(instance=json_to_validate, schema=json_schema) except json_exceptions.SchemaError or json_exceptions.ValidationError: logging.exception("Validating failed JSON with schema") return False return True def convert_license(conn): def get_lib_locations(library_location, library): locations = library_location.get('locations') if len(locations): if len(locations) > 1: logging.warning(f"Found {len(library_location['locations'])} locations for lib {library['name']}. 
Using the first one") loc_name = locations[0].get('path') else: logging.warning(f"No locations found for lib {library['name']} ") loc_name = None return loc_name def get_package_manager(language): pkg_man = ws_utilities.get_package_managers_by_language(language) return "unknown" if not pkg_man else pkg_man[0] licenses = {} dependencies = [] libs = conn.get_licenses(token=args.ws_token, full_spdx=True) libs_loc = ws_utilities.convert_dict_list_to_dict(conn.get_library_location(token=args.ws_token), 'keyUuid') for lib in libs: lib_loc = libs_loc[lib['keyUuid']] lics_lib = lib['licenses'] curr_licenses = [] for lic in lics_lib: if lic.get('spdx_license_dict'): gl_lic = {'id': lic['spdx_license_dict']['licenseId'], 'name': lic['spdx_license_dict']['name'], 'url': lic['url']} licenses[gl_lic['id']] = gl_lic curr_licenses.append(lic['spdx_license_dict']['licenseId']) else: logging.warning(f"SPDX data is missing on library {lib['name']} - license: {lic['name']}") dependencies.append({'name': lib['name'], 'version': lib.get('version'), # TODO: ADD METHOD in ws_utilities to break LIB-1.2.3.SFX to GAV 'package_manager': get_package_manager(lib['type']).capitalize(), 'path': get_lib_locations(lib_loc, lib), 'licenses': sorted(curr_licenses)}) return {'version': LICENSE_SCHEMA_V, 'licenses': sorted(list(licenses.values()), key=lambda k: k['id']), 'dependencies': dependencies} def convert_dependency(conn): def convert_to_gl_vul(vulnerability, inventory): def get_solution(): top_fix = vulnerability.get('topFix') if top_fix: ret_fix = vulnerability.get('fixResolutionText', top_fix['fixResolution']) else: ret_fix = "Fix unknown" logging.info(f"No fix found for {vulnerability['name']}") logging.debug(f"Found fix to vulnerability: {vulnerability['name']} Fix: {ret_fix}") return ret_fix name = f"{vulnerability['name']}:{inventory['artifactId']}:{inventory['version']}" url = f"{VUL_DB_URL}/{vulnerability['name']}" gl_vul = {"category": "dependency_scanning", "name": name, "message": f"{vulnerability['name']} in {inventory['name']} - Detected by WhiteSource", "description": vulnerability['description'], "cve": vulnerability['name'], "severity": vulnerability['severity'].capitalize(), "confidence": "Confirmed", "solution": get_solution(), "scanner": {"id": SCANNER_ID, "name": "WhiteSource"}, "location": {"file": inventory['name'], "dependency": {"version": inventory['version'], "package": {"name": inventory['artifactId']}}}, "identifiers": [{"type": "whitesource", "name": name, "value": name, "url": url}], "links": [{"url": url}]} return gl_vul vulnerabilities = [] if args.conv_type == DEPENDENCY: vulnerabilities = conn.get_vulnerability(token=args.ws_token) elif args.conv_type == DEPENDENCY_ALERTS_BASED: security_alerts = conn.get_alerts(alert_type=ws_constants.AlertTypes.SECURITY_VULNERABILITY) for sec_alert in security_alerts: vul = sec_alert['vulnerability'] vul['library'] = sec_alert['library'] vulnerabilities.append(vul) inventory_dict = ws_utilities.convert_dict_list_to_dict(conn.get_inventory(token=args.ws_token), 'keyUuid') gl_vuls = [] for vul in vulnerabilities: lib_uuid = vul['library']['keyUuid'] gl_vul = convert_to_gl_vul(vul, inventory_dict[lib_uuid]) gl_vuls.append(gl_vul) return {'version': DEPENDENCY_SCHEMA_V, 'vulnerabilities': gl_vuls, 'remediations': [], 'dependency_files': []} def main(): global args args = parse_args() ws_conn = WS(url=args.ws_url, user_key=args.ws_user_key, token=args.ws_token, token_type=ws_constants.PROJECT) logging.info(f"Generating {args.conv_type} report") if 
args.conv_type == LICENSE: ret = convert_license(ws_conn) filename = "gl-license-scanning-report.json" elif args.conv_type.startswith(DEPENDENCY): ret = convert_dependency(ws_conn) filename = "gl-dependency-scanning-report.json" if IS_DEBUG: validate_json(ret) if CONCAT_SCOPE_NAME: scope_name = ws_conn.get_scope_name_by_token(token=args.ws_token) for char in [':', '#', '*', '\\']: scope_name = scope_name.replace(char, '_') filename = f"{scope_name}-{filename}" full_path = os.path.join(args.output_dir, filename) logging.debug(f"Saving file to: {full_path}") with open(full_path, 'w') as fp: fp.write(json.dumps(ret)) return ret, filename if __name__ == '__main__': main()
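# Example invocations (added for illustration) using the flags defined in parse_args() above; the
# user key, project token, output directory and the saved file name of this script are placeholders:
#
#   python ws_to_gitlab_convertor.py -u <WS_USER_KEY> -k <WS_PROJECT_TOKEN> -t license -o reports
#   python ws_to_gitlab_convertor.py -u <WS_USER_KEY> -k <WS_PROJECT_TOKEN> -a saas -t dependency -o reports
#
# The first call writes gl-license-scanning-report.json and the second gl-dependency-scanning-report.json
# into the given output directory, matching the filenames chosen in main().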
nilq/baby-python
python
# # Copyright (c) 2014, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. # # import datetime import pytest import time import unittest from atc_thrift.ttypes import TrafficControlledDevice from atcd.access_manager import AccessManager from atc_thrift.ttypes import AccessToken from atcd.access_manager import AccessTokenException from atcd.access_manager import AtcdTOTP from mock import Mock INTERVAL = 60 @pytest.fixture def control_allowed(): return { ('1.1.1.1', '2.2.2.1'): 20, ('1.1.1.2', '2.2.2.2'): 5, ('1.1.1.1', '2.2.2.4'): 15, ('1.1.1.1', '2.2.2.5'): 5, ('1.1.1.3', '2.2.2.1'): 5, ('1.1.1.4', '2.2.2.1'): 15, } @pytest.fixture def ip_to_totp_map(): return { '2.2.2.1': { 'totp': AtcdTOTP(s='12345', interval=60), 'duration': 15, }, '2.2.2.2': { 'totp': AtcdTOTP(s='12345', interval=60), 'duration': 5, }, } @pytest.fixture def am(): return AccessManager() @pytest.fixture def fake_am(am, control_allowed, ip_to_totp_map): am._control_allowed = control_allowed am._ip_to_totp_map = ip_to_totp_map return am @pytest.fixture def fail_verify(monkeypatch): monkeypatch.setattr(AtcdTOTP, 'verify', Mock(return_value=False)) @pytest.fixture def succeed_verify(monkeypatch): monkeypatch.setattr(AtcdTOTP, 'verify', Mock(return_value=True)) def _make_device(controlling, controlled=None): return TrafficControlledDevice( controllingIP=controlling, controlledIP=controlled ) def _make_token(token): return AccessToken(token=token) class TestAtcdTOTP(unittest.TestCase): interval = 30 s = 'wrn3pqx5uqxqvnqr' def test_valid_until(self): t = 1297553958 endtime30s = 1297553970 endtime10s = 1297553960 with Timecop(t): totp = AtcdTOTP(interval=30, s=self.s) dt = datetime.datetime.fromtimestamp(t) self.assertEqual( datetime.datetime.fromtimestamp(endtime30s), totp.valid_until(dt) ) totp = AtcdTOTP(interval=10, s=self.s) dt = datetime.datetime.fromtimestamp(t) self.assertEqual( datetime.datetime.fromtimestamp(endtime10s), totp.valid_until(dt) ) assert True class TestAccessManager(): def setup_method(self, method): def mocktime(): return 10 self._old_time = time.time time.time = mocktime def teardown_method(self, method): time.time = self._old_time def test_generate_token(self, fake_am): l = len(fake_am._ip_to_totp_map.keys()) fake_am.generate_token('1.1.1.1', 10) assert len(fake_am._ip_to_totp_map.keys()) == l+1 fake_am.generate_token('1.1.1.1', 30) assert len(fake_am._ip_to_totp_map.keys()) == l+1 def test_controlled_by_existing(self, fake_am): controlling_by = fake_am.get_devices_controlled_by('1.1.1.1') assert len(controlling_by) == 2 def test_controlled_by_non_existent(self, fake_am): controlling_by = fake_am.get_devices_controlled_by('3.3.3.3') assert len(controlling_by) == 0 def test_controlling_existing(self, fake_am): controlling_by = fake_am.get_devices_controlling('2.2.2.1') assert len(controlling_by) == 2 def test_controlling_non_existent(self, fake_am): controlling_by = fake_am.get_devices_controlling('3.3.3.3') assert len(controlling_by) == 0 def test_access_allowed_controlling_ip_none(self, fake_am): # controllingIP = None assert not fake_am.access_allowed(_make_device(None, '2.2.2.5')) # Allowed in non-secure mode fake_am.secure = False assert fake_am.access_allowed(_make_device(None, '2.2.2.5')) def test_access_allowed_valid(self, fake_am): # valid entry dev = TrafficControlledDevice( 
controllingIP='1.1.1.1', controlledIP='2.2.2.1' ) assert fake_am.access_allowed(dev) def test_access_allowed_non_existent(self, fake_am): # entry does not exist dev = TrafficControlledDevice( controllingIP='1.1.1.1', controlledIP='2.2.2.2' ) assert not fake_am.access_allowed(dev) # Allowed in non-secure mode fake_am.secure = False assert fake_am.access_allowed(dev) def test_access_allowed_expired(self, fake_am): # expired entry dev = TrafficControlledDevice( controllingIP='1.1.1.1', controlledIP='2.2.2.5' ) assert not fake_am.access_allowed(dev) # Allowed in non-secure mode fake_am.secure = False assert fake_am.access_allowed(dev) def test_access_allowed_self(self, fake_am): # expired entry dev = TrafficControlledDevice( controllingIP='1.1.1.1', controlledIP='1.1.1.1' ) assert fake_am.access_allowed(dev) def test_validate_token_valid(self, fake_am, succeed_verify): fake_am.validate_token( _make_device('1.1.1.1', '2.2.2.1'), _make_token('12345'), ) def test_validate_token_invalid(self, fake_am, fail_verify): with pytest.raises(AccessTokenException) as excinfo: fake_am.validate_token( _make_device('1.1.1.1', '2.2.2.1'), _make_token('12344'), ) assert str(excinfo.value) == 'Access denied for device pair' # FIXME, this is not really handling expiration properly def test_validate_token_expired_valid(self, fake_am, fail_verify): with pytest.raises(AccessTokenException) as excinfo: fake_am.validate_token( _make_device('1.1.1.2', '2.2.2.2'), _make_token('12345'), ) assert str(excinfo.value) == 'Access denied for device pair' # FIXME, this is not really handling expiration properly def test_validate_token_expired_invalid(self, fake_am, fail_verify): with pytest.raises(AccessTokenException) as excinfo: fake_am.validate_token( _make_device('1.1.1.2', '2.2.2.2'), _make_token('12344'), ) assert str(excinfo.value) == 'Access denied for device pair' def test_validate_token_non_existent(self, fake_am): with pytest.raises(AccessTokenException) as excinfo: fake_am.validate_token( _make_device('1.1.1.2', '2.2.2.0'), _make_token('12344'), ) assert str(excinfo.value) == \ '''That remote device hasn't generated a code yet''' # Directly copied from https://github.com/nathforge/pyotp/blob/master/test.py class Timecop(object): """ Half-assed clone of timecop.rb, just enough to pass our tests. """ def __init__(self, freeze_timestamp): self.freeze_timestamp = freeze_timestamp def __enter__(self): self.real_datetime = datetime.datetime datetime.datetime = self.frozen_datetime() def __exit__(self, type, value, traceback): datetime.datetime = self.real_datetime def frozen_datetime(self): class FrozenDateTime(datetime.datetime): @classmethod def now(cls): return cls.fromtimestamp(timecop.freeze_timestamp) timecop = self return FrozenDateTime
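# A minimal sketch (not the atcd implementation) of the arithmetic asserted by
# TestAtcdTOTP.test_valid_until above: a code generated at time t is treated as valid until the end
# of the TOTP interval containing t, i.e. the timestamp rounded up to the next interval boundary.

def _interval_end(ts, interval):
    """Unix timestamp at which the TOTP interval containing `ts` ends."""
    return (ts // interval) * interval + interval


# The same values the test uses: 1297553958 -> 1297553970 for a 30s interval, 1297553960 for 10s.
assert _interval_end(1297553958, 30) == 1297553970
assert _interval_end(1297553958, 10) == 1297553960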
nilq/baby-python
python
# Copyright 2020 Francesco Ceccon # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List import numpy as np import pyomo.environ as pe from pyomo.core.base.var import _GeneralVarData from pyomo.core.expr.calculus.derivatives import differentiate from pooling_network.network import Network from pooling_network.pooling import ( compute_beta_kl_bounds, compute_gamma_kl_bounds, problem_pool_output_qualities, ) from pooling_network.inequalities import _generate_pooling_inequalities def _gradient_cut_if_violated(block: pe.Block, expr, atol: float, diff_vars: List[_GeneralVarData]): expr_value = pe.value(expr) if True or not np.isclose(expr_value, 0.0, atol=atol) and expr_value > 0: diff_map = differentiate(expr, wrt_list=diff_vars) cut_expr = pe.value(expr) + sum(diff_map[i] * (v - pe.value(v)) for i, v in enumerate(diff_vars)) return cut_expr <= 0 return None def _generate_valid_cuts(block: pe.Block, parent: pe.Block, pool_name: str, output_name: str, quality_name: str, problem: Network, violation_threshold=1e-5): s = block.s[pool_name, output_name] y = block.y[output_name, quality_name, pool_name] t = block.t[output_name, quality_name, pool_name] var_cone = block.cut_var_cone[output_name, quality_name, pool_name] var_v = block.cut_var_v[output_name, quality_name, pool_name] gamma_lower, gamma_upper = compute_gamma_kl_bounds( pool_name, output_name, quality_name, problem ) beta_lower, beta_upper = compute_beta_kl_bounds( pool_name, output_name, quality_name, problem ) cut_info = { 'pool': pool_name, 'output': output_name, 'quality': quality_name, 'gamma_lower': gamma_lower, 'gamma_upper': gamma_upper, 'beta_lower': beta_lower, 'beta_upper': beta_upper, } if gamma_lower is None or gamma_upper is None or beta_lower is None or beta_upper is None: return assert gamma_lower is not None and gamma_upper is not None assert beta_lower is not None and beta_upper is not None if beta_lower < 0: # Generate cut based on Equation 15 var_v_value = pe.value(var_v, exception=False) s_value = pe.value(s, exception=False) var_cone_value = pe.value(var_cone, exception=False) if var_v_value is None or s_value is None or var_cone_value is None: return if np.isclose(s_value * var_cone_value, 0.0): return viol_cone = var_v_value - np.sqrt(s_value * var_cone_value) cut_info['type'] = 'cone' cut_info['viol'] = viol_cone if viol_cone > violation_threshold: # add cut for var_v^2 - s*var_cone <= 0 prod_value = s_value * var_cone_value # Deal with numerical issues near the top of the cone if s_value > 0.001 or prod_value > 1e-6: s_sep_value = s_value else: s_sep_value = 0.001001 if var_cone_value > 0.001 or prod_value > 1e-6: var_cone_sep_value = var_cone_value else: var_cone_sep_value = 0.001001 # Recompute prod_val with new values prod_value = s_sep_value * var_cone_sep_value if prod_value > 1e-6: # Add cut! 
eq_value = var_v_value - np.sqrt(prod_value) deq_dvar_v = 1.0 deq_ds = -0.5 * var_cone_value * (1/np.sqrt(prod_value)) deq_dvar_cone = -0.5 * s_value * (1/np.sqrt(prod_value)) expr = ( eq_value + deq_dvar_v * (var_v - var_v_value) + deq_ds * (s - s_value) + deq_dvar_cone * (var_cone - var_cone_value) ) yield expr <= 0, cut_info if beta_upper > 0 and gamma_lower < 0 and pe.value(y) > 0: # Generate cut based on Equation 18 s_value = pe.value(s, exception=False) t_value = pe.value(t, exception=False) y_value = pe.value(y, exception=False) var_v_value = pe.value(var_v, exception=False) if s_value is None or t_value is None or y_value is None or var_v_value is None: return viol = ( beta_upper * t_value + (gamma_upper - gamma_lower)*(beta_upper * s_value + y_value) - (beta_upper - gamma_lower)*var_v_value - gamma_lower * ( (var_v_value**2.0) / (y_value + var_v_value) ) - beta_upper * gamma_upper ) cut_info['type'] = 'nonlinear' cut_info['viol'] = viol if viol > violation_threshold: # Add cut! eq_value = ( beta_upper*t_value + (gamma_upper - gamma_lower)*(beta_upper*s_value + y_value) - (beta_upper - gamma_lower)*var_v_value - gamma_lower * ( (var_v_value**2.0) / (y_value + var_v_value) ) ) deq_dt = beta_upper deq_ds = (gamma_upper - gamma_lower)*beta_upper deq_dy = (gamma_upper - gamma_lower) - (-gamma_upper*( (var_v_value**2.0) / (y_value + var_v_value))) deq_dvar_v = -(beta_upper - gamma_lower) - gamma_lower*( 2*var_v_value/(y_value + var_v_value) - (var_v_value**2.0)/(y_value + var_v_value) ) expr = ( eq_value + deq_dt * (t - t_value) + deq_ds * (s - s_value) + deq_dy * (y - y_value) + deq_dvar_v * (var_v - var_v_value) - beta_upper*gamma_upper ) yield expr <= 0, cut_info def generate_valid_cuts(block: pe.Block, parent: pe.Block, problem: Network, violation_threshold=1e-5): for pool_name, output_name, quality_name in problem_pool_output_qualities(problem): yield from _generate_valid_cuts(block, parent, pool_name, output_name, quality_name, problem, violation_threshold) def add_valid_cuts(block: pe.Block, parent: pe.Block, problem: Network, violation_threshold: float = 1e-5, add_inequalities: bool = False): all_cuts_info = [] for cut, cut_info in generate_valid_cuts(block, parent, problem, violation_threshold): block._cuts.add(cut) all_cuts_info.append(cut_info) if add_inequalities: for pool_name, output_name, quality_name in problem_pool_output_qualities(problem): for cut, cut_info in _generate_pooling_inequalities(block, parent, pool_name, output_name, quality_name, problem, violation_threshold=violation_threshold): block._cuts.add(cut) all_cuts_info.append(cut_info) return all_cuts_info
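# A small numeric sketch (NumPy only, no Pyomo; values made up) of the outer-approximation cut that
# _generate_valid_cuts builds for the cone term f(v, s, w) = v - sqrt(s * w) <= 0: the cut is the
# first-order expansion of f at the current point. For s, w > 0 the function f is convex, so the
# expansion under-estimates f; requiring "expansion <= 0" therefore keeps every feasible point while
# cutting off the violating point it was built at.
import numpy as np


def _cone_linearization(v0, s0, w0):
    """Constant term and gradient of v - sqrt(s*w) at (v0, s0, w0), assuming s0 * w0 > 0."""
    root = np.sqrt(s0 * w0)
    f0 = v0 - root
    grad = np.array([1.0, -0.5 * w0 / root, -0.5 * s0 / root])  # d/dv, d/ds, d/dw
    return f0, grad


def _cut_value(point, expansion_point):
    f0, grad = _cone_linearization(*expansion_point)
    return f0 + grad @ (np.asarray(point) - np.asarray(expansion_point))


if __name__ == "__main__":
    x0 = (0.9, 0.5, 1.0)                      # violating point: 0.9 - sqrt(0.5) > 0
    assert _cut_value(x0, x0) > 0             # the new cut separates the point it was generated at
    x_feasible = (0.5, 0.5, 1.0)              # feasible point: 0.5 - sqrt(0.5) < 0
    assert _cut_value(x_feasible, x0) <= 0    # ...without cutting off this feasible point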
nilq/baby-python
python
from .UserRepository import destroy, get_all, get_one_user from .AuthenticationRepository import get_one, create, forgot_password, reset_password from .WorkoutRepository import get_all, create, get_one, delete, update_one from .ExerciseRepository import get_all, get_one, create, update_one, delete from .SessionRepository import get_all, get_one_session, create
nilq/baby-python
python
import pygame, sys from pygame.locals import QUIT pygame.init() display_surface = pygame.display.set_mode((400, 300)) font = pygame.font.Font(pygame.font.get_default_font(), 32) text = font.render('Hello World', True, (0, 0, 0)) textRect = text.get_rect() while True: display_surface.fill((255, 255, 255)) display_surface.blit(text, textRect) for event in pygame.event.get(): if event.type == QUIT: pygame.quit() sys.exit() pygame.display.update()
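# Note (added): a freshly created Rect sits at (0, 0), so the text above is drawn in the top-left
# corner of the 400x300 window. Centring it uses the same pygame APIs and takes one extra line:
#
#     textRect.center = display_surface.get_rect().center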
nilq/baby-python
python
import sys import matplotlib.pyplot as plt import numpy as np import pandas as pd from joblib import Parallel, delayed import cocpit import cocpit.pic as pic sys.path.append("../") import multiprocessing # noqa def create_ellipse(campaign, desired_size, file): if campaign == "OLYMPEX": image = pic.Image("../cpi_data/campaigns/" + campaign + "/single_imgs2/", file) else: image = pic.Image("../cpi_data/campaigns/" + campaign + "/single_imgs/", file) image.resize_stretch(desired_size) image.find_contours() return image.create_ellipse() df_all = pd.DataFrame() campaigns = [ "ARM", "CRYSTAL_FACE_NASA", "CRYSTAL_FACE_UND", "ICE_L", "MIDCIX", "MPACE", "OLYMPEX", ] desired_size = 1000 num_cores = multiprocessing.cpu_count() phi_ellipses = [] campaign_names = [] for campaign in campaigns: print(campaign) df = pd.read_csv("../final_databases/no_mask/" + campaign + ".csv") df = df[(df["classification"] != "blurry") & (df["classification"] != "sphere")] phi_ellipse = Parallel(n_jobs=10)( delayed(create_ellipse)(campaign, desired_size, file) for file in df["filename"] ) df.insert(16, "phi_ellipse", phi_ellipse) df.insert(0, "campaign", [campaign] * len(df)) df_all = df_all.append(df) print("done") df_all.to_csv("../final_databases/no_mask/all_campaigns.csv", index=False)
nilq/baby-python
python
from rest_framework.permissions import BasePermission class SearchPermissions(BasePermission): """ DRF permission class that checks that the user has at least one of the permissions in the view_permissions attribute on the search app. """ is_export = False def has_permission(self, request, view): """ Return `True` if permission is granted `False` otherwise. """ return has_permissions_for_app(request, view.search_app, is_export=self.is_export) class SearchAndExportPermissions(SearchPermissions): """ DRF permission class that checks that the user has at least one of the permissions in the view_permissions attribute (on the search app), and additionally has the permission in export_permission attribute (on the search app). """ is_export = True def has_permissions_for_app(request, search_app, is_export=False): """ Checks if the user has permission to search for records related to a search app. This is done by checking if the user has at least one of the permissions in the view_permissions attribute on the search app. If is_export is True, the user must also have the permission in the export_permission attribute on the search app. """ user = request.user has_view_permission = user and any( user.has_perm(permission) for permission in search_app.view_permissions ) if is_export: return has_view_permission and user.has_perm(search_app.export_permission) return has_view_permission
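# A sketch (added for illustration) of how these permission classes are meant to be wired up; the
# view, app and permission strings below are examples, not names taken from this codebase. The view
# exposes a `search_app` whose `view_permissions` / `export_permission` attributes drive the checks
# implemented above.
from rest_framework.views import APIView


class ExampleSearchApp:
    """Stand-in for a search app config object."""

    view_permissions = ('company.view_company',)   # any one of these grants search access
    export_permission = 'company.export_company'   # additionally required for exports


class ExampleSearchExportView(APIView):
    search_app = ExampleSearchApp
    permission_classes = (SearchAndExportPermissions,)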
nilq/baby-python
python
#!/usr/bin/env python # # Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ """Simple, extendable, mockable Python client for Google Storage. This module only depends on standard Python libraries. It is intended to provide a set of base client classes with all critical features implemented. Advanced features can be added by extending the classes. Or, it can be used as-is. Installation: Put this script in your python path. Usage: 1) Get a Google Storage account and credentials. 2) Put this script in your Python path. 2) Decide how you will store your credentials (private file, environment variables, etc...). 3) Create a GsClient or child instance, passing credentials to constructor. 4) Use the relevant functions on the client URL Encoding: Users of this module do not need to URL encode/decode any request arguments or response results. Object names and query parameters may contain characters that are illegal URL characters. So, all object name and query parameter values are percent encoded by this module before sending the request. This is important to understand since you do not want to encode your strings twice. It is also important to understand that all object names and prefixes found in ListBucketResult responses will not be encoded. Handling Errors: Google Storage service errors will be raised as GsError exceptions. Other connection errors may get raised as httplib.HTTPException errors. Windows Considerations: When opening files, you must specify binary mode, like this: infile = open(filename, 'rb') outfile = open(filename, 'wb') Example where credentials are in GS_ACCESS and GS_SECRET env vars: $ python >>> import os >>> import gslite >>> gs_access = os.environ['GS_ACCESS'] >>> gs_secret = os.environ['GS_SECRET'] >>> bucket = 'my_super_cool_bucket_name' >>> filename = 'hello.txt' >>> client = gslite.GsClient(access_key=gs_access, secret=gs_secret) >>> client.put_bucket(bucket) >>> infile = open(filename) >>> client.put_object(bucket, filename, infile) >>> infile.close() >>> client.get_bucket(bucket).get_keys() ['hello.txt'] >>> client.delete_object(bucket, filename) >>> client.delete_bucket(bucket) """ __version__ = '1.0' import base64 import hashlib import hmac import httplib import logging import os import StringIO import time import urllib import urlparse import xml.dom.minidom # Success and retryable status codes. REDIRECT_CODES = (301, 302, 303, 307) DEFAULT_SUCCESS_CODES = (200,) DEFAULT_RETRYABLE_CODES = (408, 500, 502, 503, 504) GET_OBJECT_SUCCESS_CODES = (200, 206) DEL_BUCKET_SUCCESS_CODES = (204,) DEL_BUCKET_RETRYABLE_CODES = (404, 408, 409, 500, 502, 503, 504) DEL_OBJECT_SUCCESS_CODES = (204,) class GsError(Exception): """Base error for all client errors. Instance data: msg: error message operations: list of operations associated with error """ def __init__(self, msg, operations): """GsError constructor. 
Args: msg: message string operations: list of operations associated with error. """ self.msg = msg self.operations = operations def __str__(self): """Convert instance to loggable string.""" s = StringIO.StringIO() s.write('GsError: %s' % self.msg) for i in xrange(len(self.operations)): s.write('\n\nOPERATION %d:' % i) s.write('\n%s' % self.operations[i]) return s.getvalue() class GsXmlBase(object): """Base XML oject parser/generator.""" @staticmethod def value_from_elems(elems): """Returns a child node text value in the last element in elems. Args: elems: A list of Element objects from the xml.dom.minidom module. Returns: String value of last node or empty string if not found. """ ret = '' if elems: child_nodes = elems[-1].childNodes if child_nodes: ret = child_nodes[-1].nodeValue return str(ret) @staticmethod def add_text_node(dom, parent_node, node_name, node_text): """Adds a simple text node to a parent node. Args: dom: dom object from xml.dom.minidom module. parent_node: Parent Node object from the xml.dom.minidom module. node_name: Name of new child node node_text: Text content of new node. """ elem = dom.createElement(node_name) text = dom.createTextNode(node_text) elem.appendChild(text) parent_node.appendChild(elem) class GsAccessControlList(GsXmlBase): """AccessControlList XML parser/generator. See the Google Storage API documentation for more information about the AccessControlList XML specification. Instance data: owner_id: owner google storage id as string owner_name: owner name as string entries: list of GsAccessControlList.Entry instances """ class Entry(object): """Entry class corresponding to like named element. Instance data: permission: permission as string ('READ', 'WRITE', etc...) scope_type: scope type as string ('UserById', etc...) scope_user_id: scope user google storage id as string scope_user_name: scope user name as string scope_email: scope user email address as string scope_domain: scope domain as string """ def __init__(self, permission='', scope_type='', scope_user_id='', scope_user_name='', scope_email='', scope_domain=''): """Entry Constructor. Args: permission: permission as string ('READ', 'WRITE', etc...) scope_type: scope type as string ('UserById', etc...) scope_user_id: scope user google storage id as string scope_user_name: scope user name as string scope_email: scope user email address as string scope_domain: scope domain as string """ self.permission = permission self.scope_type = scope_type self.scope_user_id = scope_user_id self.scope_user_name = scope_user_name self.scope_email = scope_email self.scope_domain = scope_domain def __init__(self, owner_id='', owner_name=''): """GsAccessControlList Constructor. Args: owner_id: owner google storage id as string owner_name: owner name as string """ self.owner_id = owner_id self.owner_name = owner_name self.entries = [] def add_entry(self, permission='', scope_type='', scope_user_id='', scope_user_name='', scope_email='', scope_domain=''): """Adds an entry to the acl. Args: permission: permission as string ('READ', 'WRITE', etc...) scope_type: scope type as string ('UserById', etc...) 
scope_user_id: scope user google storage id as string scope_user_name: scope user name as string scope_email: scope user email address as string scope_domain: scope domain as string """ self.entries.append(GsAccessControlList.Entry( permission=permission, scope_type=scope_type, scope_user_id=scope_user_id, scope_user_name=scope_user_name, scope_email=scope_email, scope_domain=scope_domain)) def parse_xml(self, xml_str): """Parses the given xml string to this object. Args: xml_str: AccessControlList XML as string """ self.owner_id = '' self.owner_name = '' self.entries = [] dom = xml.dom.minidom.parseString(xml_str) owner_elems = dom.getElementsByTagName('Owner') for owner_elem in owner_elems: self.owner_id = self.value_from_elems( owner_elem.getElementsByTagName('ID')) self.owner_name = self.value_from_elems( owner_elem.getElementsByTagName('Name')) entries_elems = dom.getElementsByTagName('Entries') for entries_elem in entries_elems: entry_elems = entries_elem.getElementsByTagName('Entry') for entry_elem in entry_elems: entry = GsAccessControlList.Entry() entry.permission = self.value_from_elems( entry_elem.getElementsByTagName('Permission')) scope_elems = entry_elem.getElementsByTagName('Scope') for scope_elem in scope_elems: entry.scope_type = scope_elem.getAttribute('type') entry.scope_user_id = self.value_from_elems( scope_elem.getElementsByTagName('ID')) entry.scope_user_name = self.value_from_elems( scope_elem.getElementsByTagName('Name')) entry.scope_email = self.value_from_elems( scope_elem.getElementsByTagName('EmailAddress')) entry.scope_domain = self.value_from_elems( scope_elem.getElementsByTagName('Domain')) self.entries.append(entry) def to_xml(self, pretty=False): """Translates this acl object to XML string. Args: pretty: if True, output will use dom.toprettyxml Returns: AccessControlList XML as string """ impl = xml.dom.minidom.getDOMImplementation() dom = impl.createDocument(None, 'AccessControlList', None) top_elem = dom.documentElement if self.owner_id or self.owner_name: owner_elem = dom.createElement('Owner') if self.owner_id: self.add_text_node(dom, owner_elem, 'ID', self.owner_id) if self.owner_name: self.add_text_node(dom, owner_elem, 'Name', self.owner_name) top_elem.appendChild(owner_elem) if self.entries: entries_elem = dom.createElement('Entries') for entry in self.entries: entry_elem = dom.createElement('Entry') if entry.permission: self.add_text_node(dom, entry_elem, 'Permission', entry.permission) if (entry.scope_type or entry.scope_user_id or entry.scope_user_name or entry.scope_email or entry.scope_domain): scope_elem = dom.createElement('Scope') if entry.scope_type: scope_elem.setAttribute('type', entry.scope_type) if entry.scope_user_id: self.add_text_node(dom, scope_elem, 'ID', entry.scope_user_id) if entry.scope_user_name: self.add_text_node(dom, scope_elem, 'Name', entry.scope_user_name) if entry.scope_email: self.add_text_node(dom, scope_elem, 'EmailAddress', entry.scope_email) if entry.scope_domain: self.add_text_node(dom, scope_elem, 'Domain', entry.scope_domain) entry_elem.appendChild(scope_elem) entries_elem.appendChild(entry_elem) top_elem.appendChild(entries_elem) if pretty: return dom.toprettyxml(indent=' ') return dom.toxml() class GsListAllMyBucketsResult(GsXmlBase): """ListAllMyBucketsResult XML parser. See the Google Storage API documentation for more information about the ListAllMyBucketsResult XML specification. 
Instance data: owner_id: owner google storage id as string owner_display_name: owner name as string bucket_list: list of GsListAllMyBucketsResult.Bucket instances """ class Bucket(object): """Bucket class corresponding to like named element. Instance data: name: bucket name as string creation_date: bucket creation date as string """ def __init__(self): """Bucket constructor.""" self.name = '' self.creation_date = '' def __init__(self): """GsListAllMyBucketsResult constructor.""" self.owner_id = '' self.owner_display_name = '' self.bucket_list = [] def parse_xml(self, xml_str): """Parses the given xml string to this object. Args: xml_str: ListAllMyBucketsResult XML as string """ self.owner_id = '' self.owner_display_name = '' self.bucket_list = [] dom = xml.dom.minidom.parseString(xml_str) owner_elems = dom.getElementsByTagName('Owner') for owner_elem in owner_elems: self.owner_id = self.value_from_elems( owner_elem.getElementsByTagName('ID')) self.owner_display_name = self.value_from_elems( owner_elem.getElementsByTagName('DisplayName')) buckets_elems = dom.getElementsByTagName('Buckets') for buckets_elem in buckets_elems: bucket_elems = buckets_elem.getElementsByTagName('Bucket') for bucket_elem in bucket_elems: bucket = GsListAllMyBucketsResult.Bucket() bucket.name = self.value_from_elems( bucket_elem.getElementsByTagName('Name')) bucket.creation_date = self.value_from_elems( bucket_elem.getElementsByTagName('CreationDate')) self.bucket_list.append(bucket) def get_bucket_names(self): """Returns the list of bucket names from self.bucket_list.""" return [b.name for b in self.bucket_list] class GsListBucketResult(GsXmlBase): """ListBucketResult XML parser. See the Google Storage API documentation for more information about the ListBucketResult XML specification. Instance data: name: bucket name as string prefix: prefix specified in request as string marker: marker specified in request as string is_truncated: "true" if all objects in bucket were returned contents_list: list of GsListBucketResult.Contents instances common_prefixes: list of <CommonPrefixes>.<Prefix> names as strings """ class Contents(object): """Contents class corresponding to like named element. Instance data: key: object name as string last_modified: time object last modified as string etag: object data etag value as string size: object size as string storage_class: object storage class as string owner_id: object owner google storage id as string owner_display_name: object owner name as string """ def __init__(self): """Contents constructor.""" self.key = '' self.last_modified = '' self.etag = '' self.size = '' self.storage_class = '' self.owner_id = '' self.owner_display_name = '' def __init__(self): """GsListBucketResult constructor.""" self.name = '' self.prefix = '' self.marker = '' self.is_truncated = '' self.contents_list = [] self.common_prefixes = [] def parse_xml(self, xml_str): """Parses the given xml string to this object. 
Args: xml_str: ListBucketResult XML as string """ self.contents_list = [] self.common_prefixes = [] dom = xml.dom.minidom.parseString(xml_str) self.name = self.value_from_elems(dom.getElementsByTagName('Name')) self.prefix = self.value_from_elems(dom.getElementsByTagName('Prefix')) self.marker = self.value_from_elems(dom.getElementsByTagName('Marker')) self.is_truncated = self.value_from_elems( dom.getElementsByTagName('IsTruncated')) contents_elems = dom.getElementsByTagName('Contents') for contents_elem in contents_elems: contents = GsListBucketResult.Contents() contents.key = self.value_from_elems( contents_elem.getElementsByTagName('Key')) contents.last_modified = self.value_from_elems( contents_elem.getElementsByTagName('LastModified')) contents.etag = self.value_from_elems( contents_elem.getElementsByTagName('ETag')) contents.size = self.value_from_elems( contents_elem.getElementsByTagName('Size')) contents.storage_class = self.value_from_elems( contents_elem.getElementsByTagName('StorageClass')) owner_elems = contents_elem.getElementsByTagName('Owner') for owner_elem in owner_elems: contents.owner_id = self.value_from_elems( owner_elem.getElementsByTagName('ID')) contents.owner_display_name = self.value_from_elems( owner_elem.getElementsByTagName('DisplayName')) self.contents_list.append(contents) common_prefixes_elems = dom.getElementsByTagName('CommonPrefixes') for common_prefixes_elem in common_prefixes_elems: prefix_elems = common_prefixes_elem.getElementsByTagName('Prefix') for prefix_elem in prefix_elems: self.common_prefixes.append(prefix_elem.childNodes[0].nodeValue) def get_keys(self): """Returns the list of object names found in self.contents_list.""" return [c.key for c in self.contents_list] class GsOperation(object): """Class to hold the important details of an HTTP request and response. Instance data: connection_host: host name connected to as string connection_port: host port connected to as int request_method: http request method ('GET', 'PUT', etc...) as string request_path_and_query: request URL path and query as string request_headers: request headers as dict response_status: response http status as int response_headers: response headers as dict response_error_body: response error body as string """ def __init__(self): """GsOperation constructor.""" self.connection_host = '' self.connection_port = 80 self.request_method = '' self.request_path_and_query = '' self.request_headers = None self.response_status = 0 self.response_headers = None self.response_error_body = None def __str__(self): """Convert instance to loggable string.""" s = StringIO.StringIO() s.write('REQUEST:') s.write('\nSent to host: %s:%d' % (self.connection_host, self.connection_port)) s.write('\n%s %s' % (self.request_method, self.request_path_and_query)) if self.request_headers: for k, v in self.request_headers.iteritems(): s.write('\n%s: %s' % (k, v)) s.write('\nRESPONSE:') s.write('\n%d' % self.response_status) if self.response_headers: for k, v in self.response_headers.iteritems(): s.write('\n%s: %s' % (k, v)) if self.response_error_body: s.write('\n') s.write(self.response_error_body) return s.getvalue() class GsClient(object): """Google Storage client. 
Instance data: access_key: google storage access key as string for authentication secret: google storage secret key as string for authentication host: google storage host as string proxy_host: optional proxy host proxy_port: optional proxy port auth_id: authentication type as string max_retries: max num retries for retryable errors max_redirects: max num redirects to follow operations: list of GsOperation instances for most recent request Note that each retry or redirection will append to this list. backoff_exponent: current backoff exponent during failures """ def __init__(self, access_key=None, secret=None, host='commondatastorage.googleapis.com', proxy_host=None, proxy_port=80, auth_id='GOOG1', max_retries=5, max_redirects=10): """GsClient constructor. Args: access_key: google storage access key as string for authentication secret: google storage secret key as string for authentication host: google storage host as string proxy_host: optional proxy host proxy_port: optional proxy port auth_id: authentication type as string max_retries: max num retries for retryable errors max_redirects: max num redirects to follow """ self.access_key = access_key self.secret = secret self.host = host self.proxy_host = proxy_host self.proxy_port = proxy_port self.auth_id = auth_id self.max_retries = max_retries self.max_redirects = max_redirects self.operations = [] self.backoff_exponent = -1 def get_service(self): """GET Service. Returns: GsListAllMyBucketsResult instance """ outfile = StringIO.StringIO() self.send_request('GET', outfile=outfile) result = GsListAllMyBucketsResult() result.parse_xml(outfile.getvalue()) return result def get_bucket(self, bucket, query_parameters=None): """GET Bucket. Args: bucket: bucket name as string query_parameters: query parameters as dict Returns: GsListBucketResult instance """ outfile = StringIO.StringIO() self.send_request('GET', bucket=bucket, outfile=outfile, query_parameters=query_parameters) result = GsListBucketResult() result.parse_xml(outfile.getvalue()) return result def get_bucket_acl(self, bucket): """GET Bucket ACL. Args: bucket: bucket name as string Returns: GsAccessControlList instance """ outfile = StringIO.StringIO() self.send_request('GET', bucket=bucket, outfile=outfile, query_parameters={'acl': None}) acl = GsAccessControlList() acl.parse_xml(outfile.getvalue()) return acl def get_object(self, bucket, key, outfile, extra_headers=None, query_parameters=None, chunk_size=0): """GET Object. Args: bucket: bucket name as string key: object name as string outfile: an open file-like object Only success responses will be written to this file. Error resonses will be found in the operation objects extra_headers: optional request headers as dict query_parameters: optional query parameters as dict chunk_size: size of each socket read (default of 0 = read all) """ self.send_request('GET', bucket=bucket, key=key, outfile=outfile, extra_headers=extra_headers, query_parameters=query_parameters, chunk_size=chunk_size, success_status_codes=GET_OBJECT_SUCCESS_CODES) def get_object_acl(self, bucket, key): """GET Object ACL. Args: bucket: bucket name as string key: object name as string Returns: GsAccessControlList instance """ outfile = StringIO.StringIO() self.send_request('GET', bucket=bucket, key=key, outfile=outfile, query_parameters={'acl': None}) acl = GsAccessControlList() acl.parse_xml(outfile.getvalue()) return acl def head_object(self, bucket, key, extra_headers=None): """HEAD Object. 
Args: bucket: bucket name as string key: object name as string extra_headers: optional request headers as dict Returns: response headers as dict """ self.send_request('HEAD', bucket=bucket, key=key, extra_headers=extra_headers) return self.operations[-1].response_headers def put_bucket(self, bucket, infile=None, extra_headers=None, query_parameters=None): """PUT Bucket. Args: bucket: bucket name as string infile: an open file-like object data in this file will be written to the http socket extra_headers: optional request headers as dict query_parameters: optional query parameters as dict """ self.send_request('PUT', bucket=bucket, infile=infile, extra_headers=extra_headers, query_parameters=query_parameters) def put_bucket_acl(self, bucket, acl): """PUT Bucket ACL. Args: bucket: bucket name as string acl: GsAccessControlList instance """ infile = StringIO.StringIO(acl.to_xml()) self.put_bucket(bucket, infile=infile, query_parameters={'acl': None}) def put_object(self, bucket, key, infile, extra_headers=None, query_parameters=None, chunk_size=0): """PUT Object. Args: bucket: bucket name as string key: object name as string infile: an open file-like object data in this file will be written to the http socket extra_headers: optional request headers as dict query_parameters: optional query parameters as dict chunk_size: size of each socket write (default of 0 = write all) """ self.send_request('PUT', bucket=bucket, key=key, infile=infile, extra_headers=extra_headers, query_parameters=query_parameters, chunk_size=chunk_size) def put_object_acl(self, bucket, key, acl): """PUT Object ACL. Args: bucket: bucket name as string key: object name as string acl: GsAccessControlList instance """ infile = StringIO.StringIO(acl.to_xml()) self.put_object(bucket, key, infile, query_parameters={'acl': None}) def delete_bucket(self, bucket): """DELETE Bucket. Args: bucket: bucket name as string """ self.send_request( 'DELETE', bucket=bucket, success_status_codes=DEL_BUCKET_SUCCESS_CODES, retryable_status_codes=DEL_BUCKET_RETRYABLE_CODES) def delete_object(self, bucket, key): """DELETE Object. Args: bucket: bucket name as string key: object name as string """ self.send_request('DELETE', bucket=bucket, key=key, success_status_codes=DEL_OBJECT_SUCCESS_CODES) def send_request(self, http_method, bucket=None, key=None, infile=None, outfile=None, extra_headers=None, query_parameters=None, chunk_size=0, success_status_codes=DEFAULT_SUCCESS_CODES, retryable_status_codes=DEFAULT_RETRYABLE_CODES): """Sends the specifed request. Retries and follows redirection as necessary. Args: http_method: http method as string ('GET', 'PUT', etc...) bucket: bucket name as string key: object name as string infile: an open file-like object data in this file will be written to the http socket outfile: an open file-like object Only success responses will be written to this file. Error resonses will be found in the operation objects extra_headers: optional request headers as dict query_parameters: optional query parameters as dict chunk_size: size of each socket read/write (default of 0 = all) success_status_codes: response status codes considered success retryable_status_codes: response status codes considered retryable Returns: self.operations: the list of operations executed for this request. """ self.operations = [] operation = None redirect_location = None retries = 0 redirects = 0 while retries <= self.max_retries and redirects <= self.max_redirects: # Need backoff sleep? 
if self.backoff_exponent >= 0: self._backoff_sleep() # Prepare operation if redirect_location: operation = self._create_redirect_operation( operation, redirect_location) redirect_location = None else: operation = self._create_init_operation( http_method, bucket=bucket, key=key, extra_headers=extra_headers, query_parameters=query_parameters, infile=infile) # Execute operation try: operation = self._exec_operation( operation, infile=infile, outfile=outfile, chunk_size=chunk_size, success_status_codes=success_status_codes) except httplib.IncompleteRead, e: operation.response_error_body = ( 'IncompleteRead: %d bytes read' % (e.partial)) retries += 1 self._backoff_increment() continue finally: self.operations.append(operation) # Check for success if operation.response_status in success_status_codes: self._backoff_decrement() return self.operations # Check for redirect elif operation.response_status in REDIRECT_CODES: self._backoff_decrement() redirect_location = operation.response_headers['location'] redirects += 1 logging.debug('Redirected to %s', redirect_location) continue # Check for retryable failures elif operation.response_status in retryable_status_codes: self._backoff_increment() retries += 1 continue else: self._backoff_increment() break raise GsError('Service Failure', self.operations) def _exec_operation(self, operation, infile=None, outfile=None, chunk_size=0, success_status_codes=DEFAULT_SUCCESS_CODES): """Executes given operation request, and populates response.""" connection = None try: logging.debug('%s %s %s', operation.request_method, operation.request_path_and_query, str(operation.request_headers)) # Connect connection = self._connect(operation.connection_host, operation.connection_port) # Write the first line of the request self._put_request(connection, operation.request_method, operation.request_path_and_query) # Write the headers self._put_headers(connection, operation.request_headers) # Write the data if infile: self._write(connection, infile, chunk_size) else: # Flush the header write with no body connection.send('') # Get the response response = connection.getresponse() # Get the status operation.response_status = response.status # Read the response headers operation.response_headers = {} operation.response_headers.update(response.getheaders()) # Read the response data (not for HEAD) if operation.request_method != 'HEAD': # Don't put data in outfile unless success status if operation.response_status in success_status_codes: if outfile: self._read(response, outfile, chunk_size) # Read the error body else: operation.response_error_body = response.read() finally: if connection: self._close(connection) return operation def _create_init_operation(self, http_method, bucket=None, key=None, extra_headers=None, query_parameters=None, infile=None): """Inits a new operation with request fields.""" op = GsOperation() if self.proxy_host: op.connection_host = self.proxy_host op.connection_port = self.proxy_port else: op.connection_host = self.host op.connection_port = 80 op.request_method = http_method path = self._get_path(bucket, key) query_string = self._get_query_string(query_parameters) op.request_path_and_query = path + query_string op.request_headers = self._get_request_headers( http_method, path, query_parameters, extra_headers, infile) return op def _create_redirect_operation(self, previous_operation, location): """Creates a new op based on the last op and the redirection.""" parts = urlparse.urlparse(location) op = GsOperation() if self.proxy_host: op.connection_host = 
self.proxy_host op.connection_port = self.proxy_port else: host_and_port = parts.netloc.split(':') op.connection_host = host_and_port[0] if len(host_and_port) > 1: op.connection_port = int(host_and_port[1]) else: op.connection_port = 80 op.request_method = previous_operation.request_method op.request_path_and_query = parts.path if parts.query: op.request_path_and_query += '?%s' % parts.query op.request_headers = previous_operation.request_headers.copy() op.request_headers['Host'] = parts.netloc # host and optional port return op def _backoff_decrement(self): """Decrements the backoff exponent toward min of -1 (off).""" if self.backoff_exponent > -1: self.backoff_exponent -= 1 def _backoff_increment(self): """Increments the backoff exponent toward max of 5.""" if self.backoff_exponent < 5: self.backoff_exponent += 1 def _backoff_sleep(self): """Backoff sleep function called between retry attempts. See Google Storage docs for required exponential backoff when errors occur. Override this if you want it to do more. """ sleep_sec = 1 << self.backoff_exponent logging.debug('Backoff sleep, retrying in %d seconds...', sleep_sec) time.sleep(sleep_sec) def _connect(self, host, port): """Returns a connection object. Override this if you have an alternate connection implementation. """ return httplib.HTTPConnection(host, port=port) def _close(self, connection): """Closes the connection. Override this if you want it to do more. """ connection.close() def _put_request(self, connection, http_method, path_and_query): """Sends the method, path, and query to the connection. Override this if you want it to do more. """ connection.putrequest(http_method, path_and_query, skip_host=True, skip_accept_encoding=True) def _put_headers(self, connection, headers): """Sends the request headers to the connection. Override this if you want it to do more. """ for name, val in headers.iteritems(): connection.putheader(name, val) connection.endheaders() def _write(self, connection, infile, chunk_size): """Writes data in infile to the open connection. Override this if you want it to do more. Perhaps for performance measuring or periodic callbacks. """ infile.seek(0) if chunk_size > 0: while True: chunk = infile.read(chunk_size) if chunk: connection.send(chunk) else: break else: connection.send(infile.read()) def _read(self, response, outfile, chunk_size): """Reads data from response, and writes it to outfile. Override this if you want it to do more. Perhaps for performance measuring or periodic callbacks. 
""" if chunk_size > 0: while True: chunk = response.read(chunk_size) if chunk: outfile.write(chunk) else: break else: outfile.write(response.read()) outfile.flush() def _get_request_headers(self, http_method, path, query_parameters, extra_headers, infile): """Returns the request header dict based on args.""" headers = {} # Content-Length if infile: infile.seek(0, os.SEEK_END) headers['Content-Length'] = infile.tell() else: headers['Content-Length'] = '0' # Date headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime()) # Host headers['Host'] = self.host # User-Agent headers['User-Agent'] = 'gslite/' + __version__ # Add extra headers if extra_headers: headers.update(extra_headers) # Authorization if self.access_key and self.secret: headers['Authorization'] = self._get_authentication( http_method, path, query_parameters, headers) return headers def _get_path(self, bucket, key): """Returns the URL path based on args.""" s = StringIO.StringIO() s.write('/') if bucket: s.write(urllib.quote(bucket)) if key: s.write('/') s.write(urllib.quote(key)) return s.getvalue() def _get_query_string(self, query_parameters): """Returns the URL query string based on query dict.""" s = StringIO.StringIO() if query_parameters: s.write('?') first = True for name, val in query_parameters.iteritems(): if first: first = False else: s.write('&') s.write(name) if val: s.write('=%s' % urllib.quote(str(val))) return s.getvalue() def _get_authentication(self, http_method, path, query_parameters, headers): """Returns the Authorization header value based on args.""" string_to_sign = StringIO.StringIO() # HTTP method string_to_sign.write('%s\n' % http_method) # Content-Md5 if 'Content-MD5' in headers: string_to_sign.write(headers['Content-MD5'].strip()) string_to_sign.write('\n') # Content-Type if 'Content-Type' in headers: string_to_sign.write(headers['Content-Type'].strip()) string_to_sign.write('\n') # Date if ('x-goog-date' not in headers and 'Date' in headers): string_to_sign.write(headers['Date']) string_to_sign.write('\n') # Extension headers sorted_header_keys = headers.keys() sorted_header_keys.sort() for header_key in sorted_header_keys: if header_key.startswith('x-goog-'): string_to_sign.write('%s:%s\n' % ( header_key, headers[header_key])) # Resource string_to_sign.write(path) if query_parameters: for subresource in ('acl', 'location', 'logging', 'torrent'): if subresource in query_parameters: string_to_sign.write('?%s' % subresource) # should only be one of these break # HMAC-SHA1 h = hmac.new(self.secret, digestmod=hashlib.sha1) h.update(string_to_sign.getvalue()) signature = base64.b64encode(h.digest()) # Put it all together return '%s %s:%s' % (self.auth_id, self.access_key, signature)
nilq/baby-python
python
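# Example (added; same style as the usage example in the gslite module docstring above): granting an
# extra reader on an object through the ACL helpers. The bucket, object name and e-mail address are
# placeholders; 'READ' and 'UserByEmail' follow the Google Storage ACL XML terms that module targets.
#
# >>> acl = client.get_object_acl(bucket, 'hello.txt')
# >>> acl.add_entry(permission='READ', scope_type='UserByEmail',
# ...               scope_email='reader@example.com')
# >>> client.put_object_acl(bucket, 'hello.txt', acl)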
import os import json import logging import tba_config from google.appengine.api import taskqueue from google.appengine.ext import ndb from google.appengine.ext import deferred from google.appengine.ext.webapp import template from controllers.base_controller import LoggedInHandler from datafeeds.datafeed_fms_api import DatafeedFMSAPI from models.event import Event from models.event_details import EventDetails from models.match import Match from helpers.event_details_manipulator import EventDetailsManipulator from helpers.match_helper import MatchHelper from helpers.match_manipulator import MatchManipulator from helpers.rankings_helper import RankingsHelper def create_event_details(event_key): event = Event.get_by_id(event_key) if event.alliance_selections or event.district_points or event.matchstats or event.rankings: event_details = EventDetails( id=event_key, alliance_selections=event.alliance_selections, district_points=event.district_points, matchstats=event.matchstats, rankings=event.rankings) EventDetailsManipulator.createOrUpdate(event_details) class AdminMigration(LoggedInHandler): def get(self): self._require_admin() path = os.path.join(os.path.dirname(__file__), '../../templates/admin/migration.html') self.response.out.write(template.render(path, self.template_values)) class AdminMigrationCreateEventDetails(LoggedInHandler): def get(self): self._require_admin() for event_key in Event.query().fetch(keys_only=True): deferred.defer(create_event_details, event_key.id(), _queue="admin") self.response.out.write("DONE") class AdminMigrationRankings(LoggedInHandler): def get(self, year): self._require_admin() event_keys = Event.query(Event.year==int(year)).fetch(keys_only=True) event_details = ndb.get_multi([ndb.Key(EventDetails, key.id()) for key in event_keys]) updated = [] for event_detail in event_details: if event_detail: logging.info(event_detail.key.id()) event_detail.rankings2 = RankingsHelper.convert_rankings(event_detail) updated.append(event_detail) EventDetailsManipulator.createOrUpdate(updated) self.response.out.write("DONE") class AdminMigrationPlayoffAdvancementAll(LoggedInHandler): def get(self): VALID_YEARS = tba_config.VALID_YEARS for year in VALID_YEARS: taskqueue.add(url='/admin/migration/backfill_playoff_advancement/{}'.format(year), method='GET') self.response.out.write("Enqueued migrations for {} - {}".format(VALID_YEARS[0], VALID_YEARS[-1])) class AdminMigrationPlayoffAdvancement(LoggedInHandler): def get(self, year): self._require_admin() event_keys = Event.query(Event.year==int(year)).fetch(keys_only=True) for event_key in event_keys: taskqueue.add(url='/tasks/math/do/playoff_advancement_update/{}'.format(event_key.id()), method='GET') self.response.out.write("Enqueued {} migrations".format(len(event_keys))) class AdminMigrationAddSurrogates(LoggedInHandler): def get(self, year): self._require_admin() events = Event.query(Event.year==int(year)).fetch() for event in events: deferred.defer(MatchHelper.add_surrogates, event, _queue="admin") self.response.out.write("DONE") class AdminMigrationBackfillYearDQ(LoggedInHandler): def get(self, year): self._require_admin() # This technically isn't needed because of app.yaml event_keys = Event.query( Event.year==int(year), Event.official==True, ).fetch(keys_only=True) for event_key in event_keys: taskqueue.add( url='/admin/migration/backfill_event_dq/{}'.format(event_key.id()), method='GET', queue_name='admin', ) self.response.out.write("DONE") class AdminMigrationBackfillEventDQ(LoggedInHandler): def get(self, event_key): 
df = DatafeedFMSAPI('v2.0', save_response=True) updated_matches = [] for m1 in df.getMatches(event_key): m2 = m1.key.get() # Only update if teams and scores are equal if m2 and (m1.alliances['red']['teams'] == m2.alliances['red']['teams'] and m1.alliances['blue']['teams'] == m2.alliances['blue']['teams'] and m1.alliances['red']['score'] == m2.alliances['red']['score'] and m1.alliances['blue']['score'] == m2.alliances['blue']['score']): old_alliances = m2.alliances old_alliances['red']['dqs'] = m1.alliances['red']['dqs'] old_alliances['blue']['dqs'] = m1.alliances['blue']['dqs'] m2.alliances_json = json.dumps(old_alliances) updated_matches.append(m2) else: logging.warning("Match not equal: {}".format(m1.key.id())) MatchManipulator.createOrUpdate(updated_matches) self.response.out.write("DONE")
nilq/baby-python
python
""" Posix platform main process. """ from ....base import EventBus def run_petronia(bus: EventBus) -> int: print("Petronia for Posix environments started.") return 0
nilq/baby-python
python
# Copyright (c) OpenMMLab. All rights reserved. from .anchor import * # noqa: F401, F403 from .bbox import * # noqa: F401, F403 from .evaluation import * # noqa: F401, F403 from .patch import * # noqa: F401, F403 from .post_processing import * # noqa: F401, F403 from .visualization import * # noqa: F401, F403
nilq/baby-python
python
#!/usr/bin/env python #appion from appionlib import apPrepXmipp3D from appionlib import apDisplay class XmippPrepML3DRefinement(apPrepXmipp3D.XmippPrep3DRefinement): def setRefineMethod(self): self.refinemethod = 'xmippml3d' #===================== if __name__ == "__main__": app = XmippPrepML3DRefinement() app.start() app.close()
nilq/baby-python
python
from openbiolink.graph_creation.file_processor.fileProcessor import FileProcessor
from openbiolink.graph_creation.metadata_infile import InMetaOntoUberonIsA
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.graph_creation.types.readerType import ReaderType


class OntoUberonIsAProcessor(FileProcessor):
    IN_META_CLASS = InMetaOntoUberonIsA

    def __init__(self):
        self.use_cols = self.IN_META_CLASS.USE_COLS
        super().__init__(self.use_cols, readerType=ReaderType.READER_ONTO_UBERON,
                         infileType=InfileType.IN_ONTO_UBERON_IS_A, mapping_sep=self.IN_META_CLASS.MAPPING_SEP)

    def individual_postprocessing(self, data):
        # Bgee maps only onto CL and UBERON terms, so keep only the rows where
        # both the child and the parent identifier come from those ontologies.
        data = data[data['ID'].str.startswith('UBERON:') | data['ID'].str.startswith('CL:')]
        data = data[data['IS_A'].str.startswith('UBERON:') | data['IS_A'].str.startswith('CL:')]
        return data
nilq/baby-python
python
""" Implements Lydian converter. """ alpha_to_lydian = [ (r"a", "𐤠"), (r"b", "𐤡"), (r"p", "𐤡"), (r"g", "𐤢"), (r"d", "𐤣"), (r"e", "𐤤"), (r"v", "𐤥"), (r"w", "𐤥"), (r"i", "𐤦"), (r"y", "𐤧"), (r"k", "𐤨"), (r"l", "𐤩"), (r"m", "𐤪"), (r"n", "𐤫"), (r"o", "𐤬"), (r"r", "𐤭"), (r"S", "𐤮"), (r"ś", "𐤮"), (r"t", "𐤯"), (r"u", "𐤰"), (r"f", "𐤱"), (r"q", "𐤲"), (r"s", "𐤳"), (r"sh", "𐤳"), (r"T", "𐤴"), (r"ã", "𐤵"), (r"A", "𐤵"), (r"ẽ", "𐤶"), (r"E", "𐤶"), (r"L", "𐤷"), (r"N", "𐤸"), (r"c", "𐤹"), (r"\.", "")]
nilq/baby-python
python
# Python - 3.6.0 cookie = lambda x: f'Who ate the last cookie? It was {"Zach" if type(x) is str else "Monica" if (type(x) is float) or (type(x) is int) else "the dog"}!'
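# Illustrative calls (added for clarity; they mirror the type dispatch above):
#   cookie("Ryan")  ->  "Who ate the last cookie? It was Zach!"
#   cookie(26)      ->  "Who ate the last cookie? It was Monica!"
#   cookie(2.3)     ->  "Who ate the last cookie? It was Monica!"
#   cookie([])      ->  "Who ate the last cookie? It was the dog!"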
nilq/baby-python
python
import doctest from insights.parsers import ls_var_opt_mssql from insights.tests import context_wrap LS_VAR_OPT_MSSQL_WRONG_PERM = """ drwxrwx---. 5 root root 58 Apr 16 07:20 /var/opt/mssql """.strip() LS_VAR_OPT_MSSQL_WRONG_PERM_2 = """ drwxrwx---. 5 mssql root 58 Apr 16 07:20 /var/opt/mssql """.strip() LS_VAR_OPT_MSSQL = """ drwxrwx---. 5 mssql mssql 58 Apr 16 07:20 /var/opt/mssql """.strip() def test_ls_var_opt_mssql(): content = ls_var_opt_mssql.LsDVarOptMSSql(context_wrap(LS_VAR_OPT_MSSQL_WRONG_PERM, path='ls_-ld_.var.opt.mssql')) content_attr = content.listing_of('/var/opt/mssql').get('/var/opt/mssql') assert content_attr.get('owner') != "mssql" assert content_attr.get('group') != "mssql" content = ls_var_opt_mssql.LsDVarOptMSSql(context_wrap(LS_VAR_OPT_MSSQL_WRONG_PERM_2, path='ls_-ld_.var.opt.mssql')) content_attr = content.listing_of('/var/opt/mssql').get('/var/opt/mssql') assert content_attr.get('owner') == "mssql" assert content_attr.get('group') != "mssql" content = ls_var_opt_mssql.LsDVarOptMSSql(context_wrap(LS_VAR_OPT_MSSQL, path='ls_-ld_.var.opt.mssql')) content_attr = content.listing_of('/var/opt/mssql').get('/var/opt/mssql') assert content_attr.get('owner') == "mssql" assert content_attr.get('group') == "mssql" def _failed_without_insights_command_as_path(): # Fails with KeyError: '/var/opt/mssql'" unless path is defined foo = ls_var_opt_mssql.LsDVarOptMSSql(context_wrap(LS_VAR_OPT_MSSQL_WRONG_PERM_2)) content_attr = foo.listing_of('/var/opt/mssql').get('/var/opt/mssql') assert content_attr.get('owner') != "mssql" assert content_attr.get('group') != "mssql" def _failed_with_standard_path(): # Fails with KeyError: '/var/opt/mssql'". bar = ls_var_opt_mssql.LsDVarOptMSSql(context_wrap(LS_VAR_OPT_MSSQL_WRONG_PERM_2, path='/var/opt/mssql')) content_attr = bar.listing_of('/var/opt/mssql').get('/var/opt/mssql') assert content_attr.get('owner') != "mssql" assert content_attr.get('group') != "mssql" def test_ls_var_opt_mssql_docs(): failed_count, tests = doctest.testmod( ls_var_opt_mssql, globs={'content': ls_var_opt_mssql.LsDVarOptMSSql(context_wrap(LS_VAR_OPT_MSSQL_WRONG_PERM, path='ls_-ld_.var.opt.mssql'))} ) assert failed_count == 0
nilq/baby-python
python
import os import sys from typing import List import click from ruamel.yaml import YAML from great_expectations import DataContext from great_expectations.checkpoint.types.checkpoint_result import CheckpointResult from great_expectations.cli import toolkit from great_expectations.cli.pretty_printing import cli_message, cli_message_list from great_expectations.data_context.util import file_relative_path from great_expectations.exceptions import InvalidTopLevelConfigKeyError from great_expectations.render.renderer.checkpoint_new_notebook_renderer import ( CheckpointNewNotebookRenderer, ) from great_expectations.util import lint_code try: from sqlalchemy.exc import SQLAlchemyError except ImportError: SQLAlchemyError = RuntimeError try: from sqlalchemy.exc import SQLAlchemyError except ImportError: SQLAlchemyError = RuntimeError yaml = YAML() yaml.indent(mapping=2, sequence=4, offset=2) """ --ge-feature-maturity-info-- id: checkpoint_command_line title: LegacyCheckpoint - Command Line icon: short_description: Run a configured checkpoint from a command line. description: Run a configured checkpoint from a command line in a Terminal shell. how_to_guide_url: https://docs.greatexpectations.io/en/latest/guides/how_to_guides/validation/how_to_run_a_checkpoint_in_terminal.html maturity: Experimental maturity_details: api_stability: Unstable (expect changes to batch request) implementation_completeness: Complete unit_test_coverage: Complete integration_infrastructure_test_coverage: N/A documentation_completeness: Complete bug_risk: Low --ge-feature-maturity-info-- """ @click.group(short_help="Checkpoint operations") @click.pass_context def checkpoint(ctx): """ Checkpoint operations A Checkpoint is a bundle of one or more batches of data with one or more Expectation Suites. A Checkpoint can be as simple as one batch of data paired with one Expectation Suite. A Checkpoint can be as complex as many batches of data across different datasources paired with one or more Expectation Suites each. """ directory: str = toolkit.parse_cli_config_file_location( config_file_location=ctx.obj.config_file_location ).get("directory") context: DataContext = toolkit.load_data_context_with_error_handling( directory=directory, from_cli_upgrade_command=False, ) # TODO consider moving this all the way up in to the CLIState constructor ctx.obj.data_context = context @checkpoint.command(name="new") @click.argument("name") @click.option( "--jupyter/--no-jupyter", is_flag=True, help="By default launch jupyter notebooks unless you specify the --no-jupyter flag", default=True, ) @click.pass_context def checkpoint_new(ctx, name, jupyter): """Create a new Checkpoint for easy deployments. NAME is the name of the Checkpoint to create. 
""" _checkpoint_new(ctx=ctx, checkpoint_name=name, jupyter=jupyter) def _checkpoint_new(ctx, checkpoint_name, jupyter): usage_event: str = "cli.checkpoint.new" context = ctx.obj.data_context try: _verify_checkpoint_does_not_exist(context, checkpoint_name, usage_event) # Create notebook on disk notebook_name = f"edit_checkpoint_{checkpoint_name}.ipynb" notebook_file_path = _get_notebook_path(context, notebook_name) checkpoint_new_notebook_renderer = CheckpointNewNotebookRenderer( context=context, checkpoint_name=checkpoint_name ) checkpoint_new_notebook_renderer.render_to_disk( notebook_file_path=notebook_file_path ) if not jupyter: cli_message( f"To continue editing this Checkpoint, run <green>jupyter notebook {notebook_file_path}</green>" ) toolkit.send_usage_message(context, event=usage_event, success=True) if jupyter: cli_message( """<green>Because you requested to create a new Checkpoint, we'll open a notebook for you now to edit it! If you wish to avoid this you can add the `--no-jupyter` flag.</green>\n\n""" ) toolkit.launch_jupyter_notebook(notebook_file_path) except Exception as e: toolkit.exit_with_failure_message_and_stats( context=context, usage_event=usage_event, message=f"<red>{e}</red>", ) return def _verify_checkpoint_does_not_exist( context: DataContext, checkpoint_name: str, usage_event: str ) -> None: try: if checkpoint_name in context.list_checkpoints(): toolkit.exit_with_failure_message_and_stats( context, usage_event, f"A Checkpoint named `{checkpoint_name}` already exists. Please choose a new name.", ) except InvalidTopLevelConfigKeyError as e: toolkit.exit_with_failure_message_and_stats( context, usage_event, f"<red>{e}</red>" ) def _get_notebook_path(context, notebook_name): return os.path.abspath( os.path.join( context.root_directory, context.GE_EDIT_NOTEBOOK_DIR, notebook_name ) ) @checkpoint.command(name="list") @click.pass_context def checkpoint_list(ctx): """List configured Checkpoints.""" context: DataContext = ctx.obj.data_context checkpoints: List[str] = context.list_checkpoints() if not checkpoints: cli_message( "No Checkpoints found.\n" " - Use the command `great_expectations checkpoint new` to create one." ) toolkit.send_usage_message(context, event="cli.checkpoint.list", success=True) sys.exit(0) number_found: int = len(checkpoints) plural: str = "s" if number_found > 1 else "" message: str = f"Found {number_found} Checkpoint{plural}." 
pretty_list: list = [f" - <cyan>{cp}</cyan>" for cp in checkpoints] cli_message_list(pretty_list, list_intro_string=message) toolkit.send_usage_message(context, event="cli.checkpoint.list", success=True) @checkpoint.command(name="delete") @click.argument("checkpoint") @click.pass_context def checkpoint_delete(ctx, checkpoint): """Delete a Checkpoint.""" usage_event: str = "cli.checkpoint.delete" context: DataContext = ctx.obj.data_context try: toolkit.delete_checkpoint( context=context, checkpoint_name=checkpoint, usage_event=usage_event, ) toolkit.send_usage_message(context, event="cli.checkpoint.delete", success=True) except Exception as e: toolkit.exit_with_failure_message_and_stats( context=context, usage_event=usage_event, message=f"<red>{e}</red>", ) return cli_message(f'Checkpoint "{checkpoint}" deleted.') sys.exit(0) @checkpoint.command(name="run") @click.argument("checkpoint") @click.pass_context def checkpoint_run(ctx, checkpoint): """Run a Checkpoint.""" usage_event: str = "cli.checkpoint.run" context: DataContext = ctx.obj.data_context try: result: CheckpointResult = toolkit.run_checkpoint( context=context, checkpoint_name=checkpoint, usage_event=usage_event, ) except Exception as e: toolkit.exit_with_failure_message_and_stats( context=context, usage_event=usage_event, message=f"<red>{e}</red>", ) return if not result["success"]: cli_message(string="Validation failed!") toolkit.send_usage_message(context, event=usage_event, success=True) print_validation_operator_results_details(result=result) sys.exit(1) cli_message("Validation succeeded!") toolkit.send_usage_message(context, event=usage_event, success=True) print_validation_operator_results_details(result=result) sys.exit(0) def print_validation_operator_results_details( result: CheckpointResult, ) -> None: max_suite_display_width = 40 cli_message( f""" {'Suite Name'.ljust(max_suite_display_width)} Status Expectations met""" ) for result_id, result_item in result.run_results.items(): vr = result_item["validation_result"] stats = vr.statistics passed = stats["successful_expectations"] evaluated = stats["evaluated_expectations"] percentage_slug = ( f"{round(passed / evaluated * 100, 2) if evaluated > 0 else 100} %" ) stats_slug = f"{passed} of {evaluated} ({percentage_slug})" if vr.success: status_slug = "<green>✔ Passed</green>" else: status_slug = "<red>✖ Failed</red>" suite_name: str = str(vr.meta["expectation_suite_name"]) if len(suite_name) > max_suite_display_width: suite_name = suite_name[0:max_suite_display_width] suite_name = suite_name[:-1] + "…" status_line: str = f"- {suite_name.ljust(max_suite_display_width)} {status_slug} {stats_slug}" cli_message(status_line) @checkpoint.command(name="script") @click.argument("checkpoint") @click.pass_context def checkpoint_script(ctx, checkpoint): """ Create a python script to run a Checkpoint. Checkpoints can be run directly without this script using the `great_expectations Checkpoint run` command. This script is provided for those who wish to run Checkpoints via python. """ usage_event: str = "cli.checkpoint.script" context: DataContext = ctx.obj.data_context toolkit.validate_checkpoint( context=context, checkpoint_name=checkpoint, usage_event=usage_event ) script_name: str = f"run_{checkpoint}.py" script_path: str = os.path.join( context.root_directory, context.GE_UNCOMMITTED_DIR, script_name ) if os.path.isfile(script_path): toolkit.exit_with_failure_message_and_stats( context, usage_event, f"""<red>Warning! 
A script named {script_name} already exists and this command will not overwrite it.</red> - Existing file path: {script_path}""", ) _write_checkpoint_script_to_disk( context_directory=context.root_directory, checkpoint_name=checkpoint, script_path=script_path, ) cli_message( f"""<green>A python script was created that runs the Checkpoint named: `{checkpoint}`</green> - The script is located in `great_expectations/uncommitted/run_{checkpoint}.py` - The script can be run with `python great_expectations/uncommitted/run_{checkpoint}.py`""" ) toolkit.send_usage_message(context, event=usage_event, success=True) def _write_checkpoint_script_to_disk( context_directory: str, checkpoint_name: str, script_path: str ) -> None: script_full_path: str = os.path.abspath(os.path.join(script_path)) template: str = _load_script_template().format(checkpoint_name, context_directory) linted_code: str = lint_code(code=template) with open(script_full_path, "w") as f: f.write(linted_code) def _load_script_template() -> str: with open(file_relative_path(__file__, "checkpoint_script_template.py")) as f: template = f.read() return template
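

# Illustrative invocations of the commands defined above (added for clarity;
# the entry-point name is assumed to be the standard `great_expectations`
# console script, which the help texts in this module also reference):
#
#   great_expectations checkpoint new my_checkpoint
#   great_expectations checkpoint list
#   great_expectations checkpoint run my_checkpoint
#   great_expectations checkpoint script my_checkpoint
#   great_expectations checkpoint delete my_checkpoint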
nilq/baby-python
python
from unittest import TestCase from countpigs import CountPigs, directcountpigs, expect_val, f, p class TestSimulation(TestCase): def test_direct_count_cls(self): c = CountPigs(5) # (n, q, m, k) = (5, 1, 3, 2) choice = c.choice(1) choices = c.choices(1, 3) choose = c.choose(1, 3, 2) self.assertEqual(len(choice), 5) self.assertEqual(len(choices), 125) self.assertEqual(len(choose), 60) # (n, q, m, k) = (5, 2, 3, 3) choice = c.choice(2) choices = c.choices(2, 3) choose = c.choose(2, 3, 3) self.assertEqual(len(choice), 10) self.assertEqual(len(choices), 1000) self.assertEqual(len(choose), 240) def test_direct_count_func(self): # (n, q, m, k) = (5, 1, 3, 2) x0, x1 = directcountpigs(5, 1, 3, 2) self.assertEqual(x0, 125) self.assertEqual(x1, 60) # (n, q, m, k) = (5, 2, 3, 3) x0, x1 = directcountpigs(5, 2, 3, 3) self.assertEqual(x0, 1000) self.assertEqual(x1, 240) class TestRecursion(TestCase): def test_counting_factor(self): self.assertEqual(f(5, 1, 3), 150) self.assertEqual(f(5, 2, 3), 240) self.assertEqual(f(10, 3, 7), 2687077316815500) def test_probability(self): self.assertEqual(p(5, 3, 1, 2), 12/25) self.assertEqual(p(5, 3, 2, 3), 6/25) self.assertEqual(p(4, 3, 1, 1) + p(4, 3, 1, 2) + p(4, 3, 1, 3), 1) def test_expectation(self): self.assertEqual(expect_val(4, 3, 1), 37/16) self.assertEqual(expect_val(10, 5, 3), 83193/10000) # self.assertEqual(expect_val(15, 7, 3), 185223/15625) fail self.assertAlmostEqual(expect_val(15, 7, 3), 185223/15625) self.assertEqual(expect_val(20, 10, 3), 8224006099551/512000000000)
nilq/baby-python
python
import torch import torch.nn.functional as F from torch import nn from torchvision import models from torch.hub import load_state_dict_from_url from ast import literal_eval from itertools import chain from .utils import gram_matrix class ContentLoss(nn.Module): def __init__(self, mode): super(ContentLoss, self).__init__() self.mode = mode def forward(self, input): if self.mode == 'loss' and input.size() == self.target.size(): self.loss = F.mse_loss(input, self.target) elif self.mode == 'target': self.target = input return input class StyleLoss(nn.Module): def __init__(self, mode, feature_norm): super(StyleLoss, self).__init__() self.mode = mode self.feature_norm = feature_norm def forward(self, input): if self.mode == 'loss': self.loss = F.mse_loss(gram_matrix(input, self.feature_norm), self.target) elif self.mode == 'target': self.target = gram_matrix(input, self.feature_norm) return input class VGG19Loss(nn.Module): def __init__(self, content_weight, style_weight, content_weights, style_weights, avg_pool, feature_norm, weights, device): super(VGG19Loss, self).__init__() content_weights = literal_eval(content_weights) style_weights = literal_eval(style_weights) self.content_weight, self.style_weight = content_weight, style_weight self.style_weights = {layer: weight / sum(style_weights.values()) for layer, weight in style_weights.items()} self.content_weights = {layer: weight / sum(content_weights.values()) for layer, weight in content_weights.items()} self._build_vgg_loss(avg_pool, feature_norm, weights, device) def forward(self, input): self.vgg_loss(input) content_loss, style_loss = 0, 0 content_losses, style_losses = {}, {} for layer in self.content_weights: content_losses[layer] = self.content_losses[layer].loss content_loss += content_losses[layer] * self.content_weights[layer] for layer in self.style_weights: style_losses[layer] = self.style_losses[layer].loss style_loss += style_losses[layer] * self.style_weights[layer] total_loss = content_loss * self.content_weight + \ style_loss * self.style_weight return (total_loss, content_loss, style_loss, content_losses, style_losses) def set_targets(self, content, style): self._set_modes('target', 'none') self.vgg_loss(content) self._set_modes('none', 'target') self.vgg_loss(style) self._set_modes('loss', 'loss') def reset(self): for loss in chain(self.content_losses.values(), self.style_losses.values()): if hasattr(loss, 'target'): delattr(loss, 'target') if hasattr(loss, 'loss'): delattr(loss, 'loss') self._set_modes('none', 'none') def _set_modes(self, content_mode, style_mode): for loss in self.content_losses.values(): loss.mode = content_mode for loss in self.style_losses.values(): loss.mode = style_mode def _build_vgg_loss(self, avg_pool, feature_norm, weights, device): self.content_losses, self.style_losses = {}, {} self.vgg_loss = nn.Sequential() vgg = models.vgg19(pretrained=False).features if weights in ('original', 'normalized'): state_dict = load_state_dict_from_url('https://storage.googleapis' f'.com/prism-weights/vgg19-{weights}.pth') else: state_dict = torch.load(weights) vgg.load_state_dict(state_dict) vgg = vgg.eval() for param in vgg.parameters(): param.requires_grad_(False) i_pool, i_conv = 1, 0 for layer in vgg.children(): if isinstance(layer, nn.Conv2d): i_conv += 1 name = f'conv_{i_pool}_{i_conv}' elif isinstance(layer, nn.ReLU): name = f'relu_{i_pool}_{i_conv}' layer = nn.ReLU(inplace=False) elif isinstance(layer, nn.MaxPool2d): name = f'pool_{i_pool}' if avg_pool: layer = nn.AvgPool2d(kernel_size=2, stride=2, 
padding=0) i_pool += 1 i_conv = 0 self.vgg_loss.add_module(name, layer) if name in self.content_weights: content_loss = ContentLoss('none') self.vgg_loss.add_module(f'content_loss_{i_pool}_{i_conv}', content_loss) self.content_losses[name] = content_loss if name in self.style_weights: style_loss = StyleLoss('none', feature_norm) self.vgg_loss.add_module(f'style_loss_{i_pool}_{i_conv}', style_loss) self.style_losses[name] = style_loss if (len(self.style_weights) == len(self.style_losses) and len(self.content_weights) == len(self.content_losses)): break self.vgg_loss.to(device)
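

# The gram_matrix helper above is imported from a local utils module that is
# not shown here. The sketch below is an assumed, typical implementation
# (batched inner products of flattened feature maps, optionally normalised by
# the number of elements when feature_norm is set); it is not the project's
# actual code.
def _gram_matrix_sketch(input, feature_norm=True):
    b, c, h, w = input.size()
    features = input.view(b, c, h * w)             # (batch, channels, pixels)
    gram = features.bmm(features.transpose(1, 2))  # (batch, channels, channels)
    if feature_norm:
        gram = gram / (c * h * w)
    return gram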
nilq/baby-python
python
""" The tests in this module compare the RESPY package to the original RESTUD code for the special cases where they overlap. """ from pandas.util.testing import assert_frame_equal import pandas as pd import numpy as np import subprocess import pytest from codes.random_init import generate_random_dict from respy.python.shared.shared_auxiliary import dist_class_attributes from respy.python.shared.shared_auxiliary import print_init_dict from respy.python.shared.shared_constants import TEST_RESOURCES_DIR from respy.python.shared.shared_constants import IS_FORTRAN from respy import RespyCls from respy import simulate def transform_respy_to_restud(model_paras, edu_start, edu_max, num_agents_sim, num_periods, num_draws_emax, delta): """ Transform a RESPY initialization file to a RESTUD file. """ # Ensure restrictions assert (edu_start == 10) assert (edu_max == 20) # Write to initialization file with open('in.txt', 'w') as file_: # Write out some basic information about the problem. file_.write(' {0:03d} {1:05d} {2:06d} {3:06f}' ' {4:06f}\n'.format(num_periods, num_agents_sim, num_draws_emax, -99.0, 500.0)) # Write out coefficients for the two occupations. coeffs_a, coeffs_b = model_paras['coeffs_a'], model_paras['coeffs_b'] for coeffs in [coeffs_a, coeffs_b]: line = ' {0:10.6f} {1:10.6f} {2:10.6f} {3:10.6f} {4:10.6f}' \ ' {5:10.6f}\n'.format(*coeffs) file_.write(line) # Write out coefficients for education and home payoffs as well as # the discount factor. The intercept is scaled. This is later undone # again in the original FORTRAN code. coeffs_edu = model_paras['coeffs_edu'] coeffs_home = model_paras['coeffs_home'] edu_int = coeffs_edu[0] / 1000 edu_coeffs = [edu_int] home = coeffs_home[0] / 1000 for j in range(2): edu_coeffs += [-coeffs_edu[j + 1] / 1000] coeffs = edu_coeffs + [home, delta] fmt = ' {0:10.6f} {1:10.6f} {2:10.6f} {3:10.6f} {4:10.6f}\n' line = fmt.format(*coeffs) file_.write(line) # Write out coefficients of correlation and standard deviations in the # standard deviations in the education and home equation required. # This is undone again in the original FORTRAN code. All this is # working only under the imposed absence of any randomness. rho = np.zeros((4, 4)) for j in range(4): line = ' {0:10.5f} {1:10.5f} {2:10.5f} ' \ ' {3:10.5f}\n'.format(*rho[j, :]) file_.write(line) file_.write(line) @pytest.mark.skipif(not IS_FORTRAN, reason='No FORTRAN available') @pytest.mark.usefixtures('fresh_directory', 'set_seed') class TestClass(object): """ This class groups together some tests. """ def test_1(self): """ Compare results from the RESTUD program and the RESPY package. """ # Impose some constraints on the initialization file which ensures that # the problem can be solved by the RESTUD code. The code is adjusted to # run with zero draws. constraints = dict() constraints['edu'] = (10, 20) constraints['is_deterministic'] = True # Generate random initialization file. The RESTUD code uses the same # random draws for the solution and simulation of the model. Thus, # the number of draws is required to be less or equal to the number # of agents. init_dict = generate_random_dict(constraints) num_agents_sim = init_dict['SIMULATION']['agents'] num_draws_emax = init_dict['SOLUTION']['draws'] if num_draws_emax < num_agents_sim: init_dict['SOLUTION']['draws'] = num_agents_sim print_init_dict(init_dict) # Indicate RESTUD code the special case of zero disturbance. 
open('.restud.testing.scratch', 'a').close() # Perform toolbox actions respy_obj = RespyCls('test.respy.ini') # This flag aligns the random components between the RESTUD program and # RESPY package. The existence of the file leads to the RESTUD program # to write out the random components. model_paras, edu_start, edu_max, num_agents_sim, num_periods, \ num_draws_emax, delta = \ dist_class_attributes(respy_obj, 'model_paras', 'edu_start', 'edu_max', 'num_agents_sim', 'num_periods', 'num_draws_emax', 'delta') transform_respy_to_restud(model_paras, edu_start, edu_max, num_agents_sim, num_periods, num_draws_emax, delta) # Solve model using RESTUD code. cmd = TEST_RESOURCES_DIR + '/kw_dp3asim' subprocess.check_call(cmd, shell=True) # Solve model using RESPY package. simulate(respy_obj) # Compare the simulated datasets generated by the programs. py = pd.DataFrame(np.array(np.genfromtxt('data.respy.dat', missing_values='.'), ndmin=2)[:, -4:]) fort = pd.DataFrame(np.array(np.genfromtxt('ftest.txt', missing_values='.'), ndmin=2)[:, -4:]) assert_frame_equal(py, fort)
nilq/baby-python
python
from suplemon.linelight.color_map import color_map class Syntax: def get_comment(self): return ("/*", "*/") def get_color(self, raw_line): color = color_map["white"] line = str(raw_line) if line.startswith("+"): color = color_map["green"] elif line.startswith("-"): color = color_map["red"] elif line.startswith("@@"): color = color_map["blue"] return color
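
# Illustrative behaviour (added for clarity): diff lines are coloured by prefix.
#   Syntax().get_color("+ added line")    -> color_map["green"]
#   Syntax().get_color("- removed line")  -> color_map["red"]
#   Syntax().get_color("@@ -1,4 +1,4 @@") -> color_map["blue"]
#   Syntax().get_color("  context line")  -> color_map["white"]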
nilq/baby-python
python
import numpy as np


def read_input():
    with open("input.txt", "r") as file:
        return [[int(c) for c in l] for l in file.read().splitlines()]


def get_neighbors(grid, y, x, v):
    # In-bounds orthogonal neighbours of (y, x) that have not been visited yet.
    neighbors = [p for p in [(y-1, x), (y+1, x), (y, x-1), (y, x+1)]
                 if 0 <= p[0] < grid.shape[0] and 0 <= p[1] < grid.shape[1] and p not in v]
    return neighbors


def ext_grid(grid):
    # Part 2: tile the grid 5x5; each copy's risk levels are incremented by one
    # per tile step, wrapping from 9 back to 1.
    for ax in [0, 1]:
        tmp = grid.copy()
        for i in range(4):
            tmp += 1
            tmp[tmp > 9] = 1
            grid = np.concatenate((grid, tmp), axis=ax)
    return grid


def dijkstra(grid, ext):
    # Lowest-total-risk path from the top-left to the bottom-right corner.
    # The frontier is a list of [cost, position] pairs kept sorted by cost;
    # cells are marked visited as soon as they are enqueued.
    if ext:
        grid = ext_grid(grid)
    e = tuple(np.subtract(grid.shape, (1, 1)))   # target corner
    stack = [[0, (0, 0)]]                        # frontier: [cost, position]
    v = set([(0, 0)])                            # visited cells
    while True:
        c = stack[0][0]
        p = stack[0][1]
        if p == e:
            break
        vs = get_neighbors(grid, p[0], p[1], v)
        v |= set(vs)
        del stack[0]
        stack += [[c + grid[p], p] for p in vs]
        stack = sorted(stack, key=lambda x: x[0])
    print(stack[0][0])


def main():
    grid = np.array(read_input())
    dijkstra(grid, False)   # part 1: original grid
    dijkstra(grid, True)    # part 2: 5x5 extended grid


if __name__ == "__main__":
    main()
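

# Alternative sketch (not part of the original solution): the list-based frontier
# above is re-sorted on every step and cells are marked visited as soon as they
# are enqueued. The heapq version below finalises a cell's cost only when it is
# popped, which matches textbook Dijkstra and is typically much faster on the
# extended part-2 grid. Names are illustrative.
import heapq


def dijkstra_heap(grid, ext):
    if ext:
        grid = ext_grid(grid)
    end = (grid.shape[0] - 1, grid.shape[1] - 1)
    heap = [(0, (0, 0))]        # (accumulated risk, position)
    done = set()
    while heap:
        cost, pos = heapq.heappop(heap)
        if pos in done:
            continue
        done.add(pos)
        if pos == end:
            return cost
        y, x = pos
        for ny, nx in ((y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)):
            if 0 <= ny < grid.shape[0] and 0 <= nx < grid.shape[1] and (ny, nx) not in done:
                heapq.heappush(heap, (cost + grid[ny, nx], (ny, nx)))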
nilq/baby-python
python
import nltk
import random
import feedparser

# RSS feeds used as a small two-class corpus (MLB vs NFL headlines).
urls = {
    'mlb': 'https://sports.yahoo.com/mlb/rss.xml',
    'nfl': 'https://sports.yahoo.com/nfl/rss.xml',
}

feedmap = {}
stopwords = nltk.corpus.stopwords.words('english')


def featureExtractor(words):
    # Bag-of-words features: every non-stopword token becomes a boolean feature.
    features = {}
    for word in words:
        if word not in stopwords:
            features["word({})".format(word)] = True
    return features


# Download each feed and collect (category, tokenised summary) pairs.
sentences = []
for category in urls.keys():
    print("downloading {}".format(urls[category]))
    feedmap[category] = feedparser.parse(urls[category])
    for entry in feedmap[category]['entries']:
        data = entry['summary']
        words = data.split()
        sentences.append((category, words))

# Build labelled feature sets and split them 50/50 into train and test sets.
featuresets = [(featureExtractor(words), category) for category, words in sentences]
random.shuffle(featuresets)
total = len(featuresets)
off = int(total / 2)
trainset = featuresets[off:]
testset = featuresets[:off]

classifier = nltk.NaiveBayesClassifier.train(trainset)

print(nltk.classify.accuracy(classifier, testset))
classifier.show_most_informative_features(5)

# Classify the titles of the first few NFL entries as a sanity check.
for (i, entry) in enumerate(feedmap['nfl']['entries']):
    if i < 4:
        features = featureExtractor(entry['title'].split())
        category = classifier.classify(features)
        print('{} -> {}'.format(category, entry['summary']))
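
# Illustrative featureExtractor output (added for clarity): stop words are
# dropped and every remaining token becomes a boolean "word(...)" feature.
#   featureExtractor("the Jets sign a new quarterback".split())
#     -> {'word(Jets)': True, 'word(sign)': True, 'word(new)': True, 'word(quarterback)': True}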
nilq/baby-python
python